Diffstat (limited to 'test')
40 files changed, 927 insertions, 868 deletions
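The recurring refactor throughout this changeset replaces hand-built NodeConn/NodeConnCB connection setup (and the per-test `connections` list) with the add_p2p_connection() helper, after which the connection is also reachable as self.nodes[i].p2p. The following is a minimal sketch of that before/after pattern, distilled from the hunks below; it is not part of the changeset itself, ExampleP2PTest is a hypothetical test class, and it assumes the test_framework package that these tests import.

    from test_framework.mininode import NetworkThread, NodeConnCB
    from test_framework.test_framework import BitcoinTestFramework

    class ExampleP2PTest(BitcoinTestFramework):
        def set_test_params(self):
            self.num_nodes = 1

        def run_test(self):
            # Old pattern (removed throughout this diff): build the connection by
            # hand and keep it in a local `connections` list:
            #   node0 = NodeConnCB()
            #   connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)]
            #   node0.add_connection(connections[0])
            #
            # New pattern (added throughout this diff): let the test node manage it.
            p2p0 = self.nodes[0].add_p2p_connection(NodeConnCB())

            NetworkThread().start()  # start up network handling in another thread
            self.nodes[0].p2p.wait_for_verack()  # P2P connection is now fully up

            # Either the returned handle (p2p0) or self.nodes[0].p2p can then be
            # used for send_message(), send_and_ping(), sync_with_ping(), etc.

    if __name__ == '__main__':
        ExampleP2PTest().main()
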
diff --git a/test/functional/assumevalid.py b/test/functional/assumevalid.py index 65685c48b7..36761d359e 100755 --- a/test/functional/assumevalid.py +++ b/test/functional/assumevalid.py @@ -39,13 +39,12 @@ from test_framework.mininode import (CBlockHeader, CTxIn, CTxOut, NetworkThread, - NodeConn, NodeConnCB, msg_block, msg_headers) from test_framework.script import (CScript, OP_TRUE) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import (p2p_port, assert_equal) +from test_framework.util import assert_equal class BaseNode(NodeConnCB): def send_header_for_blocks(self, new_blocks): @@ -65,13 +64,13 @@ class AssumeValidTest(BitcoinTestFramework): # signature so we can pass in the block hash as assumevalid. self.start_node(0) - def send_blocks_until_disconnected(self, node): + def send_blocks_until_disconnected(self, p2p_conn): """Keep sending blocks to the node until we're disconnected.""" for i in range(len(self.blocks)): - if not node.connection: + if not p2p_conn.connection: break try: - node.send_message(msg_block(self.blocks[i])) + p2p_conn.send_message(msg_block(self.blocks[i])) except IOError as e: assert str(e) == 'Not connected, no pushbuf' break @@ -97,13 +96,10 @@ class AssumeValidTest(BitcoinTestFramework): def run_test(self): # Connect to node0 - node0 = BaseNode() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) - node0.add_connection(connections[0]) + p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) NetworkThread().start() # Start up network handling in another thread - node0.wait_for_verack() + self.nodes[0].p2p.wait_for_verack() # Build the blockchain self.tip = int(self.nodes[0].getbestblockhash(), 16) @@ -165,37 +161,33 @@ class AssumeValidTest(BitcoinTestFramework): # Start node1 and node2 with assumevalid so they accept a block with a bad signature. self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)]) - node1 = BaseNode() # connects to node1 - connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1)) - node1.add_connection(connections[1]) - node1.wait_for_verack() + p2p1 = self.nodes[1].add_p2p_connection(BaseNode()) + p2p1.wait_for_verack() self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)]) - node2 = BaseNode() # connects to node2 - connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2)) - node2.add_connection(connections[2]) - node2.wait_for_verack() + p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) + p2p2.wait_for_verack() # send header lists to all three nodes - node0.send_header_for_blocks(self.blocks[0:2000]) - node0.send_header_for_blocks(self.blocks[2000:]) - node1.send_header_for_blocks(self.blocks[0:2000]) - node1.send_header_for_blocks(self.blocks[2000:]) - node2.send_header_for_blocks(self.blocks[0:200]) + p2p0.send_header_for_blocks(self.blocks[0:2000]) + p2p0.send_header_for_blocks(self.blocks[2000:]) + p2p1.send_header_for_blocks(self.blocks[0:2000]) + p2p1.send_header_for_blocks(self.blocks[2000:]) + p2p2.send_header_for_blocks(self.blocks[0:200]) # Send blocks to node0. Block 102 will be rejected. - self.send_blocks_until_disconnected(node0) + self.send_blocks_until_disconnected(p2p0) self.assert_blockchain_height(self.nodes[0], 101) # Send all blocks to node1. All blocks will be accepted. for i in range(2202): - node1.send_message(msg_block(self.blocks[i])) + p2p1.send_message(msg_block(self.blocks[i])) # Syncing 2200 blocks can take a while on slow systems. 
Give it plenty of time to sync. - node1.sync_with_ping(120) + p2p1.sync_with_ping(120) assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) # Send blocks to node2. Block 102 will be rejected. - self.send_blocks_until_disconnected(node2) + self.send_blocks_until_disconnected(p2p2) self.assert_blockchain_height(self.nodes[2], 101) if __name__ == '__main__': diff --git a/test/functional/bip65-cltv-p2p.py b/test/functional/bip65-cltv-p2p.py index 2cd6df6e37..3073324798 100755 --- a/test/functional/bip65-cltv-p2p.py +++ b/test/functional/bip65-cltv-p2p.py @@ -66,15 +66,12 @@ class BIP65Test(BitcoinTestFramework): self.setup_clean_chain = True def run_test(self): - node0 = NodeConnCB() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) - node0.add_connection(connections[0]) + self.nodes[0].add_p2p_connection(NodeConnCB()) NetworkThread().start() # Start up network handling in another thread # wait_for_verack ensures that the P2P connection is fully up. - node0.wait_for_verack() + self.nodes[0].p2p.wait_for_verack() self.log.info("Mining %d blocks", CLTV_HEIGHT - 2) self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) @@ -95,7 +92,7 @@ class BIP65Test(BitcoinTestFramework): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 4") @@ -104,15 +101,15 @@ class BIP65Test(BitcoinTestFramework): block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) block.nVersion = 3 block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: - assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE) - assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000003)') - assert_equal(node0.last_message["reject"].data, block.sha256) - del node0.last_message["reject"] + assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)') + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + del self.nodes[0].p2p.last_message["reject"] self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block") block.nVersion = 4 @@ -125,7 +122,7 @@ class BIP65Test(BitcoinTestFramework): # First we show that this tx is valid except for CLTV by getting it # accepted to the mempool (which we can achieve with # -promiscuousmempoolflags). - node0.send_and_ping(msg_tx(spendtx)) + self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) assert spendtx.hash in self.nodes[0].getrawmempool() # Now we verify that a block with this transaction is invalid. 
@@ -133,18 +130,18 @@ class BIP65Test(BitcoinTestFramework): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: - assert node0.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] - assert_equal(node0.last_message["reject"].data, block.sha256) - if node0.last_message["reject"].code == REJECT_INVALID: + assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: # Generic rejection when a block is invalid - assert_equal(node0.last_message["reject"].reason, b'block-validation-failed') + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') else: - assert b'Negative locktime' in node0.last_message["reject"].reason + assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1) @@ -155,7 +152,7 @@ class BIP65Test(BitcoinTestFramework): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) diff --git a/test/functional/bipdersig-p2p.py b/test/functional/bipdersig-p2p.py index c620d3e155..e5febde42d 100755 --- a/test/functional/bipdersig-p2p.py +++ b/test/functional/bipdersig-p2p.py @@ -54,14 +54,12 @@ class BIP66Test(BitcoinTestFramework): self.setup_clean_chain = True def run_test(self): - node0 = NodeConnCB() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) - node0.add_connection(connections[0]) + self.nodes[0].add_p2p_connection(NodeConnCB()) + NetworkThread().start() # Start up network handling in another thread # wait_for_verack ensures that the P2P connection is fully up. 
- node0.wait_for_verack() + self.nodes[0].p2p.wait_for_verack() self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2) self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2) @@ -83,7 +81,7 @@ class BIP66Test(BitcoinTestFramework): block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 3") @@ -93,15 +91,15 @@ class BIP66Test(BitcoinTestFramework): block.nVersion = 2 block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: - assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE) - assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000002)') - assert_equal(node0.last_message["reject"].data, block.sha256) - del node0.last_message["reject"] + assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)') + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + del self.nodes[0].p2p.last_message["reject"] self.log.info("Test that transactions with non-DER signatures cannot appear in a block") block.nVersion = 3 @@ -114,7 +112,7 @@ class BIP66Test(BitcoinTestFramework): # First we show that this tx is valid except for DERSIG by getting it # accepted to the mempool (which we can achieve with # -promiscuousmempoolflags). - node0.send_and_ping(msg_tx(spendtx)) + self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) assert spendtx.hash in self.nodes[0].getrawmempool() # Now we verify that a block with this transaction is invalid. @@ -123,23 +121,23 @@ class BIP66Test(BitcoinTestFramework): block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: # We can receive different reject messages depending on whether # bitcoind is running with multiple script check threads. If script # check threads are not in use, then transaction script validation # happens sequentially, and bitcoind produces more specific reject # reasons. 
- assert node0.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] - assert_equal(node0.last_message["reject"].data, block.sha256) - if node0.last_message["reject"].code == REJECT_INVALID: + assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: # Generic rejection when a block is invalid - assert_equal(node0.last_message["reject"].reason, b'block-validation-failed') + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') else: - assert b'Non-canonical DER signature' in node0.last_message["reject"].reason + assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted") block.vtx[1] = create_transaction(self.nodes[0], @@ -148,7 +146,7 @@ class BIP66Test(BitcoinTestFramework): block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) if __name__ == '__main__': diff --git a/test/functional/blockchain.py b/test/functional/blockchain.py index 4576cb036a..49fafbc9aa 100755 --- a/test/functional/blockchain.py +++ b/test/functional/blockchain.py @@ -5,6 +5,7 @@ """Test RPCs related to blockchainstate. Test the following RPCs: + - getblockchaininfo - gettxoutsetinfo - getdifficulty - getbestblockhash @@ -58,6 +59,7 @@ class BlockchainTest(BitcoinTestFramework): 'chainwork', 'difficulty', 'headers', + 'initialblockdownload', 'mediantime', 'pruned', 'size_on_disk', diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 87d73ad14a..ba40f33016 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -18,7 +18,6 @@ from test_framework.blocktools import (create_block, create_coinbase) from test_framework.mininode import ( CInv, NetworkThread, - NodeConn, NodeConnCB, mininode_lock, msg_block, @@ -28,7 +27,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, - p2p_port, wait_until, ) @@ -134,16 +132,13 @@ class ExampleTest(BitcoinTestFramework): """Main test logic""" # Create a P2P connection to one of the nodes - node0 = BaseNode() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) - node0.add_connection(connections[0]) + self.nodes[0].add_p2p_connection(BaseNode()) # Start up network handling in another thread. This needs to be called # after the P2P connections have been created. NetworkThread().start() # wait_for_verack ensures that the P2P connection is fully up. 
- node0.wait_for_verack() + self.nodes[0].p2p.wait_for_verack() # Generating a block on one of the nodes will get us out of IBD blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)] @@ -180,7 +175,7 @@ class ExampleTest(BitcoinTestFramework): block.solve() block_message = msg_block(block) # Send message is used to send a P2P message to the node over our NodeConn connection - node0.send_message(block_message) + self.nodes[0].p2p.send_message(block_message) self.tip = block.sha256 blocks.append(self.tip) self.block_time += 1 @@ -193,28 +188,26 @@ class ExampleTest(BitcoinTestFramework): connect_nodes(self.nodes[1], 2) self.log.info("Add P2P connection to node2") - node2 = BaseNode() - connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2)) - node2.add_connection(connections[1]) - node2.wait_for_verack() + self.nodes[2].add_p2p_connection(BaseNode()) + self.nodes[2].p2p.wait_for_verack() self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us") getdata_request = msg_getdata() for block in blocks: getdata_request.inv.append(CInv(2, block)) - node2.send_message(getdata_request) + self.nodes[2].p2p.send_message(getdata_request) # wait_until() will loop until a predicate condition is met. Use it to test properties of the # NodeConnCB objects. - wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5, lock=mininode_lock) + wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock) self.log.info("Check that each block was received only once") # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate. 
with mininode_lock: - for block in node2.block_receive_map.values(): + for block in self.nodes[2].p2p.block_receive_map.values(): assert_equal(block, 1) if __name__ == '__main__': diff --git a/test/functional/importmulti.py b/test/functional/importmulti.py index c1a42870ec..a691595f15 100755 --- a/test/functional/importmulti.py +++ b/test/functional/importmulti.py @@ -160,6 +160,18 @@ class ImportMultiTest (BitcoinTestFramework): assert_equal(address_assert['ismine'], True) assert_equal(address_assert['timestamp'], timestamp) + self.log.info("Should not import an address with private key if is already imported") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address['address']) ] + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -4) + assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script') + # Address + Private key + watchonly self.log.info("Should not import an address with private key and with watchonly") address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) diff --git a/test/functional/listsinceblock.py b/test/functional/listsinceblock.py index 6f428388ec..67e7744bf8 100755 --- a/test/functional/listsinceblock.py +++ b/test/functional/listsinceblock.py @@ -5,7 +5,7 @@ """Test the listsincelast RPC.""" from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal +from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error class ListSinceBlockTest (BitcoinTestFramework): def set_test_params(self): @@ -16,10 +16,43 @@ class ListSinceBlockTest (BitcoinTestFramework): self.nodes[2].generate(101) self.sync_all() + self.test_no_blockhash() + self.test_invalid_blockhash() self.test_reorg() self.test_double_spend() self.test_double_send() + def test_no_blockhash(self): + txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) + blockhash, = self.nodes[2].generate(1) + self.sync_all() + + txs = self.nodes[0].listtransactions() + assert_array_result(txs, {"txid": txid}, { + "category": "receive", + "amount": 1, + "blockhash": blockhash, + "confirmations": 1, + }) + assert_equal( + self.nodes[0].listsinceblock(), + {"lastblock": blockhash, + "removed": [], + "transactions": txs}) + assert_equal( + self.nodes[0].listsinceblock(""), + {"lastblock": blockhash, + "removed": [], + "transactions": txs}) + + def test_invalid_blockhash(self): + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, + "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, + "0000000000000000000000000000000000000000000000000000000000000000") + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, + "invalid-hex") + def test_reorg(self): ''' `listsinceblock` did not behave correctly when handed a block that was diff --git a/test/functional/maxuploadtarget.py b/test/functional/maxuploadtarget.py index 1f402798e7..9c92aa1dc0 100755 --- a/test/functional/maxuploadtarget.py +++ b/test/functional/maxuploadtarget.py @@ -49,19 +49,17 @@ class MaxUploadTest(BitcoinTestFramework): # Generate some old blocks self.nodes[0].generate(130) - # test_nodes[0] will only request old blocks - # test_nodes[1] will only request new blocks - # test_nodes[2] will test resetting the counters - 
test_nodes = [] - connections = [] + # p2p_conns[0] will only request old blocks + # p2p_conns[1] will only request new blocks + # p2p_conns[2] will test resetting the counters + p2p_conns = [] for i in range(3): - test_nodes.append(TestNode()) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i])) - test_nodes[i].add_connection(connections[i]) + p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode())) NetworkThread().start() # Start up network handling in another thread - [x.wait_for_verack() for x in test_nodes] + for p2pc in p2p_conns: + p2pc.wait_for_verack() # Test logic begins here @@ -83,7 +81,7 @@ class MaxUploadTest(BitcoinTestFramework): big_new_block = self.nodes[0].getbestblockhash() big_new_block = int(big_new_block, 16) - # test_nodes[0] will test what happens if we just keep requesting the + # p2p_conns[0] will test what happens if we just keep requesting the # the same big old block too many times (expect: disconnect) getdata_request = msg_getdata() @@ -97,34 +95,34 @@ class MaxUploadTest(BitcoinTestFramework): # 576MB will be reserved for relaying new blocks, so expect this to # succeed for ~235 tries. for i in range(success_count): - test_nodes[0].send_message(getdata_request) - test_nodes[0].sync_with_ping() - assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1) + p2p_conns[0].send_message(getdata_request) + p2p_conns[0].sync_with_ping() + assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1) assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). for i in range(3): - test_nodes[0].send_message(getdata_request) - test_nodes[0].wait_for_disconnect() + p2p_conns[0].send_message(getdata_request) + p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) self.log.info("Peer 0 disconnected after downloading old block too many times") - # Requesting the current block on test_nodes[1] should succeed indefinitely, + # Requesting the current block on p2p_conns[1] should succeed indefinitely, # even when over the max upload target. # We'll try 800 times getdata_request.inv = [CInv(2, big_new_block)] for i in range(800): - test_nodes[1].send_message(getdata_request) - test_nodes[1].sync_with_ping() - assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1) + p2p_conns[1].send_message(getdata_request) + p2p_conns[1].sync_with_ping() + assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1) self.log.info("Peer 1 able to repeatedly download new block") - # But if test_nodes[1] tries for an old block, it gets disconnected too. + # But if p2p_conns[1] tries for an old block, it gets disconnected too. getdata_request.inv = [CInv(2, big_old_block)] - test_nodes[1].send_message(getdata_request) - test_nodes[1].wait_for_disconnect() + p2p_conns[1].send_message(getdata_request) + p2p_conns[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info("Peer 1 disconnected after trying to download old block") @@ -132,39 +130,38 @@ class MaxUploadTest(BitcoinTestFramework): self.log.info("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, - # and test_nodes[2] should be able to retrieve the old block. + # and p2p_conns[2] should be able to retrieve the old block. 
self.nodes[0].setmocktime(int(time.time())) - test_nodes[2].sync_with_ping() - test_nodes[2].send_message(getdata_request) - test_nodes[2].sync_with_ping() - assert_equal(test_nodes[2].block_receive_map[big_old_block], 1) + p2p_conns[2].sync_with_ping() + p2p_conns[2].send_message(getdata_request) + p2p_conns[2].sync_with_ping() + assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) self.log.info("Peer 2 able to download old block") - [c.disconnect_node() for c in connections] + for i in range(3): + self.nodes[0].disconnect_p2p() #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 self.log.info("Restarting nodes with -whitelist=127.0.0.1") self.stop_node(0) self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) - #recreate/reconnect a test node - test_nodes = [TestNode()] - connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[0])] - test_nodes[0].add_connection(connections[0]) + # Reconnect to self.nodes[0] + self.nodes[0].add_p2p_connection(TestNode()) NetworkThread().start() # Start up network handling in another thread - test_nodes[0].wait_for_verack() + self.nodes[0].p2p.wait_for_verack() #retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(2, big_new_block)] for i in range(20): - test_nodes[0].send_message(getdata_request) - test_nodes[0].sync_with_ping() - assert_equal(test_nodes[0].block_receive_map[big_new_block], i+1) + self.nodes[0].p2p.send_message(getdata_request) + self.nodes[0].p2p.sync_with_ping() + assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1) getdata_request.inv = [CInv(2, big_old_block)] - test_nodes[0].send_and_ping(getdata_request) + self.nodes[0].p2p.send_and_ping(getdata_request) assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist self.log.info("Peer still connected after trying to download old block (whitelisted)") diff --git a/test/functional/minchainwork.py b/test/functional/minchainwork.py index c7579d2548..35cd7ad141 100755 --- a/test/functional/minchainwork.py +++ b/test/functional/minchainwork.py @@ -27,6 +27,7 @@ class MinimumChainWorkTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 + self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] self.node_min_work = [0, 101, 101] @@ -74,6 +75,13 @@ class MinimumChainWorkTest(BitcoinTestFramework): self.nodes[0].generate(1) self.log.info("Verifying nodes are all synced") + + # Because nodes in regtest are all manual connections (eg using + # addnode), node1 should not have disconnected node0. If not for that, + # we'd expect node1 to have disconnected node0 for serving an + # insufficient work chain, in which case we'd need to reconnect them to + # continue the test. 
+ self.sync_all() self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) diff --git a/test/functional/multiwallet.py b/test/functional/multiwallet.py index f55da76819..7a0fbce477 100755 --- a/test/functional/multiwallet.py +++ b/test/functional/multiwallet.py @@ -7,6 +7,7 @@ Verify that a bitcoind node can load multiple wallet files """ import os +import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error @@ -29,6 +30,11 @@ class MultiWalletTest(BitcoinTestFramework): os.mkdir(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w11')) self.assert_start_raises_init_error(0, ['-wallet=w11'], 'Error loading wallet w11. -wallet filename must be a regular file.') + # should not initialize if one wallet is a copy of another + shutil.copyfile(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w2'), + os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w22')) + self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'], 'duplicates fileid') + # should not initialize if wallet file is a symlink os.symlink(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w1'), os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w12')) self.assert_start_raises_init_error(0, ['-wallet=w12'], 'Error loading wallet w12. -wallet filename must be a regular file.') diff --git a/test/functional/nulldummy.py b/test/functional/nulldummy.py index 91c4550653..7bc7c168f4 100755 --- a/test/functional/nulldummy.py +++ b/test/functional/nulldummy.py @@ -40,7 +40,9 @@ class NULLDUMMYTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True - self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']] + # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through + # normal segwit activation here (and don't use the default always-on behaviour). + self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999']] def run_test(self): self.address = self.nodes[0].getnewaddress() diff --git a/test/functional/p2p-acceptblock.py b/test/functional/p2p-acceptblock.py index 5b6429b410..fbe5a78029 100755 --- a/test/functional/p2p-acceptblock.py +++ b/test/functional/p2p-acceptblock.py @@ -4,37 +4,32 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of unrequested blocks. -Since behavior differs when receiving unrequested blocks from whitelisted peers -versus non-whitelisted peers, this tests the behavior of both (effectively two -separate tests running in parallel). +Setup: two nodes, node0+node1, not connected to each other. Node1 will have +nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. -Setup: two nodes, node0 and node1, not connected to each other. Node0 does not -whitelist localhost, but node1 does. They will each be on their own chain for -this test. - -We have one NodeConn connection to each, test_node and white_node respectively. +We have one NodeConn connection to node0 called test_node, and one to node1 +called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. - The tip should advance. + The tip should advance for node0, but node1 should skip processing due to + nMinimumChainWork. + +Node1 is unused in tests 3-7: -3. 
Mine a block that forks the previous block, and deliver to each node from - corresponding peer. - Node0 should not process this block (just accept the header), because it is - unrequested and doesn't have more work than the tip. - Node1 should process because this is coming from a whitelisted peer. +3. Mine a block that forks from the genesis block, and deliver to test_node. + Node0 should not process this block (just accept the header), because it + is unrequested and doesn't have more or equal work to the tip. -4. Send another block that builds on the forking block. - Node0 should process this block but be stuck on the shorter chain, because - it's missing an intermediate block. - Node1 should reorg to this longer chain. +4a,b. Send another two blocks that build on the forking block. + Node0 should process the second block but be stuck on the shorter chain, + because it's missing an intermediate block. -4b.Send 288 more blocks on the longer chain. +4c.Send 288 more blocks on the longer chain (the number of blocks ahead + we currently store). Node0 should process all but the last block (too far ahead in height). - Send all headers to Node1, and then send the last block in that chain. - Node1 should accept the block because it's coming from a whitelisted peer. 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on @@ -46,13 +41,21 @@ The test: 7. Send Node0 the missing block again. Node0 should process and the tip should advance. + +8. Create a fork which is invalid at a height longer than the current chain + (ie to which the node will try to reorg) but which has headers built on top + of the invalid block. Check that we get disconnected if we send more headers + on the chain the node now knows to be invalid. + +9. Test Node1 is able to sync when connected to node0 (which should have sufficient + work on its chain). """ from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time -from test_framework.blocktools import create_block, create_coinbase +from test_framework.blocktools import create_block, create_coinbase, create_transaction class AcceptBlockTest(BitcoinTestFramework): def add_options(self, parser): @@ -63,37 +66,35 @@ class AcceptBlockTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 - self.extra_args = [[], ["-whitelist=127.0.0.1"]] + self.extra_args = [[], ["-minimumchainwork=0x10"]] def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. + # Node2 will be used for non-whitelisted peers to test the interaction + # with nMinimumChainWork. self.setup_nodes() def run_test(self): # Setup the p2p connections and start up the network thread. 
- test_node = NodeConnCB() # connects to node0 (not whitelisted) - white_node = NodeConnCB() # connects to node1 (whitelisted) - - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) - connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node)) - test_node.add_connection(connections[0]) - white_node.add_connection(connections[1]) + # test_node connects to node0 (not whitelisted) + test_node = self.nodes[0].add_p2p_connection(NodeConnCB()) + # min_work_node connects to node1 + min_work_node = self.nodes[1].add_p2p_connection(NodeConnCB()) NetworkThread().start() # Start up network handling in another thread # Test logic begins here test_node.wait_for_verack() - white_node.wait_for_verack() + min_work_node.wait_for_verack() - # 1. Have both nodes mine a block (leave IBD) + # 1. Have nodes mine a block (leave IBD) [ n.generate(1) for n in self.nodes ] tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ] # 2. Send one block that builds on each tip. - # This should be accepted. + # This should be accepted by node0 blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): @@ -101,95 +102,116 @@ class AcceptBlockTest(BitcoinTestFramework): blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) - white_node.send_message(msg_block(blocks_h2[1])) + min_work_node.send_message(msg_block(blocks_h2[1])) - [ x.sync_with_ping() for x in [test_node, white_node] ] + for x in [test_node, min_work_node]: + x.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) - assert_equal(self.nodes[1].getblockcount(), 2) - self.log.info("First height 2 block accepted by both nodes") + assert_equal(self.nodes[1].getblockcount(), 1) + self.log.info("First height 2 block accepted by node0; correctly rejected by node1") - # 3. Send another block that builds on the original tip. - blocks_h2f = [] # Blocks at height 2 that fork off the main chain - for i in range(2): - blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1)) - blocks_h2f[i].solve() - test_node.send_message(msg_block(blocks_h2f[0])) - white_node.send_message(msg_block(blocks_h2f[1])) + # 3. Send another block that builds on genesis. + block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time) + block_time += 1 + block_h1f.solve() + test_node.send_message(msg_block(block_h1f)) - [ x.sync_with_ping() for x in [test_node, white_node] ] + test_node.sync_with_ping() + tip_entry_found = False for x in self.nodes[0].getchaintips(): - if x['hash'] == blocks_h2f[0].hash: + if x['hash'] == block_h1f.hash: assert_equal(x['status'], "headers-only") + tip_entry_found = True + assert(tip_entry_found) + assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash) + + # 4. Send another two block that build on the fork. + block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time) + block_time += 1 + block_h2f.solve() + test_node.send_message(msg_block(block_h2f)) - for x in self.nodes[1].getchaintips(): - if x['hash'] == blocks_h2f[1].hash: - assert_equal(x['status'], "valid-headers") + test_node.sync_with_ping() + # Since the earlier block was not processed by node, the new block + # can't be fully validated. 
+ tip_entry_found = False + for x in self.nodes[0].getchaintips(): + if x['hash'] == block_h2f.hash: + assert_equal(x['status'], "headers-only") + tip_entry_found = True + assert(tip_entry_found) - self.log.info("Second height 2 block accepted only from whitelisted peer") + # But this block should be accepted by node since it has equal work. + self.nodes[0].getblock(block_h2f.hash) + self.log.info("Second height 2 block accepted, but not reorg'ed to") - # 4. Now send another block that builds on the forking chain. - blocks_h3 = [] - for i in range(2): - blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1)) - blocks_h3[i].solve() - test_node.send_message(msg_block(blocks_h3[0])) - white_node.send_message(msg_block(blocks_h3[1])) + # 4b. Now send another block that builds on the forking chain. + block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1) + block_h3.solve() + test_node.send_message(msg_block(block_h3)) - [ x.sync_with_ping() for x in [test_node, white_node] ] - # Since the earlier block was not processed by node0, the new block + test_node.sync_with_ping() + # Since the earlier block was not processed by node, the new block # can't be fully validated. + tip_entry_found = False for x in self.nodes[0].getchaintips(): - if x['hash'] == blocks_h3[0].hash: + if x['hash'] == block_h3.hash: assert_equal(x['status'], "headers-only") + tip_entry_found = True + assert(tip_entry_found) + self.nodes[0].getblock(block_h3.hash) + + # But this block should be accepted by node since it has more work. + self.nodes[0].getblock(block_h3.hash) + self.log.info("Unrequested more-work block accepted") + + # 4c. Now mine 288 more blocks and deliver; all should be processed but + # the last (height-too-high) on node (as long as its not missing any headers) + tip = block_h3 + all_blocks = [] + for i in range(288): + next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1) + next_block.solve() + all_blocks.append(next_block) + tip = next_block + + # Now send the block at height 5 and check that it wasn't accepted (missing header) + test_node.send_message(msg_block(all_blocks[1])) + test_node.sync_with_ping() + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash) + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash) - # But this block should be accepted by node0 since it has more work. - self.nodes[0].getblock(blocks_h3[0].hash) - self.log.info("Unrequested more-work block accepted from non-whitelisted peer") + # The block at height 5 should be accepted if we provide the missing header, though + headers_message = msg_headers() + headers_message.headers.append(CBlockHeader(all_blocks[0])) + test_node.send_message(headers_message) + test_node.send_message(msg_block(all_blocks[1])) + test_node.sync_with_ping() + self.nodes[0].getblock(all_blocks[1].hash) - # Node1 should have accepted and reorged. - assert_equal(self.nodes[1].getblockcount(), 3) - self.log.info("Successfully reorged to length 3 chain from whitelisted peer") + # Now send the blocks in all_blocks + for i in range(288): + test_node.send_message(msg_block(all_blocks[i])) + test_node.sync_with_ping() - # 4b. Now mine 288 more blocks and deliver; all should be processed but - # the last (height-too-high) on node0. Node1 should process the tip if - # we give it the headers chain leading to the tip. 
- tips = blocks_h3 - headers_message = msg_headers() - all_blocks = [] # node0's blocks - for j in range(2): - for i in range(288): - next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1) - next_block.solve() - if j==0: - test_node.send_message(msg_block(next_block)) - all_blocks.append(next_block) - else: - headers_message.headers.append(CBlockHeader(next_block)) - tips[j] = next_block - - time.sleep(2) # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) - headers_message.headers.pop() # Ensure the last block is unrequested - white_node.send_message(headers_message) # Send headers leading to tip - white_node.send_message(msg_block(tips[1])) # Now deliver the tip - white_node.sync_with_ping() - self.nodes[1].getblock(tips[1].hash) - self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer") - # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). - test_node.send_message(msg_block(blocks_h2f[0])) - # Here, if the sleep is too short, the test could falsely succeed (if the - # node hasn't processed the block by the time the sleep returns, and then - # the node processes it and incorrectly advances the tip). - # But this would be caught later on, when we verify that an inv triggers - # a getdata request for this block. + # The node should have requested the blocks at some point, so + # disconnect/reconnect first + + self.nodes[0].disconnect_p2p() + test_node = self.nodes[0].add_p2p_connection(NodeConnCB()) + + test_node.wait_for_verack() + test_node.send_message(msg_block(block_h1f)) + test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) self.log.info("Unrequested block that would complete more-work chain was ignored") @@ -200,24 +222,98 @@ class AcceptBlockTest(BitcoinTestFramework): with mininode_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) - test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) + test_node.send_message(msg_inv([CInv(2, block_h3.sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block - assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) + assert_equal(getdata.inv[0].hash, block_h1f.sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) - test_node.send_message(msg_block(blocks_h2f[0])) + test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) + self.nodes[0].getblock(all_blocks[286].hash) + assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) + assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) self.log.info("Successfully reorged to longer chain from non-whitelisted peer") - [ c.disconnect_node() for c in connections ] + # 8. 
Create a chain which is invalid at a height longer than the + # current chain, but which has more blocks on top of that + block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1) + block_289f.solve() + block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1) + block_290f.solve() + block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1) + # block_291 spends a coinbase below maturity! + block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1)) + block_291.hashMerkleRoot = block_291.calc_merkle_root() + block_291.solve() + block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1) + block_292.solve() + + # Now send all the headers on the chain and enough blocks to trigger reorg + headers_message = msg_headers() + headers_message.headers.append(CBlockHeader(block_289f)) + headers_message.headers.append(CBlockHeader(block_290f)) + headers_message.headers.append(CBlockHeader(block_291)) + headers_message.headers.append(CBlockHeader(block_292)) + test_node.send_message(headers_message) + + test_node.sync_with_ping() + tip_entry_found = False + for x in self.nodes[0].getchaintips(): + if x['hash'] == block_292.hash: + assert_equal(x['status'], "headers-only") + tip_entry_found = True + assert(tip_entry_found) + assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) + + test_node.send_message(msg_block(block_289f)) + test_node.send_message(msg_block(block_290f)) + + test_node.sync_with_ping() + self.nodes[0].getblock(block_289f.hash) + self.nodes[0].getblock(block_290f.hash) + + test_node.send_message(msg_block(block_291)) + + # At this point we've sent an obviously-bogus block, wait for full processing + # without assuming whether we will be disconnected or not + try: + # Only wait a short while so the test doesn't take forever if we do get + # disconnected + test_node.sync_with_ping(timeout=1) + except AssertionError: + test_node.wait_for_disconnect() + + self.nodes[0].disconnect_p2p() + test_node = self.nodes[0].add_p2p_connection(NodeConnCB()) + + NetworkThread().start() # Start up network handling in another thread + test_node.wait_for_verack() + + # We should have failed reorg and switched back to 290 (but have block 291) + assert_equal(self.nodes[0].getblockcount(), 290) + assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) + assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1) + + # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected + block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1) + block_293.solve() + headers_message = msg_headers() + headers_message.headers.append(CBlockHeader(block_293)) + test_node.send_message(headers_message) + test_node.wait_for_disconnect() + + # 9. 
Connect node1 to node0 and ensure it is able to sync + connect_nodes(self.nodes[0], 1) + sync_blocks([self.nodes[0], self.nodes[1]]) + self.log.info("Successfully synced nodes 1 and 0") if __name__ == '__main__': AcceptBlockTest().main() diff --git a/test/functional/p2p-compactblocks.py b/test/functional/p2p-compactblocks.py index 94513d3f43..d2c4d39305 100755 --- a/test/functional/p2p-compactblocks.py +++ b/test/functional/p2p-compactblocks.py @@ -93,7 +93,9 @@ class CompactBlocksTest(BitcoinTestFramework): self.setup_clean_chain = True # Node0 = pre-segwit, node1 = segwit-aware self.num_nodes = 2 - self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]] + # This test was written assuming SegWit is activated using BIP9 at height 432 (3x confirmation window). + # TODO: Rewrite this test to support SegWit being always active. + self.extra_args = [["-vbparams=segwit:0:0"], ["-vbparams=segwit:0:999999999999", "-txindex"]] self.utxos = [] def build_block_on_tip(self, node, segwit=False): @@ -786,23 +788,12 @@ class CompactBlocksTest(BitcoinTestFramework): def run_test(self): # Setup the p2p connections and start up the network thread. - self.test_node = TestNode() - self.segwit_node = TestNode() - self.old_node = TestNode() # version 1 peer <--> segwit node - - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node)) - connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], - self.segwit_node, services=NODE_NETWORK|NODE_WITNESS)) - connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], - self.old_node, services=NODE_NETWORK)) - self.test_node.add_connection(connections[0]) - self.segwit_node.add_connection(connections[1]) - self.old_node.add_connection(connections[2]) + self.test_node = self.nodes[0].add_p2p_connection(TestNode()) + self.segwit_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) + self.old_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK) NetworkThread().start() # Start up network handling in another thread - # Test logic begins here self.test_node.wait_for_verack() # We will need UTXOs to construct transactions in later tests. diff --git a/test/functional/p2p-feefilter.py b/test/functional/p2p-feefilter.py index 8c92365ced..624278df40 100755 --- a/test/functional/p2p-feefilter.py +++ b/test/functional/p2p-feefilter.py @@ -48,25 +48,23 @@ class FeeFilterTest(BitcoinTestFramework): sync_blocks(self.nodes) # Setup the p2p connections and start up the network thread. 
- test_node = TestNode() - connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node) - test_node.add_connection(connection) + self.nodes[0].add_p2p_connection(TestNode()) NetworkThread().start() - test_node.wait_for_verack() + self.nodes[0].p2p.wait_for_verack() # Test that invs are received for all txs at feerate of 20 sat/byte node1.settxfee(Decimal("0.00020000")) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, test_node)) - test_node.clear_invs() + assert(allInvsMatch(txids, self.nodes[0].p2p)) + self.nodes[0].p2p.clear_invs() # Set a filter of 15 sat/byte - test_node.send_and_ping(msg_feefilter(15000)) + self.nodes[0].p2p.send_and_ping(msg_feefilter(15000)) # Test that txs are still being received (paying 20 sat/byte) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, test_node)) - test_node.clear_invs() + assert(allInvsMatch(txids, self.nodes[0].p2p)) + self.nodes[0].p2p.clear_invs() # Change tx fee rate to 10 sat/byte and test they are no longer received node1.settxfee(Decimal("0.00010000")) @@ -82,14 +80,14 @@ class FeeFilterTest(BitcoinTestFramework): # as well. node0.settxfee(Decimal("0.00020000")) txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] - assert(allInvsMatch(txids, test_node)) - test_node.clear_invs() + assert(allInvsMatch(txids, self.nodes[0].p2p)) + self.nodes[0].p2p.clear_invs() # Remove fee filter and check that txs are received again - test_node.send_and_ping(msg_feefilter(0)) + self.nodes[0].p2p.send_and_ping(msg_feefilter(0)) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, test_node)) - test_node.clear_invs() + assert(allInvsMatch(txids, self.nodes[0].p2p)) + self.nodes[0].p2p.clear_invs() if __name__ == '__main__': FeeFilterTest().main() diff --git a/test/functional/p2p-fingerprint.py b/test/functional/p2p-fingerprint.py index fe60c6cd46..4b6446fc5b 100755 --- a/test/functional/p2p-fingerprint.py +++ b/test/functional/p2p-fingerprint.py @@ -14,7 +14,6 @@ from test_framework.blocktools import (create_block, create_coinbase) from test_framework.mininode import ( CInv, NetworkThread, - NodeConn, NodeConnCB, msg_headers, msg_block, @@ -77,11 +76,7 @@ class P2PFingerprintTest(BitcoinTestFramework): # This does not currently test that stale blocks timestamped within the # last month but that have over a month's worth of work are also withheld. 
def run_test(self): - node0 = NodeConnCB() - - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) - node0.add_connection(connections[0]) + node0 = self.nodes[0].add_p2p_connection(NodeConnCB()) NetworkThread().start() node0.wait_for_verack() diff --git a/test/functional/p2p-fullblocktest.py b/test/functional/p2p-fullblocktest.py index 1d969fc7c1..f19b845a32 100755 --- a/test/functional/p2p-fullblocktest.py +++ b/test/functional/p2p-fullblocktest.py @@ -20,7 +20,7 @@ from test_framework.key import CECKey from test_framework.script import * import struct -class PreviousSpendableOutput(object): +class PreviousSpendableOutput(): def __init__(self, tx = CTransaction(), n = -1): self.tx = tx self.n = n # the output we're spending diff --git a/test/functional/p2p-leaktests.py b/test/functional/p2p-leaktests.py index 1dc8f72cd6..a6e47b5df6 100755 --- a/test/functional/p2p-leaktests.py +++ b/test/functional/p2p-leaktests.py @@ -39,7 +39,6 @@ class CLazyNode(NodeConnCB): def on_reject(self, conn, message): self.bad_message(message) def on_inv(self, conn, message): self.bad_message(message) def on_addr(self, conn, message): self.bad_message(message) - def on_alert(self, conn, message): self.bad_message(message) def on_getdata(self, conn, message): self.bad_message(message) def on_getblocks(self, conn, message): self.bad_message(message) def on_tx(self, conn, message): self.bad_message(message) @@ -97,24 +96,13 @@ class P2PLeakTest(BitcoinTestFramework): self.extra_args = [['-banscore='+str(banscore)]] def run_test(self): - no_version_bannode = CNodeNoVersionBan() - no_version_idlenode = CNodeNoVersionIdle() - no_verack_idlenode = CNodeNoVerackIdle() - unsupported_service_bit5_node = CLazyNode() - unsupported_service_bit7_node = CLazyNode() - self.nodes[0].setmocktime(1501545600) # August 1st 2017 - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], unsupported_service_bit5_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], unsupported_service_bit7_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)) - no_version_bannode.add_connection(connections[0]) - no_version_idlenode.add_connection(connections[1]) - no_verack_idlenode.add_connection(connections[2]) - unsupported_service_bit5_node.add_connection(connections[3]) - unsupported_service_bit7_node.add_connection(connections[4]) + + no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False) + no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False) + no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle()) + unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5) + unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7) NetworkThread().start() # Start up network handling in another thread @@ -137,7 +125,8 @@ class P2PLeakTest(BitcoinTestFramework): assert not unsupported_service_bit5_node.connected assert not 
unsupported_service_bit7_node.connected - [conn.disconnect_node() for conn in connections] + for _ in range(5): + self.nodes[0].disconnect_p2p() # Wait until all connections are closed wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0) @@ -152,13 +141,8 @@ class P2PLeakTest(BitcoinTestFramework): self.log.info("Service bits 5 and 7 are allowed after August 1st 2018") self.nodes[0].setmocktime(1533168000) # August 2nd 2018 - allowed_service_bit5_node = NodeConnCB() - allowed_service_bit7_node = NodeConnCB() - - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], allowed_service_bit5_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], allowed_service_bit7_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)) - allowed_service_bit5_node.add_connection(connections[5]) - allowed_service_bit7_node.add_connection(connections[6]) + allowed_service_bit5_node = self.nodes[0].add_p2p_connection(NodeConnCB(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5) + allowed_service_bit7_node = self.nodes[0].add_p2p_connection(NodeConnCB(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7) NetworkThread().start() # Network thread stopped when all previous NodeConnCBs disconnected. Restart it diff --git a/test/functional/p2p-mempool.py b/test/functional/p2p-mempool.py index 40fcde2605..be467c4223 100755 --- a/test/functional/p2p-mempool.py +++ b/test/functional/p2p-mempool.py @@ -19,16 +19,14 @@ class P2PMempoolTests(BitcoinTestFramework): self.extra_args = [["-peerbloomfilters=0"]] def run_test(self): - #connect a mininode - aTestNode = NodeConnCB() - node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode) - aTestNode.add_connection(node) + # Add a p2p connection + self.nodes[0].add_p2p_connection(NodeConnCB()) NetworkThread().start() - aTestNode.wait_for_verack() + self.nodes[0].p2p.wait_for_verack() #request mempool - aTestNode.send_message(msg_mempool()) - aTestNode.wait_for_disconnect() + self.nodes[0].p2p.send_message(msg_mempool()) + self.nodes[0].p2p.wait_for_disconnect() #mininode must be disconnected at this point assert_equal(len(self.nodes[0].getpeerinfo()), 0) diff --git a/test/functional/p2p-segwit.py b/test/functional/p2p-segwit.py index a9ef47559b..22da7f2db1 100755 --- a/test/functional/p2p-segwit.py +++ b/test/functional/p2p-segwit.py @@ -89,7 +89,7 @@ class TestNode(NodeConnCB): assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted) # Used to keep track of anyone-can-spend outputs that we can use in the tests -class UTXO(object): +class UTXO(): def __init__(self, sha256, n, nValue): self.sha256 = sha256 self.n = n @@ -111,7 +111,8 @@ class SegWitTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 - self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]] + # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. 
+ self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]] def setup_network(self): self.setup_nodes() @@ -1493,7 +1494,7 @@ class SegWitTest(BitcoinTestFramework): # Restart with the new binary self.stop_node(node_id) - self.start_node(node_id, extra_args=[]) + self.start_node(node_id, extra_args=["-vbparams=segwit:0:999999999999"]) connect_nodes(self.nodes[0], node_id) sync_blocks(self.nodes) @@ -1867,19 +1868,12 @@ class SegWitTest(BitcoinTestFramework): def run_test(self): # Setup the p2p connections and start up the network thread. - self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK - self.old_node = TestNode() # only NODE_NETWORK - self.std_node = TestNode() # for testing node1 (fRequireStandard=true) - - self.p2p_connections = [self.test_node, self.old_node] - - self.connections = [] - self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS)) - self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK)) - self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS)) - self.test_node.add_connection(self.connections[0]) - self.old_node.add_connection(self.connections[1]) - self.std_node.add_connection(self.connections[2]) + # self.test_node sets NODE_WITNESS|NODE_NETWORK + self.test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) + # self.old_node sets only NODE_NETWORK + self.old_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK) + # self.std_node is for testing node1 (fRequireStandard=true) + self.std_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) NetworkThread().start() # Start up network handling in another thread diff --git a/test/functional/p2p-timeouts.py b/test/functional/p2p-timeouts.py index 51d4769efc..14a3bf48fb 100755 --- a/test/functional/p2p-timeouts.py +++ b/test/functional/p2p-timeouts.py @@ -39,46 +39,37 @@ class TimeoutsTest(BitcoinTestFramework): def run_test(self): # Setup the p2p connections and start up the network thread. 
- self.no_verack_node = TestNode() # never send verack - self.no_version_node = TestNode() # never send version (just ping) - self.no_send_node = TestNode() # never send anything - - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False)) - self.no_verack_node.add_connection(connections[0]) - self.no_version_node.add_connection(connections[1]) - self.no_send_node.add_connection(connections[2]) + no_verack_node = self.nodes[0].add_p2p_connection(TestNode()) + no_version_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False) + no_send_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False) NetworkThread().start() # Start up network handling in another thread sleep(1) - assert(self.no_verack_node.connected) - assert(self.no_version_node.connected) - assert(self.no_send_node.connected) + assert no_verack_node.connected + assert no_version_node.connected + assert no_send_node.connected - ping_msg = msg_ping() - connections[0].send_message(ping_msg) - connections[1].send_message(ping_msg) + no_verack_node.send_message(msg_ping()) + no_version_node.send_message(msg_ping()) sleep(30) - assert "version" in self.no_verack_node.last_message + assert "version" in no_verack_node.last_message - assert(self.no_verack_node.connected) - assert(self.no_version_node.connected) - assert(self.no_send_node.connected) + assert no_verack_node.connected + assert no_version_node.connected + assert no_send_node.connected - connections[0].send_message(ping_msg) - connections[1].send_message(ping_msg) + no_verack_node.send_message(msg_ping()) + no_version_node.send_message(msg_ping()) sleep(31) - assert(not self.no_verack_node.connected) - assert(not self.no_version_node.connected) - assert(not self.no_send_node.connected) + assert not no_verack_node.connected + assert not no_version_node.connected + assert not no_send_node.connected if __name__ == '__main__': TimeoutsTest().main() diff --git a/test/functional/p2p-versionbits-warning.py b/test/functional/p2p-versionbits-warning.py index f9bef2580a..464ca5a312 100755 --- a/test/functional/p2p-versionbits-warning.py +++ b/test/functional/p2p-versionbits-warning.py @@ -64,16 +64,12 @@ class VersionBitsWarningTest(BitcoinTestFramework): def run_test(self): # Setup the p2p connection and start up the network thread. - test_node = TestNode() - - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) - test_node.add_connection(connections[0]) + self.nodes[0].add_p2p_connection(TestNode()) NetworkThread().start() # Start up network handling in another thread # Test logic begins here - test_node.wait_for_verack() + self.nodes[0].p2p.wait_for_verack() # 1. Have the node mine one period worth of blocks self.nodes[0].generate(VB_PERIOD) @@ -81,7 +77,7 @@ class VersionBitsWarningTest(BitcoinTestFramework): # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD # blocks signaling some unknown bit. 
nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT) - self.send_blocks_with_version(test_node, VB_THRESHOLD-1, nVersion) + self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD-1, nVersion) # Fill rest of period with regular version blocks self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1) @@ -92,7 +88,7 @@ class VersionBitsWarningTest(BitcoinTestFramework): # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling # some unknown bit - self.send_blocks_with_version(test_node, VB_THRESHOLD, nVersion) + self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD, nVersion) self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD) # Might not get a versionbits-related alert yet, as we should # have gotten a different alert due to more than 51/100 blocks diff --git a/test/functional/receivedby.py b/test/functional/receivedby.py index db6fc86b82..97da19546f 100755 --- a/test/functional/receivedby.py +++ b/test/functional/receivedby.py @@ -3,97 +3,83 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listreceivedbyaddress RPC.""" +from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -def get_sub_array_from_array(object_array, to_match): - ''' - Finds and returns a sub array from an array of arrays. - to_match should be a unique idetifier of a sub array - ''' - for item in object_array: - all_match = True - for key,value in to_match.items(): - if item[key] != value: - all_match = False - if not all_match: - continue - return item - return [] +from test_framework.util import (assert_array_result, + assert_equal, + assert_raises_rpc_error, + ) class ReceivedByTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 - self.enable_mocktime() def run_test(self): - ''' - listreceivedbyaddress Test - ''' + # Generate block to get out of IBD + self.nodes[0].generate(1) + + self.log.info("listreceivedbyaddress Test") + # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() - #Check not listed in listreceivedbyaddress because has 0 confirmations + # Check not listed in listreceivedbyaddress because has 0 confirmations assert_array_result(self.nodes[1].listreceivedbyaddress(), - {"address":addr}, - { }, - True) - #Bury Tx under 10 block so it will be returned by listreceivedbyaddress + {"address": addr}, + {}, + True) + # Bury Tx under 10 block so it will be returned by listreceivedbyaddress self.nodes[1].generate(10) self.sync_all() assert_array_result(self.nodes[1].listreceivedbyaddress(), - {"address":addr}, - {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]}) - #With min confidence < 10 + {"address": addr}, + {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) + # With min confidence < 10 assert_array_result(self.nodes[1].listreceivedbyaddress(5), - {"address":addr}, - {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]}) - #With min confidence > 10, should not find Tx - assert_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True) + {"address": addr}, + {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) + # With min confidence > 10, should not find Tx + assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, 
{}, True) - #Empty Tx + # Empty Tx addr = self.nodes[1].getnewaddress() - assert_array_result(self.nodes[1].listreceivedbyaddress(0,True), - {"address":addr}, - {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]}) + assert_array_result(self.nodes[1].listreceivedbyaddress(0, True), + {"address": addr}, + {"address": addr, "account": "", "amount": 0, "confirmations": 0, "txids": []}) + + self.log.info("getreceivedbyaddress Test") - ''' - getreceivedbyaddress Test - ''' # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() - #Check balance is 0 because of 0 confirmations + # Check balance is 0 because of 0 confirmations balance = self.nodes[1].getreceivedbyaddress(addr) - if balance != Decimal("0.0"): - raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance)) + assert_equal(balance, Decimal("0.0")) - #Check balance is 0.1 - balance = self.nodes[1].getreceivedbyaddress(addr,0) - if balance != Decimal("0.1"): - raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance)) + # Check balance is 0.1 + balance = self.nodes[1].getreceivedbyaddress(addr, 0) + assert_equal(balance, Decimal("0.1")) - #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress + # Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress self.nodes[1].generate(10) self.sync_all() balance = self.nodes[1].getreceivedbyaddress(addr) - if balance != Decimal("0.1"): - raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance)) + assert_equal(balance, Decimal("0.1")) + + # Trying to getreceivedby for an address the wallet doesn't own should return an error + assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr) + + self.log.info("listreceivedbyaccount + getreceivedbyaccount Test") - ''' - listreceivedbyaccount + getreceivedbyaccount Test - ''' - #set pre-state + # set pre-state addrArr = self.nodes[1].getnewaddress() account = self.nodes[1].getaccount(addrArr) - received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account}) - if len(received_by_account_json) == 0: - raise AssertionError("No accounts found in node") + received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount() if r["account"] == account][0] balance_by_account = self.nodes[1].getreceivedbyaccount(account) txid = self.nodes[0].sendtoaddress(addr, 0.1) @@ -101,40 +87,34 @@ class ReceivedByTest(BitcoinTestFramework): # listreceivedbyaccount should return received_by_account_json because of 0 confirmations assert_array_result(self.nodes[1].listreceivedbyaccount(), - {"account":account}, - received_by_account_json) + {"account": account}, + received_by_account_json) # getreceivedbyaddress should return same balance because of 0 confirmations balance = self.nodes[1].getreceivedbyaccount(account) - if balance != balance_by_account: - raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance)) + assert_equal(balance, balance_by_account) self.nodes[1].generate(10) self.sync_all() # listreceivedbyaccount should return updated account balance assert_array_result(self.nodes[1].listreceivedbyaccount(), - {"account":account}, - {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))}) + {"account": account}, + {"account": received_by_account_json["account"], 
"amount": (received_by_account_json["amount"] + Decimal("0.1"))}) # getreceivedbyaddress should return updates balance balance = self.nodes[1].getreceivedbyaccount(account) - if balance != balance_by_account + Decimal("0.1"): - raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance)) + assert_equal(balance, balance_by_account + Decimal("0.1")) - #Create a new account named "mynewaccount" that has a 0 balance + # Create a new account named "mynewaccount" that has a 0 balance self.nodes[1].getaccountaddress("mynewaccount") - received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"}) - if len(received_by_account_json) == 0: - raise AssertionError("No accounts found in node") + received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount(0, True) if r["account"] == "mynewaccount"][0] # Test includeempty of listreceivedbyaccount - if received_by_account_json["amount"] != Decimal("0.0"): - raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"])) + assert_equal(received_by_account_json["amount"], Decimal("0.0")) # Test getreceivedbyaccount for 0 amount accounts balance = self.nodes[1].getreceivedbyaccount("mynewaccount") - if balance != Decimal("0.0"): - raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance)) + assert_equal(balance, Decimal("0.0")) if __name__ == '__main__': ReceivedByTest().main() diff --git a/test/functional/replace-by-fee.py b/test/functional/replace-by-fee.py index 269d57775c..815e964848 100755 --- a/test/functional/replace-by-fee.py +++ b/test/functional/replace-by-fee.py @@ -72,8 +72,14 @@ class ReplaceByFeeTest(BitcoinTestFramework): ["-mempoolreplacement=0"]] def run_test(self): + # Leave IBD + self.nodes[0].generate(1) + make_utxo(self.nodes[0], 1*COIN) + # Ensure nodes are synced + self.sync_all() + self.log.info("Running test simple doublespend...") self.test_simple_doublespend() @@ -110,13 +116,18 @@ class ReplaceByFeeTest(BitcoinTestFramework): """Simple doublespend""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + # make_utxo may have generated a bunch of blocks, so we need to sync + # before we can spend the coins generated, or else the resulting + # transactions might not be accepted by our peers. + self.sync_all() + tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - self.sync_all([self.nodes]) + self.sync_all() # Should fail because we haven't changed the fee tx1b = CTransaction() diff --git a/test/functional/segwit.py b/test/functional/segwit.py index 6ecade7cb6..338fa1bc52 100755 --- a/test/functional/segwit.py +++ b/test/functional/segwit.py @@ -77,9 +77,10 @@ class SegWitTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 - self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0"], - ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"], - ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]] + # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. 
+ self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999"], + ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999"], + ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999"]] def setup_network(self): super().setup_network() diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py index fe577dc20a..55bb80ea00 100755 --- a/test/functional/sendheaders.py +++ b/test/functional/sendheaders.py @@ -10,6 +10,17 @@ Setup: receive inv's (omitted from testing description below, this is our control). Second node is used for creating reorgs. +test_null_locators +================== + +Sends two getheaders requests with null locator values. First request's hashstop +value refers to validated block, while second request's hashstop value refers to +a block which hasn't been validated. Verifies only the first request returns +headers. + +test_nonnull_locators +===================== + Part 1: No headers announcements before "sendheaders" a. node mines a block [expect: inv] send getdata for the block [expect: block] @@ -181,7 +192,7 @@ class SendHeadersTest(BitcoinTestFramework): # mine count blocks and return the new tip def mine_blocks(self, count): # Clear out last block announcement from each p2p listener - [ x.clear_last_announcement() for x in self.p2p_connections ] + [x.clear_last_announcement() for x in self.nodes[0].p2ps] self.nodes[0].generate(count) return int(self.nodes[0].getbestblockhash(), 16) @@ -193,7 +204,7 @@ class SendHeadersTest(BitcoinTestFramework): def mine_reorg(self, length): self.nodes[0].generate(length) # make sure all invalidated blocks are node0's sync_blocks(self.nodes, wait=0.1) - for x in self.p2p_connections: + for x in self.nodes[0].p2ps: x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16)) x.clear_last_announcement() @@ -206,18 +217,10 @@ class SendHeadersTest(BitcoinTestFramework): def run_test(self): # Setup the p2p connections and start up the network thread. 
- inv_node = TestNode() - test_node = TestNode() - - self.p2p_connections = [inv_node, test_node] - - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) + inv_node = self.nodes[0].add_p2p_connection(TestNode()) # Set nServices to 0 for test_node, so no block download will occur outside of # direct fetching - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) - inv_node.add_connection(connections[0]) - test_node.add_connection(connections[1]) + test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_WITNESS) NetworkThread().start() # Start up network handling in another thread @@ -225,6 +228,33 @@ class SendHeadersTest(BitcoinTestFramework): inv_node.wait_for_verack() test_node.wait_for_verack() + # Ensure verack's have been processed by our peer + inv_node.sync_with_ping() + test_node.sync_with_ping() + + self.test_null_locators(test_node) + self.test_nonnull_locators(test_node, inv_node) + + def test_null_locators(self, test_node): + tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0]) + tip_hash = int(tip["hash"], 16) + + self.log.info("Verify getheaders with null locator and valid hashstop returns headers.") + test_node.clear_last_announcement() + test_node.get_headers(locator=[], hashstop=tip_hash) + assert_equal(test_node.check_last_announcement(headers=[tip_hash]), True) + + self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.") + block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1) + block.solve() + test_node.send_header_for_blocks([block]) + test_node.clear_last_announcement() + test_node.get_headers(locator=[], hashstop=int(block.hash, 16)) + test_node.sync_with_ping() + assert_equal(test_node.block_announced, False) + test_node.send_message(msg_block(block)) + + def test_nonnull_locators(self, test_node, inv_node): tip = int(self.nodes[0].getbestblockhash(), 16) # PART 1 diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index 747bda309c..bd3a3b3fab 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -33,24 +33,17 @@ ServiceProxy class: - uses standard Python json lib """ -try: - import http.client as httplib -except ImportError: - import httplib import base64 import decimal +import http.client import json import logging import socket import time -try: - import urllib.parse as urlparse -except ImportError: - import urlparse - -USER_AGENT = "AuthServiceProxy/0.1" +import urllib.parse HTTP_TIMEOUT = 30 +USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") @@ -60,7 +53,7 @@ class JSONRPCException(Exception): errmsg = '%(message)s (%(code)i)' % rpc_error except (KeyError, TypeError): errmsg = '' - Exception.__init__(self, errmsg) + super().__init__(errmsg) self.error = rpc_error @@ -69,28 +62,18 @@ def EncodeDecimal(o): return str(o) raise TypeError(repr(o) + " is not JSON serializable") -class AuthServiceProxy(object): +class AuthServiceProxy(): __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): self.__service_url = service_url self._service_name = service_name - self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests - self.__url = urlparse.urlparse(service_url) - if self.__url.port is None: - port 
= 80 - else: - port = self.__url.port - (user, passwd) = (self.__url.username, self.__url.password) - try: - user = user.encode('utf8') - except AttributeError: - pass - try: - passwd = passwd.encode('utf8') - except AttributeError: - pass + self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests + self.__url = urllib.parse.urlparse(service_url) + port = 80 if self.__url.port is None else self.__url.port + user = None if self.__url.username is None else self.__url.username.encode('utf8') + passwd = None if self.__url.password is None else self.__url.password.encode('utf8') authpair = user + b':' + passwd self.__auth_header = b'Basic ' + base64.b64encode(authpair) @@ -98,11 +81,9 @@ class AuthServiceProxy(object): # Callables re-use the connection of the original proxy self.__conn = connection elif self.__url.scheme == 'https': - self.__conn = httplib.HTTPSConnection(self.__url.hostname, port, - timeout=timeout) + self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout) else: - self.__conn = httplib.HTTPConnection(self.__url.hostname, port, - timeout=timeout) + self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): @@ -124,14 +105,14 @@ class AuthServiceProxy(object): try: self.__conn.request(method, path, postdata, headers) return self._get_response() - except httplib.BadStatusLine as e: - if e.line == "''": # if connection was closed, try again + except http.client.BadStatusLine as e: + if e.line == "''": # if connection was closed, try again self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise - except (BrokenPipeError,ConnectionResetError): + except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset # ConnectionResetError happens on FreeBSD with Python 3.4 self.__conn.close() @@ -141,8 +122,8 @@ class AuthServiceProxy(object): def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 - log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name, - json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name, + json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) if args and argsn: raise ValueError('Cannot handle both named and positional arguments') return {'version': '1.1', @@ -163,7 +144,7 @@ class AuthServiceProxy(object): def batch(self, rpc_call_list): postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) - log.debug("--> "+postdata) + log.debug("--> " + postdata) return self._request('POST', self.__url.path, postdata.encode('utf-8')) def _get_response(self): @@ -190,9 +171,9 @@ class AuthServiceProxy(object): response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: - log.debug("<-%s- [%.6f] %s"%(response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) else: - log.debug("<-- [%.6f] %s"%(elapsed,responsedata)) + log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) return response def __truediv__(self, relative_uri): 
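(Aside on the authproxy.py hunk above.) The rewritten AuthServiceProxy constructor drops the Python 2 fallbacks and derives the port and the HTTP Basic credentials from the service URL with urllib.parse and two conditional expressions. Below is a minimal standalone sketch of that logic, assuming a made-up service URL that carries credentials (as the framework's RPC URLs do); it is an illustration, not the framework code itself:

import base64
import urllib.parse

service_url = "http://rpcuser:rpcpass@127.0.0.1:18443"  # hypothetical example URL

url = urllib.parse.urlparse(service_url)
# Fall back to port 80 when the URL does not name one, as the new constructor does.
port = 80 if url.port is None else url.port
# Credentials must be bytes before base64-encoding; missing parts stay None.
user = None if url.username is None else url.username.encode('utf8')
passwd = None if url.password is None else url.password.encode('utf8')
# The Authorization header value is "Basic <base64(user:passwd)>".
authpair = user + b':' + passwd
auth_header = b'Basic ' + base64.b64encode(authpair)
print(port, auth_header)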
diff --git a/test/functional/test_framework/blockstore.py b/test/functional/test_framework/blockstore.py index 4b2170a03f..051c57a6c7 100644 --- a/test/functional/test_framework/blockstore.py +++ b/test/functional/test_framework/blockstore.py @@ -10,7 +10,7 @@ import dbm.dumb as dbmd logger = logging.getLogger("TestFramework.blockstore") -class BlockStore(object): +class BlockStore(): """BlockStore helper class. BlockStore keeps a map of blocks and implements helper functions for @@ -100,7 +100,7 @@ class BlockStore(object): def get_blocks(self, inv): responses = [] for i in inv: - if (i.type == 2): # MSG_BLOCK + if (i.type == 2 or i.type == (2 | (1 << 30))): # MSG_BLOCK or MSG_WITNESS_BLOCK data = self.get(i.hash) if data is not None: # Use msg_generic to avoid re-serialization @@ -127,7 +127,7 @@ class BlockStore(object): locator.vHave = r return locator -class TxStore(object): +class TxStore(): def __init__(self, datadir): self.txDB = dbmd.open(datadir + "/transactions", 'c') @@ -153,7 +153,7 @@ class TxStore(object): def get_transactions(self, inv): responses = [] for i in inv: - if (i.type == 1): # MSG_TX + if (i.type == 1 or i.type == (1 | (1 << 30))): # MSG_TX or MSG_WITNESS_TX tx = self.get(i.hash) if tx is not None: responses.append(msg_generic(b"tx", tx)) diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py index bfbc0c3b03..03f967ba71 100755 --- a/test/functional/test_framework/comptool.py +++ b/test/functional/test_framework/comptool.py @@ -27,7 +27,7 @@ logger=logging.getLogger("TestFramework.comptool") global mininode_lock -class RejectResult(object): +class RejectResult(): """Outcome that expects rejection of a transaction or block.""" def __init__(self, code, reason=b''): self.code = code @@ -80,9 +80,9 @@ class TestNode(NodeConnCB): [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)] for i in message.inv: - if i.type == 1: + if i.type == 1 or i.type == 1 | (1 << 30): # MSG_TX or MSG_WITNESS_TX self.tx_request_map[i.hash] = True - elif i.type == 2: + elif i.type == 2 or i.type == 2 | (1 << 30): # MSG_BLOCK or MSG_WITNESS_BLOCK self.block_request_map[i.hash] = True def on_inv(self, conn, message): @@ -156,13 +156,13 @@ class TestNode(NodeConnCB): # across all connections. (If outcome of final tx is specified as true # or false, then only the last tx is tested against outcome.) -class TestInstance(object): +class TestInstance(): def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False): self.blocks_and_transactions = objects if objects else [] self.sync_every_block = sync_every_block self.sync_every_tx = sync_every_tx -class TestManager(object): +class TestManager(): def __init__(self, testgen, datadir): self.test_generator = testgen @@ -295,8 +295,11 @@ class TestManager(object): # Wait until verack is received self.wait_for_verack() - test_number = 1 - for test_instance in self.test_generator.get_tests(): + test_number = 0 + tests = self.test_generator.get_tests() + for test_instance in tests: + test_number += 1 + logger.info("Running test %d: %s line %s" % (test_number, tests.gi_code.co_filename, tests.gi_frame.f_lineno)) # We use these variables to keep track of the last block # and last transaction in the tests, which are used # if we're not syncing on every block or every tx. 
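(Aside on the comptool.py hunk above.) The new per-test log line identifies the running test by asking the generator returned by get_tests() where it is currently suspended, using the standard generator attributes gi_code and gi_frame. A toy illustration of those attributes, unrelated to the framework itself:

def tests():
    yield "first"
    yield "second"

gen = tests()
next(gen)
# co_filename is the file defining the generator; f_lineno is the line it is
# suspended at (here, the first yield after a single next() call).
print(gen.gi_code.co_filename, gen.gi_frame.f_lineno)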
@@ -397,9 +400,6 @@ class TestManager(object): if (not self.check_mempool(tx.sha256, tx_outcome)): raise AssertionError("Mempool test failed at test %d" % test_number) - logger.info("Test %d: PASS" % test_number) - test_number += 1 - [ c.disconnect_node() for c in self.connections ] self.wait_for_disconnections() self.block_store.close() diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py index 84049e76bc..ddc3c515b2 100644 --- a/test/functional/test_framework/coverage.py +++ b/test/functional/test_framework/coverage.py @@ -14,7 +14,7 @@ import os REFERENCE_FILENAME = 'rpc_interface.txt' -class AuthServiceProxyWrapper(object): +class AuthServiceProxyWrapper(): """ An object that wraps AuthServiceProxy to record specific RPC calls. diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index 85a6158a2f..aa91fb5b0d 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -84,7 +84,7 @@ def _check_result(val, func, args): ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p ssl.EC_KEY_new_by_curve_name.errcheck = _check_result -class CECKey(object): +class CECKey(): """Wrapper around OpenSSL's EC_KEY""" POINT_CONVERSION_COMPRESSED = 2 diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index c6f596156a..8fbc63fba4 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -37,7 +37,7 @@ from threading import RLock, Thread from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until -BIP0031_VERSION = 60000 +MIN_VERSION_SUPPORTED = 60001 MY_VERSION = 70014 # past bip-31 for ping/pong MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) @@ -219,7 +219,7 @@ def ToHex(obj): # Objects that map to bitcoind objects, which can be serialized/deserialized -class CAddress(object): +class CAddress(): def __init__(self): self.nServices = 1 self.pchReserved = b"\x00" * 10 + b"\xff" * 2 @@ -246,7 +246,7 @@ class CAddress(object): MSG_WITNESS_FLAG = 1<<30 -class CInv(object): +class CInv(): typemap = { 0: "Error", 1: "TX", @@ -275,7 +275,7 @@ class CInv(object): % (self.typemap[self.type], self.hash) -class CBlockLocator(object): +class CBlockLocator(): def __init__(self): self.nVersion = MY_VERSION self.vHave = [] @@ -295,7 +295,7 @@ class CBlockLocator(object): % (self.nVersion, repr(self.vHave)) -class COutPoint(object): +class COutPoint(): def __init__(self, hash=0, n=0): self.hash = hash self.n = n @@ -314,7 +314,7 @@ class COutPoint(object): return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n) -class CTxIn(object): +class CTxIn(): def __init__(self, outpoint=None, scriptSig=b"", nSequence=0): if outpoint is None: self.prevout = COutPoint() @@ -342,7 +342,7 @@ class CTxIn(object): self.nSequence) -class CTxOut(object): +class CTxOut(): def __init__(self, nValue=0, scriptPubKey=b""): self.nValue = nValue self.scriptPubKey = scriptPubKey @@ -363,7 +363,7 @@ class CTxOut(object): bytes_to_hex_str(self.scriptPubKey)) -class CScriptWitness(object): +class CScriptWitness(): def __init__(self): # stack is a vector of strings self.stack = [] @@ -378,7 +378,7 @@ class CScriptWitness(object): return True -class CTxInWitness(object): +class CTxInWitness(): def __init__(self): self.scriptWitness = CScriptWitness() @@ -395,7 +395,7 @@ 
class CTxInWitness(object): return self.scriptWitness.is_null() -class CTxWitness(object): +class CTxWitness(): def __init__(self): self.vtxinwit = [] @@ -423,7 +423,7 @@ class CTxWitness(object): return True -class CTransaction(object): +class CTransaction(): def __init__(self, tx=None): if tx is None: self.nVersion = 1 @@ -526,7 +526,7 @@ class CTransaction(object): % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) -class CBlockHeader(object): +class CBlockHeader(): def __init__(self, header=None): if header is None: self.set_null() @@ -666,82 +666,7 @@ class CBlock(CBlockHeader): time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) -class CUnsignedAlert(object): - def __init__(self): - self.nVersion = 1 - self.nRelayUntil = 0 - self.nExpiration = 0 - self.nID = 0 - self.nCancel = 0 - self.setCancel = [] - self.nMinVer = 0 - self.nMaxVer = 0 - self.setSubVer = [] - self.nPriority = 0 - self.strComment = b"" - self.strStatusBar = b"" - self.strReserved = b"" - - def deserialize(self, f): - self.nVersion = struct.unpack("<i", f.read(4))[0] - self.nRelayUntil = struct.unpack("<q", f.read(8))[0] - self.nExpiration = struct.unpack("<q", f.read(8))[0] - self.nID = struct.unpack("<i", f.read(4))[0] - self.nCancel = struct.unpack("<i", f.read(4))[0] - self.setCancel = deser_int_vector(f) - self.nMinVer = struct.unpack("<i", f.read(4))[0] - self.nMaxVer = struct.unpack("<i", f.read(4))[0] - self.setSubVer = deser_string_vector(f) - self.nPriority = struct.unpack("<i", f.read(4))[0] - self.strComment = deser_string(f) - self.strStatusBar = deser_string(f) - self.strReserved = deser_string(f) - - def serialize(self): - r = b"" - r += struct.pack("<i", self.nVersion) - r += struct.pack("<q", self.nRelayUntil) - r += struct.pack("<q", self.nExpiration) - r += struct.pack("<i", self.nID) - r += struct.pack("<i", self.nCancel) - r += ser_int_vector(self.setCancel) - r += struct.pack("<i", self.nMinVer) - r += struct.pack("<i", self.nMaxVer) - r += ser_string_vector(self.setSubVer) - r += struct.pack("<i", self.nPriority) - r += ser_string(self.strComment) - r += ser_string(self.strStatusBar) - r += ser_string(self.strReserved) - return r - - def __repr__(self): - return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \ - % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID, - self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority, - self.strComment, self.strStatusBar, self.strReserved) - - -class CAlert(object): - def __init__(self): - self.vchMsg = b"" - self.vchSig = b"" - - def deserialize(self, f): - self.vchMsg = deser_string(f) - self.vchSig = deser_string(f) - - def serialize(self): - r = b"" - r += ser_string(self.vchMsg) - r += ser_string(self.vchSig) - return r - - def __repr__(self): - return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \ - % (len(self.vchMsg), len(self.vchSig)) - - -class PrefilledTransaction(object): +class PrefilledTransaction(): def __init__(self, index=0, tx = None): self.index = index self.tx = tx @@ -767,7 +692,7 @@ class PrefilledTransaction(object): return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. 
-class P2PHeaderAndShortIDs(object): +class P2PHeaderAndShortIDs(): def __init__(self): self.header = CBlockHeader() self.nonce = 0 @@ -819,7 +744,7 @@ def calculate_shortid(k0, k1, tx_hash): # This version gets rid of the array lengths, and reinterprets the differential # encoding into indices that can be used for lookup. -class HeaderAndShortIDs(object): +class HeaderAndShortIDs(): def __init__(self, p2pheaders_and_shortids = None): self.header = CBlockHeader() self.nonce = 0 @@ -880,7 +805,7 @@ class HeaderAndShortIDs(object): return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn)) -class BlockTransactionsRequest(object): +class BlockTransactionsRequest(): def __init__(self, blockhash=0, indexes = None): self.blockhash = blockhash @@ -920,7 +845,7 @@ class BlockTransactionsRequest(object): return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes)) -class BlockTransactions(object): +class BlockTransactions(): def __init__(self, blockhash=0, transactions = None): self.blockhash = blockhash @@ -944,12 +869,12 @@ class BlockTransactions(object): # Objects that correspond to messages on the wire -class msg_version(object): +class msg_version(): command = b"version" def __init__(self): self.nVersion = MY_VERSION - self.nServices = 1 + self.nServices = NODE_NETWORK | NODE_WITNESS self.nTime = int(time.time()) self.addrTo = CAddress() self.addrFrom = CAddress() @@ -1012,7 +937,7 @@ class msg_version(object): self.strSubVer, self.nStartingHeight, self.nRelay) -class msg_verack(object): +class msg_verack(): command = b"verack" def __init__(self): @@ -1028,7 +953,7 @@ class msg_verack(object): return "msg_verack()" -class msg_addr(object): +class msg_addr(): command = b"addr" def __init__(self): @@ -1044,26 +969,7 @@ class msg_addr(object): return "msg_addr(addrs=%s)" % (repr(self.addrs)) -class msg_alert(object): - command = b"alert" - - def __init__(self): - self.alert = CAlert() - - def deserialize(self, f): - self.alert = CAlert() - self.alert.deserialize(f) - - def serialize(self): - r = b"" - r += self.alert.serialize() - return r - - def __repr__(self): - return "msg_alert(alert=%s)" % (repr(self.alert), ) - - -class msg_inv(object): +class msg_inv(): command = b"inv" def __init__(self, inv=None): @@ -1082,7 +988,7 @@ class msg_inv(object): return "msg_inv(inv=%s)" % (repr(self.inv)) -class msg_getdata(object): +class msg_getdata(): command = b"getdata" def __init__(self, inv=None): @@ -1098,7 +1004,7 @@ class msg_getdata(object): return "msg_getdata(inv=%s)" % (repr(self.inv)) -class msg_getblocks(object): +class msg_getblocks(): command = b"getblocks" def __init__(self): @@ -1121,7 +1027,7 @@ class msg_getblocks(object): % (repr(self.locator), self.hashstop) -class msg_tx(object): +class msg_tx(): command = b"tx" def __init__(self, tx=CTransaction()): @@ -1142,7 +1048,7 @@ class msg_witness_tx(msg_tx): return self.tx.serialize_with_witness() -class msg_block(object): +class msg_block(): command = b"block" def __init__(self, block=None): @@ -1162,7 +1068,7 @@ class msg_block(object): # for cases where a user needs tighter control over what is sent over the wire # note that the user must supply the name of the command, and the data -class msg_generic(object): +class msg_generic(): def __init__(self, command, data=None): self.command = command self.data = data @@ -1179,7 +1085,7 @@ class msg_witness_block(msg_block): r = 
self.block.serialize(with_witness=True) return r -class msg_getaddr(object): +class msg_getaddr(): command = b"getaddr" def __init__(self): @@ -1195,23 +1101,7 @@ class msg_getaddr(object): return "msg_getaddr()" -class msg_ping_prebip31(object): - command = b"ping" - - def __init__(self): - pass - - def deserialize(self, f): - pass - - def serialize(self): - return b"" - - def __repr__(self): - return "msg_ping() (pre-bip31)" - - -class msg_ping(object): +class msg_ping(): command = b"ping" def __init__(self, nonce=0): @@ -1229,7 +1119,7 @@ class msg_ping(object): return "msg_ping(nonce=%08x)" % self.nonce -class msg_pong(object): +class msg_pong(): command = b"pong" def __init__(self, nonce=0): @@ -1247,7 +1137,7 @@ class msg_pong(object): return "msg_pong(nonce=%08x)" % self.nonce -class msg_mempool(object): +class msg_mempool(): command = b"mempool" def __init__(self): @@ -1262,7 +1152,7 @@ class msg_mempool(object): def __repr__(self): return "msg_mempool()" -class msg_sendheaders(object): +class msg_sendheaders(): command = b"sendheaders" def __init__(self): @@ -1282,7 +1172,7 @@ class msg_sendheaders(object): # number of entries # vector of hashes # hash_stop (hash of last desired block header, 0 to get as many as possible) -class msg_getheaders(object): +class msg_getheaders(): command = b"getheaders" def __init__(self): @@ -1307,7 +1197,7 @@ class msg_getheaders(object): # headers message has # <count> <vector of block headers> -class msg_headers(object): +class msg_headers(): command = b"headers" def __init__(self, headers=None): @@ -1327,7 +1217,7 @@ class msg_headers(object): return "msg_headers(headers=%s)" % repr(self.headers) -class msg_reject(object): +class msg_reject(): command = b"reject" REJECT_MALFORMED = 1 @@ -1358,7 +1248,7 @@ class msg_reject(object): return "msg_reject: %s %d %s [%064x]" \ % (self.message, self.code, self.reason, self.data) -class msg_feefilter(object): +class msg_feefilter(): command = b"feefilter" def __init__(self, feerate=0): @@ -1375,7 +1265,7 @@ class msg_feefilter(object): def __repr__(self): return "msg_feefilter(feerate=%08x)" % self.feerate -class msg_sendcmpct(object): +class msg_sendcmpct(): command = b"sendcmpct" def __init__(self): @@ -1395,7 +1285,7 @@ class msg_sendcmpct(object): def __repr__(self): return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version) -class msg_cmpctblock(object): +class msg_cmpctblock(): command = b"cmpctblock" def __init__(self, header_and_shortids = None): @@ -1413,7 +1303,7 @@ class msg_cmpctblock(object): def __repr__(self): return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids) -class msg_getblocktxn(object): +class msg_getblocktxn(): command = b"getblocktxn" def __init__(self): @@ -1431,7 +1321,7 @@ class msg_getblocktxn(object): def __repr__(self): return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request)) -class msg_blocktxn(object): +class msg_blocktxn(): command = b"blocktxn" def __init__(self): @@ -1454,13 +1344,11 @@ class msg_witness_blocktxn(msg_blocktxn): r += self.block_transactions.serialize(with_witness=True) return r -class NodeConnCB(object): +class NodeConnCB(): """Callback and helper functions for P2P connection to a bitcoind node. Individual testcases should subclass this and override the on_* methods - if they want to alter message handling behaviour. 
- """ - + if they want to alter message handling behaviour.""" def __init__(self): # Track whether we have a P2P connection open to the node self.connected = False @@ -1474,25 +1362,13 @@ class NodeConnCB(object): # A count of the number of ping messages we've sent to the node self.ping_counter = 1 - # deliver_sleep_time is helpful for debugging race conditions in p2p - # tests; it causes message delivery to sleep for the specified time - # before acquiring the global lock and delivering the next message. - self.deliver_sleep_time = None - # Message receiving methods def deliver(self, conn, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received - and the most recent message of each type. - - Optionally waits for deliver_sleep_time before dispatching message. - """ - - deliver_sleep = self.get_deliver_sleep_time() - if deliver_sleep is not None: - time.sleep(deliver_sleep) + and the most recent message of each type.""" with mininode_lock: try: command = message.command.decode('ascii') @@ -1504,10 +1380,6 @@ class NodeConnCB(object): sys.exc_info()[0])) raise - def get_deliver_sleep_time(self): - with mininode_lock: - return self.deliver_sleep_time - # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. @@ -1519,7 +1391,6 @@ class NodeConnCB(object): self.connection = None def on_addr(self, conn, message): pass - def on_alert(self, conn, message): pass def on_block(self, conn, message): pass def on_blocktxn(self, conn, message): pass def on_cmpctblock(self, conn, message): pass @@ -1546,19 +1417,15 @@ class NodeConnCB(object): conn.send_message(want) def on_ping(self, conn, message): - if conn.ver_send > BIP0031_VERSION: - conn.send_message(msg_pong(message.nonce)) + conn.send_message(msg_pong(message.nonce)) def on_verack(self, conn, message): conn.ver_recv = conn.ver_send self.verack_received = True def on_version(self, conn, message): - if message.nVersion >= 209: - conn.send_message(msg_verack()) - conn.ver_send = min(MY_VERSION, message.nVersion) - if message.nVersion < 209: - conn.ver_recv = conn.ver_send + assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED) + conn.send_message(msg_verack()) conn.nServices = message.nServices # Connection helper methods @@ -1615,16 +1482,15 @@ class NodeConnCB(object): test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter wait_until(test_function, timeout=timeout, lock=mininode_lock) self.ping_counter += 1 - return True -# The actual NodeConn class -# This class provides an interface for a p2p connection to a specified node class NodeConn(asyncore.dispatcher): + """The actual NodeConn class + + This class provides an interface for a p2p connection to a specified node.""" messagemap = { b"version": msg_version, b"verack": msg_verack, b"addr": msg_addr, - b"alert": msg_alert, b"inv": msg_inv, b"getdata": msg_getdata, b"getblocks": msg_getblocks, @@ -1650,7 +1516,7 @@ class NodeConn(asyncore.dispatcher): "regtest": b"\xfa\xbf\xb5\xda", # regtest } - def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True): + def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK|NODE_WITNESS, send_version=True): asyncore.dispatcher.__init__(self, map=mininode_socket_map) self.dstaddr = dstaddr self.dstport = dstport @@ -1741,40 +1607,27 @@ class NodeConn(asyncore.dispatcher): return if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]: raise ValueError("got garbage %s" % repr(self.recvbuf)) - if self.ver_recv < 209: - if len(self.recvbuf) < 4 + 12 + 4: - return - command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] - msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] - checksum = None - if len(self.recvbuf) < 4 + 12 + 4 + msglen: - return - msg = self.recvbuf[4+12+4:4+12+4+msglen] - self.recvbuf = self.recvbuf[4+12+4+msglen:] - else: - if len(self.recvbuf) < 4 + 12 + 4 + 4: - return - command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] - msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] - checksum = self.recvbuf[4+12+4:4+12+4+4] - if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: - return - msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] - th = sha256(msg) - h = sha256(th) - if checksum != h[:4]: - raise ValueError("got bad checksum " + repr(self.recvbuf)) - self.recvbuf = self.recvbuf[4+12+4+4+msglen:] - if command in self.messagemap: - f = BytesIO(msg) - t = self.messagemap[command]() - t.deserialize(f) - self.got_message(t) - else: - logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg))) - raise ValueError("Unknown command: '%s'" % (command)) + if len(self.recvbuf) < 4 + 12 + 4 + 4: + return + command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] + msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] + checksum = self.recvbuf[4+12+4:4+12+4+4] + if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: + return + msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] + th = sha256(msg) + h = sha256(th) + if checksum != h[:4]: + raise ValueError("got bad checksum " + repr(self.recvbuf)) + self.recvbuf = self.recvbuf[4+12+4+4+msglen:] + if command not in self.messagemap: + raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg))) + f = BytesIO(msg) + t = self.messagemap[command]() + t.deserialize(f) + self.got_message(t) except Exception as e: - logger.exception('got_data:', repr(e)) + logger.exception('Error reading message:', repr(e)) raise def send_message(self, message, 
pushbuf=False): @@ -1787,10 +1640,9 @@ class NodeConn(asyncore.dispatcher): tmsg += command tmsg += b"\x00" * (12 - len(command)) tmsg += struct.pack("<I", len(data)) - if self.ver_send >= 209: - th = sha256(data) - h = sha256(th) - tmsg += h[:4] + th = sha256(data) + h = sha256(th) + tmsg += h[:4] tmsg += data with mininode_lock: if (len(self.sendbuf) == 0 and not pushbuf): @@ -1804,9 +1656,6 @@ class NodeConn(asyncore.dispatcher): self.last_sent = time.time() def got_message(self, message): - if message.command == b"version": - if message.nVersion <= BIP0031_VERSION: - self.messagemap[b'ping'] = msg_ping_prebip31 if self.last_sent + 30 * 60 < time.time(): self.send_message(self.messagemap[b'ping']()) self._log_message("receive", message) @@ -1839,13 +1688,3 @@ class NetworkThread(Thread): [ obj.handle_close() for obj in disconnected ] asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) logger.debug("Network thread closing") - - -# An exception we can raise if we detect a potential disconnect -# (p2p or rpc) before the test is complete -class EarlyDisconnectError(Exception): - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index 8f5339a02a..a4c046bd3d 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -370,7 +370,7 @@ class CScriptTruncatedPushDataError(CScriptInvalidError): super(CScriptTruncatedPushDataError, self).__init__(msg) # This is used, eg, for blockchain heights in coinbase scripts (bip34) -class CScriptNum(object): +class CScriptNum(): def __init__(self, d=0): self.value = d diff --git a/test/functional/test_framework/socks5.py b/test/functional/test_framework/socks5.py index 0070844168..7b40c47fbf 100644 --- a/test/functional/test_framework/socks5.py +++ b/test/functional/test_framework/socks5.py @@ -31,7 +31,7 @@ def recvall(s, n): return rv ### Implementation classes -class Socks5Configuration(object): +class Socks5Configuration(): """Proxy configuration.""" def __init__(self): self.addr = None # Bind address (must be set) @@ -39,7 +39,7 @@ class Socks5Configuration(object): self.unauth = False # Support unauthenticated self.auth = False # Support authentication -class Socks5Command(object): +class Socks5Command(): """Information about an incoming socks5 command.""" def __init__(self, cmd, atyp, addr, port, username, password): self.cmd = cmd # Command (one of Command.*) @@ -51,7 +51,7 @@ class Socks5Command(object): def __repr__(self): return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password) -class Socks5Connection(object): +class Socks5Connection(): def __init__(self, serv, conn, peer): self.serv = serv self.conn = conn @@ -122,7 +122,7 @@ class Socks5Connection(object): finally: self.conn.close() -class Socks5Server(object): +class Socks5Server(): def __init__(self, conf): self.conf = conf self.s = socket.socket(conf.af) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 381513ab9e..8df50474f3 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -43,7 +43,7 @@ TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 -class BitcoinTestFramework(object): +class BitcoinTestFramework(): """Base class for a bitcoin test script. 
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. @@ -102,8 +102,11 @@ class BitcoinTestFramework(object): check_json_precision() + self.options.cachedir = os.path.abspath(self.options.cachedir) + # Set up temp directory and start logging if self.options.tmpdir: + self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix="test") diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 12dab57a02..8b28064c46 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -13,13 +13,15 @@ import os import subprocess import time +from .authproxy import JSONRPCException +from .mininode import NodeConn from .util import ( assert_equal, get_rpc_proxy, rpc_url, wait_until, + p2p_port, ) -from .authproxy import JSONRPCException BITCOIND_PROC_WAIT_TIMEOUT = 60 @@ -31,9 +33,11 @@ class TestNode(): - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node + - one or more P2P connections to the node + - To make things easier for the test writer, a bit of magic is happening under the covers. - Any unrecognised messages will be dispatched to the RPC connection.""" + To make things easier for the test writer, any unrecognised messages will + be dispatched to the RPC connection.""" def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir): self.index = i @@ -63,10 +67,12 @@ class TestNode(): self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) - def __getattr__(self, *args, **kwargs): + self.p2ps = [] + + def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection.""" assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection" - return self.rpc.__getattr__(*args, **kwargs) + return getattr(self.rpc, name) def start(self, extra_args=None, stderr=None): """Start the node.""" @@ -119,6 +125,7 @@ class TestNode(): self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") + del self.p2ps[:] def is_node_stopped(self): """Checks whether the node has stopped. @@ -151,6 +158,38 @@ class TestNode(): self.encryptwallet(passphrase) self.wait_until_stopped() + def add_p2p_connection(self, p2p_conn, **kwargs): + """Add a p2p connection to the node. + + This method adds the p2p connection to the self.p2ps list and also + returns the connection to the caller.""" + if 'dstport' not in kwargs: + kwargs['dstport'] = p2p_port(self.index) + if 'dstaddr' not in kwargs: + kwargs['dstaddr'] = '127.0.0.1' + self.p2ps.append(p2p_conn) + kwargs.update({'rpc': self.rpc, 'callback': p2p_conn}) + p2p_conn.add_connection(NodeConn(**kwargs)) + + return p2p_conn + + @property + def p2p(self): + """Return the first p2p connection + + Convenience property - most tests only use a single p2p connection to each + node, so this saves having to write node.p2ps[0] many times.""" + assert self.p2ps, "No p2p connection" + return self.p2ps[0] + + def disconnect_p2p(self, index=0): + """Close the p2p connection to the node.""" + # Connection could have already been closed by other end. Calling disconnect_p2p() + # on an already disconnected p2p connection is not an error. 
+ if self.p2ps[index].connection is not None: + self.p2ps[index].connection.disconnect_node() + del self.p2ps[index] + class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 8c4651f6e0..ca36426a0a 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -124,6 +124,8 @@ BASE_SCRIPTS= [ 'resendwallettransactions.py', 'minchainwork.py', 'p2p-fingerprint.py', + 'uacomment.py', + 'p2p-acceptblock.py', ] EXTENDED_SCRIPTS = [ @@ -151,7 +153,6 @@ EXTENDED_SCRIPTS = [ 'txn_clone.py --mineblock', 'notifications.py', 'invalidateblock.py', - 'p2p-acceptblock.py', 'replace-by-fee.py', ] @@ -459,7 +460,7 @@ def check_script_list(src_dir): # On travis this warning is an error to prevent merging incomplete commits into master sys.exit(1) -class RPCCoverage(object): +class RPCCoverage(): """ Coverage reporting utilities for test_runner. diff --git a/test/functional/uacomment.py b/test/functional/uacomment.py new file mode 100755 index 0000000000..0b2c64ab69 --- /dev/null +++ b/test/functional/uacomment.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the -uacomment option.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class UacommentTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def run_test(self): + self.log.info("test multiple -uacomment") + test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1] + assert_equal(test_uacomment, "(testnode0)") + + self.restart_node(0, ["-uacomment=foo"]) + foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1] + assert_equal(foo_uacomment, "(testnode0; foo)") + + self.log.info("test -uacomment max length") + self.stop_node(0) + expected = "Total length of network version string (286) exceeds maximum length (256). Reduce the number or size of uacomments." + self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected) + + self.log.info("test -uacomment unsafe characters") + for unsafe_char in ['/', ':', '(', ')']: + expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters" + self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected) + +if __name__ == '__main__': + UacommentTest().main() diff --git a/test/functional/wallet-accounts.py b/test/functional/wallet-accounts.py index 40726d2a76..bc1efaee15 100755 --- a/test/functional/wallet-accounts.py +++ b/test/functional/wallet-accounts.py @@ -72,62 +72,135 @@ class WalletAccountsTest(BitcoinTestFramework): # otherwise we're off by exactly the fee amount as that's mined # and matures in the next 100 blocks node.sendfrom("", common_address, fee) - accounts = ["a", "b", "c", "d", "e"] amount_to_send = 1.0 - account_addresses = dict() + + # Create accounts and make sure subsequent account API calls + # recognize the account/address associations. 
+ accounts = [Account(name) for name in ("a", "b", "c", "d", "e")] for account in accounts: - address = node.getaccountaddress(account) - account_addresses[account] = address - - node.getnewaddress(account) - assert_equal(node.getaccount(address), account) - assert(address in node.getaddressesbyaccount(account)) - - node.sendfrom("", address, amount_to_send) - + account.add_receive_address(node.getaccountaddress(account.name)) + account.verify(node) + + # Send a transaction to each account, and make sure this forces + # getaccountaddress to generate a new receiving address. + for account in accounts: + node.sendtoaddress(account.receive_address, amount_to_send) + account.add_receive_address(node.getaccountaddress(account.name)) + account.verify(node) + + # Check the amounts received. node.generate(1) + for account in accounts: + assert_equal( + node.getreceivedbyaddress(account.addresses[0]), amount_to_send) + assert_equal(node.getreceivedbyaccount(account.name), amount_to_send) - for i in range(len(accounts)): - from_account = accounts[i] + # Check that sendfrom account reduces listaccounts balances. + for i, account in enumerate(accounts): to_account = accounts[(i+1) % len(accounts)] - to_address = account_addresses[to_account] - node.sendfrom(from_account, to_address, amount_to_send) - + node.sendfrom(account.name, to_account.receive_address, amount_to_send) node.generate(1) - for account in accounts: - address = node.getaccountaddress(account) - assert(address != account_addresses[account]) - assert_equal(node.getreceivedbyaccount(account), 2) - node.move(account, "", node.getbalance(account)) - + account.add_receive_address(node.getaccountaddress(account.name)) + account.verify(node) + assert_equal(node.getreceivedbyaccount(account.name), 2) + node.move(account.name, "", node.getbalance(account.name)) + account.verify(node) node.generate(101) - expected_account_balances = {"": 5200} for account in accounts: - expected_account_balances[account] = 0 - + expected_account_balances[account.name] = 0 assert_equal(node.listaccounts(), expected_account_balances) - assert_equal(node.getbalance(""), 5200) + # Check that setaccount can assign an account to a new unused address. for account in accounts: address = node.getaccountaddress("") - node.setaccount(address, account) - assert(address in node.getaddressesbyaccount(account)) + node.setaccount(address, account.name) + account.add_address(address) + account.verify(node) assert(address not in node.getaddressesbyaccount("")) + # Check that addmultisigaddress can assign accounts. for account in accounts: addresses = [] for x in range(10): addresses.append(node.getnewaddress()) - multisig_address = node.addmultisigaddress(5, addresses, account) + multisig_address = node.addmultisigaddress(5, addresses, account.name) + account.add_address(multisig_address) + account.verify(node) node.sendfrom("", multisig_address, 50) - node.generate(101) - for account in accounts: - assert_equal(node.getbalance(account), 50) + assert_equal(node.getbalance(account.name), 50) + + # Check that setaccount can change the account of an address from a + # different account. + change_account(node, accounts[0].addresses[0], accounts[0], accounts[1]) + + # Check that setaccount can change the account of an address which + # is the receiving address of a different account. + change_account(node, accounts[0].receive_address, accounts[0], accounts[1]) + + # Check that setaccount can set the account of an address already + # in the account. This is a no-op. 
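+        # (change_account handles old_account == new_account because the
+        # address is removed from the old account before being re-added.)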
+ change_account(node, accounts[2].addresses[0], accounts[2], accounts[2]) + + # Check that setaccount can set the account of an address which is + # already the receiving address of the account. It would probably make + # sense for this to be a no-op, but right now it resets the receiving + # address, causing getaccountaddress to return a brand new address. + change_account(node, accounts[2].receive_address, accounts[2], accounts[2]) + +class Account: + def __init__(self, name): + # Account name + self.name = name + # Current receiving address associated with this account. + self.receive_address = None + # List of all addresses assigned with this account + self.addresses = [] + + def add_address(self, address): + assert_equal(address not in self.addresses, True) + self.addresses.append(address) + + def add_receive_address(self, address): + self.add_address(address) + self.receive_address = address + + def verify(self, node): + if self.receive_address is not None: + assert self.receive_address in self.addresses + assert_equal(node.getaccountaddress(self.name), self.receive_address) + + for address in self.addresses: + assert_equal(node.getaccount(address), self.name) + + assert_equal( + set(node.getaddressesbyaccount(self.name)), set(self.addresses)) + + +def change_account(node, address, old_account, new_account): + assert_equal(address in old_account.addresses, True) + node.setaccount(address, new_account.name) + + old_account.addresses.remove(address) + new_account.add_address(address) + + # Calling setaccount on an address which was previously the receiving + # address of a different account should reset the receiving address of + # the old account, causing getaccountaddress to return a brand new + # address. + if address == old_account.receive_address: + new_address = node.getaccountaddress(old_account.name) + assert_equal(new_address not in old_account.addresses, True) + assert_equal(new_address not in new_account.addresses, True) + old_account.add_receive_address(new_address) + + old_account.verify(node) + new_account.verify(node) + if __name__ == '__main__': WalletAccountsTest().main() diff --git a/test/functional/walletbackup.py b/test/functional/walletbackup.py index 15ea26afa1..85a149793e 100755 --- a/test/functional/walletbackup.py +++ b/test/functional/walletbackup.py @@ -190,6 +190,16 @@ class WalletBackupTest(BitcoinTestFramework): assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) + # Backup to source wallet file must fail + sourcePaths = [ + tmpdir + "/node0/regtest/wallet.dat", + tmpdir + "/node0/./regtest/wallet.dat", + tmpdir + "/node0/regtest/", + tmpdir + "/node0/regtest"] + + for sourcePath in sourcePaths: + assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath) + if __name__ == '__main__': WalletBackupTest().main() diff --git a/test/functional/zmq_test.py b/test/functional/zmq_test.py index 382ef5bae2..fa30318416 100755 --- a/test/functional/zmq_test.py +++ b/test/functional/zmq_test.py @@ -2,16 +2,37 @@ # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the ZMQ API.""" +"""Test the ZMQ notification interface.""" import configparser import os import struct from test_framework.test_framework import BitcoinTestFramework, SkipTest +from test_framework.mininode import CTransaction from test_framework.util import (assert_equal, bytes_to_hex_str, hash256, ) +from io import BytesIO + +class ZMQSubscriber: + def __init__(self, socket, topic): + self.sequence = 0 + self.socket = socket + self.topic = topic + + import zmq + self.socket.setsockopt(zmq.SUBSCRIBE, self.topic) + + def receive(self): + topic, body, seq = self.socket.recv_multipart() + # Topic should match the subscriber topic. + assert_equal(topic, self.topic) + # Sequence should be incremental. + assert_equal(struct.unpack('<I', seq)[-1], self.sequence) + self.sequence += 1 + return body + class ZMQTest (BitcoinTestFramework): def set_test_params(self): @@ -24,26 +45,33 @@ class ZMQTest (BitcoinTestFramework): except ImportError: raise SkipTest("python3-zmq module not available.") - # Check that bitcoin has been built with ZMQ enabled + # Check that bitcoin has been built with ZMQ enabled. config = configparser.ConfigParser() if not self.options.configfile: - self.options.configfile = os.path.dirname(__file__) + "/../config.ini" + self.options.configfile = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config.ini")) config.read_file(open(self.options.configfile)) if not config["components"].getboolean("ENABLE_ZMQ"): raise SkipTest("bitcoind has not been built with zmq enabled.") - self.zmqContext = zmq.Context() - self.zmqSubSocket = self.zmqContext.socket(zmq.SUB) - self.zmqSubSocket.set(zmq.RCVTIMEO, 60000) - self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock") - self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx") - self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawblock") - self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawtx") - ip_address = "tcp://127.0.0.1:28332" - self.zmqSubSocket.connect(ip_address) - self.extra_args = [['-zmqpubhashblock=%s' % ip_address, '-zmqpubhashtx=%s' % ip_address, - '-zmqpubrawblock=%s' % ip_address, '-zmqpubrawtx=%s' % ip_address], []] + # Initialize ZMQ context and socket. + # All messages are received in the same socket which means + # that this test fails if the publishing order changes. + # Note that the publishing order is not defined in the documentation and + # is subject to change. + address = "tcp://127.0.0.1:28332" + self.zmq_context = zmq.Context() + socket = self.zmq_context.socket(zmq.SUB) + socket.set(zmq.RCVTIMEO, 60000) + socket.connect(address) + + # Subscribe to all available topics. + self.hashblock = ZMQSubscriber(socket, b"hashblock") + self.hashtx = ZMQSubscriber(socket, b"hashtx") + self.rawblock = ZMQSubscriber(socket, b"rawblock") + self.rawtx = ZMQSubscriber(socket, b"rawtx") + + self.extra_args = [["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [self.hashblock, self.hashtx, self.rawblock, self.rawtx]], []] self.add_nodes(self.num_nodes, self.extra_args) self.start_nodes() @@ -51,103 +79,48 @@ class ZMQTest (BitcoinTestFramework): try: self._zmq_test() finally: - # Destroy the zmq context - self.log.debug("Destroying zmq context") - self.zmqContext.destroy(linger=None) + # Destroy the ZMQ context. 
+ self.log.debug("Destroying ZMQ context") + self.zmq_context.destroy(linger=None) def _zmq_test(self): - genhashes = self.nodes[0].generate(1) + num_blocks = 5 + self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks}) + genhashes = self.nodes[0].generate(num_blocks) self.sync_all() - self.log.info("Wait for tx") - msg = self.zmqSubSocket.recv_multipart() - topic = msg[0] - assert_equal(topic, b"hashtx") - txhash = msg[1] - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, 0) # must be sequence 0 on hashtx - - # rawtx - msg = self.zmqSubSocket.recv_multipart() - topic = msg[0] - assert_equal(topic, b"rawtx") - body = msg[1] - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, 0) # must be sequence 0 on rawtx - - # Check that the rawtx hashes to the hashtx - assert_equal(hash256(body), txhash) - - self.log.info("Wait for block") - msg = self.zmqSubSocket.recv_multipart() - topic = msg[0] - assert_equal(topic, b"hashblock") - body = msg[1] - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, 0) # must be sequence 0 on hashblock - blkhash = bytes_to_hex_str(body) - assert_equal(genhashes[0], blkhash) # blockhash from generate must be equal to the hash received over zmq - - # rawblock - msg = self.zmqSubSocket.recv_multipart() - topic = msg[0] - assert_equal(topic, b"rawblock") - body = msg[1] - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, 0) #must be sequence 0 on rawblock - - # Check the hash of the rawblock's header matches generate - assert_equal(genhashes[0], bytes_to_hex_str(hash256(body[:80]))) - - self.log.info("Generate 10 blocks (and 10 coinbase txes)") - n = 10 - genhashes = self.nodes[1].generate(n) - self.sync_all() + for x in range(num_blocks): + # Should receive the coinbase txid. + txid = self.hashtx.receive() + + # Should receive the coinbase raw transaction. + hex = self.rawtx.receive() + tx = CTransaction() + tx.deserialize(BytesIO(hex)) + tx.calc_sha256() + assert_equal(tx.hash, bytes_to_hex_str(txid)) - zmqHashes = [] - zmqRawHashed = [] - blockcount = 0 - for x in range(n * 4): - msg = self.zmqSubSocket.recv_multipart() - topic = msg[0] - body = msg[1] - if topic == b"hashblock": - zmqHashes.append(bytes_to_hex_str(body)) - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, blockcount + 1) - blockcount += 1 - if topic == b"rawblock": - zmqRawHashed.append(bytes_to_hex_str(hash256(body[:80]))) - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, blockcount) - - for x in range(n): - assert_equal(genhashes[x], zmqHashes[x]) # blockhash from generate must be equal to the hash received over zmq - assert_equal(genhashes[x], zmqRawHashed[x]) + # Should receive the generated block hash. + hash = bytes_to_hex_str(self.hashblock.receive()) + assert_equal(genhashes[x], hash) + # The block should only have the coinbase txid. + assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash)["tx"]) + + # Should receive the generated raw block. 
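+            # The block hash is the double-SHA256 of the 80-byte header; the
+            # check below recomputes it from the raw block with the hash256 helper.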
+ block = self.rawblock.receive() + assert_equal(genhashes[x], bytes_to_hex_str(hash256(block[:80]))) self.log.info("Wait for tx from second node") - # test tx from a second node - hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0) + payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0) self.sync_all() - # now we should receive a zmq msg because the tx was broadcast - msg = self.zmqSubSocket.recv_multipart() - topic = msg[0] - assert_equal(topic, b"hashtx") - body = msg[1] - hashZMQ = bytes_to_hex_str(body) - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, blockcount + 1) - - msg = self.zmqSubSocket.recv_multipart() - topic = msg[0] - assert_equal(topic, b"rawtx") - body = msg[1] - hashedZMQ = bytes_to_hex_str(hash256(body)) - msgSequence = struct.unpack('<I', msg[-1])[-1] - assert_equal(msgSequence, blockcount+1) - assert_equal(hashRPC, hashZMQ) # txid from sendtoaddress must be equal to the hash received over zmq - assert_equal(hashRPC, hashedZMQ) + # Should receive the broadcasted txid. + txid = self.hashtx.receive() + assert_equal(payment_txid, bytes_to_hex_str(txid)) + + # Should receive the broadcasted raw transaction. + hex = self.rawtx.receive() + assert_equal(payment_txid, bytes_to_hex_str(hash256(hex))) if __name__ == '__main__': ZMQTest().main() |
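Usage note for the TestNode changes above: the sketch below shows how a functional test might drive the new add_p2p_connection() / node.p2p / disconnect_p2p() helpers. It is illustrative only and not part of this patch; the class name P2PHelperExample is made up, and it assumes the existing NodeConnCB, NetworkThread and msg_ping helpers from test_framework.mininode.

#!/usr/bin/env python3
"""Illustrative sketch: exercising TestNode's P2P connection helpers."""
from test_framework.mininode import NetworkThread, NodeConnCB, msg_ping
from test_framework.test_framework import BitcoinTestFramework

class P2PHelperExample(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        # add_p2p_connection() fills in dstaddr/dstport/rpc/callback,
        # stores the connection in node.p2ps and returns it.
        conn = self.nodes[0].add_p2p_connection(NodeConnCB())
        NetworkThread().start()  # spin up the mininode network thread
        conn.wait_for_verack()

        # node.p2p is shorthand for node.p2ps[0].
        self.nodes[0].p2p.send_message(msg_ping())

        # disconnect_p2p() closes the NodeConn (if still open) and removes
        # the entry from node.p2ps.
        self.nodes[0].disconnect_p2p()

if __name__ == '__main__':
    P2PHelperExample().main()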