From ca6523d0c8a44e0b9193367d1250a7d428d61be3 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Thu, 25 Jan 2018 09:44:29 +1000 Subject: [tests] Rename feature_* functional tests. --- test/functional/README.md | 2 +- test/functional/assumevalid.py | 204 ---- test/functional/bip65-cltv-p2p.py | 160 --- test/functional/bip68-112-113-p2p.py | 534 ---------- test/functional/bip68-sequence.py | 395 -------- test/functional/bip9-softforks.py | 283 ------ test/functional/bipdersig-p2p.py | 153 --- test/functional/conf_args.py | 49 - test/functional/dbcrash.py | 282 ------ test/functional/feature_assumevalid.py | 204 ++++ test/functional/feature_bip68_sequence.py | 395 ++++++++ test/functional/feature_bip9_softforks.py | 283 ++++++ test/functional/feature_block.py | 1293 ++++++++++++++++++++++++ test/functional/feature_cltv.py | 160 +++ test/functional/feature_config_args.py | 49 + test/functional/feature_csv_activation.py | 534 ++++++++++ test/functional/feature_dbcrash.py | 282 ++++++ test/functional/feature_dersig.py | 153 +++ test/functional/feature_fee_estimation.py | 262 +++++ test/functional/feature_maxuploadtarget.py | 169 ++++ test/functional/feature_minchainwork.py | 89 ++ test/functional/feature_notifications.py | 86 ++ test/functional/feature_nulldummy.py | 133 +++ test/functional/feature_proxy.py | 201 ++++ test/functional/feature_pruning.py | 454 +++++++++ test/functional/feature_rbf.py | 565 +++++++++++ test/functional/feature_reindex.py | 40 + test/functional/feature_segwit.py | 638 ++++++++++++ test/functional/feature_uacomment.py | 35 + test/functional/feature_versionbits_warning.py | 121 +++ test/functional/maxuploadtarget.py | 169 ---- test/functional/minchainwork.py | 89 -- test/functional/notifications.py | 86 -- test/functional/nulldummy.py | 133 --- test/functional/p2p-fullblocktest.py | 1293 ------------------------ test/functional/p2p-versionbits-warning.py | 121 --- test/functional/proxy_test.py | 201 ---- test/functional/pruning.py | 454 --------- test/functional/reindex.py | 40 - test/functional/replace-by-fee.py | 565 ----------- test/functional/segwit.py | 638 ------------ test/functional/smartfees.py | 262 ----- test/functional/test_runner.py | 44 +- test/functional/uacomment.py | 35 - 44 files changed, 6169 insertions(+), 6169 deletions(-) delete mode 100755 test/functional/assumevalid.py delete mode 100755 test/functional/bip65-cltv-p2p.py delete mode 100755 test/functional/bip68-112-113-p2p.py delete mode 100755 test/functional/bip68-sequence.py delete mode 100755 test/functional/bip9-softforks.py delete mode 100755 test/functional/bipdersig-p2p.py delete mode 100755 test/functional/conf_args.py delete mode 100755 test/functional/dbcrash.py create mode 100755 test/functional/feature_assumevalid.py create mode 100755 test/functional/feature_bip68_sequence.py create mode 100755 test/functional/feature_bip9_softforks.py create mode 100755 test/functional/feature_block.py create mode 100755 test/functional/feature_cltv.py create mode 100755 test/functional/feature_config_args.py create mode 100755 test/functional/feature_csv_activation.py create mode 100755 test/functional/feature_dbcrash.py create mode 100755 test/functional/feature_dersig.py create mode 100755 test/functional/feature_fee_estimation.py create mode 100755 test/functional/feature_maxuploadtarget.py create mode 100755 test/functional/feature_minchainwork.py create mode 100755 test/functional/feature_notifications.py create mode 100755 test/functional/feature_nulldummy.py create mode 100755 
test/functional/feature_proxy.py create mode 100755 test/functional/feature_pruning.py create mode 100755 test/functional/feature_rbf.py create mode 100755 test/functional/feature_reindex.py create mode 100755 test/functional/feature_segwit.py create mode 100755 test/functional/feature_uacomment.py create mode 100755 test/functional/feature_versionbits_warning.py delete mode 100755 test/functional/maxuploadtarget.py delete mode 100755 test/functional/minchainwork.py delete mode 100755 test/functional/notifications.py delete mode 100755 test/functional/nulldummy.py delete mode 100755 test/functional/p2p-fullblocktest.py delete mode 100755 test/functional/p2p-versionbits-warning.py delete mode 100755 test/functional/proxy_test.py delete mode 100755 test/functional/pruning.py delete mode 100755 test/functional/reindex.py delete mode 100755 test/functional/replace-by-fee.py delete mode 100755 test/functional/segwit.py delete mode 100755 test/functional/smartfees.py delete mode 100755 test/functional/uacomment.py diff --git a/test/functional/README.md b/test/functional/README.md index d6ce490ab3..b3d34c72fd 100644 --- a/test/functional/README.md +++ b/test/functional/README.md @@ -133,7 +133,7 @@ Each `TestInstance` consists of: acceptance is tested against the given outcome. - For examples of tests written in this framework, see - `invalidblockrequest.py` and `p2p-fullblocktest.py`. + `invalidblockrequest.py` and `feature_block.py`. ### test-framework modules diff --git a/test/functional/assumevalid.py b/test/functional/assumevalid.py deleted file mode 100755 index 5a09142412..0000000000 --- a/test/functional/assumevalid.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test logic for skipping signature validation on old blocks. - -Test logic for skipping signature validation on blocks which we've assumed -valid (https://github.com/bitcoin/bitcoin/pull/9484) - -We build a chain that includes an invalid signature for one of the -transactions: - - 0: genesis block - 1: block 1 with coinbase transaction output. - 2-101: bury that block with 100 blocks so the coinbase transaction - output can be spent - 102: a block containing a transaction spending the coinbase - transaction output. The transaction has an invalid signature. - 103-2202: bury the bad block with just over two weeks' worth of blocks - (2100 blocks) - -Start three nodes: - - - node0 has no -assumevalid parameter. Try to sync to block 2202. It will - reject block 102 and only sync as far as block 101 - - node1 has -assumevalid set to the hash of block 102. Try to sync to - block 2202. node1 will sync all the way to block 2202. - - node2 has -assumevalid set to the hash of block 102. Try to sync to - block 200. node2 will reject block 102 since it's assumed valid, but it - isn't buried by at least two weeks' work. 
-""" -import time - -from test_framework.blocktools import (create_block, create_coinbase) -from test_framework.key import CECKey -from test_framework.mininode import (CBlockHeader, - COutPoint, - CTransaction, - CTxIn, - CTxOut, - network_thread_join, - network_thread_start, - P2PInterface, - msg_block, - msg_headers) -from test_framework.script import (CScript, OP_TRUE) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class BaseNode(P2PInterface): - def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() - headers_message.headers = [CBlockHeader(b) for b in new_blocks] - self.send_message(headers_message) - -class AssumeValidTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - - def setup_network(self): - self.add_nodes(3) - # Start node0. We don't start the other nodes yet since - # we need to pre-mine a block with an invalid transaction - # signature so we can pass in the block hash as assumevalid. - self.start_node(0) - - def send_blocks_until_disconnected(self, p2p_conn): - """Keep sending blocks to the node until we're disconnected.""" - for i in range(len(self.blocks)): - if p2p_conn.state != "connected": - break - try: - p2p_conn.send_message(msg_block(self.blocks[i])) - except IOError as e: - assert str(e) == 'Not connected, no pushbuf' - break - - def assert_blockchain_height(self, node, height): - """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" - last_height = node.getblock(node.getbestblockhash())['height'] - timeout = 10 - while True: - time.sleep(0.25) - current_height = node.getblock(node.getbestblockhash())['height'] - if current_height != last_height: - last_height = current_height - if timeout < 0: - assert False, "blockchain too short after timeout: %d" % current_height - timeout - 0.25 - continue - elif current_height > height: - assert False, "blockchain too long: %d" % current_height - elif current_height == height: - break - - def run_test(self): - - # Connect to node0 - p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) - - network_thread_start() - self.nodes[0].p2p.wait_for_verack() - - # Build the blockchain - self.tip = int(self.nodes[0].getbestblockhash(), 16) - self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 - - self.blocks = [] - - # Get a pubkey for the coinbase TXO - coinbase_key = CECKey() - coinbase_key.set_secretbytes(b"horsebattery") - coinbase_pubkey = coinbase_key.get_pubkey() - - # Create the first block with a coinbase output to our key - height = 1 - block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time) - self.blocks.append(block) - self.block_time += 1 - block.solve() - # Save the coinbase for later - self.block1 = block - self.tip = block.sha256 - height += 1 - - # Bury the block 100 deep so the coinbase output is spendable - for i in range(100): - block = create_block(self.tip, create_coinbase(height), self.block_time) - block.solve() - self.blocks.append(block) - self.tip = block.sha256 - self.block_time += 1 - height += 1 - - # Create a transaction spending the coinbase output with an invalid (null) signature - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) - tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) - tx.calc_sha256() - - block102 = create_block(self.tip, create_coinbase(height), self.block_time) - 
self.block_time += 1 - block102.vtx.extend([tx]) - block102.hashMerkleRoot = block102.calc_merkle_root() - block102.rehash() - block102.solve() - self.blocks.append(block102) - self.tip = block102.sha256 - self.block_time += 1 - height += 1 - - # Bury the assumed valid block 2100 deep - for i in range(2100): - block = create_block(self.tip, create_coinbase(height), self.block_time) - block.nVersion = 4 - block.solve() - self.blocks.append(block) - self.tip = block.sha256 - self.block_time += 1 - height += 1 - - # We're adding new connections so terminate the network thread - self.nodes[0].disconnect_p2ps() - network_thread_join() - - # Start node1 and node2 with assumevalid so they accept a block with a bad signature. - self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)]) - self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)]) - - p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) - p2p1 = self.nodes[1].add_p2p_connection(BaseNode()) - p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) - - network_thread_start() - - p2p0.wait_for_verack() - p2p1.wait_for_verack() - p2p2.wait_for_verack() - - # send header lists to all three nodes - p2p0.send_header_for_blocks(self.blocks[0:2000]) - p2p0.send_header_for_blocks(self.blocks[2000:]) - p2p1.send_header_for_blocks(self.blocks[0:2000]) - p2p1.send_header_for_blocks(self.blocks[2000:]) - p2p2.send_header_for_blocks(self.blocks[0:200]) - - # Send blocks to node0. Block 102 will be rejected. - self.send_blocks_until_disconnected(p2p0) - self.assert_blockchain_height(self.nodes[0], 101) - - # Send all blocks to node1. All blocks will be accepted. - for i in range(2202): - p2p1.send_message(msg_block(self.blocks[i])) - # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. - p2p1.sync_with_ping(120) - assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) - - # Send blocks to node2. Block 102 will be rejected. - self.send_blocks_until_disconnected(p2p2) - self.assert_blockchain_height(self.nodes[2], 101) - -if __name__ == '__main__': - AssumeValidTest().main() diff --git a/test/functional/bip65-cltv-p2p.py b/test/functional/bip65-cltv-p2p.py deleted file mode 100755 index f62ae31654..0000000000 --- a/test/functional/bip65-cltv-p2p.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP65 (CHECKLOCKTIMEVERIFY). - -Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height -1351. -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import * -from test_framework.blocktools import create_coinbase, create_block -from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum -from io import BytesIO - -CLTV_HEIGHT = 1351 - -# Reject codes that we might receive in this test -REJECT_INVALID = 16 -REJECT_OBSOLETE = 17 -REJECT_NONSTANDARD = 64 - -def cltv_invalidate(tx): - '''Modify the signature in vin 0 of the tx to fail CLTV - - Prepends -1 CLTV DROP in the scriptSig itself. - - TODO: test more ways that transactions using CLTV could be invalid (eg - locktime requirements fail, sequence time requirements fail, etc). 
- ''' - tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + - list(CScript(tx.vin[0].scriptSig))) - -def cltv_validate(node, tx, height): - '''Modify the signature in vin 0 of the tx to pass CLTV - Prepends CLTV DROP in the scriptSig, and sets - the locktime to height''' - tx.vin[0].nSequence = 0 - tx.nLockTime = height - - # Need to re-sign, since nSequence and nLockTime changed - signed_result = node.signrawtransaction(ToHex(tx)) - new_tx = CTransaction() - new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex']))) - - new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] + - list(CScript(new_tx.vin[0].scriptSig))) - return new_tx - -def create_transaction(node, coinbase, to_address, amount): - from_txid = node.getblock(coinbase)['tx'][0] - inputs = [{ "txid" : from_txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - signresult = node.signrawtransaction(rawtx) - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) - return tx - -class BIP65Test(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] - self.setup_clean_chain = True - - def run_test(self): - self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() - - # wait_for_verack ensures that the P2P connection is fully up. - self.nodes[0].p2p.wait_for_verack() - - self.log.info("Mining %d blocks", CLTV_HEIGHT - 2) - self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) - self.nodeaddress = self.nodes[0].getnewaddress() - - self.log.info("Test that an invalid-according-to-CLTV transaction can still appear in a block") - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], - self.nodeaddress, 1.0) - cltv_invalidate(spendtx) - spendtx.rehash() - - tip = self.nodes[0].getbestblockhash() - block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 - block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time) - block.nVersion = 3 - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - self.log.info("Test that blocks must now be at least version 4") - tip = block.sha256 - block_time += 1 - block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) - block.nVersion = 3 - block.solve() - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)') - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - del self.nodes[0].p2p.last_message["reject"] - - self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block") - block.nVersion = 4 - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], - self.nodeaddress, 1.0) - cltv_invalidate(spendtx) - spendtx.rehash() - - # First we show that this tx is valid except for CLTV by getting it - # accepted to the mempool (which we can achieve with - # -promiscuousmempoolflags). 
- self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) - assert spendtx.hash in self.nodes[0].getrawmempool() - - # Now we verify that a block with this transaction is invalid. - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: - # Generic rejection when a block is invalid - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') - else: - assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason - - self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") - spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1) - spendtx.rehash() - - block.vtx.pop(1) - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) - - -if __name__ == '__main__': - BIP65Test().main() diff --git a/test/functional/bip68-112-113-p2p.py b/test/functional/bip68-112-113-p2p.py deleted file mode 100755 index 82aa0ff891..0000000000 --- a/test/functional/bip68-112-113-p2p.py +++ /dev/null @@ -1,534 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test activation of the first version bits soft fork. - -This soft fork will activate the following BIPS: -BIP 68 - nSequence relative lock times -BIP 112 - CHECKSEQUENCEVERIFY -BIP 113 - MedianTimePast semantics for nLockTime - -regtest lock-in with 108/144 block signalling -activation after a further 144 blocks - -mine 82 blocks whose coinbases will be used to generate inputs for our tests -mine 61 blocks to transition from DEFINED to STARTED -mine 144 blocks, only 100 of which are signaling readiness, in order to fail to change state this period -mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN -mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572 -mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered -mine 1 block and test that enforcement has triggered (which triggers ACTIVE) -Test BIP 113 is enforced -Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height -Mine 1 block so next height is 581 and test BIP 68 now passes time but not height -Mine 1 block so next height is 582 and test BIP 68 now passes time and height -Test that BIP 112 is enforced - -Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates, -and that after the soft fork activates transactions pass and fail as they should according to the rules. -For each BIP, transactions of versions 1 and 2 will be tested. 
----------------- -BIP 113: -bip113tx - modify the nLocktime variable - -BIP 68: -bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below - -BIP 112: -bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP -bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP -bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP -bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP -bip112tx_special - test negative argument to OP_CSV -""" - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.mininode import ToHex, CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block -from test_framework.comptool import TestInstance, TestManager -from test_framework.script import * -from io import BytesIO -import time - -base_relative_locktime = 10 -seq_disable_flag = 1<<31 -seq_random_high_bit = 1<<25 -seq_type_flag = 1<<22 -seq_random_low_bit = 1<<18 - -# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field -# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1 -relative_locktimes = [] -for b31 in range(2): - b25times = [] - for b25 in range(2): - b22times = [] - for b22 in range(2): - b18times = [] - for b18 in range(2): - rlt = base_relative_locktime - if (b31): - rlt = rlt | seq_disable_flag - if (b25): - rlt = rlt | seq_random_high_bit - if (b22): - rlt = rlt | seq_type_flag - if (b18): - rlt = rlt | seq_random_low_bit - b18times.append(rlt) - b22times.append(b18times) - b25times.append(b22times) - relative_locktimes.append(b25times) - -def all_rlt_txs(txarray): - txs = [] - for b31 in range(2): - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - txs.append(txarray[b31][b25][b22][b18]) - return txs - -class BIP68_112_113Test(ComparisonTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']] - - def run_test(self): - test = TestManager(self, self.options.tmpdir) - test.add_all_connections(self.nodes) - network_thread_start() - test.run() - - def send_generic_input_tx(self, node, coinbases): - amount = Decimal("49.99") - return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) - - def create_transaction(self, node, txid, to_address, amount): - inputs = [{ "txid" : txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(rawtx)) - tx.deserialize(f) - return tx - - def sign_transaction(self, node, unsignedtx): - rawtx = ToHex(unsignedtx) - signresult = node.signrawtransaction(rawtx) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(signresult['hex'])) - tx.deserialize(f) - return tx - - def generate_blocks(self, number, version, test_blocks = []): - for i in range(number): - block = self.create_test_block([], version) - test_blocks.append([block, True]) - self.last_block_time += 600 - self.tip = block.sha256 - self.tipheight += 1 - return 
test_blocks - - def create_test_block(self, txs, version = 536870912): - block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600) - block.nVersion = version - block.vtx.extend(txs) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - return block - - def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): - txs = [] - assert(len(bip68inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - tx.nVersion = txversion - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta - b18txs.append(self.sign_transaction(self.nodes[0], tx)) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def create_bip112special(self, input, txversion): - tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98")) - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - return signtx - - def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0): - txs = [] - assert(len(bip112inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed - tx.vin[0].nSequence = base_relative_locktime + locktime_delta - else: # vary nSequence instead, OP_CSV is fixed - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - if (varyOP_CSV): - signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - else: - signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - b18txs.append(signtx) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def get_tests(self): - long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future - self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time - self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs - self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time - self.tipheight = 82 # height of the next block to build - self.last_block_time = long_past_time - self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) - self.nodeaddress = self.nodes[0].getnewaddress() - - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined') - test_blocks = self.generate_blocks(61, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 1 - # Advanced from DEFINED to STARTED, height = 143 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') - - # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0 - # using a variety of 
bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 2 - # Failed to advance past STARTED, height = 287 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') - - # 108 out of 144 signal bit 0 to achieve lock-in - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 3 - # Advanced from STARTED to LOCKED_IN, height = 431 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') - - # 140 more version 4 blocks - test_blocks = self.generate_blocks(140, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 4 - - ### Inputs at height = 572 - # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) - # Note we reuse inputs for v1 and v2 txs so must test these separately - # 16 normal inputs - bip68inputs = [] - for i in range(16): - bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) - # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112basicinputs = [] - for j in range(2): - inputs = [] - for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) - bip112basicinputs.append(inputs) - # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112diverseinputs = [] - for j in range(2): - inputs = [] - for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) - bip112diverseinputs.append(inputs) - # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) - # 1 normal input - bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) - - self.nodes[0].setmocktime(self.last_block_time + 600) - inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 - self.nodes[0].setmocktime(0) - self.tip = int("0x" + inputblockhash, 0) - self.tipheight += 1 - self.last_block_time += 600 - assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1) - - # 2 more version 4 blocks - test_blocks = self.generate_blocks(2, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 5 - # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575) - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') - - # Test both version 1 and version 2 transactions for all tests - # BIP113 test transaction will be modified before each use to put in appropriate block time - bip113tx_v1 = self.create_transaction(self.nodes[0], 
bip113input, self.nodeaddress, Decimal("49.98")) - bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE - bip113tx_v1.nVersion = 1 - bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) - bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE - bip113tx_v2.nVersion = 2 - - # For BIP68 test all 16 relative sequence locktimes - bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) - bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) - - # For BIP112 test: - # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1) - bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2) - # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1) - bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1) - # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1) - bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2) - # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1) - bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1) - # -1 OP_CSV OP_DROP input - bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) - bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) - - - ### TESTING ### - ################################## - ### Before Soft Forks Activate ### - ################################## - # All txs should pass - ### Version 1 txs ### - success_txs = [] - # add BIP113 tx and -1 CSV tx - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - success_txs.append(bip113signed1) - success_txs.append(bip112tx_special_v1) - # add BIP 68 txs - success_txs.extend(all_rlt_txs(bip68txs_v1)) - # add BIP 112 with seq=10 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) - # try BIP 112 with seq=9 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 6 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ### Version 2 txs ### - success_txs = [] - # add BIP113 tx and -1 CSV tx - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) - success_txs.append(bip113signed2) - success_txs.append(bip112tx_special_v2) - # add BIP 68 txs - success_txs.extend(all_rlt_txs(bip68txs_v2)) - # add BIP 112 with seq=10 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) - # try BIP 112 with seq=9 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 7 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - - # 1 
more version 4 block to get us to height 575 so the fork should now be active for the next block - test_blocks = self.generate_blocks(1, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 8 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active') - - - ################################# - ### After Soft Forks Activate ### - ################################# - ### BIP 113 ### - # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) - for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10 - # BIP 113 tests should now pass if the locktime is < MTP - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) - for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - # Next block height = 580 after 4 blocks of random version - test_blocks = self.generate_blocks(4, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 13 - - ### BIP 68 ### - ### Version 1 txs ### - # All still pass - success_txs = [] - success_txs.extend(all_rlt_txs(bip68txs_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 14 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ### Version 2 txs ### - bip68success_txs = [] - # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512 - bip68timetxs = [] - for b25 in range(2): - for b18 in range(2): - bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) - for tx in bip68timetxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19 - bip68heighttxs = [] - for b25 in range(2): - for b18 in range(2): - bip68heighttxs.append(bip68txs_v2[0][b25][0][b18]) - for tx in bip68heighttxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23 - - # Advance one block to 581 - test_blocks = self.generate_blocks(1, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 24 - - # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512 - bip68success_txs.extend(bip68timetxs) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - for tx in bip68heighttxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29 - - # Advance one block to 582 - test_blocks = self.generate_blocks(1, 1234) - yield TestInstance(test_blocks, sync_every_block=False) 
# 30 - - # All BIP 68 txs should pass - bip68success_txs.extend(bip68heighttxs) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - - ### BIP 112 ### - ### Version 1 txs ### - # -1 OP_CSV tx should fail - yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32 - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass - success_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18]) - success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18]) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 33 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail - fail_txs = [] - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18]) - fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18]) - - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81 - - ### Version 2 txs ### - # -1 OP_CSV tx should fail - yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82 - - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met) - success_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV - success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 - - yield TestInstance([[self.create_test_block(success_txs), True]]) # 83 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## - # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check - fail_txs = [] - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9 - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9 - - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107 - - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail - fail_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115 - - # If sequencelock types mismatch, tx should fail - fail_txs = [] - for b25 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence - fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123 - - # Remaining txs should pass, just test masking works properly - success_txs = [] - for b25 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence - 
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV - yield TestInstance([[self.create_test_block(success_txs), True]]) # 124 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - # Additional test, of checking that comparison of two time types works properly - time_txs = [] - for b25 in range(2): - for b18 in range(2): - tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] - tx.vin[0].nSequence = base_relative_locktime | seq_type_flag - signtx = self.sign_transaction(self.nodes[0], tx) - time_txs.append(signtx) - yield TestInstance([[self.create_test_block(time_txs), True]]) # 125 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ### Missing aspects of test - ## Testing empty stack fails - - -if __name__ == '__main__': - BIP68_112_113Test().main() diff --git a/test/functional/bip68-sequence.py b/test/functional/bip68-sequence.py deleted file mode 100755 index 94b13653b9..0000000000 --- a/test/functional/bip68-sequence.py +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP68 implementation.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.blocktools import * - -SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31) -SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height) -SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift -SEQUENCE_LOCKTIME_MASK = 0x0000ffff - -# RPC error for non-BIP68 final transactions -NOT_FINAL_ERROR = "64: non-BIP68-final" - -class BIP68Test(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.extra_args = [[], ["-acceptnonstdtxn=0"]] - - def run_test(self): - self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] - - # Generate some coins - self.nodes[0].generate(110) - - self.log.info("Running test disable flag") - self.test_disable_flag() - - self.log.info("Running test sequence-lock-confirmed-inputs") - self.test_sequence_lock_confirmed_inputs() - - self.log.info("Running test sequence-lock-unconfirmed-inputs") - self.test_sequence_lock_unconfirmed_inputs() - - self.log.info("Running test BIP68 not consensus before versionbits activation") - self.test_bip68_not_consensus() - - self.log.info("Activating BIP68 (and 112/113)") - self.activateCSV() - - self.log.info("Verifying nVersion=2 transactions are standard.") - self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).") - self.test_version2_relay() - - self.log.info("Passed") - - # Test that BIP68 is not in effect if tx version is 1, or if - # the first sequence bit is set. - def test_disable_flag(self): - # Create some unconfirmed inputs - new_addr = self.nodes[0].getnewaddress() - self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC - - utxos = self.nodes[0].listunspent(0, 0) - assert(len(utxos) > 0) - - utxo = utxos[0] - - tx1 = CTransaction() - value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN) - - # Check that the disable flag disables relative locktime. - # If sequence locks were used, this would require 1 block for the - # input to mature. 
- sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 - tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] - tx1.vout = [CTxOut(value, CScript([b'a']))] - - tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"] - tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) - tx1_id = int(tx1_id, 16) - - # This transaction will enable sequence-locks, so this transaction should - # fail - tx2 = CTransaction() - tx2.nVersion = 2 - sequence_value = sequence_value & 0x7fffffff - tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] - tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))] - tx2.rehash() - - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) - - # Setting the version back down to 1 should disable the sequence lock, - # so this should be accepted. - tx2.nVersion = 1 - - self.nodes[0].sendrawtransaction(ToHex(tx2)) - - # Calculate the median time past of a prior block ("confirmations" before - # the current tip). - def get_median_time_past(self, confirmations): - block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations) - return self.nodes[0].getblockheader(block_hash)["mediantime"] - - # Test that sequence locks are respected for transactions spending confirmed inputs. - def test_sequence_lock_confirmed_inputs(self): - # Create lots of confirmed utxos, and use them to generate lots of random - # transactions. - max_outputs = 50 - addresses = [] - while len(addresses) < max_outputs: - addresses.append(self.nodes[0].getnewaddress()) - while len(self.nodes[0].listunspent()) < 200: - import random - random.shuffle(addresses) - num_outputs = random.randint(1, max_outputs) - outputs = {} - for i in range(num_outputs): - outputs[addresses[i]] = random.randint(1, 20)*0.01 - self.nodes[0].sendmany("", outputs) - self.nodes[0].generate(1) - - utxos = self.nodes[0].listunspent() - - # Try creating a lot of random transactions. - # Each time, choose a random number of inputs, and randomly set - # some of those inputs to be sequence locked (and randomly choose - # between height/time locking). Small random chance of making the locks - # all pass. - for i in range(400): - # Randomly choose up to 10 inputs - num_inputs = random.randint(1, 10) - random.shuffle(utxos) - - # Track whether any sequence locks used should fail - should_pass = True - - # Track whether this transaction was built with sequence locks - using_sequence_locks = False - - tx = CTransaction() - tx.nVersion = 2 - value = 0 - for j in range(num_inputs): - sequence_value = 0xfffffffe # this disables sequence locks - - # 50% chance we enable sequence locks - if random.randint(0,1): - using_sequence_locks = True - - # 10% of the time, make the input sequence value pass - input_will_pass = (random.randint(1,10) == 1) - sequence_value = utxos[j]["confirmations"] - if not input_will_pass: - sequence_value += 1 - should_pass = False - - # Figure out what the median-time-past was for the confirmed input - # Note that if an input has N confirmations, we're going back N blocks - # from the tip so that we're looking up MTP of the block - # PRIOR to the one the input appears in, as per the BIP68 spec. 
- orig_time = self.get_median_time_past(utxos[j]["confirmations"]) - cur_time = self.get_median_time_past(0) # MTP of the tip - - # can only timelock this input if it's not too old -- otherwise use height - can_time_lock = True - if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: - can_time_lock = False - - # if time-lockable, then 50% chance we make this a time lock - if random.randint(0,1) and can_time_lock: - # Find first time-lock value that fails, or latest one that succeeds - time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY - if input_will_pass and time_delta > cur_time - orig_time: - sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) - elif (not input_will_pass and time_delta <= cur_time - orig_time): - sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1 - sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG - tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) - value += utxos[j]["amount"]*COIN - # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output - tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50 - tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a']))) - rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"] - - if (using_sequence_locks and not should_pass): - # This transaction should be rejected - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) - else: - # This raw transaction should be accepted - self.nodes[0].sendrawtransaction(rawtx) - utxos = self.nodes[0].listunspent() - - # Test that sequence locks on unconfirmed inputs must have nSequence - # height or time of 0 to be accepted. - # Then test that BIP68-invalid transactions are removed from the mempool - # after a reorg. - def test_sequence_lock_unconfirmed_inputs(self): - # Store height so we can easily reset the chain at the end of the test - cur_height = self.nodes[0].getblockcount() - - # Create a mempool tx. - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) - tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) - tx1.rehash() - - # Anyone-can-spend mempool tx. - # Sequence lock of 0 should pass. - tx2 = CTransaction() - tx2.nVersion = 2 - tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] - tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] - tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] - tx2 = FromHex(tx2, tx2_raw) - tx2.rehash() - - self.nodes[0].sendrawtransaction(tx2_raw) - - # Create a spend of the 0th output of orig_tx with a sequence lock - # of 1, and test what happens when submitting. 
- # orig_tx.vout[0] must be an anyone-can-spend output - def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): - sequence_value = 1 - if not use_height_lock: - sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG - - tx = CTransaction() - tx.nVersion = 2 - tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] - tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))] - tx.rehash() - - if (orig_tx.hash in node.getrawmempool()): - # sendrawtransaction should fail if the tx is in the mempool - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) - else: - # sendrawtransaction should succeed if the tx is not in the mempool - node.sendrawtransaction(ToHex(tx)) - - return tx - - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) - - # Now mine some blocks, but make sure tx2 doesn't get mined. - # Use prioritisetransaction to lower the effective feerate to 0 - self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN)) - cur_time = int(time.time()) - for i in range(10): - self.nodes[0].setmocktime(cur_time + 600) - self.nodes[0].generate(1) - cur_time += 600 - - assert(tx2.hash in self.nodes[0].getrawmempool()) - - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) - - # Mine tx2, and then try again - self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN)) - - # Advance the time on the node so that we can test timelocks - self.nodes[0].setmocktime(cur_time+600) - self.nodes[0].generate(1) - assert(tx2.hash not in self.nodes[0].getrawmempool()) - - # Now that tx2 is not in the mempool, a sequence locked spend should - # succeed - tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) - assert(tx3.hash in self.nodes[0].getrawmempool()) - - self.nodes[0].generate(1) - assert(tx3.hash not in self.nodes[0].getrawmempool()) - - # One more test, this time using height locks - tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True) - assert(tx4.hash in self.nodes[0].getrawmempool()) - - # Now try combining confirmed and unconfirmed inputs - tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True) - assert(tx5.hash not in self.nodes[0].getrawmempool()) - - utxos = self.nodes[0].listunspent() - tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) - tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN) - raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"] - - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) - - # Test mempool-BIP68 consistency after reorg - # - # State of the transactions in the last blocks: - # ... -> [ tx2 ] -> [ tx3 ] - # tip-1 tip - # And currently tx4 is in the mempool. - # - # If we invalidate the tip, tx3 should get added to the mempool, causing - # tx4 to be removed (fails sequence-lock). - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - assert(tx4.hash not in self.nodes[0].getrawmempool()) - assert(tx3.hash in self.nodes[0].getrawmempool()) - - # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in - # diagram above). - # This would cause tx2 to be added back to the mempool, which in turn causes - # tx3 to be removed. 
- tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16) - height = self.nodes[0].getblockcount() - for i in range(2): - block = create_block(tip, create_coinbase(height), cur_time) - block.nVersion = 3 - block.rehash() - block.solve() - tip = block.sha256 - height += 1 - self.nodes[0].submitblock(ToHex(block)) - cur_time += 1 - - mempool = self.nodes[0].getrawmempool() - assert(tx3.hash not in mempool) - assert(tx2.hash in mempool) - - # Reset the chain and get rid of the mocktimed-blocks - self.nodes[0].setmocktime(0) - self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1)) - self.nodes[0].generate(10) - - # Make sure that BIP68 isn't being used to validate blocks, prior to - # versionbits activation. If more blocks are mined prior to this test - # being run, then it's possible the test has activated the soft fork, and - # this test should be moved to run earlier, or deleted. - def test_bip68_not_consensus(self): - assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active') - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) - - tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) - tx1.rehash() - - # Make an anyone-can-spend transaction - tx2 = CTransaction() - tx2.nVersion = 1 - tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] - tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] - - # sign tx2 - tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] - tx2 = FromHex(tx2, tx2_raw) - tx2.rehash() - - self.nodes[0].sendrawtransaction(ToHex(tx2)) - - # Now make an invalid spend of tx2 according to BIP68 - sequence_value = 100 # 100 block relative locktime - - tx3 = CTransaction() - tx3.nVersion = 2 - tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] - tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] - tx3.rehash() - - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) - - # make a block that violates bip68; ensure that the tip updates - tip = int(self.nodes[0].getbestblockhash(), 16) - block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1)) - block.nVersion = 3 - block.vtx.extend([tx1, tx2, tx3]) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - add_witness_commitment(block) - block.solve() - - self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - def activateCSV(self): - # activation should happen at block height 432 (3 periods) - # getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block. - min_activation_height = 432 - height = self.nodes[0].getblockcount() - assert_greater_than(min_activation_height - height, 2) - self.nodes[0].generate(min_activation_height - height - 2) - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in") - self.nodes[0].generate(1) - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active") - sync_blocks(self.nodes) - - # Use self.nodes[1] to test that version 2 transactions are standard. 
- def test_version2_relay(self): - inputs = [ ] - outputs = { self.nodes[1].getnewaddress() : 1.0 } - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] - tx = FromHex(CTransaction(), rawtxfund) - tx.nVersion = 2 - tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"] - self.nodes[1].sendrawtransaction(tx_signed) - -if __name__ == '__main__': - BIP68Test().main() diff --git a/test/functional/bip9-softforks.py b/test/functional/bip9-softforks.py deleted file mode 100755 index ae92e9f07c..0000000000 --- a/test/functional/bip9-softforks.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP 9 soft forks. - -Connect to a single node. -regtest lock-in with 108/144 block signalling -activation after a further 144 blocks -mine 2 block and save coinbases for later use -mine 141 blocks to transition from DEFINED to STARTED -mine 100 blocks signalling readiness and 44 not in order to fail to change state this period -mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN) -mine a further 143 blocks (LOCKED_IN) -test that enforcement has not triggered (which triggers ACTIVE) -test that enforcement has triggered -""" -from io import BytesIO -import shutil -import time -import itertools - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.mininode import CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block -from test_framework.comptool import TestInstance, TestManager -from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP - -class BIP9SoftForksTest(ComparisonTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-whitelist=127.0.0.1']] - self.setup_clean_chain = True - - def run_test(self): - self.test = TestManager(self, self.options.tmpdir) - self.test.add_all_connections(self.nodes) - network_thread_start() - self.test.run() - - def create_transaction(self, node, coinbase, to_address, amount): - from_txid = node.getblock(coinbase)['tx'][0] - inputs = [{ "txid" : from_txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(rawtx)) - tx.deserialize(f) - tx.nVersion = 2 - return tx - - def sign_transaction(self, node, tx): - signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize())) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(signresult['hex'])) - tx.deserialize(f) - return tx - - def generate_blocks(self, number, version, test_blocks = []): - for i in range(number): - block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) - block.nVersion = version - block.rehash() - block.solve() - test_blocks.append([block, True]) - self.last_block_time += 1 - self.tip = block.sha256 - self.height += 1 - return test_blocks - - def get_bip9_status(self, key): - info = self.nodes[0].getblockchaininfo() - return info['bip9_softforks'][key] - - def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno): - assert_equal(self.get_bip9_status(bipName)['status'], 'defined') - assert_equal(self.get_bip9_status(bipName)['since'], 
0) - - # generate some coins for later - self.coinbase_blocks = self.nodes[0].generate(2) - self.height = 3 # height of the next block to build - self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) - self.nodeaddress = self.nodes[0].getnewaddress() - self.last_block_time = int(time.time()) - - assert_equal(self.get_bip9_status(bipName)['status'], 'defined') - assert_equal(self.get_bip9_status(bipName)['since'], 0) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert(bipName not in tmpl['vbavailable']) - assert_equal(tmpl['vbrequired'], 0) - assert_equal(tmpl['version'], 0x20000000) - - # Test 1 - # Advance from DEFINED to STARTED - test_blocks = self.generate_blocks(141, 4) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - assert_equal(self.get_bip9_status(bipName)['since'], 144) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert_equal(tmpl['vbavailable'][bipName], bitno) - assert_equal(tmpl['vbrequired'], 0) - assert(tmpl['version'] & activated_version) - - # Test 1-A - # check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period - test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - - # Test 1-B - # check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period - test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False) - - # Test 1-C - # finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN - test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - - # Test 2 - # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1 - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - 
assert_equal(self.get_bip9_status(bipName)['since'], 144) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert_equal(tmpl['vbavailable'][bipName], bitno) - assert_equal(tmpl['vbrequired'], 0) - assert(tmpl['version'] & activated_version) - - # Test 3 - # 108 out of 144 signal bit 1 to achieve LOCKED_IN - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) - - # check counting stats and "possible" flag before last block of this period achieves LOCKED_IN... - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - - # ...continue with Test 3 - test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') - assert_equal(self.get_bip9_status(bipName)['since'], 576) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - - # Test 4 - # 143 more version 536870913 blocks (waiting period-1) - test_blocks = self.generate_blocks(143, 4) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') - assert_equal(self.get_bip9_status(bipName)['since'], 576) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - - # Test 5 - # Check that the new rule is enforced - spendtx = self.create_transaction(self.nodes[0], - self.coinbase_blocks[0], self.nodeaddress, 1.0) - invalidate(spendtx) - spendtx = self.sign_transaction(self.nodes[0], spendtx) - spendtx.rehash() - invalidatePostSignature(spendtx) - spendtx.rehash() - block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) - block.nVersion = activated_version - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.last_block_time += 1 - self.tip = block.sha256 - self.height += 1 - yield TestInstance([[block, True]]) - - assert_equal(self.get_bip9_status(bipName)['status'], 'active') - assert_equal(self.get_bip9_status(bipName)['since'], 720) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName in tmpl['rules']) - assert(bipName not in tmpl['vbavailable']) - assert_equal(tmpl['vbrequired'], 0) - assert(not (tmpl['version'] & (1 << bitno))) - - # Test 6 - # Check that the new sequence lock rules are enforced - spendtx = self.create_transaction(self.nodes[0], - self.coinbase_blocks[1], self.nodeaddress, 1.0) - invalidate(spendtx) - spendtx = self.sign_transaction(self.nodes[0], spendtx) - spendtx.rehash() - invalidatePostSignature(spendtx) - spendtx.rehash() - - block = create_block(self.tip, create_coinbase(self.height), 
self.last_block_time + 1)
-        block.nVersion = 5
-        block.vtx.append(spendtx)
-        block.hashMerkleRoot = block.calc_merkle_root()
-        block.rehash()
-        block.solve()
-        self.last_block_time += 1
-        yield TestInstance([[block, False]])
-
-        # Restart all
-        self.test.clear_all_connections()
-        self.stop_nodes()
-        self.nodes = []
-        shutil.rmtree(self.options.tmpdir + "/node0")
-        self.setup_chain()
-        self.setup_network()
-        self.test.add_all_connections(self.nodes)
-        network_thread_start()
-        self.test.p2p_connections[0].wait_for_verack()
-
-    def get_tests(self):
-        for test in itertools.chain(
-                self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
-                self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
-                self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
-        ):
-            yield test
-
-    def donothing(self, tx):
-        return
-
-    def csv_invalidate(self, tx):
-        """Modify the signature in vin 0 of the tx to fail CSV.
-        Prepends -1 CSV DROP in the scriptSig itself.
-        """
-        tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
-                                      list(CScript(tx.vin[0].scriptSig)))
-
-    def sequence_lock_invalidate(self, tx):
-        """Modify the nSequence to make it fail once the sequence lock rule is
-        activated (high timespan).
-        """
-        tx.vin[0].nSequence = 0x00FFFFFF
-        tx.nLockTime = 0
-
-    def mtp_invalidate(self, tx):
-        """Modify the nLockTime to make it fail once the MTP rule is activated."""
-        # Disable Sequence lock, Activate nLockTime
-        tx.vin[0].nSequence = 0x90FFFFFF
-        tx.nLockTime = self.last_block_time
-
-if __name__ == '__main__':
-    BIP9SoftForksTest().main()
diff --git a/test/functional/bipdersig-p2p.py b/test/functional/bipdersig-p2p.py
deleted file mode 100755
index 3414571678..0000000000
--- a/test/functional/bipdersig-p2p.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test BIP66 (DER SIG).
-
-Test that the DERSIG soft-fork activates at (regtest) height 1251.
-"""
-
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
-from test_framework.mininode import *
-from test_framework.blocktools import create_coinbase, create_block
-from test_framework.script import CScript
-from io import BytesIO
-
-DERSIG_HEIGHT = 1251
-
-# Reject codes that we might receive in this test
-REJECT_INVALID = 16
-REJECT_OBSOLETE = 17
-REJECT_NONSTANDARD = 64
-
-# A canonical signature consists of:
-# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
-def unDERify(tx):
-    """
-    Make the signature in vin 0 of a tx non-DER-compliant,
-    by adding padding after the S-value.
- """ - scriptSig = CScript(tx.vin[0].scriptSig) - newscript = [] - for i in scriptSig: - if (len(newscript) == 0): - newscript.append(i[0:-1] + b'\0' + i[-1:]) - else: - newscript.append(i) - tx.vin[0].scriptSig = CScript(newscript) - -def create_transaction(node, coinbase, to_address, amount): - from_txid = node.getblock(coinbase)['tx'][0] - inputs = [{ "txid" : from_txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - signresult = node.signrawtransaction(rawtx) - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) - return tx - -class BIP66Test(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] - self.setup_clean_chain = True - - def run_test(self): - self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() - - # wait_for_verack ensures that the P2P connection is fully up. - self.nodes[0].p2p.wait_for_verack() - - self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2) - self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2) - self.nodeaddress = self.nodes[0].getnewaddress() - - self.log.info("Test that a transaction with non-DER signature can still appear in a block") - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], - self.nodeaddress, 1.0) - unDERify(spendtx) - spendtx.rehash() - - tip = self.nodes[0].getbestblockhash() - block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 - block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time) - block.nVersion = 2 - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - self.log.info("Test that blocks must now be at least version 3") - tip = block.sha256 - block_time += 1 - block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time) - block.nVersion = 2 - block.rehash() - block.solve() - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)') - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - del self.nodes[0].p2p.last_message["reject"] - - self.log.info("Test that transactions with non-DER signatures cannot appear in a block") - block.nVersion = 3 - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], - self.nodeaddress, 1.0) - unDERify(spendtx) - spendtx.rehash() - - # First we show that this tx is valid except for DERSIG by getting it - # accepted to the mempool (which we can achieve with - # -promiscuousmempoolflags). - self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) - assert spendtx.hash in self.nodes[0].getrawmempool() - - # Now we verify that a block with this transaction is invalid. 
- block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - # We can receive different reject messages depending on whether - # bitcoind is running with multiple script check threads. If script - # check threads are not in use, then transaction script validation - # happens sequentially, and bitcoind produces more specific reject - # reasons. - assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: - # Generic rejection when a block is invalid - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') - else: - assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason - - self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted") - block.vtx[1] = create_transaction(self.nodes[0], - self.coinbase_blocks[1], self.nodeaddress, 1.0) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) - -if __name__ == '__main__': - BIP66Test().main() diff --git a/test/functional/conf_args.py b/test/functional/conf_args.py deleted file mode 100755 index 61abba8082..0000000000 --- a/test/functional/conf_args.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test various command line arguments and configuration file parameters.""" - -import os - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import get_datadir_path - -class ConfArgsTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def run_test(self): - self.stop_node(0) - # Remove the -datadir argument so it doesn't override the config file - self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")] - - default_data_dir = get_datadir_path(self.options.tmpdir, 0) - new_data_dir = os.path.join(default_data_dir, 'newdatadir') - new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2') - - # Check that using -datadir argument on non-existent directory fails - self.nodes[0].datadir = new_data_dir - self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.') - - # Check that using non-existent datadir in conf file fails - conf_file = os.path.join(default_data_dir, "bitcoin.conf") - with open(conf_file, 'a', encoding='utf8') as f: - f.write("datadir=" + new_data_dir + "\n") - self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.') - - # Create the directory and ensure the config file now works - os.mkdir(new_data_dir) - self.start_node(0, ['-conf='+conf_file, '-wallet=w1']) - self.stop_node(0) - assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1')) - - # Ensure command line argument overrides datadir in conf - os.mkdir(new_data_dir_2) - self.nodes[0].datadir = new_data_dir_2 - self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2']) - assert os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2')) - -if __name__ == '__main__': - ConfArgsTest().main() diff --git a/test/functional/dbcrash.py b/test/functional/dbcrash.py deleted file mode 100755 index 24b9765b4e..0000000000 --- a/test/functional/dbcrash.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test recovery from a crash during chainstate writing. - -- 4 nodes - * node0, node1, and node2 will have different dbcrash ratios, and different - dbcache sizes - * node3 will be a regular node, with no crashing. - * The nodes will not connect to each other. - -- use default test framework starting chain. initialize starting_tip_height to - tip height. - -- Main loop: - * generate lots of transactions on node3, enough to fill up a block. - * uniformly randomly pick a tip height from starting_tip_height to - tip_height; with probability 1/(height_difference+4), invalidate this block. - * mine enough blocks to overtake tip_height at start of loop. 
- * for each node in [node0,node1,node2]: - - for each mined block: - * submit block to node - * if node crashed on/after submitting: - - restart until recovery succeeds - - check that utxo matches node3 using gettxoutsetinfo""" - -import errno -import http.client -import random -import sys -import time - -from test_framework.mininode import * -from test_framework.script import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest] -try: - HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected) -except AttributeError: - pass - -class ChainstateWriteCrashTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = False - - # Set -maxmempool=0 to turn off mempool memory sharing with dbcache - # Set -rpcservertimeout=900 to reduce socket disconnects in this - # long-running test - self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"] - - # Set different crash ratios and cache sizes. Note that not all of - # -dbcache goes to pcoinsTip. - self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args - self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args - self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args - - # Node3 is a normal node with default args, except will mine full blocks - self.node3_args = ["-blockmaxweight=4000000"] - self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args] - - def setup_network(self): - # Need a bit of extra time for the nodes to start up for this test - self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90) - self.start_nodes() - # Leave them unconnected, we'll use submitblock directly in this test - - def restart_node(self, node_index, expected_tip): - """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash. - - Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up - after 60 seconds. Returns the utxo hash of the given node.""" - - time_start = time.time() - while time.time() - time_start < 120: - try: - # Any of these RPC calls could throw due to node crash - self.start_node(node_index) - self.nodes[node_index].waitforblock(expected_tip) - utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2'] - return utxo_hash - except: - # An exception here should mean the node is about to crash. - # If bitcoind exits, then try again. wait_for_node_exit() - # should raise an exception if bitcoind doesn't exit. - self.wait_for_node_exit(node_index, timeout=10) - self.crashed_on_restart += 1 - time.sleep(1) - - # If we got here, bitcoind isn't coming back up on restart. Could be a - # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio -- - # perhaps we generated a test case that blew up our cache? - # TODO: If this happens a lot, we should try to restart without -dbcrashratio - # and make sure that recovery happens. - raise AssertionError("Unable to successfully restart node %d in allotted time", node_index) - - def submit_block_catch_error(self, node_index, block): - """Try submitting a block to the given node. - - Catch any exceptions that indicate the node has crashed. 
- Returns true if the block was submitted successfully; false otherwise.""" - - try: - self.nodes[node_index].submitblock(block) - return True - except http.client.BadStatusLine as e: - # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error. - if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''": - self.log.debug("node %d submitblock raised exception: %s", node_index, e) - return False - else: - raise - except tuple(HTTP_DISCONNECT_ERRORS) as e: - self.log.debug("node %d submitblock raised exception: %s", node_index, e) - return False - except OSError as e: - self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno) - if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]: - # The node has likely crashed - return False - else: - # Unexpected exception, raise - raise - - def sync_node3blocks(self, block_hashes): - """Use submitblock to sync node3's chain with the other nodes - - If submitblock fails, restart the node and get the new utxo hash. - If any nodes crash while updating, we'll compare utxo hashes to - ensure recovery was successful.""" - - node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] - - # Retrieve all the blocks from node3 - blocks = [] - for block_hash in block_hashes: - blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)]) - - # Deliver each block to each other node - for i in range(3): - nodei_utxo_hash = None - self.log.debug("Syncing blocks to node %d", i) - for (block_hash, block) in blocks: - # Get the block from node3, and submit to node_i - self.log.debug("submitting block %s", block_hash) - if not self.submit_block_catch_error(i, block): - # TODO: more carefully check that the crash is due to -dbcrashratio - # (change the exit code perhaps, and check that here?) - self.wait_for_node_exit(i, timeout=30) - self.log.debug("Restarting node %d after block hash %s", i, block_hash) - nodei_utxo_hash = self.restart_node(i, block_hash) - assert nodei_utxo_hash is not None - self.restart_counts[i] += 1 - else: - # Clear it out after successful submitblock calls -- the cached - # utxo hash will no longer be correct - nodei_utxo_hash = None - - # Check that the utxo hash matches node3's utxo set - # NOTE: we only check the utxo set if we had to restart the node - # after the last block submitted: - # - checking the utxo hash causes a cache flush, which we don't - # want to do every time; so - # - we only update the utxo cache after a node restart, since flushing - # the cache is a no-op at that point - if nodei_utxo_hash is not None: - self.log.debug("Checking txoutsetinfo matches for node %d", i) - assert_equal(nodei_utxo_hash, node3_utxo_hash) - - def verify_utxo_hash(self): - """Verify that the utxo hash of each node matches node3. 
- - Restart any nodes that crash while querying.""" - node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] - self.log.info("Verifying utxo hash matches for all nodes") - - for i in range(3): - try: - nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2'] - except OSError: - # probably a crash on db flushing - nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash()) - assert_equal(nodei_utxo_hash, node3_utxo_hash) - - def generate_small_transactions(self, node, count, utxo_list): - FEE = 1000 # TODO: replace this with node relay fee based calculation - num_transactions = 0 - random.shuffle(utxo_list) - while len(utxo_list) >= 2 and num_transactions < count: - tx = CTransaction() - input_amount = 0 - for i in range(2): - utxo = utxo_list.pop() - tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) - input_amount += int(utxo['amount'] * COIN) - output_amount = (input_amount - FEE) // 3 - - if output_amount <= 0: - # Sanity check -- if we chose inputs that are too small, skip - continue - - for i in range(3): - tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) - - # Sign and send the transaction to get into the mempool - tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex'] - node.sendrawtransaction(tx_signed_hex) - num_transactions += 1 - - def run_test(self): - # Track test coverage statistics - self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2 - self.crashed_on_restart = 0 # Track count of crashes during recovery - - # Start by creating a lot of utxos on node3 - initial_height = self.nodes[3].getblockcount() - utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000) - self.log.info("Prepped %d utxo entries", len(utxo_list)) - - # Sync these blocks with the other nodes - block_hashes_to_sync = [] - for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): - block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) - - self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync)) - # Syncing the blocks could cause nodes to crash, so the test begins here. - self.sync_node3blocks(block_hashes_to_sync) - - starting_tip_height = self.nodes[3].getblockcount() - - # Main test loop: - # each time through the loop, generate a bunch of transactions, - # and then either mine a single new block on the tip, or some-sized reorg. - for i in range(40): - self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts) - # Generate a bunch of small-ish transactions - self.generate_small_transactions(self.nodes[3], 2500, utxo_list) - # Pick a random block between current tip, and starting tip - current_height = self.nodes[3].getblockcount() - random_height = random.randint(starting_tip_height, current_height) - self.log.debug("At height %d, considering height %d", current_height, random_height) - if random_height > starting_tip_height: - # Randomly reorg from this point with some probability (1/4 for - # tip, 1/5 for tip-1, ...) 
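The check that follows implements the odds quoted in the comment above: a candidate at random_height is invalidated with probability 1/(current_height + 4 - random_height), so the tip itself reorgs one time in four and deeper candidates progressively less often. A standalone sketch of those odds (illustrative only, with an assumed tip height of 300; not part of the test):

    # Probability that the dbcrash loop invalidates a block at a given height.
    def invalidate_probability(current_height, random_height):
        return 1.0 / (current_height + 4 - random_height)

    assert invalidate_probability(300, 300) == 1 / 4    # the tip itself
    assert invalidate_probability(300, 299) == 1 / 5    # one block back
    assert invalidate_probability(300, 290) == 1 / 14   # ten blocks back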
-                if random.random() < 1.0 / (current_height + 4 - random_height):
-                    self.log.debug("Invalidating block at height %d", random_height)
-                    self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
-
-            # Now generate new blocks until we pass the old tip height
-            self.log.debug("Mining longer tip")
-            block_hashes = []
-            while current_height + 1 > self.nodes[3].getblockcount():
-                block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
-            self.log.debug("Syncing %d new blocks...", len(block_hashes))
-            self.sync_node3blocks(block_hashes)
-            utxo_list = self.nodes[3].listunspent()
-            self.log.debug("Node3 utxo count: %d", len(utxo_list))
-
-        # Check that the utxo hashes agree with node3
-        # Useful side effect: each utxo cache gets flushed here, so that we
-        # won't get crashes on shutdown at the end of the test.
-        self.verify_utxo_hash()
-
-        # Check the test coverage
-        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
-
-        # If no nodes were restarted, we didn't test anything.
-        assert self.restart_counts != [0, 0, 0]
-
-        # Make sure we tested the case of crash-during-recovery.
-        assert self.crashed_on_restart > 0
-
-        # Warn if any of the nodes escaped restart.
-        for i in range(3):
-            if self.restart_counts[i] == 0:
-                self.log.warn("Node %d never crashed during utxo flush!", i)
-
-if __name__ == "__main__":
-    ChainstateWriteCrashTest().main()
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
new file mode 100755
index 0000000000..5a09142412
--- /dev/null
+++ b/test/functional/feature_assumevalid.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for skipping signature validation on old blocks.
+
+Test logic for skipping signature validation on blocks which we've assumed
+valid (https://github.com/bitcoin/bitcoin/pull/9484)
+
+We build a chain that includes an invalid signature for one of the
+transactions:
+
+    0:          genesis block
+    1:          block 1 with coinbase transaction output.
+    2-101:      bury that block with 100 blocks so the coinbase transaction
+                output can be spent
+    102:        a block containing a transaction spending the coinbase
+                transaction output. The transaction has an invalid signature.
+    103-2202:   bury the bad block with just over two weeks' worth of blocks
+                (2100 blocks)
+
+Start three nodes:
+
+    - node0 has no -assumevalid parameter. Try to sync to block 2202. It will
+      reject block 102 and only sync as far as block 101
+    - node1 has -assumevalid set to the hash of block 102. Try to sync to
+      block 2202. node1 will sync all the way to block 2202.
+    - node2 has -assumevalid set to the hash of block 102. Try to sync to
+      block 200. node2 will reject block 102 since it's assumed valid, but it
+      isn't buried by at least two weeks' work.
+""" +import time + +from test_framework.blocktools import (create_block, create_coinbase) +from test_framework.key import CECKey +from test_framework.mininode import (CBlockHeader, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + network_thread_join, + network_thread_start, + P2PInterface, + msg_block, + msg_headers) +from test_framework.script import (CScript, OP_TRUE) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class BaseNode(P2PInterface): + def send_header_for_blocks(self, new_blocks): + headers_message = msg_headers() + headers_message.headers = [CBlockHeader(b) for b in new_blocks] + self.send_message(headers_message) + +class AssumeValidTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + + def setup_network(self): + self.add_nodes(3) + # Start node0. We don't start the other nodes yet since + # we need to pre-mine a block with an invalid transaction + # signature so we can pass in the block hash as assumevalid. + self.start_node(0) + + def send_blocks_until_disconnected(self, p2p_conn): + """Keep sending blocks to the node until we're disconnected.""" + for i in range(len(self.blocks)): + if p2p_conn.state != "connected": + break + try: + p2p_conn.send_message(msg_block(self.blocks[i])) + except IOError as e: + assert str(e) == 'Not connected, no pushbuf' + break + + def assert_blockchain_height(self, node, height): + """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" + last_height = node.getblock(node.getbestblockhash())['height'] + timeout = 10 + while True: + time.sleep(0.25) + current_height = node.getblock(node.getbestblockhash())['height'] + if current_height != last_height: + last_height = current_height + if timeout < 0: + assert False, "blockchain too short after timeout: %d" % current_height + timeout - 0.25 + continue + elif current_height > height: + assert False, "blockchain too long: %d" % current_height + elif current_height == height: + break + + def run_test(self): + + # Connect to node0 + p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) + + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + # Build the blockchain + self.tip = int(self.nodes[0].getbestblockhash(), 16) + self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 + + self.blocks = [] + + # Get a pubkey for the coinbase TXO + coinbase_key = CECKey() + coinbase_key.set_secretbytes(b"horsebattery") + coinbase_pubkey = coinbase_key.get_pubkey() + + # Create the first block with a coinbase output to our key + height = 1 + block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time) + self.blocks.append(block) + self.block_time += 1 + block.solve() + # Save the coinbase for later + self.block1 = block + self.tip = block.sha256 + height += 1 + + # Bury the block 100 deep so the coinbase output is spendable + for i in range(100): + block = create_block(self.tip, create_coinbase(height), self.block_time) + block.solve() + self.blocks.append(block) + self.tip = block.sha256 + self.block_time += 1 + height += 1 + + # Create a transaction spending the coinbase output with an invalid (null) signature + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) + tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) + tx.calc_sha256() + + block102 = create_block(self.tip, create_coinbase(height), self.block_time) + 
self.block_time += 1 + block102.vtx.extend([tx]) + block102.hashMerkleRoot = block102.calc_merkle_root() + block102.rehash() + block102.solve() + self.blocks.append(block102) + self.tip = block102.sha256 + self.block_time += 1 + height += 1 + + # Bury the assumed valid block 2100 deep + for i in range(2100): + block = create_block(self.tip, create_coinbase(height), self.block_time) + block.nVersion = 4 + block.solve() + self.blocks.append(block) + self.tip = block.sha256 + self.block_time += 1 + height += 1 + + # We're adding new connections so terminate the network thread + self.nodes[0].disconnect_p2ps() + network_thread_join() + + # Start node1 and node2 with assumevalid so they accept a block with a bad signature. + self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)]) + self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)]) + + p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) + p2p1 = self.nodes[1].add_p2p_connection(BaseNode()) + p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) + + network_thread_start() + + p2p0.wait_for_verack() + p2p1.wait_for_verack() + p2p2.wait_for_verack() + + # send header lists to all three nodes + p2p0.send_header_for_blocks(self.blocks[0:2000]) + p2p0.send_header_for_blocks(self.blocks[2000:]) + p2p1.send_header_for_blocks(self.blocks[0:2000]) + p2p1.send_header_for_blocks(self.blocks[2000:]) + p2p2.send_header_for_blocks(self.blocks[0:200]) + + # Send blocks to node0. Block 102 will be rejected. + self.send_blocks_until_disconnected(p2p0) + self.assert_blockchain_height(self.nodes[0], 101) + + # Send all blocks to node1. All blocks will be accepted. + for i in range(2202): + p2p1.send_message(msg_block(self.blocks[i])) + # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. + p2p1.sync_with_ping(120) + assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) + + # Send blocks to node2. Block 102 will be rejected. + self.send_blocks_until_disconnected(p2p2) + self.assert_blockchain_height(self.nodes[2], 101) + +if __name__ == '__main__': + AssumeValidTest().main() diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py new file mode 100755 index 0000000000..94b13653b9 --- /dev/null +++ b/test/functional/feature_bip68_sequence.py @@ -0,0 +1,395 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test BIP68 implementation.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.blocktools import * + +SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31) +SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height) +SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift +SEQUENCE_LOCKTIME_MASK = 0x0000ffff + +# RPC error for non-BIP68 final transactions +NOT_FINAL_ERROR = "64: non-BIP68-final" + +class BIP68Test(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.extra_args = [[], ["-acceptnonstdtxn=0"]] + + def run_test(self): + self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] + + # Generate some coins + self.nodes[0].generate(110) + + self.log.info("Running test disable flag") + self.test_disable_flag() + + self.log.info("Running test sequence-lock-confirmed-inputs") + self.test_sequence_lock_confirmed_inputs() + + self.log.info("Running test sequence-lock-unconfirmed-inputs") + self.test_sequence_lock_unconfirmed_inputs() + + self.log.info("Running test BIP68 not consensus before versionbits activation") + self.test_bip68_not_consensus() + + self.log.info("Activating BIP68 (and 112/113)") + self.activateCSV() + + self.log.info("Verifying nVersion=2 transactions are standard.") + self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).") + self.test_version2_relay() + + self.log.info("Passed") + + # Test that BIP68 is not in effect if tx version is 1, or if + # the first sequence bit is set. + def test_disable_flag(self): + # Create some unconfirmed inputs + new_addr = self.nodes[0].getnewaddress() + self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC + + utxos = self.nodes[0].listunspent(0, 0) + assert(len(utxos) > 0) + + utxo = utxos[0] + + tx1 = CTransaction() + value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN) + + # Check that the disable flag disables relative locktime. + # If sequence locks were used, this would require 1 block for the + # input to mature. + sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 + tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] + tx1.vout = [CTxOut(value, CScript([b'a']))] + + tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"] + tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) + tx1_id = int(tx1_id, 16) + + # This transaction will enable sequence-locks, so this transaction should + # fail + tx2 = CTransaction() + tx2.nVersion = 2 + sequence_value = sequence_value & 0x7fffffff + tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] + tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))] + tx2.rehash() + + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) + + # Setting the version back down to 1 should disable the sequence lock, + # so this should be accepted. + tx2.nVersion = 1 + + self.nodes[0].sendrawtransaction(ToHex(tx2)) + + # Calculate the median time past of a prior block ("confirmations" before + # the current tip). + def get_median_time_past(self, confirmations): + block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations) + return self.nodes[0].getblockheader(block_hash)["mediantime"] + + # Test that sequence locks are respected for transactions spending confirmed inputs. 
+ def test_sequence_lock_confirmed_inputs(self): + # Create lots of confirmed utxos, and use them to generate lots of random + # transactions. + max_outputs = 50 + addresses = [] + while len(addresses) < max_outputs: + addresses.append(self.nodes[0].getnewaddress()) + while len(self.nodes[0].listunspent()) < 200: + import random + random.shuffle(addresses) + num_outputs = random.randint(1, max_outputs) + outputs = {} + for i in range(num_outputs): + outputs[addresses[i]] = random.randint(1, 20)*0.01 + self.nodes[0].sendmany("", outputs) + self.nodes[0].generate(1) + + utxos = self.nodes[0].listunspent() + + # Try creating a lot of random transactions. + # Each time, choose a random number of inputs, and randomly set + # some of those inputs to be sequence locked (and randomly choose + # between height/time locking). Small random chance of making the locks + # all pass. + for i in range(400): + # Randomly choose up to 10 inputs + num_inputs = random.randint(1, 10) + random.shuffle(utxos) + + # Track whether any sequence locks used should fail + should_pass = True + + # Track whether this transaction was built with sequence locks + using_sequence_locks = False + + tx = CTransaction() + tx.nVersion = 2 + value = 0 + for j in range(num_inputs): + sequence_value = 0xfffffffe # this disables sequence locks + + # 50% chance we enable sequence locks + if random.randint(0,1): + using_sequence_locks = True + + # 10% of the time, make the input sequence value pass + input_will_pass = (random.randint(1,10) == 1) + sequence_value = utxos[j]["confirmations"] + if not input_will_pass: + sequence_value += 1 + should_pass = False + + # Figure out what the median-time-past was for the confirmed input + # Note that if an input has N confirmations, we're going back N blocks + # from the tip so that we're looking up MTP of the block + # PRIOR to the one the input appears in, as per the BIP68 spec. 
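get_median_time_past above simply reads the mediantime field that the node computes; for reference, median-time-past is the median of the previous 11 block timestamps, which makes it monotonic even when individual block times are not. A self-contained cross-check (pure Python, sample timestamps assumed) before the code below fetches orig_time and cur_time from the node:

    # Median-time-past: the median of the last 11 block timestamps.
    def median_time_past(timestamps):
        window = sorted(timestamps[-11:])
        return window[len(window) // 2]

    import random
    times = [1510000000 + 600 * i for i in range(11)]   # 11 blocks, 10 minutes apart
    assert median_time_past(times) == times[5]          # the middle timestamp
    shuffled = random.sample(times, len(times))         # order within the window is irrelevant
    assert median_time_past(shuffled) == 1510000000 + 600 * 5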
+ orig_time = self.get_median_time_past(utxos[j]["confirmations"]) + cur_time = self.get_median_time_past(0) # MTP of the tip + + # can only timelock this input if it's not too old -- otherwise use height + can_time_lock = True + if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: + can_time_lock = False + + # if time-lockable, then 50% chance we make this a time lock + if random.randint(0,1) and can_time_lock: + # Find first time-lock value that fails, or latest one that succeeds + time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY + if input_will_pass and time_delta > cur_time - orig_time: + sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + elif (not input_will_pass and time_delta <= cur_time - orig_time): + sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1 + sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG + tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) + value += utxos[j]["amount"]*COIN + # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output + tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50 + tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a']))) + rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"] + + if (using_sequence_locks and not should_pass): + # This transaction should be rejected + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) + else: + # This raw transaction should be accepted + self.nodes[0].sendrawtransaction(rawtx) + utxos = self.nodes[0].listunspent() + + # Test that sequence locks on unconfirmed inputs must have nSequence + # height or time of 0 to be accepted. + # Then test that BIP68-invalid transactions are removed from the mempool + # after a reorg. + def test_sequence_lock_unconfirmed_inputs(self): + # Store height so we can easily reset the chain at the end of the test + cur_height = self.nodes[0].getblockcount() + + # Create a mempool tx. + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) + tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) + tx1.rehash() + + # Anyone-can-spend mempool tx. + # Sequence lock of 0 should pass. + tx2 = CTransaction() + tx2.nVersion = 2 + tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] + tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] + tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] + tx2 = FromHex(tx2, tx2_raw) + tx2.rehash() + + self.nodes[0].sendrawtransaction(tx2_raw) + + # Create a spend of the 0th output of orig_tx with a sequence lock + # of 1, and test what happens when submitting. 
+ # orig_tx.vout[0] must be an anyone-can-spend output + def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): + sequence_value = 1 + if not use_height_lock: + sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG + + tx = CTransaction() + tx.nVersion = 2 + tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] + tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))] + tx.rehash() + + if (orig_tx.hash in node.getrawmempool()): + # sendrawtransaction should fail if the tx is in the mempool + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) + else: + # sendrawtransaction should succeed if the tx is not in the mempool + node.sendrawtransaction(ToHex(tx)) + + return tx + + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) + + # Now mine some blocks, but make sure tx2 doesn't get mined. + # Use prioritisetransaction to lower the effective feerate to 0 + self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN)) + cur_time = int(time.time()) + for i in range(10): + self.nodes[0].setmocktime(cur_time + 600) + self.nodes[0].generate(1) + cur_time += 600 + + assert(tx2.hash in self.nodes[0].getrawmempool()) + + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) + + # Mine tx2, and then try again + self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN)) + + # Advance the time on the node so that we can test timelocks + self.nodes[0].setmocktime(cur_time+600) + self.nodes[0].generate(1) + assert(tx2.hash not in self.nodes[0].getrawmempool()) + + # Now that tx2 is not in the mempool, a sequence locked spend should + # succeed + tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) + assert(tx3.hash in self.nodes[0].getrawmempool()) + + self.nodes[0].generate(1) + assert(tx3.hash not in self.nodes[0].getrawmempool()) + + # One more test, this time using height locks + tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True) + assert(tx4.hash in self.nodes[0].getrawmempool()) + + # Now try combining confirmed and unconfirmed inputs + tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True) + assert(tx5.hash not in self.nodes[0].getrawmempool()) + + utxos = self.nodes[0].listunspent() + tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) + tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN) + raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"] + + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) + + # Test mempool-BIP68 consistency after reorg + # + # State of the transactions in the last blocks: + # ... -> [ tx2 ] -> [ tx3 ] + # tip-1 tip + # And currently tx4 is in the mempool. + # + # If we invalidate the tip, tx3 should get added to the mempool, causing + # tx4 to be removed (fails sequence-lock). + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + assert(tx4.hash not in self.nodes[0].getrawmempool()) + assert(tx3.hash in self.nodes[0].getrawmempool()) + + # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in + # diagram above). + # This would cause tx2 to be added back to the mempool, which in turn causes + # tx3 to be removed. 
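test_nonzero_locks above hinges on when a height-based lock of 1 becomes satisfiable: BIP68 measures the lock from the block that confirmed the parent, so a child spending a still-unconfirmed parent can never be final with a nonzero lock, which is exactly why sendrawtransaction fails while the parent sits in the mempool. A toy model of that rule (a simplified illustration, not the node's implementation); the loop below then mines the two blocks that perform the reorg described in the comment above:

    # Toy BIP68 finality check for a height-based relative lock (sketch):
    # the spend can be included at inclusion_height only if the parent is
    # confirmed and inclusion_height >= parent_height + relative_blocks.
    def bip68_height_final(inclusion_height, parent_height, relative_blocks):
        if parent_height is None:          # parent unconfirmed
            return False
        return inclusion_height >= parent_height + relative_blocks

    tip = 200
    assert not bip68_height_final(tip + 1, None, 1)        # parent in mempool: never final
    assert not bip68_height_final(tip + 1, tip + 1, 1)     # parent mined in the same block
    assert bip68_height_final(tip + 2, tip + 1, 1)         # one block after the parent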
+ tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16) + height = self.nodes[0].getblockcount() + for i in range(2): + block = create_block(tip, create_coinbase(height), cur_time) + block.nVersion = 3 + block.rehash() + block.solve() + tip = block.sha256 + height += 1 + self.nodes[0].submitblock(ToHex(block)) + cur_time += 1 + + mempool = self.nodes[0].getrawmempool() + assert(tx3.hash not in mempool) + assert(tx2.hash in mempool) + + # Reset the chain and get rid of the mocktimed-blocks + self.nodes[0].setmocktime(0) + self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1)) + self.nodes[0].generate(10) + + # Make sure that BIP68 isn't being used to validate blocks, prior to + # versionbits activation. If more blocks are mined prior to this test + # being run, then it's possible the test has activated the soft fork, and + # this test should be moved to run earlier, or deleted. + def test_bip68_not_consensus(self): + assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active') + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) + + tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) + tx1.rehash() + + # Make an anyone-can-spend transaction + tx2 = CTransaction() + tx2.nVersion = 1 + tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] + tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] + + # sign tx2 + tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] + tx2 = FromHex(tx2, tx2_raw) + tx2.rehash() + + self.nodes[0].sendrawtransaction(ToHex(tx2)) + + # Now make an invalid spend of tx2 according to BIP68 + sequence_value = 100 # 100 block relative locktime + + tx3 = CTransaction() + tx3.nVersion = 2 + tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] + tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] + tx3.rehash() + + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) + + # make a block that violates bip68; ensure that the tip updates + tip = int(self.nodes[0].getbestblockhash(), 16) + block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1)) + block.nVersion = 3 + block.vtx.extend([tx1, tx2, tx3]) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + add_witness_commitment(block) + block.solve() + + self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + def activateCSV(self): + # activation should happen at block height 432 (3 periods) + # getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block. + min_activation_height = 432 + height = self.nodes[0].getblockcount() + assert_greater_than(min_activation_height - height, 2) + self.nodes[0].generate(min_activation_height - height - 2) + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in") + self.nodes[0].generate(1) + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active") + sync_blocks(self.nodes) + + # Use self.nodes[1] to test that version 2 transactions are standard. 
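activateCSV above leans on regtest's BIP9 schedule: versionbits state only changes on 144-block period boundaries, so with every block signalling, CSV is locked in during the third period and is active for blocks from height 432 onwards; getblockchaininfo reports the state that will apply to the next block, which is why the test already sees "active" with the tip at 431. A sketch of that arithmetic (regtest parameters assumed; the helper is illustrative, not framework code). The comment just above introduces the version-2 relay check that follows this aside:

    # Regtest BIP9 timing with signalling from genesis (sketch): 144-block periods.
    PERIOD = 144

    def bip9_state_for_next_block(tip_height):
        period = (tip_height + 1) // PERIOD   # the period the next block falls in
        if period == 0:
            return "defined"
        if period == 1:
            return "started"
        if period == 2:
            return "locked_in"
        return "active"

    assert bip9_state_for_next_block(430) == "locked_in"   # what activateCSV asserts first
    assert bip9_state_for_next_block(431) == "active"      # next block is 432 = 3 * 144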
+    def test_version2_relay(self):
+        inputs = [ ]
+        outputs = { self.nodes[1].getnewaddress() : 1.0 }
+        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
+        rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
+        tx = FromHex(CTransaction(), rawtxfund)
+        tx.nVersion = 2
+        tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
+        self.nodes[1].sendrawtransaction(tx_signed)
+
+if __name__ == '__main__':
+    BIP68Test().main()
diff --git a/test/functional/feature_bip9_softforks.py b/test/functional/feature_bip9_softforks.py
new file mode 100755
index 0000000000..ae92e9f07c
--- /dev/null
+++ b/test/functional/feature_bip9_softforks.py
@@ -0,0 +1,283 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test BIP 9 soft forks.
+
+Connect to a single node.
+regtest lock-in with 108/144 block signalling
+activation after a further 144 blocks
+mine 2 blocks and save coinbases for later use
+mine 141 blocks to transition from DEFINED to STARTED
+mine 100 blocks signalling readiness and 44 not, in order to fail to change state this period
+mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
+mine a further 143 blocks (LOCKED_IN)
+test that enforcement has not triggered (which triggers ACTIVE)
+test that enforcement has triggered
+"""
+from io import BytesIO
+import shutil
+import time
+import itertools
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, network_thread_start
+from test_framework.blocktools import create_coinbase, create_block
+from test_framework.comptool import TestInstance, TestManager
+from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
+
+class BIP9SoftForksTest(ComparisonTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.extra_args = [['-whitelist=127.0.0.1']]
+        self.setup_clean_chain = True
+
+    def run_test(self):
+        self.test = TestManager(self, self.options.tmpdir)
+        self.test.add_all_connections(self.nodes)
+        network_thread_start()
+        self.test.run()
+
+    def create_transaction(self, node, coinbase, to_address, amount):
+        from_txid = node.getblock(coinbase)['tx'][0]
+        inputs = [{ "txid" : from_txid, "vout" : 0}]
+        outputs = { to_address : amount }
+        rawtx = node.createrawtransaction(inputs, outputs)
+        tx = CTransaction()
+        f = BytesIO(hex_str_to_bytes(rawtx))
+        tx.deserialize(f)
+        tx.nVersion = 2
+        return tx
+
+    def sign_transaction(self, node, tx):
+        signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
+        tx = CTransaction()
+        f = BytesIO(hex_str_to_bytes(signresult['hex']))
+        tx.deserialize(f)
+        return tx
+
+    def generate_blocks(self, number, version, test_blocks = []):
+        for i in range(number):
+            block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+            block.nVersion = version
+            block.rehash()
+            block.solve()
+            test_blocks.append([block, True])
+            self.last_block_time += 1
+            self.tip = block.sha256
+            self.height += 1
+        return test_blocks
+
+    def get_bip9_status(self, key):
+        info = self.nodes[0].getblockchaininfo()
+        return info['bip9_softforks'][key]
+
+    def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
+        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
+        assert_equal(self.get_bip9_status(bipName)['since'], 0)
+
+        # generate some coins for later
+        self.coinbase_blocks = self.nodes[0].generate(2)
+        self.height = 3  # height of the next block to build
+        self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+        self.nodeaddress = self.nodes[0].getnewaddress()
+        self.last_block_time = int(time.time())
+
+        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
+        assert_equal(self.get_bip9_status(bipName)['since'], 0)
+        tmpl = self.nodes[0].getblocktemplate({})
+        assert(bipName not in tmpl['rules'])
+        assert(bipName not in tmpl['vbavailable'])
+        assert_equal(tmpl['vbrequired'], 0)
+        assert_equal(tmpl['version'], 0x20000000)
+
+        # Test 1
+        # Advance from DEFINED to STARTED
+        test_blocks = self.generate_blocks(141, 4)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+        assert_equal(self.get_bip9_status(bipName)['since'], 144)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
+        tmpl = self.nodes[0].getblocktemplate({})
+        assert(bipName not in tmpl['rules'])
+        assert_equal(tmpl['vbavailable'][bipName], bitno)
+        assert_equal(tmpl['vbrequired'], 0)
+        assert(tmpl['version'] & activated_version)
+
+        # Test 1-A
+        # check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period
+        test_blocks = self.generate_blocks(36, 4, test_blocks)  # 0x00000004 (signalling not)
+        test_blocks = self.generate_blocks(10, activated_version, test_blocks)  # 0x20000001 (signalling ready)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
+
+        # Test 1-B
+        # check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
+        test_blocks = self.generate_blocks(1, 4, test_blocks)  # 0x00000004 (signalling not)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
+
+        # Test 1-C
+        # finish the period with "ready" blocks, but the soft fork will still fail to advance to LOCKED_IN
+        test_blocks = self.generate_blocks(97, activated_version)  # 0x20000001 (signalling ready)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
+        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+
+        # Test 2
+        # Fail to achieve LOCKED_IN: only 100 out of 144 blocks signal bit 1
+        test_blocks = self.generate_blocks(50, activated_version)  # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(20, 4, test_blocks)  # 0x00000004 (signalling not)
+        test_blocks = self.generate_blocks(50, activated_version, test_blocks)  # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(24, 4, test_blocks)  # 0x00000004 (signalling not)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+        assert_equal(self.get_bip9_status(bipName)['since'], 144)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
+        tmpl = self.nodes[0].getblocktemplate({})
+        assert(bipName not in tmpl['rules'])
+        assert_equal(tmpl['vbavailable'][bipName], bitno)
+        assert_equal(tmpl['vbrequired'], 0)
+        assert(tmpl['version'] & activated_version)
+
+        # Test 3
+        # 108 out of 144 signal bit 1 to achieve LOCKED_IN
+        test_blocks = self.generate_blocks(57, activated_version)  # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(26, 4, test_blocks)  # 0x00000004 (signalling not)
+        test_blocks = self.generate_blocks(50, activated_version, test_blocks)  # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(10, 4, test_blocks)  # 0x00000004 (signalling not)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        # check counting stats and "possible" flag before the last block of this period achieves LOCKED_IN...
+        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
+        assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
+        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+
+        # ...continue with Test 3
+        test_blocks = self.generate_blocks(1, activated_version)  # 0x20000001 (signalling ready)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
+        assert_equal(self.get_bip9_status(bipName)['since'], 576)
+        tmpl = self.nodes[0].getblocktemplate({})
+        assert(bipName not in tmpl['rules'])
+
+        # Test 4
+        # 143 more version 4 blocks (waiting period - 1)
+        test_blocks = self.generate_blocks(143, 4)
+        yield TestInstance(test_blocks, sync_every_block=False)
+
+        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
+        assert_equal(self.get_bip9_status(bipName)['since'], 576)
+        tmpl = self.nodes[0].getblocktemplate({})
+        assert(bipName not in tmpl['rules'])
+
+        # Test 5
+        # Check that the new rule is not yet enforced in the block that takes the fork to ACTIVE
+        spendtx = self.create_transaction(self.nodes[0],
+                self.coinbase_blocks[0], self.nodeaddress, 1.0)
+        invalidate(spendtx)
+        spendtx = self.sign_transaction(self.nodes[0], spendtx)
+        spendtx.rehash()
+        invalidatePostSignature(spendtx)
+        spendtx.rehash()
+        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+        block.nVersion = activated_version
+        block.vtx.append(spendtx)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.rehash()
+        block.solve()
+
+        self.last_block_time += 1
+        self.tip = block.sha256
+        self.height += 1
+        yield TestInstance([[block, True]])
+
+        assert_equal(self.get_bip9_status(bipName)['status'], 'active')
+        assert_equal(self.get_bip9_status(bipName)['since'], 720)
+        tmpl = self.nodes[0].getblocktemplate({})
+        assert(bipName in tmpl['rules'])
+        assert(bipName not in tmpl['vbavailable'])
+        assert_equal(tmpl['vbrequired'], 0)
+        assert(not (tmpl['version'] & (1 << bitno)))
+
+        # Test 6
+        # Check that the new rule is enforced now that the fork is ACTIVE
+        spendtx = self.create_transaction(self.nodes[0],
+                self.coinbase_blocks[1], self.nodeaddress, 1.0)
+        invalidate(spendtx)
+        spendtx = self.sign_transaction(self.nodes[0], spendtx)
+        spendtx.rehash()
+        invalidatePostSignature(spendtx)
+        spendtx.rehash()
+
+        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+        block.nVersion = 5
+        block.vtx.append(spendtx)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.rehash()
+        block.solve()
+        self.last_block_time += 1
+        yield TestInstance([[block, False]])
+
+        # Restart all
+        self.test.clear_all_connections()
+        self.stop_nodes()
+        self.nodes = []
+        shutil.rmtree(self.options.tmpdir + "/node0")
+        self.setup_chain()
+        self.setup_network()
+        self.test.add_all_connections(self.nodes)
+        network_thread_start()
+        self.test.p2p_connections[0].wait_for_verack()
+
+    def get_tests(self):
+        for test in itertools.chain(
+                self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
+                self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
+                self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
+        ):
+            yield test
+
+    def donothing(self, tx):
+        return
+
+    def csv_invalidate(self, tx):
+        """Modify the signature in vin 0 of the tx to fail CSV.
+
+        Prepends -1 CSV DROP in the scriptSig itself.
+        """
+        tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
+                                      list(CScript(tx.vin[0].scriptSig)))
+
+    def sequence_lock_invalidate(self, tx):
+        """Modify the nSequence to make it fail once the sequence lock rule is
+        activated (high timespan).
+        """
+        tx.vin[0].nSequence = 0x00FFFFFF
+        tx.nLockTime = 0
+
+    def mtp_invalidate(self, tx):
+        """Modify the nLockTime to make it fail once the MTP rule is activated."""
+        # Disable Sequence lock, Activate nLockTime
+        tx.vin[0].nSequence = 0x90FFFFFF
+        tx.nLockTime = self.last_block_time
+
+if __name__ == '__main__':
+    BIP9SoftForksTest().main()
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
new file mode 100755
index 0000000000..fe9bbda14b
--- /dev/null
+++ b/test/functional/feature_block.py
@@ -0,0 +1,1293 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test block processing.
+
+This reimplements tests from the bitcoinj/FullBlockTestGenerator used
+by the pull-tester.
+
+We use the testing framework in which we expect a particular answer from
+each test.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.comptool import TestManager, TestInstance, RejectResult
+from test_framework.blocktools import *
+import time
+from test_framework.key import CECKey
+from test_framework.script import *
+from test_framework.mininode import network_thread_start
+import struct
+
+class PreviousSpendableOutput():
+    def __init__(self, tx = CTransaction(), n = -1):
+        self.tx = tx
+        self.n = n  # the output we're spending
+
+# Use this class for tests that require behavior other than normal "mininode" behavior.
+# For now, it is used to serialize a bloated varint (b64); a standalone sketch of the
+# CompactSize encoding it abuses follows.
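To ground the "bloated varint" idea before the class itself, here is a minimal standalone sketch (independent of the test framework; the compact_size helper and its force_marker parameter are illustrative names, not framework API) of the CompactSize length encoding being abused: a count below 253 canonically takes one byte, while the 0xff form spends nine bytes on the same value, which is exactly the 8-byte bloat the b64a test later asserts.

import struct

def compact_size(n, force_marker=None):
    """Encode n as a Bitcoin CompactSize; force_marker picks a non-canonical width."""
    marker = force_marker
    if marker is None:  # choose the canonical (smallest) encoding
        marker = 0 if n < 253 else 0xfd if n <= 0xffff else 0xfe if n <= 0xffffffff else 0xff
    if marker == 0:
        return struct.pack("<B", n)
    fmt = {0xfd: "<BH", 0xfe: "<BI", 0xff: "<BQ"}[marker]
    return struct.pack(fmt, marker, n)

canonical = compact_size(2)                   # a 2-transaction count: 1 byte
bloated = compact_size(2, force_marker=0xff)  # same count, 0xff marker: 9 bytes
assert len(bloated) - len(canonical) == 8     # the "+ 8" asserted for b64a below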
+class CBrokenBlock(CBlock):
+    def __init__(self, header=None):
+        super(CBrokenBlock, self).__init__(header)
+
+    def initialize(self, base_block):
+        self.vtx = copy.deepcopy(base_block.vtx)
+        self.hashMerkleRoot = self.calc_merkle_root()
+
+    def serialize(self, with_witness=False):
+        r = b""
+        r += super(CBlock, self).serialize()
+        # Non-canonical varint: a 0xff marker plus a full 8-byte count, which
+        # bloats the tx-count encoding by 8 bytes over the canonical single byte.
+        r += struct.pack("<BQ", 255, len(self.vtx))
+        r += b"".join(tx.serialize() for tx in self.vtx)
+        return r
+
+    def normal_serialize(self):
+        r = b""
+        r += super(CBrokenBlock, self).serialize()
+        return r
+
+        # Start by building a couple of blocks on top (which output is spent is
+        # in parentheses):
+        #     genesis -> b1 (0) -> b2 (1)
+        block(1, spend=out[0])
+        save_spendable_output()
+        yield accepted()
+
+        block(2, spend=out[1])
+        yield accepted()
+        save_spendable_output()
+
+        # so fork like this:
+        #
+        #     genesis -> b1 (0) -> b2 (1)
+        #                      \-> b3 (1)
+        #
+        # Nothing should happen at this point. We saw b2 first so it takes priority.
+        tip(1)
+        b3 = block(3, spend=out[1])
+        txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
+        yield rejected()
+
+        # Now we add another block to make the alternative chain longer.
+        #
+        #     genesis -> b1 (0) -> b2 (1)
+        #                      \-> b3 (1) -> b4 (2)
+        block(4, spend=out[2])
+        yield accepted()
+
+        # ... and back to the first chain.
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(2)
+        block(5, spend=out[2])
+        save_spendable_output()
+        yield rejected()
+
+        block(6, spend=out[3])
+        yield accepted()
+
+        # Try to create a fork that double-spends
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b7 (2) -> b8 (4)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(5)
+        block(7, spend=out[2])
+        yield rejected()
+
+        block(8, spend=out[4])
+        yield rejected()
+
+        # Try to create a block that has too much fee
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                                    \-> b9 (4)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(6)
+        block(9, spend=out[4], additional_coinbase_value=1)
+        yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+        # Create a fork that ends in a block with too much fee (the one that causes the reorg)
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b10 (3) -> b11 (4)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(5)
+        block(10, spend=out[3])
+        yield rejected()
+
+        block(11, spend=out[4], additional_coinbase_value=1)
+        yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+        # Try again, but with a valid fork first
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b14 (5)
+        #                                              (b12 added last)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(5)
+        b12 = block(12, spend=out[3])
+        save_spendable_output()
+        b13 = block(13, spend=out[4])
+        # Deliver the block header for b12, and the block b13.
+        # b13 should be accepted but the tip won't advance until b12 is delivered.
+        yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
+
+        save_spendable_output()
+        # b14 is invalid, but the node won't know that until it tries to connect.
+        # Tip still can't advance because b12 is missing.
+        block(14, spend=out[5], additional_coinbase_value=1)
+        yield rejected()
+
+        yield TestInstance([[b12, True, b13.sha256]])  # New tip should be b13.
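The b12/b13/b14 sequence above hinges on one behaviour: a block whose parent has not arrived cannot advance the tip, no matter how many valid descendants show up. A toy standalone model of that bookkeeping (not the node's actual implementation; ToyChain is an invented name, and it ignores chain work) might look like this:

class ToyChain:
    def __init__(self):
        self.known = {"genesis"}  # connected blocks
        self.tip = "genesis"      # most recently connected block (toy ignores work)
        self.orphans = {}         # parent hash -> children waiting on it

    def add_block(self, block_hash, parent_hash):
        if parent_hash not in self.known:
            self.orphans.setdefault(parent_hash, []).append(block_hash)
            return self.tip       # parent missing: buffer the child, tip unchanged
        self.known.add(block_hash)
        self.tip = block_hash
        for child in self.orphans.pop(block_hash, []):
            self.add_block(child, block_hash)  # connect any waiting children
        return self.tip

chain = ToyChain()
chain.add_block("b13", parent_hash="b12")      # b13 arrives first: buffered
assert chain.tip == "genesis"
chain.add_block("b12", parent_hash="genesis")  # delivering b12 connects b13 too
assert chain.tip == "b13"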
+
+        # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
+        #                      \-> b3 (1) -> b4 (2)
+
+        # Test that a block with a lot of checksigs is okay
+        lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
+        tip(13)
+        block(15, spend=out[5], script=lots_of_checksigs)
+        yield accepted()
+        save_spendable_output()
+
+        # Test that a block with too many checksigs is rejected
+        too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
+        block(16, spend=out[6], script=too_many_checksigs)
+        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+        # Attempt to spend a transaction created on a different fork
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
+        #                      \-> b3 (1) -> b4 (2)
+        tip(15)
+        block(17, spend=txout_b3)
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # Attempt to spend a transaction created on a different fork (on a fork this time)
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
+        #                                                                \-> b18 (b3.vtx[1]) -> b19 (6)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(13)
+        block(18, spend=txout_b3)
+        yield rejected()
+
+        block(19, spend=out[6])
+        yield rejected()
+
+        # Attempt to spend a coinbase at depth too low
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(15)
+        block(20, spend=out[7])
+        yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
+
+        # Attempt to spend a coinbase at depth too low (on a fork this time)
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
+        #                                                                \-> b21 (6) -> b22 (5)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(13)
+        block(21, spend=out[6])
+        yield rejected()
+
+        block(22, spend=out[5])
+        yield rejected()
+
+        # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted/rejected
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
+        #                                                                            \-> b24 (6) -> b25 (7)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(15)
+        b23 = block(23, spend=out[6])
+        tx = CTransaction()
+        script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
+        script_output = CScript([b'\x00' * script_length])
+        tx.vout.append(CTxOut(0, script_output))
+        tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
+        b23 = update_block(23, [tx])
+        # Make sure the math above worked out to produce a max-sized block
+        assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
+        yield accepted()
+        save_spendable_output()
+
+        # Make the next block one byte bigger and check that it fails
+        tip(15)
+        b24 = block(24, spend=out[6])
+        script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
+        script_output = CScript([b'\x00' * (script_length+1)])
+        tx.vout = [CTxOut(0, script_output)]
+        b24 = update_block(24, [tx])
+        assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
+        yield rejected(RejectResult(16, b'bad-blk-length'))
+
+        block(25, spend=out[7])
+        yield rejected()
+
+        # Create blocks with a coinbase input script size out of range
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
+        #                                                                            \-> ... (6) -> ... (7)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(15)
+        b26 = block(26, spend=out[6])
+        b26.vtx[0].vin[0].scriptSig = b'\x00'
+        b26.vtx[0].rehash()
+        # update_block causes the merkle root to get updated, even with no new
+        # transactions, and updates the required state.
+        b26 = update_block(26, [])
+        yield rejected(RejectResult(16, b'bad-cb-length'))
+
+        # Extend the b26 chain to make sure bitcoind isn't accepting b26
+        block(27, spend=out[7])
+        yield rejected(False)
+
+        # Now try a too-large-coinbase script
+        tip(15)
+        b28 = block(28, spend=out[6])
+        b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
+        b28.vtx[0].rehash()
+        b28 = update_block(28, [])
+        yield rejected(RejectResult(16, b'bad-cb-length'))
+
+        # Extend the b28 chain to make sure bitcoind isn't accepting b28
+        block(29, spend=out[7])
+        yield rejected(False)
+
+        # b30 has a max-sized coinbase scriptSig.
+        tip(23)
+        b30 = block(30)
+        b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
+        b30.vtx[0].rehash()
+        b30 = update_block(30, [])
+        yield accepted()
+        save_spendable_output()
+
+        # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
+        #
+        #     genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+        #                                                    \-> b36 (11)
+        #                                         \-> b34 (10)
+        #                              \-> b32 (9)
+        #
+
+        # MULTISIG: each opcode counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
+        lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+        b31 = block(31, spend=out[8], script=lots_of_multisigs)
+        assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
+        yield accepted()
+        save_spendable_output()
+
+        # this goes over the limit because the coinbase has one sigop
+        too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
+        b32 = block(32, spend=out[9], script=too_many_multisigs)
+        assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
+        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+        # CHECKMULTISIGVERIFY
+        tip(31)
+        lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+        block(33, spend=out[9], script=lots_of_multisigs)
+        yield accepted()
+        save_spendable_output()
+
+        too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
+        block(34, spend=out[10], script=too_many_multisigs)
+        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+        # CHECKSIGVERIFY
+        tip(33)
+        lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
+        b35 = block(35, spend=out[10], script=lots_of_checksigs)
+        yield accepted()
+        save_spendable_output()
+
+        too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
+        block(36, spend=out[11], script=too_many_checksigs)
+        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+        # Check spending of a transaction in a block which failed to connect
+        #
+        # b6  (3)
+        # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+        #                                                                                    \-> b37 (11)
+        #                                                                                    \-> b38 (11/37)
+        #
+
+        # save 37's spendable output, but then double-spend out11 to invalidate the block
+        tip(35)
+        b37 = block(37, spend=out[11])
+        txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
+        tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
+        b37 = update_block(37, [tx])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
+        tip(35)
+        block(38, spend=txout_b37)
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
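All of the bad-blk-sigops rejections above (and the P2SH variants that follow) come down to simple opcode counting. Here is a rough standalone sketch of the legacy rule (a simplification: the real counter also skips over pushdata payloads, and legacy_sigop_count is an illustrative name): each bare OP_CHECKSIG or OP_CHECKSIGVERIFY costs 1, each bare OP_CHECKMULTISIG or OP_CHECKMULTISIGVERIFY costs a flat 20, and the block total, coinbase included, must stay at or below MAX_BLOCK_SIGOPS.

# A rough standalone sketch of legacy sigop counting (ignores pushdata
# payloads, which the real counter skips; MAX_BLOCK_SIGOPS assumed 20000).
OP_CHECKSIG, OP_CHECKSIGVERIFY = 0xac, 0xad
OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY = 0xae, 0xaf
MAX_BLOCK_SIGOPS = 20000

def legacy_sigop_count(script_bytes):
    count = 0
    for op in script_bytes:
        if op in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
            count += 1
        elif op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
            count += 20
    return count

# 999 bare multisigs plus 19 checksigs, plus the coinbase's one sigop, sits
# exactly at the limit, mirroring the b31 construction above:
script = bytes([OP_CHECKMULTISIG] * 999 + [OP_CHECKSIG] * 19)
assert legacy_sigop_count(script) + 1 == MAX_BLOCK_SIGOPS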
+        # Check P2SH SigOp counting
+        #
+        #
+        #   13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
+        #                                                                                        \-> b40 (12)
+        #
+        # b39 - create some P2SH outputs that will require 6 sigops to spend:
+        #
+        #           redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
+        #           p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
+        #
+        tip(35)
+        b39 = block(39)
+        b39_outputs = 0
+        b39_sigops_per_output = 6
+
+        # Build the redeem script, hash it, use hash to create the p2sh script
+        redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
+        redeem_script_hash = hash160(redeem_script)
+        p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
+
+        # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
+        # This must be signed because it is spending a coinbase
+        spend = out[11]
+        tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
+        tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
+        self.sign_tx(tx, spend.tx, spend.n)
+        tx.rehash()
+        b39 = update_block(39, [tx])
+        b39_outputs += 1
+
+        # Until the block is full, add txs that spend 1 satoshi to p2sh_script and the rest to OP_TRUE
+        tx_new = None
+        tx_last = tx
+        total_size = len(b39.serialize())
+        while(total_size < MAX_BLOCK_BASE_SIZE):
+            tx_new = create_tx(tx_last, 1, 1, p2sh_script)
+            tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
+            tx_new.rehash()
+            total_size += len(tx_new.serialize())
+            if total_size >= MAX_BLOCK_BASE_SIZE:
+                break
+            b39.vtx.append(tx_new)  # add tx to block
+            tx_last = tx_new
+            b39_outputs += 1
+
+        b39 = update_block(39, [])
+        yield accepted()
+        save_spendable_output()
+
+        # Test sigops in P2SH redeem scripts
+        #
+        # b40 creates 3333 txs spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
+        # The first tx has one sigop and then at the end we add 2 more to put us just over the max.
+        #
+        # b41 does the same, less one, so it has the maximum sigops permitted.
+ # + tip(39) + b40 = block(40, spend=out[12]) + sigops = get_legacy_sigopcount_block(b40) + numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output + assert_equal(numTxes <= b39_outputs, True) + + lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) + new_txs = [] + for i in range(1, numTxes+1): + tx = CTransaction() + tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) + tx.vin.append(CTxIn(lastOutpoint, b'')) + # second input is corresponding P2SH output from b39 + tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) + # Note: must pass the redeem_script (not p2sh_script) to the signature hash function + (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL) + sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL])) + scriptSig = CScript([sig, redeem_script]) + + tx.vin[1].scriptSig = scriptSig + tx.rehash() + new_txs.append(tx) + lastOutpoint = COutPoint(tx.sha256, 0) + + b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1 + tx = CTransaction() + tx.vin.append(CTxIn(lastOutpoint, b'')) + tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) + tx.rehash() + new_txs.append(tx) + update_block(40, new_txs) + yield rejected(RejectResult(16, b'bad-blk-sigops')) + + # same as b40, but one less sigop + tip(39) + block(41, spend=None) + update_block(41, b40.vtx[1:-1]) + b41_sigops_to_fill = b40_sigops_to_fill - 1 + tx = CTransaction() + tx.vin.append(CTxIn(lastOutpoint, b'')) + tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) + tx.rehash() + update_block(41, [tx]) + yield accepted() + + # Fork off of b39 to create a constant base again + # + # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) + # \-> b41 (12) + # + tip(39) + block(42, spend=out[12]) + yield rejected() + save_spendable_output() + + block(43, spend=out[13]) + yield accepted() + save_spendable_output() + + + # Test a number of really invalid scenarios + # + # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) + # \-> ??? (15) + + # The next few blocks are going to be created "by hand" since they'll do funky things, such as having + # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works. 
+        height = self.block_heights[self.tip.sha256] + 1
+        coinbase = create_coinbase(height, self.coinbase_pubkey)
+        b44 = CBlock()
+        b44.nTime = self.tip.nTime + 1
+        b44.hashPrevBlock = self.tip.sha256
+        b44.nBits = 0x207fffff
+        b44.vtx.append(coinbase)
+        b44.hashMerkleRoot = b44.calc_merkle_root()
+        b44.solve()
+        self.tip = b44
+        self.block_heights[b44.sha256] = height
+        self.blocks[44] = b44
+        yield accepted()
+
+        # A block with a non-coinbase as the first tx
+        non_coinbase = create_tx(out[15].tx, out[15].n, 1)
+        b45 = CBlock()
+        b45.nTime = self.tip.nTime + 1
+        b45.hashPrevBlock = self.tip.sha256
+        b45.nBits = 0x207fffff
+        b45.vtx.append(non_coinbase)
+        b45.hashMerkleRoot = b45.calc_merkle_root()
+        b45.calc_sha256()
+        b45.solve()
+        self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
+        self.tip = b45
+        self.blocks[45] = b45
+        yield rejected(RejectResult(16, b'bad-cb-missing'))
+
+        # A block with no txns
+        tip(44)
+        b46 = CBlock()
+        b46.nTime = b44.nTime + 1
+        b46.hashPrevBlock = b44.sha256
+        b46.nBits = 0x207fffff
+        b46.vtx = []
+        b46.hashMerkleRoot = 0
+        b46.solve()
+        self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
+        self.tip = b46
+        assert 46 not in self.blocks
+        self.blocks[46] = b46
+        yield rejected(RejectResult(16, b'bad-blk-length'))
+
+        # A block with invalid work
+        tip(44)
+        b47 = block(47, solve=False)
+        target = uint256_from_compact(b47.nBits)
+        while b47.sha256 < target:  # deliberately inverted vs solve(): stop only once the hash is above target
+            b47.nNonce += 1
+            b47.rehash()
+        yield rejected(RejectResult(16, b'high-hash'))
+
+        # A block with a timestamp > 2 hrs in the future
+        tip(44)
+        b48 = block(48, solve=False)
+        b48.nTime = int(time.time()) + 60 * 60 * 3
+        b48.solve()
+        yield rejected(RejectResult(16, b'time-too-new'))
+
+        # A block with an invalid merkle hash
+        tip(44)
+        b49 = block(49)
+        b49.hashMerkleRoot += 1
+        b49.solve()
+        yield rejected(RejectResult(16, b'bad-txnmrklroot'))
+
+        # A block with an incorrect POW limit
+        tip(44)
+        b50 = block(50)
+        b50.nBits = b50.nBits - 1
+        b50.solve()
+        yield rejected(RejectResult(16, b'bad-diffbits'))
+
+        # A block with two coinbase txns
+        tip(44)
+        b51 = block(51)
+        cb2 = create_coinbase(51, self.coinbase_pubkey)
+        b51 = update_block(51, [cb2])
+        yield rejected(RejectResult(16, b'bad-cb-multiple'))
+
+        # A block with duplicate txns
+        # Note: txns have to be in the right position in the merkle tree to trigger this error
+        tip(44)
+        b52 = block(52, spend=out[15])
+        tx = create_tx(b52.vtx[1], 0, 1)
+        b52 = update_block(52, [tx, tx])
+        yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+        # Test block timestamps
+        # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
+        #                                                                                  \-> b54 (15)
+        #
+        tip(43)
+        block(53, spend=out[14])
+        yield rejected()  # rejected since b44 is at same height
+        save_spendable_output()
+
+        # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
+        b54 = block(54, spend=out[15])
+        b54.nTime = b35.nTime - 1
+        b54.solve()
+        yield rejected(RejectResult(16, b'time-too-old'))
+
+        # valid timestamp
+        tip(53)
+        b55 = block(55, spend=out[15])
+        b55.nTime = b35.nTime
+        update_block(55, [])
+        yield accepted()
+        save_spendable_output()
+
+        # Test CVE-2012-2459
+        #
+        # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
+        #                                                \-> b57   (16)
+        #                                                \-> b56p2 (16)
+        #                                                \-> b56   (16)
+        #
+        # Merkle tree malleability (CVE-2012-2459): it is possible to repeat sequences of
+        # transactions in a block without affecting its merkle root, while still invalidating it.
+        # See:  src/consensus/merkle.h
+        #
+        # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx1
+        # (the last transaction of the odd-length row).
+        # Result: OK
+        #
+        # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
+        # root but duplicate transactions.
+        # Result: Fails
+        #
+        # b57p2 has six transactions in its merkle tree:
+        # - coinbase, tx, tx1, tx2, tx3, tx4
+        # Merkle root calculation will duplicate as necessary.
+        # Result: OK.
+        #
+        # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
+        # duplicate txns that are not next to one another, with the "bad-txns-duplicate" error (which indicates
+        # that the error was caught early, avoiding a DoS vulnerability.)
+
+        # b57 - a good block with 2 txs, don't submit until end
+        tip(55)
+        b57 = block(57)
+        tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+        tx1 = create_tx(tx, 0, 1)
+        b57 = update_block(57, [tx, tx1])
+
+        # b56 - copy b57, add a duplicate tx1
+        tip(55)
+        b56 = copy.deepcopy(b57)
+        self.blocks[56] = b56
+        assert_equal(len(b56.vtx), 3)
+        b56 = update_block(56, [tx1])
+        assert_equal(b56.hash, b57.hash)
+        yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+        # b57p2 - a good block with 6 txs, don't submit until end
+        tip(55)
+        b57p2 = block("57p2")
+        tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+        tx1 = create_tx(tx, 0, 1)
+        tx2 = create_tx(tx1, 0, 1)
+        tx3 = create_tx(tx2, 0, 1)
+        tx4 = create_tx(tx3, 0, 1)
+        b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
+
+        # b56p2 - copy b57p2, duplicate two non-consecutive txs
+        tip(55)
+        b56p2 = copy.deepcopy(b57p2)
+        self.blocks["b56p2"] = b56p2
+        assert_equal(b56p2.hash, b57p2.hash)
+        assert_equal(len(b56p2.vtx), 6)
+        b56p2 = update_block("b56p2", [tx3, tx4])
+        yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+        tip("57p2")
+        yield accepted()
+
+        tip(57)
+        yield rejected()  # rejected because 57p2 was seen first
+        save_spendable_output()
+
+        # Test a few invalid tx types
+        #
+        # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+        #                                                                                    \-> ??? (17)
+        #
+
+        # tx with prevout.n out of range
+        tip(57)
+        b58 = block(58, spend=out[17])
+        tx = CTransaction()
+        assert(len(out[17].tx.vout) < 42)
+        tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
+        tx.vout.append(CTxOut(0, b""))
+        tx.calc_sha256()
+        b58 = update_block(58, [tx])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # tx with output value > input value
+        tip(57)
+        b59 = block(59)
+        tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
+        b59 = update_block(59, [tx])
+        yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
+
+        # reset to good chain
+        tip(57)
+        b60 = block(60, spend=out[17])
+        yield accepted()
+        save_spendable_output()
+
+        # Test BIP30
+        #
+        # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+        #                                                                                  \-> b61 (18)
+        #
+        # Blocks are not allowed to contain a transaction whose id matches that of an earlier,
+        # not-fully-spent transaction in the same chain. To test, make identical coinbases;
+        # the second one should be rejected. (A toy illustration of the hazard follows.)
+        #
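As promised, a toy illustration (plain Python, not consensus code) of why such duplicates were dangerous before BIP30: a UTXO map keyed by txid alone silently clobbers the earlier, still-unspent duplicate, destroying those coins.

# Toy model of a UTXO set keyed by txid alone, as pre-BIP30 nodes effectively had.
utxo = {}

def connect(txid, outputs):
    clobbered = txid in utxo    # an earlier tx with this id still has outputs?
    utxo[txid] = list(outputs)  # overwrite: the old coins are gone for good
    return clobbered

assert not connect("coinbase_with_reused_scriptSig", [50])
# A later block carrying an identical coinbase txid wipes out the first coins:
assert connect("coinbase_with_reused_scriptSig", [50])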
+        tip(60)
+        b61 = block(61, spend=out[18])
+        b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig  # equalize the coinbases
+        b61.vtx[0].rehash()
+        b61 = update_block(61, [])
+        assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
+        yield rejected(RejectResult(16, b'bad-txns-BIP30'))
+
+        # Test that tx.isFinal is properly rejected (not an exhaustive tx.isFinal test;
+        # that belongs in data-driven transaction tests)
+        #
+        # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+        #                                                                                  \-> b62 (18)
+        #
+        tip(60)
+        b62 = block(62)
+        tx = CTransaction()
+        tx.nLockTime = 0xffffffff  # this locktime is non-final
+        assert(out[18].n < len(out[18].tx.vout))
+        tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n)))  # don't set nSequence
+        tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
+        assert(tx.vin[0].nSequence < 0xffffffff)
+        tx.calc_sha256()
+        b62 = update_block(62, [tx])
+        yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
+        # Test that a non-final coinbase is also rejected
+        #
+        # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+        #                                                                                  \-> b63 (-)
+        #
+        tip(60)
+        b63 = block(63)
+        b63.vtx[0].nLockTime = 0xffffffff
+        b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
+        b63.vtx[0].rehash()
+        b63 = update_block(63, [])
+        yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
+        # This checks that a block with a bloated varint between the block header and the tx array
+        # (one that is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE
+        # without it) does not cause a subsequent, identical block with canonical encoding to be
+        # rejected. The test does not care whether the bloated block is accepted or rejected; it
+        # only cares that the second block is accepted.
+        #
+        # What matters is that the receiving node should not reject the bloated block, and then
+        # reject the canonical block on the basis that it's the same as an already-rejected block
+        # (which would be a consensus failure.)
+        #
+        # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
+        #                                                                                    \
+        #                                                                                      b64a (18)
+        # b64a is a bloated block (non-canonical varint)
+        # b64 is a good block (same as b64a but with a canonical varint)
+        #
+        tip(60)
+        regular_block = block("64a", spend=out[18])
+
+        # make it a "broken block" with non-canonical serialization
+        b64a = CBrokenBlock(regular_block)
+        b64a.initialize(regular_block)
+        self.blocks["64a"] = b64a
+        self.tip = b64a
+        tx = CTransaction()
+
+        # use canonical serialization to calculate size
+        script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
+        script_output = CScript([b'\x00' * script_length])
+        tx.vout.append(CTxOut(0, script_output))
+        tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
+        b64a = update_block("64a", [tx])
+        assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
+        yield TestInstance([[self.tip, None]])
+
+        # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
+        self.test.block_store.erase(b64a.sha256)
+
+        tip(60)
+        b64 = CBlock(b64a)
+        b64.vtx = copy.deepcopy(b64a.vtx)
+        assert_equal(b64.hash, b64a.hash)
+        assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
+        self.blocks[64] = b64
+        update_block(64, [])
+        yield accepted()
+        save_spendable_output()
+
+        # Spend an output created in the block itself
+        #
+        # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+        #
+        tip(64)
+        block(65)
+        tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
+        tx2 = create_and_sign_tx(tx1, 0, 0)
+        update_block(65, [tx1, tx2])
+        yield accepted()
+        save_spendable_output()
+
+        # Attempt to spend an output created later in the same block
+        #
+        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+        #                                                                                  \-> b66 (20)
+        tip(65)
+        block(66)
+        tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+        tx2 = create_and_sign_tx(tx1, 0, 1)
+        update_block(66, [tx2, tx1])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # Attempt to double-spend a transaction created in a block
+        #
+        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+        #                                                                                  \-> b67 (20)
+        #
+        tip(65)
+        block(67)
+        tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+        tx2 = create_and_sign_tx(tx1, 0, 1)
+        tx3 = create_and_sign_tx(tx1, 0, 2)
+        update_block(67, [tx1, tx2, tx3])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # More tests of block subsidy
+        #
+        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+        #                                                                                    \-> b68 (20)
+        #
+        # b68 - coinbase with an extra 10 satoshis,
+        #       creates a tx that has 9 satoshis from out[20] go to fees
+        #       this fails because the coinbase is trying to claim 1 satoshi too much in fees
+        #
+        # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
+        #       this succeeds
+        #
+        tip(65)
+        block(68, additional_coinbase_value=10)
+        tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
+        update_block(68, [tx])
+        yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+        tip(65)
+        b69 = block(69, additional_coinbase_value=10)
+        tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
+        update_block(69, [tx])
+        yield accepted()
+        save_spendable_output()
+
+        # Test spending the outpoint of a non-existent transaction
+        #
+        # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+        #                                                                                  \-> b70 (21)
+        #
+        tip(69)
+        block(70, spend=out[21])
+        bogus_tx = CTransaction()
+        bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
+        tx.vout.append(CTxOut(1, b""))
+        update_block(70, [tx])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
+        #
+        # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+        #                                                                                    \-> b71 (21)
+        #
+        # b72 is a good block.
+        # b71 is a copy of b72, but re-adds one of its transactions. However, it has the same hash as b72.
+        #
+        tip(69)
+        b72 = block(72)
+        tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
+        tx2 = create_and_sign_tx(tx1, 0, 1)
+        b72 = update_block(72, [tx1, tx2])  # now tip is 72
+        b71 = copy.deepcopy(b72)
+        b71.vtx.append(tx2)  # add duplicate tx2
+        self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1  # b71 builds off b69
+        self.blocks[71] = b71
+
+        assert_equal(len(b71.vtx), 4)
+        assert_equal(len(b72.vtx), 3)
+        assert_equal(b72.sha256, b71.sha256)
+
+        tip(71)
+        yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+        tip(72)
+        yield accepted()
+        save_spendable_output()
+
+        # Test some invalid scripts and MAX_BLOCK_SIGOPS
+        #
+        # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+        #                                                                                  \-> b** (22)
+        #
+
+        # b73 - tx with excessive sigops that are placed after an excessively large script element.
+        # The purpose of the test is to make sure those sigops are counted.
+        #
+        # script is a bytearray of size 20,526
+        #
+        # bytearray[0-19,998]     : OP_CHECKSIG
+        # bytearray[19,999]       : OP_PUSHDATA4
+        # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
+        # bytearray[20,004-20,524]: unread data (script_element)
+        # bytearray[20,525]       : OP_CHECKSIG (this puts us over the limit)
+        #
+        tip(72)
+        b73 = block(73)
+        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
+        a = bytearray([OP_CHECKSIG] * size)
+        a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16)  # OP_PUSHDATA4
+
+        element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
+        a[MAX_BLOCK_SIGOPS] = element_size % 256
+        a[MAX_BLOCK_SIGOPS+1] = element_size // 256
+        a[MAX_BLOCK_SIGOPS+2] = 0
+        a[MAX_BLOCK_SIGOPS+3] = 0
+
+        tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+        b73 = update_block(73, [tx])
+        assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
+        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+        # b74/75 - if we push an invalid script element, all previous sigops are counted,
+        # but sigops after the element are not counted.
+        #
+        # The invalid script element is that the push_data indicates that
+        # there will be a very large amount of data (0xfffffffe or 0xffffffff bytes), but we
+        # only provide a much smaller number. These bytes are CHECKSIGs so they would
+        # cause b75 to fail for excessive sigops, if those bytes were counted.
+        #
+        # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
+        # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
+        #
+        tip(72)
+        b74 = block(74)
+        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42  # total = 20,561
+        a = bytearray([OP_CHECKSIG] * size)
+        a[MAX_BLOCK_SIGOPS] = 0x4e
+        a[MAX_BLOCK_SIGOPS+1] = 0xfe
+        a[MAX_BLOCK_SIGOPS+2] = 0xff
+        a[MAX_BLOCK_SIGOPS+3] = 0xff
+        a[MAX_BLOCK_SIGOPS+4] = 0xff
+        tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+        b74 = update_block(74, [tx])
+        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+        tip(72)
+        b75 = block(75)
+        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
+        a = bytearray([OP_CHECKSIG] * size)
+        a[MAX_BLOCK_SIGOPS-1] = 0x4e
+        a[MAX_BLOCK_SIGOPS] = 0xff
+        a[MAX_BLOCK_SIGOPS+1] = 0xff
+        a[MAX_BLOCK_SIGOPS+2] = 0xff
+        a[MAX_BLOCK_SIGOPS+3] = 0xff
+        tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+        b75 = update_block(75, [tx])
+        yield accepted()
+        save_spendable_output()
+
+        # Check that if we push an element filled with CHECKSIGs, they are not counted
+        tip(75)
+        b76 = block(76)
+        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
+        a = bytearray([OP_CHECKSIG] * size)
+        a[MAX_BLOCK_SIGOPS-1] = 0x4e  # PUSHDATA4, but leave the following bytes as just checksigs
+        tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
+        b76 = update_block(76, [tx])
+        yield accepted()
+        save_spendable_output()
+
+        # Test transaction resurrection
+        #
+        # -> b77 (24) -> b78 (25) -> b79 (26)
+        #            \-> b80 (25) -> b81 (26) -> b82 (27)
+        #
+        # b78 creates a tx, which is spent in b79. After b82, both should be in the mempool.
+        #
+        # The txs must be unsigned and pass the node's mempool policy. They are unsigned for the
+        # rather obscure reason that the Python signature code does not distinguish between
+        # Low-S and High-S values (whereas the bitcoin code has custom code to do so);
+        # as a result, the odds are 50% that the Python code will use the right
+        # value and the transaction will be accepted into the mempool. Until we modify the
+        # test framework to support low-S signing, we are out of luck.
+        #
+        # To get around this issue, we construct transactions that are not signed and that
+        # spend to OP_TRUE. If the standardness rules change, this test would need to be
+        # updated. (Perhaps to spend to a P2SH OP_TRUE script.)
+        #
+        tip(76)
+        block(77)
+        tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
+        update_block(77, [tx77])
+        yield accepted()
+        save_spendable_output()
+
+        block(78)
+        tx78 = create_tx(tx77, 0, 9*COIN)
+        update_block(78, [tx78])
+        yield accepted()
+
+        block(79)
+        tx79 = create_tx(tx78, 0, 8*COIN)
+        update_block(79, [tx79])
+        yield accepted()
+
+        # mempool should be empty
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+        tip(77)
+        block(80, spend=out[25])
+        yield rejected()
+        save_spendable_output()
+
+        block(81, spend=out[26])
+        yield rejected()  # other chain is same length
+        save_spendable_output()
+
+        block(82, spend=out[27])
+        yield accepted()  # now this chain is longer, triggers re-org
+        save_spendable_output()
+
+        # now check that tx78 and tx79 have been put back into the peer's mempool
+        mempool = self.nodes[0].getrawmempool()
+        assert_equal(len(mempool), 2)
+        assert(tx78.hash in mempool)
+        assert(tx79.hash in mempool)
+
+        # Test invalid opcodes in dead execution paths.
+ # + # -> b81 (26) -> b82 (27) -> b83 (28) + # + block(83) + op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] + script = CScript(op_codes) + tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) + + tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) + tx2.vin[0].scriptSig = CScript([OP_FALSE]) + tx2.rehash() + + update_block(83, [tx1, tx2]) + yield accepted() + save_spendable_output() + + + # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) + # + # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) + # \-> b85 (29) -> b86 (30) \-> b89a (32) + # + # + block(84) + tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.calc_sha256() + self.sign_tx(tx1, out[29].tx, out[29].n) + tx1.rehash() + tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) + tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) + tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) + tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) + tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) + tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) + + update_block(84, [tx1,tx2,tx3,tx4,tx5]) + yield accepted() + save_spendable_output() + + tip(83) + block(85, spend=out[29]) + yield rejected() + + block(86, spend=out[30]) + yield accepted() + + tip(84) + block(87, spend=out[30]) + yield rejected() + save_spendable_output() + + block(88, spend=out[31]) + yield accepted() + save_spendable_output() + + # trying to spend the OP_RETURN output is rejected + block("89a", spend=out[32]) + tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) + update_block("89a", [tx]) + yield rejected() + + + # Test re-org of a week's worth of blocks (1088 blocks) + # This test takes a minute or two and can be accomplished in memory + # + if self.options.runbarelyexpensive: + tip(88) + LARGE_REORG_SIZE = 1088 + test1 = TestInstance(sync_every_block=False) + spend=out[32] + for i in range(89, LARGE_REORG_SIZE + 89): + b = block(i, spend) + tx = CTransaction() + script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 + script_output = CScript([b'\x00' * script_length]) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) + b = update_block(i, [tx]) + assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) + test1.blocks_and_transactions.append([self.tip, True]) + save_spendable_output() + spend = get_spendable_output() + + yield test1 + chain1_tip = i + + # now create alt chain of same length + tip(88) + test2 = TestInstance(sync_every_block=False) + for i in range(89, LARGE_REORG_SIZE + 89): + block("alt"+str(i)) + test2.blocks_and_transactions.append([self.tip, False]) + yield test2 + + # extend alt chain to trigger re-org + block("alt" + str(chain1_tip + 1)) + yield accepted() + + # ... 
and re-org back to the first chain + tip(chain1_tip) + block(chain1_tip + 1) + yield rejected() + block(chain1_tip + 2) + yield accepted() + + chain1_tip += 2 + + + +if __name__ == '__main__': + FullBlockTest().main() diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py new file mode 100755 index 0000000000..f62ae31654 --- /dev/null +++ b/test/functional/feature_cltv.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test BIP65 (CHECKLOCKTIMEVERIFY). + +Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height +1351. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import * +from test_framework.blocktools import create_coinbase, create_block +from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum +from io import BytesIO + +CLTV_HEIGHT = 1351 + +# Reject codes that we might receive in this test +REJECT_INVALID = 16 +REJECT_OBSOLETE = 17 +REJECT_NONSTANDARD = 64 + +def cltv_invalidate(tx): + '''Modify the signature in vin 0 of the tx to fail CLTV + + Prepends -1 CLTV DROP in the scriptSig itself. + + TODO: test more ways that transactions using CLTV could be invalid (eg + locktime requirements fail, sequence time requirements fail, etc). + ''' + tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + + list(CScript(tx.vin[0].scriptSig))) + +def cltv_validate(node, tx, height): + '''Modify the signature in vin 0 of the tx to pass CLTV + Prepends CLTV DROP in the scriptSig, and sets + the locktime to height''' + tx.vin[0].nSequence = 0 + tx.nLockTime = height + + # Need to re-sign, since nSequence and nLockTime changed + signed_result = node.signrawtransaction(ToHex(tx)) + new_tx = CTransaction() + new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex']))) + + new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] + + list(CScript(new_tx.vin[0].scriptSig))) + return new_tx + +def create_transaction(node, coinbase, to_address, amount): + from_txid = node.getblock(coinbase)['tx'][0] + inputs = [{ "txid" : from_txid, "vout" : 0}] + outputs = { to_address : amount } + rawtx = node.createrawtransaction(inputs, outputs) + signresult = node.signrawtransaction(rawtx) + tx = CTransaction() + tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) + return tx + +class BIP65Test(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] + self.setup_clean_chain = True + + def run_test(self): + self.nodes[0].add_p2p_connection(P2PInterface()) + + network_thread_start() + + # wait_for_verack ensures that the P2P connection is fully up. 
+ self.nodes[0].p2p.wait_for_verack() + + self.log.info("Mining %d blocks", CLTV_HEIGHT - 2) + self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) + self.nodeaddress = self.nodes[0].getnewaddress() + + self.log.info("Test that an invalid-according-to-CLTV transaction can still appear in a block") + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], + self.nodeaddress, 1.0) + cltv_invalidate(spendtx) + spendtx.rehash() + + tip = self.nodes[0].getbestblockhash() + block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 + block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time) + block.nVersion = 3 + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + self.log.info("Test that blocks must now be at least version 4") + tip = block.sha256 + block_time += 1 + block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) + block.nVersion = 3 + block.solve() + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) + + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) + with mininode_lock: + assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)') + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + del self.nodes[0].p2p.last_message["reject"] + + self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block") + block.nVersion = 4 + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], + self.nodeaddress, 1.0) + cltv_invalidate(spendtx) + spendtx.rehash() + + # First we show that this tx is valid except for CLTV by getting it + # accepted to the mempool (which we can achieve with + # -promiscuousmempoolflags). + self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) + assert spendtx.hash in self.nodes[0].getrawmempool() + + # Now we verify that a block with this transaction is invalid. 
+ block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) + + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) + with mininode_lock: + assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: + # Generic rejection when a block is invalid + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') + else: + assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason + + self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") + spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1) + spendtx.rehash() + + block.vtx.pop(1) + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) + + +if __name__ == '__main__': + BIP65Test().main() diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py new file mode 100755 index 0000000000..61abba8082 --- /dev/null +++ b/test/functional/feature_config_args.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test various command line arguments and configuration file parameters.""" + +import os + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import get_datadir_path + +class ConfArgsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + self.stop_node(0) + # Remove the -datadir argument so it doesn't override the config file + self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")] + + default_data_dir = get_datadir_path(self.options.tmpdir, 0) + new_data_dir = os.path.join(default_data_dir, 'newdatadir') + new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2') + + # Check that using -datadir argument on non-existent directory fails + self.nodes[0].datadir = new_data_dir + self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.') + + # Check that using non-existent datadir in conf file fails + conf_file = os.path.join(default_data_dir, "bitcoin.conf") + with open(conf_file, 'a', encoding='utf8') as f: + f.write("datadir=" + new_data_dir + "\n") + self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.') + + # Create the directory and ensure the config file now works + os.mkdir(new_data_dir) + self.start_node(0, ['-conf='+conf_file, '-wallet=w1']) + self.stop_node(0) + assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1')) + + # Ensure command line argument overrides datadir in conf + os.mkdir(new_data_dir_2) + self.nodes[0].datadir = new_data_dir_2 + self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2']) + assert 
os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
+
+if __name__ == '__main__':
+    ConfArgsTest().main()
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
new file mode 100755
index 0000000000..82aa0ff891
--- /dev/null
+++ b/test/functional/feature_csv_activation.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test activation of the first version bits soft fork.
+
+This soft fork will activate the following BIPs:
+BIP 68  - nSequence relative lock times
+BIP 112 - CHECKSEQUENCEVERIFY
+BIP 113 - MedianTimePast semantics for nLockTime
+
+regtest lock-in with 108/144 block signalling
+activation after a further 144 blocks
+
+mine 82 blocks whose coinbases will be used to generate inputs for our tests
+mine 61 blocks to transition from DEFINED to STARTED
+mine 144 blocks, only 100 of which are signaling readiness, in order to fail to change state this period
+mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
+mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
+mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
+mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
+Test BIP 113 is enforced
+Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
+Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
+Mine 1 block so next height is 582 and test BIP 68 now passes time and height
+Test that BIP 112 is enforced
+
+Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates,
+and that after the soft fork activates transactions pass and fail as they should according to the rules.
+For each BIP, transactions of versions 1 and 2 will be tested.
+---------------- +BIP 113: +bip113tx - modify the nLocktime variable + +BIP 68: +bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below + +BIP 112: +bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP +bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP +bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP +bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP +bip112tx_special - test negative argument to OP_CSV +""" + +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import * +from test_framework.mininode import ToHex, CTransaction, network_thread_start +from test_framework.blocktools import create_coinbase, create_block +from test_framework.comptool import TestInstance, TestManager +from test_framework.script import * +from io import BytesIO +import time + +base_relative_locktime = 10 +seq_disable_flag = 1<<31 +seq_random_high_bit = 1<<25 +seq_type_flag = 1<<22 +seq_random_low_bit = 1<<18 + +# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field +# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1 +relative_locktimes = [] +for b31 in range(2): + b25times = [] + for b25 in range(2): + b22times = [] + for b22 in range(2): + b18times = [] + for b18 in range(2): + rlt = base_relative_locktime + if (b31): + rlt = rlt | seq_disable_flag + if (b25): + rlt = rlt | seq_random_high_bit + if (b22): + rlt = rlt | seq_type_flag + if (b18): + rlt = rlt | seq_random_low_bit + b18times.append(rlt) + b22times.append(b18times) + b25times.append(b22times) + relative_locktimes.append(b25times) + +def all_rlt_txs(txarray): + txs = [] + for b31 in range(2): + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + txs.append(txarray[b31][b25][b22][b18]) + return txs + +class BIP68_112_113Test(ComparisonTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']] + + def run_test(self): + test = TestManager(self, self.options.tmpdir) + test.add_all_connections(self.nodes) + network_thread_start() + test.run() + + def send_generic_input_tx(self, node, coinbases): + amount = Decimal("49.99") + return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) + + def create_transaction(self, node, txid, to_address, amount): + inputs = [{ "txid" : txid, "vout" : 0}] + outputs = { to_address : amount } + rawtx = node.createrawtransaction(inputs, outputs) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(rawtx)) + tx.deserialize(f) + return tx + + def sign_transaction(self, node, unsignedtx): + rawtx = ToHex(unsignedtx) + signresult = node.signrawtransaction(rawtx) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(signresult['hex'])) + tx.deserialize(f) + return tx + + def generate_blocks(self, number, version, test_blocks = []): + for i in range(number): + block = self.create_test_block([], version) + test_blocks.append([block, True]) + self.last_block_time += 600 + self.tip = block.sha256 + self.tipheight += 1 + return 
test_blocks + + def create_test_block(self, txs, version = 536870912): + block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600) + block.nVersion = version + block.vtx.extend(txs) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + block.solve() + return block + + def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): + txs = [] + assert(len(bip68inputs) >= 16) + i = 0 + for b31 in range(2): + b25txs = [] + for b25 in range(2): + b22txs = [] + for b22 in range(2): + b18txs = [] + for b18 in range(2): + tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) + i += 1 + tx.nVersion = txversion + tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta + b18txs.append(self.sign_transaction(self.nodes[0], tx)) + b22txs.append(b18txs) + b25txs.append(b22txs) + txs.append(b25txs) + return txs + + def create_bip112special(self, input, txversion): + tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98")) + tx.nVersion = txversion + signtx = self.sign_transaction(self.nodes[0], tx) + signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + return signtx + + def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0): + txs = [] + assert(len(bip112inputs) >= 16) + i = 0 + for b31 in range(2): + b25txs = [] + for b25 in range(2): + b22txs = [] + for b22 in range(2): + b18txs = [] + for b18 in range(2): + tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) + i += 1 + if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed + tx.vin[0].nSequence = base_relative_locktime + locktime_delta + else: # vary nSequence instead, OP_CSV is fixed + tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta + tx.nVersion = txversion + signtx = self.sign_transaction(self.nodes[0], tx) + if (varyOP_CSV): + signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + else: + signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + b18txs.append(signtx) + b22txs.append(b18txs) + b25txs.append(b22txs) + txs.append(b25txs) + return txs + + def get_tests(self): + long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future + self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time + self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs + self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time + self.tipheight = 82 # height of the next block to build + self.last_block_time = long_past_time + self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) + self.nodeaddress = self.nodes[0].getnewaddress() + + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined') + test_blocks = self.generate_blocks(61, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 1 + # Advanced from DEFINED to STARTED, height = 143 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') + + # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0 + # using a variety of 
bits to simulate multiple parallel softforks + test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) + yield TestInstance(test_blocks, sync_every_block=False) # 2 + # Failed to advance past STARTED, height = 287 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') + + # 108 out of 144 signal bit 0 to achieve lock-in + # using a variety of bits to simulate multiple parallel softforks + test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) + yield TestInstance(test_blocks, sync_every_block=False) # 3 + # Advanced from STARTED to LOCKED_IN, height = 431 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') + + # 140 more version 4 blocks + test_blocks = self.generate_blocks(140, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 4 + + ### Inputs at height = 572 + # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) + # Note we reuse inputs for v1 and v2 txs so must test these separately + # 16 normal inputs + bip68inputs = [] + for i in range(16): + bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) + bip112basicinputs = [] + for j in range(2): + inputs = [] + for i in range(16): + inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + bip112basicinputs.append(inputs) + # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) + bip112diverseinputs = [] + for j in range(2): + inputs = [] + for i in range(16): + inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + bip112diverseinputs.append(inputs) + # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) + bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + # 1 normal input + bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + + self.nodes[0].setmocktime(self.last_block_time + 600) + inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 + self.nodes[0].setmocktime(0) + self.tip = int("0x" + inputblockhash, 0) + self.tipheight += 1 + self.last_block_time += 600 + assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1) + + # 2 more version 4 blocks + test_blocks = self.generate_blocks(2, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 5 + # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575) + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') + + # Test both version 1 and version 2 transactions for all tests + # BIP113 test transaction will be modified before each use to put in appropriate block time + bip113tx_v1 = self.create_transaction(self.nodes[0], 
bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE + bip113tx_v1.nVersion = 1 + bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE + bip113tx_v2.nVersion = 2 + + # For BIP68 test all 16 relative sequence locktimes + bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) + bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) + + # For BIP112 test: + # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs + bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1) + bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2) + # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs + bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1) + bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1) + # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs + bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1) + bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2) + # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs + bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1) + bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1) + # -1 OP_CSV OP_DROP input + bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) + bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) + + + ### TESTING ### + ################################## + ### Before Soft Forks Activate ### + ################################## + # All txs should pass + ### Version 1 txs ### + success_txs = [] + # add BIP113 tx and -1 CSV tx + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + success_txs.append(bip113signed1) + success_txs.append(bip112tx_special_v1) + # add BIP 68 txs + success_txs.extend(all_rlt_txs(bip68txs_v1)) + # add BIP 112 with seq=10 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) + # try BIP 112 with seq=9 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 6 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ### Version 2 txs ### + success_txs = [] + # add BIP113 tx and -1 CSV tx + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + success_txs.append(bip113signed2) + success_txs.append(bip112tx_special_v2) + # add BIP 68 txs + success_txs.extend(all_rlt_txs(bip68txs_v2)) + # add BIP 112 with seq=10 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) + # try BIP 112 with seq=9 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 7 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + + # 1 
more version 4 block to get us to height 575 so the fork should now be active for the next block + test_blocks = self.generate_blocks(1, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 8 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active') + + + ################################# + ### After Soft Forks Activate ### + ################################# + ### BIP 113 ### + # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + for bip113tx in [bip113signed1, bip113signed2]: + yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10 + # BIP 113 tests should now pass if the locktime is < MTP + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + for bip113tx in [bip113signed1, bip113signed2]: + yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + # Next block height = 580 after 4 blocks of random version + test_blocks = self.generate_blocks(4, 1234) + yield TestInstance(test_blocks, sync_every_block=False) # 13 + + ### BIP 68 ### + ### Version 1 txs ### + # All still pass + success_txs = [] + success_txs.extend(all_rlt_txs(bip68txs_v1)) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 14 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ### Version 2 txs ### + bip68success_txs = [] + # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) + yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512 + bip68timetxs = [] + for b25 in range(2): + for b18 in range(2): + bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) + for tx in bip68timetxs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19 + bip68heighttxs = [] + for b25 in range(2): + for b18 in range(2): + bip68heighttxs.append(bip68txs_v2[0][b25][0][b18]) + for tx in bip68heighttxs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23 + + # Advance one block to 581 + test_blocks = self.generate_blocks(1, 1234) + yield TestInstance(test_blocks, sync_every_block=False) # 24 + + # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512 + bip68success_txs.extend(bip68timetxs) + yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + for tx in bip68heighttxs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29 + + # Advance one block to 582 + test_blocks = self.generate_blocks(1, 1234) + yield TestInstance(test_blocks, sync_every_block=False) 
# 30 + + # All BIP 68 txs should pass + bip68success_txs.extend(bip68heighttxs) + yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + + ### BIP 112 ### + ### Version 1 txs ### + # -1 OP_CSV tx should fail + yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32 + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass + success_txs = [] + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18]) + success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18]) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 33 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail + fail_txs = [] + fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) + fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18]) + fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18]) + + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81 + + ### Version 2 txs ### + # -1 OP_CSV tx should fail + yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82 + + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met) + success_txs = [] + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV + success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 + + yield TestInstance([[self.create_test_block(success_txs), True]]) # 83 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## + # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check + fail_txs = [] + fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9 + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9 + + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107 + + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail + fail_txs = [] + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115 + + # If sequencelock types mismatch, tx should fail + fail_txs = [] + for b25 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence + fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123 + + # Remaining txs should pass, just test masking works properly + success_txs = [] + for b25 in range(2): + for b18 in range(2): + success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence + 
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV + yield TestInstance([[self.create_test_block(success_txs), True]]) # 124 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + # Additional test, of checking that comparison of two time types works properly + time_txs = [] + for b25 in range(2): + for b18 in range(2): + tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] + tx.vin[0].nSequence = base_relative_locktime | seq_type_flag + signtx = self.sign_transaction(self.nodes[0], tx) + time_txs.append(signtx) + yield TestInstance([[self.create_test_block(time_txs), True]]) # 125 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ### Missing aspects of test + ## Testing empty stack fails + + +if __name__ == '__main__': + BIP68_112_113Test().main() diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py new file mode 100755 index 0000000000..24b9765b4e --- /dev/null +++ b/test/functional/feature_dbcrash.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test recovery from a crash during chainstate writing. + +- 4 nodes + * node0, node1, and node2 will have different dbcrash ratios, and different + dbcache sizes + * node3 will be a regular node, with no crashing. + * The nodes will not connect to each other. + +- use default test framework starting chain. initialize starting_tip_height to + tip height. + +- Main loop: + * generate lots of transactions on node3, enough to fill up a block. + * uniformly randomly pick a tip height from starting_tip_height to + tip_height; with probability 1/(height_difference+4), invalidate this block. + * mine enough blocks to overtake tip_height at start of loop. + * for each node in [node0,node1,node2]: + - for each mined block: + * submit block to node + * if node crashed on/after submitting: + - restart until recovery succeeds + - check that utxo matches node3 using gettxoutsetinfo""" + +import errno +import http.client +import random +import sys +import time + +from test_framework.mininode import * +from test_framework.script import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest] +try: + HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected) +except AttributeError: + pass + +class ChainstateWriteCrashTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + self.setup_clean_chain = False + + # Set -maxmempool=0 to turn off mempool memory sharing with dbcache + # Set -rpcservertimeout=900 to reduce socket disconnects in this + # long-running test + self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"] + + # Set different crash ratios and cache sizes. Note that not all of + # -dbcache goes to pcoinsTip. 
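+        # (For reference: -dbcrashratio=n makes bitcoind deliberately crash on
+        # roughly 1 in n chainstate writes, and the small -dbcache settings
+        # force frequent flushes, so node0 should crash most often and node2
+        # least often.)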
+        self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
+        self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
+        self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args
+
+        # Node3 is a normal node with default args, except that it will mine full blocks
+        self.node3_args = ["-blockmaxweight=4000000"]
+        self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
+
+    def setup_network(self):
+        # Need a bit of extra time for the nodes to start up for this test
+        self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90)
+        self.start_nodes()
+        # Leave them unconnected, we'll use submitblock directly in this test
+
+    def restart_node(self, node_index, expected_tip):
+        """Start up the given node, wait for the tip to reach the given block hash, and calculate the utxo hash.
+
+        Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
+        after 120 seconds. Returns the utxo hash of the given node."""
+
+        time_start = time.time()
+        while time.time() - time_start < 120:
+            try:
+                # Any of these RPC calls could throw due to node crash
+                self.start_node(node_index)
+                self.nodes[node_index].waitforblock(expected_tip)
+                utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
+                return utxo_hash
+            except:
+                # An exception here should mean the node is about to crash.
+                # If bitcoind exits, then try again. wait_for_node_exit()
+                # should raise an exception if bitcoind doesn't exit.
+                self.wait_for_node_exit(node_index, timeout=10)
+                self.crashed_on_restart += 1
+                time.sleep(1)
+
+        # If we got here, bitcoind isn't coming back up on restart. Could be a
+        # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio --
+        # perhaps we generated a test case that blew up our cache?
+        # TODO: If this happens a lot, we should try to restart without -dbcrashratio
+        # and make sure that recovery happens.
+        raise AssertionError("Unable to successfully restart node %d in allotted time" % node_index)
+
+    def submit_block_catch_error(self, node_index, block):
+        """Try submitting a block to the given node.
+
+        Catch any exceptions that indicate the node has crashed.
+        Returns true if the block was submitted successfully; false otherwise."""
+
+        try:
+            self.nodes[node_index].submitblock(block)
+            return True
+        except http.client.BadStatusLine as e:
+            # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error.
+            if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''":
+                self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+                return False
+            else:
+                raise
+        except tuple(HTTP_DISCONNECT_ERRORS) as e:
+            self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+            return False
+        except OSError as e:
+            self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
+            if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
+                # The node has likely crashed
+                return False
+            else:
+                # Unexpected exception, raise
+                raise
+
+    def sync_node3blocks(self, block_hashes):
+        """Use submitblock to sync node3's chain with the other nodes.
+
+        If submitblock fails, restart the node and get the new utxo hash.
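+        (Each block is fetched from node3 with getblock(<hash>, 0) and handed
+        to the other nodes verbatim via submitblock.)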
+ If any nodes crash while updating, we'll compare utxo hashes to + ensure recovery was successful.""" + + node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] + + # Retrieve all the blocks from node3 + blocks = [] + for block_hash in block_hashes: + blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)]) + + # Deliver each block to each other node + for i in range(3): + nodei_utxo_hash = None + self.log.debug("Syncing blocks to node %d", i) + for (block_hash, block) in blocks: + # Get the block from node3, and submit to node_i + self.log.debug("submitting block %s", block_hash) + if not self.submit_block_catch_error(i, block): + # TODO: more carefully check that the crash is due to -dbcrashratio + # (change the exit code perhaps, and check that here?) + self.wait_for_node_exit(i, timeout=30) + self.log.debug("Restarting node %d after block hash %s", i, block_hash) + nodei_utxo_hash = self.restart_node(i, block_hash) + assert nodei_utxo_hash is not None + self.restart_counts[i] += 1 + else: + # Clear it out after successful submitblock calls -- the cached + # utxo hash will no longer be correct + nodei_utxo_hash = None + + # Check that the utxo hash matches node3's utxo set + # NOTE: we only check the utxo set if we had to restart the node + # after the last block submitted: + # - checking the utxo hash causes a cache flush, which we don't + # want to do every time; so + # - we only update the utxo cache after a node restart, since flushing + # the cache is a no-op at that point + if nodei_utxo_hash is not None: + self.log.debug("Checking txoutsetinfo matches for node %d", i) + assert_equal(nodei_utxo_hash, node3_utxo_hash) + + def verify_utxo_hash(self): + """Verify that the utxo hash of each node matches node3. + + Restart any nodes that crash while querying.""" + node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] + self.log.info("Verifying utxo hash matches for all nodes") + + for i in range(3): + try: + nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2'] + except OSError: + # probably a crash on db flushing + nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash()) + assert_equal(nodei_utxo_hash, node3_utxo_hash) + + def generate_small_transactions(self, node, count, utxo_list): + FEE = 1000 # TODO: replace this with node relay fee based calculation + num_transactions = 0 + random.shuffle(utxo_list) + while len(utxo_list) >= 2 and num_transactions < count: + tx = CTransaction() + input_amount = 0 + for i in range(2): + utxo = utxo_list.pop() + tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) + input_amount += int(utxo['amount'] * COIN) + output_amount = (input_amount - FEE) // 3 + + if output_amount <= 0: + # Sanity check -- if we chose inputs that are too small, skip + continue + + for i in range(3): + tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) + + # Sign and send the transaction to get into the mempool + tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex'] + node.sendrawtransaction(tx_signed_hex) + num_transactions += 1 + + def run_test(self): + # Track test coverage statistics + self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2 + self.crashed_on_restart = 0 # Track count of crashes during recovery + + # Start by creating a lot of utxos on node3 + initial_height = self.nodes[3].getblockcount() + utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000) + self.log.info("Prepped %d utxo 
entries", len(utxo_list)) + + # Sync these blocks with the other nodes + block_hashes_to_sync = [] + for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): + block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) + + self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync)) + # Syncing the blocks could cause nodes to crash, so the test begins here. + self.sync_node3blocks(block_hashes_to_sync) + + starting_tip_height = self.nodes[3].getblockcount() + + # Main test loop: + # each time through the loop, generate a bunch of transactions, + # and then either mine a single new block on the tip, or some-sized reorg. + for i in range(40): + self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts) + # Generate a bunch of small-ish transactions + self.generate_small_transactions(self.nodes[3], 2500, utxo_list) + # Pick a random block between current tip, and starting tip + current_height = self.nodes[3].getblockcount() + random_height = random.randint(starting_tip_height, current_height) + self.log.debug("At height %d, considering height %d", current_height, random_height) + if random_height > starting_tip_height: + # Randomly reorg from this point with some probability (1/4 for + # tip, 1/5 for tip-1, ...) + if random.random() < 1.0 / (current_height + 4 - random_height): + self.log.debug("Invalidating block at height %d", random_height) + self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height)) + + # Now generate new blocks until we pass the old tip height + self.log.debug("Mining longer tip") + block_hashes = [] + while current_height + 1 > self.nodes[3].getblockcount(): + block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount()))) + self.log.debug("Syncing %d new blocks...", len(block_hashes)) + self.sync_node3blocks(block_hashes) + utxo_list = self.nodes[3].listunspent() + self.log.debug("Node3 utxo count: %d", len(utxo_list)) + + # Check that the utxo hashes agree with node3 + # Useful side effect: each utxo cache gets flushed here, so that we + # won't get crashes on shutdown at the end of the test. + self.verify_utxo_hash() + + # Check the test coverage + self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart) + + # If no nodes were restarted, we didn't test anything. + assert self.restart_counts != [0, 0, 0] + + # Make sure we tested the case of crash-during-recovery. + assert self.crashed_on_restart > 0 + + # Warn if any of the nodes escaped restart. + for i in range(3): + if self.restart_counts[i] == 0: + self.log.warn("Node %d never crashed during utxo flush!", i) + +if __name__ == "__main__": + ChainstateWriteCrashTest().main() diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py new file mode 100755 index 0000000000..3414571678 --- /dev/null +++ b/test/functional/feature_dersig.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test BIP66 (DER SIG). + +Test that the DERSIG soft-fork activates at (regtest) height 1251. 
+""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import * +from test_framework.blocktools import create_coinbase, create_block +from test_framework.script import CScript +from io import BytesIO + +DERSIG_HEIGHT = 1251 + +# Reject codes that we might receive in this test +REJECT_INVALID = 16 +REJECT_OBSOLETE = 17 +REJECT_NONSTANDARD = 64 + +# A canonical signature consists of: +# <30> <02> <02> +def unDERify(tx): + """ + Make the signature in vin 0 of a tx non-DER-compliant, + by adding padding after the S-value. + """ + scriptSig = CScript(tx.vin[0].scriptSig) + newscript = [] + for i in scriptSig: + if (len(newscript) == 0): + newscript.append(i[0:-1] + b'\0' + i[-1:]) + else: + newscript.append(i) + tx.vin[0].scriptSig = CScript(newscript) + +def create_transaction(node, coinbase, to_address, amount): + from_txid = node.getblock(coinbase)['tx'][0] + inputs = [{ "txid" : from_txid, "vout" : 0}] + outputs = { to_address : amount } + rawtx = node.createrawtransaction(inputs, outputs) + signresult = node.signrawtransaction(rawtx) + tx = CTransaction() + tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) + return tx + +class BIP66Test(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] + self.setup_clean_chain = True + + def run_test(self): + self.nodes[0].add_p2p_connection(P2PInterface()) + + network_thread_start() + + # wait_for_verack ensures that the P2P connection is fully up. + self.nodes[0].p2p.wait_for_verack() + + self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2) + self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2) + self.nodeaddress = self.nodes[0].getnewaddress() + + self.log.info("Test that a transaction with non-DER signature can still appear in a block") + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], + self.nodeaddress, 1.0) + unDERify(spendtx) + spendtx.rehash() + + tip = self.nodes[0].getbestblockhash() + block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 + block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time) + block.nVersion = 2 + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + self.log.info("Test that blocks must now be at least version 3") + tip = block.sha256 + block_time += 1 + block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time) + block.nVersion = 2 + block.rehash() + block.solve() + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) + + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) + with mininode_lock: + assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)') + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + del self.nodes[0].p2p.last_message["reject"] + + self.log.info("Test that transactions with non-DER signatures cannot appear in a block") + block.nVersion = 3 + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], + self.nodeaddress, 1.0) + unDERify(spendtx) + spendtx.rehash() + + # First we show that this tx is valid except for 
+        # accepted to the mempool (which we can achieve with
+        # -promiscuousmempoolflags).
+        self.nodes[0].p2p.send_and_ping(msg_tx(spendtx))
+        assert spendtx.hash in self.nodes[0].getrawmempool()
+
+        # Now we verify that a block with this transaction is invalid.
+        block.vtx.append(spendtx)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.rehash()
+        block.solve()
+
+        self.nodes[0].p2p.send_and_ping(msg_block(block))
+        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
+
+        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
+        with mininode_lock:
+            # We can receive different reject messages depending on whether
+            # bitcoind is running with multiple script check threads. If script
+            # check threads are not in use, then transaction script validation
+            # happens sequentially, and bitcoind produces more specific reject
+            # reasons.
+            assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
+            assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
+            if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
+                # Generic rejection when a block is invalid
+                assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
+            else:
+                assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
+
+        self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
+        block.vtx[1] = create_transaction(self.nodes[0],
+                self.coinbase_blocks[1], self.nodeaddress, 1.0)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.rehash()
+        block.solve()
+
+        self.nodes[0].p2p.send_and_ping(msg_block(block))
+        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+if __name__ == '__main__':
+    BIP66Test().main()
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
new file mode 100755
index 0000000000..68453e50f4
--- /dev/null
+++ b/test/functional/feature_fee_estimation.py
@@ -0,0 +1,262 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test fee estimation code."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
+from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
+
+# Construct 2 trivial P2SH's and the ScriptSigs that spend them,
+# so we can create many transactions without needing to spend
+# time signing.
+redeem_script_1 = CScript([OP_1, OP_DROP])
+redeem_script_2 = CScript([OP_2, OP_DROP])
+P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
+P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
+
+# Associated ScriptSigs that satisfy (spend) P2SH_1 and P2SH_2
+SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
+
+global log
+
+def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
+    """
+    Create and send a transaction with a random fee.
+    The transaction pays to a trivial P2SH script, and assumes that its inputs
+    are of the same form.
+    The function takes a list of confirmed outputs and unconfirmed outputs
+    and attempts to use the confirmed list first for its inputs.
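+    (Inputs are drawn from those lists until they cover amount plus the
+    randomly chosen fee; a RuntimeError is raised if both lists run dry.)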
+ It adds the newly created outputs to the unconfirmed list. + Returns (raw transaction, fee) + """ + # It's best to exponentially distribute our random fees + # because the buckets are exponentially spaced. + # Exponentially distributed from 1-128 * fee_increment + rand_fee = float(fee_increment)*(1.1892**random.randint(0,28)) + # Total fee ranges from min_fee to min_fee + 127*fee_increment + fee = min_fee - fee_increment + satoshi_round(rand_fee) + tx = CTransaction() + total_in = Decimal("0.00000000") + while total_in <= (amount + fee) and len(conflist) > 0: + t = conflist.pop(0) + total_in += t["amount"] + tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) + if total_in <= amount + fee: + while total_in <= (amount + fee) and len(unconflist) > 0: + t = unconflist.pop(0) + total_in += t["amount"] + tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) + if total_in <= amount + fee: + raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in)) + tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1)) + tx.vout.append(CTxOut(int(amount*COIN), P2SH_2)) + # These transactions don't need to be signed, but we still have to insert + # the ScriptSig that will satisfy the ScriptPubKey. + for inp in tx.vin: + inp.scriptSig = SCRIPT_SIG[inp.prevout.n] + txid = from_node.sendrawtransaction(ToHex(tx), True) + unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee}) + unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount}) + + return (ToHex(tx), fee) + +def split_inputs(from_node, txins, txouts, initial_split = False): + """ + We need to generate a lot of inputs so we can generate a ton of transactions. + This function takes an input from txins, and creates and sends a transaction + which splits the value into 2 outputs which are appended to txouts. + Previously this was designed to be small inputs so they wouldn't have + a high coin age when the notion of priority still existed. + """ + prevtxout = txins.pop() + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b"")) + + half_change = satoshi_round(prevtxout["amount"]/2) + rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000") + tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1)) + tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2)) + + # If this is the initial split we actually need to sign the transaction + # Otherwise we just need to insert the proper ScriptSig + if (initial_split) : + completetx = from_node.signrawtransaction(ToHex(tx))["hex"] + else : + tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]] + completetx = ToHex(tx) + txid = from_node.sendrawtransaction(completetx, True) + txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change}) + txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change}) + +def check_estimates(node, fees_seen, max_invalid, print_estimates = True): + """ + This function calls estimatefee and verifies that the estimates + meet certain invariants. 
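+    The invariants: each returned estimate must lie within the range of fees
+    actually paid, estimates must not increase as the confirmation target
+    grows, and once a valid estimate appears for some target, all higher
+    targets must have one too. For example, with fees_seen = [0.0001, 0.0002],
+    an estimate of 0.00015 for any target is acceptable, but 0.0003 would
+    raise.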
+ """ + all_estimates = [ node.estimatefee(i) for i in range(1,26) ] + if print_estimates: + log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]]) + delta = 1.0e-6 # account for rounding error + last_e = max(fees_seen) + for e in [x for x in all_estimates if x >= 0]: + # Estimates should be within the bounds of what transactions fees actually were: + if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen): + raise AssertionError("Estimated fee (%f) out of range (%f,%f)" + %(float(e), min(fees_seen), max(fees_seen))) + # Estimates should be monotonically decreasing + if float(e)-delta > last_e: + raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms" + %(float(e),float(last_e))) + last_e = e + valid_estimate = False + invalid_estimates = 0 + for i,e in enumerate(all_estimates): # estimate is for i+1 + if e >= 0: + valid_estimate = True + if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n) + assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta) + + else: + invalid_estimates += 1 + + # estimatesmartfee should still be valid + approx_estimate = node.estimatesmartfee(i+1)["feerate"] + answer_found = node.estimatesmartfee(i+1)["blocks"] + assert(approx_estimate > 0) + assert(answer_found > i+1) + + # Once we're at a high enough confirmation count that we can give an estimate + # We should have estimates for all higher confirmation counts + if valid_estimate: + raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate") + + # Check on the expected number of different confirmation counts + # that we might not have valid estimates for + if invalid_estimates > max_invalid: + raise AssertionError("More than (%d) invalid estimates"%(max_invalid)) + return all_estimates + + +class EstimateFeeTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 3 + + def setup_network(self): + """ + We'll setup the network to have 3 nodes that all mine with different parameters. + But first we need to use one node to create a lot of outputs + which we will use to generate our transactions. + """ + self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"], + ["-blockmaxsize=17000", "-maxorphantx=1000", "-deprecatedrpc=estimatefee"], + ["-blockmaxsize=8000", "-maxorphantx=1000"]]) + # Use node0 to mine blocks for input splitting + # Node1 mines small blocks but that are bigger than the expected transaction rate. 
+        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
+        # (17k is room enough for 110 or so transactions)
+        # Node2 is a stingy miner that produces
+        # too-small blocks (room for only 55 or so transactions)
+
+
+    def transact_and_mine(self, numblocks, mining_node):
+        min_fee = Decimal("0.00001")
+        # We will now mine numblocks blocks generating on average 100 transactions between each block
+        # We shuffle our confirmed txout set before each set of transactions
+        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible,
+        # resorting to tx's that depend on the mempool when those run out
+        for i in range(numblocks):
+            random.shuffle(self.confutxo)
+            for j in range(random.randrange(100-50,100+50)):
+                from_index = random.randint(1,2)
+                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
+                                                      self.memutxo, Decimal("0.005"), min_fee, min_fee)
+                tx_kbytes = (len(txhex) // 2) / 1000.0
+                self.fees_per_kb.append(float(fee)/tx_kbytes)
+            sync_mempools(self.nodes[0:3], wait=.1)
+            mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
+            sync_blocks(self.nodes[0:3], wait=.1)
+            # update which txouts are confirmed
+            newmem = []
+            for utx in self.memutxo:
+                if utx["txid"] in mined:
+                    self.confutxo.append(utx)
+                else:
+                    newmem.append(utx)
+            self.memutxo = newmem
+
+    def run_test(self):
+        self.log.info("This test is time consuming, please be patient")
+        self.log.info("Splitting inputs so we can generate tx's")
+
+        # Make log handler available to helper functions
+        global log
+        log = self.log
+
+        # Start node0
+        self.start_node(0)
+        self.txouts = []
+        self.txouts2 = []
+        # Split a coinbase into two transaction puzzle outputs
+        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
+
+        # Mine
+        while (len(self.nodes[0].getrawmempool()) > 0):
+            self.nodes[0].generate(1)
+
+        # Repeatedly split those 2 outputs, doubling twice for each rep
+        # Use txouts to monitor the available utxo, since these won't be tracked in wallet
+        reps = 0
+        while (reps < 5):
+            #Double txouts to txouts2
+            while (len(self.txouts)>0):
+                split_inputs(self.nodes[0], self.txouts, self.txouts2)
+            while (len(self.nodes[0].getrawmempool()) > 0):
+                self.nodes[0].generate(1)
+            #Double txouts2 to txouts
+            while (len(self.txouts2)>0):
+                split_inputs(self.nodes[0], self.txouts2, self.txouts)
+            while (len(self.nodes[0].getrawmempool()) > 0):
+                self.nodes[0].generate(1)
+            reps += 1
+        self.log.info("Finished splitting")
+
+        # Now we can connect the other nodes; we didn't want to connect them earlier
+        # so the estimates would not be affected by the splitting transactions
+        self.start_node(1)
+        self.start_node(2)
+        connect_nodes(self.nodes[1], 0)
+        connect_nodes(self.nodes[0], 2)
+        connect_nodes(self.nodes[2], 1)
+
+        self.sync_all()
+
+        self.fees_per_kb = []
+        self.memutxo = []
+        self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
+        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
+
+        for i in range(2):
+            self.log.info("Creating transactions and mining them with a block size that can't keep up")
+            # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
+            self.transact_and_mine(10, self.nodes[2])
+            check_estimates(self.nodes[1], self.fees_per_kb, 14)
+
+        self.log.info("Creating transactions and mining them at a block size that is just big enough")
+        # Generate transactions while mining 10 more blocks, this time with node1,
+        # which mines blocks with capacity just above the rate that transactions are being created
+        self.transact_and_mine(10, self.nodes[1])
+        check_estimates(self.nodes[1], self.fees_per_kb, 2)
+
+        # Finish by mining a normal-sized block:
+        while len(self.nodes[1].getrawmempool()) > 0:
+            self.nodes[1].generate(1)
+
+        sync_blocks(self.nodes[0:3], wait=.1)
+        self.log.info("Final estimates after emptying mempools")
+        check_estimates(self.nodes[1], self.fees_per_kb, 2)
+
+if __name__ == '__main__':
+    EstimateFeeTest().main()
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
new file mode 100755
index 0000000000..45336ee801
--- /dev/null
+++ b/test/functional/feature_maxuploadtarget.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test behavior of -maxuploadtarget.
+
+* Verify that getdata requests for old blocks (>1week) are dropped
+if uploadtarget has been reached.
+* Verify that getdata requests for recent blocks are respected even
+if uploadtarget has been reached.
+* Verify that the upload counters are reset after 24 hours.
+"""
+from collections import defaultdict
+import time
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class TestNode(P2PInterface):
+    def __init__(self):
+        super().__init__()
+        self.block_receive_map = defaultdict(int)
+
+    def on_inv(self, message):
+        pass
+
+    def on_block(self, message):
+        message.block.calc_sha256()
+        self.block_receive_map[message.block.sha256] += 1
+
+class MaxUploadTest(BitcoinTestFramework):
+
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+        self.extra_args = [["-maxuploadtarget=800", "-blockmaxsize=999000"]]
+
+        # Cache for utxos, as the listunspent may take a long time later in the test
+        self.utxo_cache = []
+
+    def run_test(self):
+        # Before we connect anything, we first set the time on the node
+        # to be in the past, otherwise things break because the CNode
+        # time counters can't be reset backward after initialization
+        old_time = int(time.time() - 2*60*60*24*7)
+        self.nodes[0].setmocktime(old_time)
+
+        # Generate some old blocks
+        self.nodes[0].generate(130)
+
+        # p2p_conns[0] will only request old blocks
+        # p2p_conns[1] will only request new blocks
+        # p2p_conns[2] will test resetting the counters
+        p2p_conns = []
+
+        for _ in range(3):
+            p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode()))
+
+        network_thread_start()
+        for p2pc in p2p_conns:
+            p2pc.wait_for_verack()
+
+        # Test logic begins here
+
+        # Now mine a big block
+        mine_large_block(self.nodes[0], self.utxo_cache)
+
+        # Store the hash; we'll request this later
+        big_old_block = self.nodes[0].getbestblockhash()
+        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
+        big_old_block = int(big_old_block, 16)
+
+        # Advance to two days ago
+        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
+
+        # Mine one more block, so that the prior block looks old
+        mine_large_block(self.nodes[0], self.utxo_cache)
+
+        # We'll be requesting this new block too
+        big_new_block = self.nodes[0].getbestblockhash()
+        big_new_block = int(big_new_block, 16)
+
+        # p2p_conns[0] will test what happens if we just keep requesting
+        # the same big old block too many times (expect: disconnect)
+
+        getdata_request = msg_getdata()
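+        # (CInv type 2 is MSG_BLOCK, so each getdata below asks the node to
+        # serve one full block, which is what counts against the upload
+        # target.)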
getdata_request.inv.append(CInv(2, big_old_block)) + + max_bytes_per_day = 800*1024*1024 + daily_buffer = 144 * 4000000 + max_bytes_available = max_bytes_per_day - daily_buffer + success_count = max_bytes_available // old_block_size + + # 576MB will be reserved for relaying new blocks, so expect this to + # succeed for ~235 tries. + for i in range(success_count): + p2p_conns[0].send_message(getdata_request) + p2p_conns[0].sync_with_ping() + assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1) + + assert_equal(len(self.nodes[0].getpeerinfo()), 3) + # At most a couple more tries should succeed (depending on how long + # the test has been running so far). + for i in range(3): + p2p_conns[0].send_message(getdata_request) + p2p_conns[0].wait_for_disconnect() + assert_equal(len(self.nodes[0].getpeerinfo()), 2) + self.log.info("Peer 0 disconnected after downloading old block too many times") + + # Requesting the current block on p2p_conns[1] should succeed indefinitely, + # even when over the max upload target. + # We'll try 800 times + getdata_request.inv = [CInv(2, big_new_block)] + for i in range(800): + p2p_conns[1].send_message(getdata_request) + p2p_conns[1].sync_with_ping() + assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1) + + self.log.info("Peer 1 able to repeatedly download new block") + + # But if p2p_conns[1] tries for an old block, it gets disconnected too. + getdata_request.inv = [CInv(2, big_old_block)] + p2p_conns[1].send_message(getdata_request) + p2p_conns[1].wait_for_disconnect() + assert_equal(len(self.nodes[0].getpeerinfo()), 1) + + self.log.info("Peer 1 disconnected after trying to download old block") + + self.log.info("Advancing system time on node to clear counters...") + + # If we advance the time by 24 hours, then the counters should reset, + # and p2p_conns[2] should be able to retrieve the old block. 
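+        # (Mock time currently sits two days in the past, so jumping back to
+        # the present advances the node's clock by well over 24 hours.)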
+ self.nodes[0].setmocktime(int(time.time())) + p2p_conns[2].sync_with_ping() + p2p_conns[2].send_message(getdata_request) + p2p_conns[2].sync_with_ping() + assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) + + self.log.info("Peer 2 able to download old block") + + self.nodes[0].disconnect_p2ps() + + #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 + self.log.info("Restarting nodes with -whitelist=127.0.0.1") + self.stop_node(0) + self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) + + # Reconnect to self.nodes[0] + self.nodes[0].add_p2p_connection(TestNode()) + + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + #retrieve 20 blocks which should be enough to break the 1MB limit + getdata_request.inv = [CInv(2, big_new_block)] + for i in range(20): + self.nodes[0].p2p.send_message(getdata_request) + self.nodes[0].p2p.sync_with_ping() + assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1) + + getdata_request.inv = [CInv(2, big_old_block)] + self.nodes[0].p2p.send_and_ping(getdata_request) + assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist + + self.log.info("Peer still connected after trying to download old block (whitelisted)") + +if __name__ == '__main__': + MaxUploadTest().main() diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py new file mode 100755 index 0000000000..90a3de0e0d --- /dev/null +++ b/test/functional/feature_minchainwork.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test logic for setting nMinimumChainWork on command line. + +Nodes don't consider themselves out of "initial block download" until +their active chain has more work than nMinimumChainWork. + +Nodes don't download blocks from a peer unless the peer's best known block +has more work than nMinimumChainWork. + +While in initial block download, nodes won't relay blocks to their peers, so +test that this parameter functions as intended by verifying that block relay +only succeeds past a given node once its nMinimumChainWork has been exceeded. +""" + +import time + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import connect_nodes, assert_equal + +# 2 hashes required per regtest block (with no difficulty adjustment) +REGTEST_WORK_PER_BLOCK = 2 + +class MinimumChainWorkTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + + self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] + self.node_min_work = [0, 101, 101] + + def setup_network(self): + # This test relies on the chain setup being: + # node0 <- node1 <- node2 + # Before leaving IBD, nodes prefer to download blocks from outbound + # peers, so ensure that we're mining on an outbound peer and testing + # block relay to inbound peers. + self.setup_nodes() + for i in range(self.num_nodes-1): + connect_nodes(self.nodes[i+1], i) + + def run_test(self): + # Start building a chain on node0. 
node2 shouldn't be able to sync until node1's + # minchainwork is exceeded + starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work + self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1]) + + starting_blockcount = self.nodes[2].getblockcount() + + num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) + self.log.info("Generating %d blocks on node0", num_blocks_to_generate) + hashes = self.nodes[0].generate(num_blocks_to_generate) + + self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork']) + + # Sleep a few seconds and verify that node2 didn't get any new blocks + # or headers. We sleep, rather than sync_blocks(node0, node1) because + # it's reasonable either way for node1 to get the blocks, or not get + # them (since they're below node1's minchainwork). + time.sleep(3) + + self.log.info("Verifying node 2 has no more blocks than before") + self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) + # Node2 shouldn't have any new headers yet, because node1 should not + # have relayed anything. + assert_equal(len(self.nodes[2].getchaintips()), 1) + assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) + + assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash() + assert_equal(self.nodes[2].getblockcount(), starting_blockcount) + + self.log.info("Generating one more block") + self.nodes[0].generate(1) + + self.log.info("Verifying nodes are all synced") + + # Because nodes in regtest are all manual connections (eg using + # addnode), node1 should not have disconnected node0. If not for that, + # we'd expect node1 to have disconnected node0 for serving an + # insufficient work chain, in which case we'd need to reconnect them to + # continue the test. + + self.sync_all() + self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) + +if __name__ == '__main__': + MinimumChainWorkTest().main() diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py new file mode 100755 index 0000000000..980bef5fc8 --- /dev/null +++ b/test/functional/feature_notifications.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the -alertnotify, -blocknotify and -walletnotify options.""" +import os + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, wait_until, connect_nodes_bi + +class NotificationsTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + + def setup_network(self): + self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") + self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt") + self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt") + + # -alertnotify and -blocknotify on node0, walletnotify on node1 + self.extra_args = [["-blockversion=2", + "-alertnotify=echo %%s >> %s" % self.alert_filename, + "-blocknotify=echo %%s >> %s" % self.block_filename], + ["-blockversion=211", + "-rescan", + "-walletnotify=echo %%s >> %s" % self.tx_filename]] + super().setup_network() + + def run_test(self): + self.log.info("test -blocknotify") + block_count = 10 + blocks = self.nodes[1].generate(block_count) + + # wait at most 10 seconds for expected file size before reading the content + wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10) + + # file content should equal the generated blocks hashes + with open(self.block_filename, 'r') as f: + assert_equal(sorted(blocks), sorted(f.read().splitlines())) + + self.log.info("test -walletnotify") + # wait at most 10 seconds for expected file size before reading the content + wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) + + # file content should equal the generated transaction hashes + txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) + with open(self.tx_filename, 'r') as f: + assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) + os.remove(self.tx_filename) + + self.log.info("test -walletnotify after rescan") + # restart node to rescan to force wallet notifications + self.restart_node(1) + connect_nodes_bi(self.nodes, 0, 1) + + wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) + + # file content should equal the generated transaction hashes + txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) + with open(self.tx_filename, 'r') as f: + assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) + + # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st. 
+ self.log.info("test -alertnotify") + self.nodes[1].generate(41) + self.sync_all() + + # Give bitcoind 10 seconds to write the alert notification + wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10) + + with open(self.alert_filename, 'r', encoding='utf8') as f: + alert_text = f.read() + + # Mine more up-version blocks, should not get more alerts: + self.nodes[1].generate(2) + self.sync_all() + + with open(self.alert_filename, 'r', encoding='utf8') as f: + alert_text2 = f.read() + + self.log.info("-alertnotify should not continue notifying for more unknown version blocks") + assert_equal(alert_text, alert_text2) + +if __name__ == '__main__': + NotificationsTest().main() diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py new file mode 100755 index 0000000000..e4f413cc2a --- /dev/null +++ b/test/functional/feature_nulldummy.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test NULLDUMMY softfork. + +Connect to a single node. +Generate 2 blocks (save the coinbases for later). +Generate 427 more blocks. +[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. +[Policy] Check that non-NULLDUMMY transactions are rejected before activation. +[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. +[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import CTransaction, network_thread_start +from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment +from test_framework.script import CScript +from io import BytesIO +import time + +NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" + +def trueDummy(tx): + scriptSig = CScript(tx.vin[0].scriptSig) + newscript = [] + for i in scriptSig: + if (len(newscript) == 0): + assert(len(i) == 0) + newscript.append(b'\x51') + else: + newscript.append(i) + tx.vin[0].scriptSig = CScript(newscript) + tx.rehash() + +class NULLDUMMYTest(BitcoinTestFramework): + + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through + # normal segwit activation here (and don't use the default always-on behaviour). 
+ self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]] + + def run_test(self): + self.address = self.nodes[0].getnewaddress() + self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])['address'] + self.wit_address = self.nodes[0].addwitnessaddress(self.address) + self.wit_ms_address = self.nodes[0].addmultisigaddress(1, [self.address], '', 'p2sh-segwit')['address'] + + network_thread_start() + self.coinbase_blocks = self.nodes[0].generate(2) # Block 2 + coinbase_txid = [] + for i in self.coinbase_blocks: + coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) + self.nodes[0].generate(427) # Block 429 + self.lastblockhash = self.nodes[0].getbestblockhash() + self.tip = int("0x" + self.lastblockhash, 0) + self.lastblockheight = 429 + self.lastblocktime = int(time.time()) + 429 + + self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") + test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)] + txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True) + test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48)) + txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True) + test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49)) + txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True) + self.block_submit(self.nodes[0], test1txs, False, True) + + self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") + test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47) + trueDummy(test2tx) + assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True) + + self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") + self.block_submit(self.nodes[0], [test2tx], False, True) + + self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation") + test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46) + test6txs=[CTransaction(test4tx)] + trueDummy(test4tx) + assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True) + self.block_submit(self.nodes[0], [test4tx]) + + self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation") + test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48) + test6txs.append(CTransaction(test5tx)) + test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01' + assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True) + self.block_submit(self.nodes[0], [test5tx], True) + + self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]") + for i in test6txs: + self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True) + self.block_submit(self.nodes[0], test6txs, True, True) + + + def create_transaction(self, node, txid, to_address, amount): + inputs = [{ "txid" : txid, "vout" : 0}] + outputs = { 
to_address : amount }
+        rawtx = node.createrawtransaction(inputs, outputs)
+        signresult = node.signrawtransaction(rawtx)
+        tx = CTransaction()
+        f = BytesIO(hex_str_to_bytes(signresult['hex']))
+        tx.deserialize(f)
+        return tx
+
+
+    def block_submit(self, node, txs, witness = False, accept = False):
+        block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
+        block.nVersion = 4
+        for tx in txs:
+            tx.rehash()
+            block.vtx.append(tx)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        witness and add_witness_commitment(block)  # short-circuit: only add the witness commitment when requested
+        block.rehash()
+        block.solve()
+        node.submitblock(bytes_to_hex_str(block.serialize(True)))
+        if (accept):
+            assert_equal(node.getbestblockhash(), block.hash)
+            self.tip = block.sha256
+            self.lastblockhash = block.hash
+            self.lastblocktime += 1
+            self.lastblockheight += 1
+        else:
+            assert_equal(node.getbestblockhash(), self.lastblockhash)
+
+if __name__ == '__main__':
+    NULLDUMMYTest().main()
diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py
new file mode 100755
index 0000000000..2eb1be47a5
--- /dev/null
+++ b/test/functional/feature_proxy.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test bitcoind with different proxy configurations.
+
+Test plan:
+- Start bitcoinds with different proxy configurations
+- Use addnode to initiate connections
+- Verify that proxies are connected to, and the right connection command is given
+- Proxy configurations to test on bitcoind side:
+  - `-proxy` (proxy everything)
+  - `-onion` (proxy just onions)
+  - `-proxyrandomize` Circuit randomization
+- Proxy configurations to test on proxy side:
+  - support no authentication (other proxy)
+  - support no authentication + user/pass authentication (Tor)
+  - proxy on IPv6
+
+- Create various proxies (as threads)
+- Create bitcoinds that connect to them
+- Manipulate the bitcoinds using addnode (onetry) and observe effects
+
+addnode connect to IPv4
+addnode connect to IPv6
+addnode connect to onion
+addnode connect to generic DNS name
+"""
+
+import socket
+import os
+
+from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    PORT_MIN,
+    PORT_RANGE,
+    assert_equal,
+)
+from test_framework.netutil import test_ipv6_local
+
+RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE  # Start after p2p and rpc ports
+
+class ProxyTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 4
+
+    def setup_nodes(self):
+        self.have_ipv6 = test_ipv6_local()
+        # Create two proxies on different ports
+        # ... one unauthenticated
+        self.conf1 = Socks5Configuration()
+        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
+        self.conf1.unauth = True
+        self.conf1.auth = False
+        # ... one supporting authenticated and unauthenticated (Tor)
+        self.conf2 = Socks5Configuration()
+        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
+        self.conf2.unauth = True
+        self.conf2.auth = True
+        if self.have_ipv6:
+            # ...
one on IPv6 with similar configuration
+            self.conf3 = Socks5Configuration()
+            self.conf3.af = socket.AF_INET6
+            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
+            self.conf3.unauth = True
+            self.conf3.auth = True
+        else:
+            self.log.warning("Testing without local IPv6 support")
+
+        self.serv1 = Socks5Server(self.conf1)
+        self.serv1.start()
+        self.serv2 = Socks5Server(self.conf2)
+        self.serv2.start()
+        if self.have_ipv6:
+            self.serv3 = Socks5Server(self.conf3)
+            self.serv3.start()
+
+        # Note: proxies are not used to connect to local nodes;
+        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
+        args = [
+            ['-listen', '-proxy=%s:%i' % (self.conf1.addr), '-proxyrandomize=1'],
+            ['-listen', '-proxy=%s:%i' % (self.conf1.addr), '-onion=%s:%i' % (self.conf2.addr), '-proxyrandomize=0'],
+            ['-listen', '-proxy=%s:%i' % (self.conf2.addr), '-proxyrandomize=1'],
+            []
+        ]
+        if self.have_ipv6:
+            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr), '-proxyrandomize=0', '-noonion']
+        self.add_nodes(self.num_nodes, extra_args=args)
+        self.start_nodes()
+
+    def node_test(self, node, proxies, auth, test_onion=True):
+        rv = []
+        # Test: outgoing IPv4 connection through node
+        node.addnode("15.61.23.23:1234", "onetry")
+        cmd = proxies[0].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, b"15.61.23.23")
+        assert_equal(cmd.port, 1234)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        if self.have_ipv6:
+            # Test: outgoing IPv6 connection through node
+            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
+            cmd = proxies[1].queue.get()
+            assert(isinstance(cmd, Socks5Command))
+            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
+            assert_equal(cmd.port, 5443)
+            if not auth:
+                assert_equal(cmd.username, None)
+                assert_equal(cmd.password, None)
+            rv.append(cmd)
+
+        if test_onion:
+            # Test: outgoing onion connection through node
+            node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
+            cmd = proxies[2].queue.get()
+            assert(isinstance(cmd, Socks5Command))
+            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
+            assert_equal(cmd.port, 8333)
+            if not auth:
+                assert_equal(cmd.username, None)
+                assert_equal(cmd.password, None)
+            rv.append(cmd)
+
+        # Test: outgoing DNS name connection through node
+        node.addnode("node.noumenon:8333", "onetry")
+        cmd = proxies[3].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, b"node.noumenon")
+        assert_equal(cmd.port, 8333)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        return rv
+
+    def run_test(self):
+        # basic -proxy
+        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
+
+        # -proxy plus -onion
+        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
+
+        # -proxy plus -onion, -proxyrandomize
+        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
+        # Check that credentials as used for
-proxyrandomize connections are unique + credentials = set((x.username,x.password) for x in rv) + assert_equal(len(credentials), len(rv)) + + if self.have_ipv6: + # proxy on IPv6 localhost + self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) + + def networks_dict(d): + r = {} + for x in d['networks']: + r[x['name']] = x + return r + + # test RPC getnetworkinfo + n0 = networks_dict(self.nodes[0].getnetworkinfo()) + for net in ['ipv4','ipv6','onion']: + assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) + assert_equal(n0[net]['proxy_randomize_credentials'], True) + assert_equal(n0['onion']['reachable'], True) + + n1 = networks_dict(self.nodes[1].getnetworkinfo()) + for net in ['ipv4','ipv6']: + assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) + assert_equal(n1[net]['proxy_randomize_credentials'], False) + assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) + assert_equal(n1['onion']['proxy_randomize_credentials'], False) + assert_equal(n1['onion']['reachable'], True) + + n2 = networks_dict(self.nodes[2].getnetworkinfo()) + for net in ['ipv4','ipv6','onion']: + assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) + assert_equal(n2[net]['proxy_randomize_credentials'], True) + assert_equal(n2['onion']['reachable'], True) + + if self.have_ipv6: + n3 = networks_dict(self.nodes[3].getnetworkinfo()) + for net in ['ipv4','ipv6']: + assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) + assert_equal(n3[net]['proxy_randomize_credentials'], False) + assert_equal(n3['onion']['reachable'], False) + +if __name__ == '__main__': + ProxyTest().main() + diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py new file mode 100755 index 0000000000..49ad7f838c --- /dev/null +++ b/test/functional/feature_pruning.py @@ -0,0 +1,454 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the pruning code. + +WARNING: +This test uses 4GB of disk space. +This test takes 30 mins or more (up to 2 hours) +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +import time +import os + +MIN_BLOCKS_TO_KEEP = 288 + +# Rescans start at the earliest block up to 2 hours before a key timestamp, so +# the manual prune RPC avoids pruning blocks in the same window to be +# compatible with pruning based on key creation time. +TIMESTAMP_WINDOW = 2 * 60 * 60 + + +def calc_usage(blockdir): + return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) + +class PruneTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 6 + + # Create nodes 0 and 1 to mine. + # Create node 2 to test pruning. 
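+        # (Note, assuming the usual -prune semantics: the value is a target
+        # in MiB for the block files, 550 is the smallest value bitcoind
+        # accepts, and the most recent MIN_BLOCKS_TO_KEEP=288 blocks are
+        # always kept regardless of the target.)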
+ self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ] + # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) + # Create nodes 5 to test wallet in prune mode, but do not connect + self.extra_args = [self.full_node_default_args, + self.full_node_default_args, + ["-maxreceivebuffer=20000", "-prune=550"], + ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], + ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], + ["-prune=550"]] + + def setup_network(self): + self.setup_nodes() + + self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/" + + connect_nodes(self.nodes[0], 1) + connect_nodes(self.nodes[1], 2) + connect_nodes(self.nodes[2], 0) + connect_nodes(self.nodes[0], 3) + connect_nodes(self.nodes[0], 4) + sync_blocks(self.nodes[0:5]) + + def setup_nodes(self): + self.add_nodes(self.num_nodes, self.extra_args, timewait=900) + self.start_nodes() + + def create_big_chain(self): + # Start by creating some coinbases we can spend later + self.nodes[1].generate(200) + sync_blocks(self.nodes[0:2]) + self.nodes[0].generate(150) + # Then mine enough full blocks to create more than 550MiB of data + for i in range(645): + mine_large_block(self.nodes[0], self.utxo_cache_0) + + sync_blocks(self.nodes[0:5]) + + def test_height_min(self): + if not os.path.isfile(self.prunedir+"blk00000.dat"): + raise AssertionError("blk00000.dat is missing, pruning too early") + self.log.info("Success") + self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir)) + self.log.info("Mining 25 more blocks should cause the first block file to be pruned") + # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this + for i in range(25): + mine_large_block(self.nodes[0], self.utxo_cache_0) + + waitstart = time.time() + while os.path.isfile(self.prunedir+"blk00000.dat"): + time.sleep(0.1) + if time.time() - waitstart > 30: + raise AssertionError("blk00000.dat not pruned when it should be") + + self.log.info("Success") + usage = calc_usage(self.prunedir) + self.log.info("Usage should be below target: %d" % usage) + if (usage > 550): + raise AssertionError("Pruning target not being met") + + def create_chain_with_staleblocks(self): + # Create stale blocks in manageable sized chunks + self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") + + for j in range(12): + # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain + # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects + # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine + self.stop_node(0) + self.start_node(0, extra_args=self.full_node_default_args) + # Mine 24 blocks in node 1 + for i in range(24): + if j == 0: + mine_large_block(self.nodes[1], self.utxo_cache_1) + else: + # Add node1's wallet transactions back to the mempool, to + # avoid the mined blocks from being too small. 
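+                    # (Presumably the reorg in the previous round leaves some
+                    # wallet transactions out of node1's mempool;
+                    # resendwallettransactions() rebroadcasts all unconfirmed
+                    # wallet transactions so generate(1) below still produces
+                    # reasonably full blocks.)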
+                    self.nodes[1].resendwallettransactions()
+                    self.nodes[1].generate(1) # txs already in mempool from previous disconnects
+
+            # Reorg back with 25 block chain from node 0
+            for i in range(25):
+                mine_large_block(self.nodes[0], self.utxo_cache_0)
+
+            # Create connections in this order so both nodes can see the reorg at the same time
+            connect_nodes(self.nodes[1], 0)
+            connect_nodes(self.nodes[2], 0)
+            sync_blocks(self.nodes[0:3])
+
+        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
+
+    def reorg_test(self):
+        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
+        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
+        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
+        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
+        self.stop_node(1)
+        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
+
+        height = self.nodes[1].getblockcount()
+        self.log.info("Current block height: %d" % height)
+
+        invalidheight = height - 287
+        badhash = self.nodes[1].getblockhash(invalidheight)
+        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
+        self.nodes[1].invalidateblock(badhash)
+
+        # We've now switched to our previously-mined 24 block fork on node 1, but that's not what we want
+        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
+        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
+        curhash = self.nodes[1].getblockhash(invalidheight - 1)
+        while curhash != mainchainhash:
+            self.nodes[1].invalidateblock(curhash)
+            curhash = self.nodes[1].getblockhash(invalidheight - 1)
+
+        assert(self.nodes[1].getblockcount() == invalidheight - 1)
+        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
+
+        # Reboot node1 to clear those giant txs from its mempool
+        self.stop_node(1)
+        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
+
+        self.log.info("Generating new longer chain of 300 more blocks")
+        self.nodes[1].generate(300)
+
+        self.log.info("Reconnect nodes")
+        connect_nodes(self.nodes[0], 1)
+        connect_nodes(self.nodes[2], 1)
+        sync_blocks(self.nodes[0:3], timeout=120)
+
+        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
+        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
+
+        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+
+        # Get node0's wallet transactions back in its mempool, to keep the
+        # mined blocks from being too small.
+        self.nodes[0].resendwallettransactions()
+
+        for i in range(22):
+            # This can be slow, so do this in multiple RPC calls to avoid
+            # RPC timeouts.
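+            # (22 calls x 10 blocks each = the 220 blocks announced in the
+            # log message above.)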
+            self.nodes[0].generate(10) # node 0 has many large txs in its mempool from the disconnects
+        sync_blocks(self.nodes[0:3], timeout=300)
+
+        usage = calc_usage(self.prunedir)
+        self.log.info("Usage should be below target: %d" % usage)
+        if (usage > 550):
+            raise AssertionError("Pruning target not being met")
+
+        return invalidheight, badhash
+
+    def reorg_back(self):
+        # Verify that a block on the old main chain fork has been pruned away
+        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
+        self.log.info("Will need to redownload block %d" % self.forkheight)
+
+        # Verify that we have enough history to reorg back to the fork point
+        # Although this is more than 288 blocks back, because this chain was written more recently,
+        # and only its other 299 small and 220 large blocks are in the block files after it,
+        # it's expected to still be retained
+        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
+
+        first_reorg_height = self.nodes[2].getblockcount()
+        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
+        self.nodes[2].invalidateblock(curchainhash)
+        goalbestheight = self.mainchainheight
+        goalbesthash = self.mainchainhash2
+
+        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
+        # create_chain_with_staleblocks(), because it doesn't know of any peer that's on that chain from which to
+        # redownload its missing blocks.
+        # Invalidate the reorg_test chain in node 0 as well; it can successfully switch to the original chain
+        # because it has all the block data.
+        # However, it must mine enough blocks to have a more work chain than the reorg_test chain in order
+        # to trigger node 2's block download logic.
+        # At this point node 2 is within 288 blocks of the fork point, so it will preserve its ability to reorg
+        if self.nodes[2].getblockcount() < self.mainchainheight:
+            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
+            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload.
Blocks needed: %d" % blocks_to_mine) + self.nodes[0].invalidateblock(curchainhash) + assert(self.nodes[0].getblockcount() == self.mainchainheight) + assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) + goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] + goalbestheight = first_reorg_height + 1 + + self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") + waitstart = time.time() + while self.nodes[2].getblockcount() < goalbestheight: + time.sleep(0.1) + if time.time() - waitstart > 900: + raise AssertionError("Node 2 didn't reorg to proper height") + assert(self.nodes[2].getbestblockhash() == goalbesthash) + # Verify we can now have the data for a block previously pruned + assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) + + def manual_test(self, node_number, use_timestamp): + # at this point, node has 995 blocks and has not yet run in prune mode + self.start_node(node_number) + node = self.nodes[node_number] + assert_equal(node.getblockcount(), 995) + assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) + + # now re-start in manual pruning mode + self.stop_node(node_number) + self.start_node(node_number, extra_args=["-prune=1"]) + node = self.nodes[node_number] + assert_equal(node.getblockcount(), 995) + + def height(index): + if use_timestamp: + return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW + else: + return index + + def prune(index, expected_ret=None): + ret = node.pruneblockchain(height(index)) + # Check the return value. When use_timestamp is True, just check + # that the return value is less than or equal to the expected + # value, because when more than one block is generated per second, + # a timestamp will not be granular enough to uniquely identify an + # individual block. + if expected_ret is None: + expected_ret = index + if use_timestamp: + assert_greater_than(ret, 0) + assert_greater_than(expected_ret + 1, ret) + else: + assert_equal(ret, expected_ret) + + def has_block(index): + return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) + + # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000) + assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) + + # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) + node.generate(6) + assert_equal(node.getblockchaininfo()["blocks"], 1001) + + # negative heights should raise an exception + assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) + + # height=100 too low to prune first block file so this is a no-op + prune(100) + if not has_block(0): + raise AssertionError("blk00000.dat is missing when should still be there") + + # Does nothing + node.pruneblockchain(height(0)) + if not has_block(0): + raise AssertionError("blk00000.dat is missing when should still be there") + + # height=500 should prune first file + prune(500) + if has_block(0): + raise AssertionError("blk00000.dat is still there, should be pruned by now") + if not has_block(1): + raise AssertionError("blk00001.dat is missing when should still be there") + + # height=650 should prune second file + prune(650) + if has_block(1): + raise AssertionError("blk00001.dat is still there, should be pruned by now") + + # height=1000 should not prune anything more, because tip-288 is in blk00002.dat. 
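+        # (Expected arithmetic: the prune height is capped at
+        # tip - MIN_BLOCKS_TO_KEEP = 1001 - 288 = 713, hence the
+        # expected_ret argument of 1001 - MIN_BLOCKS_TO_KEEP below.)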
+ prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) + if not has_block(2): + raise AssertionError("blk00002.dat is still there, should be pruned by now") + + # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat) + node.generate(288) + prune(1000) + if has_block(2): + raise AssertionError("blk00002.dat is still there, should be pruned by now") + if has_block(3): + raise AssertionError("blk00003.dat is still there, should be pruned by now") + + # stop node, start back up with auto-prune at 550MB, make sure still runs + self.stop_node(node_number) + self.start_node(node_number, extra_args=["-prune=550"]) + + self.log.info("Success") + + def wallet_test(self): + # check that the pruning node's wallet is still in good shape + self.log.info("Stop and start pruning node to trigger wallet rescan") + self.stop_node(2) + self.start_node(2, extra_args=["-prune=550"]) + self.log.info("Success") + + # check that wallet loads successfully when restarting a pruned node after IBD. + # this was reported to fail in #7494. + self.log.info("Syncing node 5 to test wallet") + connect_nodes(self.nodes[0], 5) + nds = [self.nodes[0], self.nodes[5]] + sync_blocks(nds, wait=5, timeout=300) + self.stop_node(5) #stop and start to trigger rescan + self.start_node(5, extra_args=["-prune=550"]) + self.log.info("Success") + + def run_test(self): + self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") + self.log.info("Mining a big blockchain of 995 blocks") + + # Determine default relay fee + self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] + + # Cache for utxos, as the listunspent may take a long time later in the test + self.utxo_cache_0 = [] + self.utxo_cache_1 = [] + + self.create_big_chain() + # Chain diagram key: + # * blocks on main chain + # +,&,$,@ blocks on other forks + # X invalidated block + # N1 Node 1 + # + # Start by mining a simple chain that all nodes have + # N0=N1=N2 **...*(995) + + # stop manual-pruning node with 995 blocks + self.stop_node(3) + self.stop_node(4) + + self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight") + self.test_height_min() + # Extend this chain past the PruneAfterHeight + # N0=N1=N2 **...*(1020) + + self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate") + self.create_chain_with_staleblocks() + # Disconnect N0 + # And mine a 24 block chain on N1 and a separate 25 block chain on N0 + # N1=N2 **...*+...+(1044) + # N0 **...**...**(1045) + # + # reconnect nodes causing reorg on N1 and N2 + # N1=N2 **...*(1020) *...**(1045) + # \ + # +...+(1044) + # + # repeat this process until you have 12 stale forks hanging off the + # main chain on N1 and N2 + # N0 *************************...***************************(1320) + # + # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) + # \ \ \ + # +...+(1044) &.. $...$(1319) + + # Save some current chain state for later use + self.mainchainheight = self.nodes[2].getblockcount() #1320 + self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) + + self.log.info("Check that we can survive a 288 block reorg still") + (self.forkheight,self.forkhash) = self.reorg_test() #(1033, ) + # Now create a 288 block reorg by mining a longer chain on N1 + # First disconnect N1 + # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain + # N1 **...*(1020) **...**(1032)X.. + # \ + # ++...+(1031)X.. 
+ # + # Now mine 300 more blocks on N1 + # N1 **...*(1020) **...**(1032) @@...@(1332) + # \ \ + # \ X... + # \ \ + # ++...+(1031)X.. .. + # + # Reconnect nodes and mine 220 more blocks on N1 + # N1 **...*(1020) **...**(1032) @@...@@@(1552) + # \ \ + # \ X... + # \ \ + # ++...+(1031)X.. .. + # + # N2 **...*(1020) **...**(1032) @@...@@@(1552) + # \ \ + # \ *...**(1320) + # \ \ + # ++...++(1044) .. + # + # N0 ********************(1032) @@...@@@(1552) + # \ + # *...**(1320) + + self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg") + self.reorg_back() + # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) + # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to + # original main chain (*), but will require redownload of some blocks + # In order to have a peer we think we can download from, must also perform this invalidation + # on N0 and mine a new longest chain to trigger. + # Final result: + # N0 ********************(1032) **...****(1553) + # \ + # X@...@@@(1552) + # + # N2 **...*(1020) **...**(1032) **...****(1553) + # \ \ + # \ X@...@@@(1552) + # \ + # +.. + # + # N1 doesn't change because 1033 on main chain (*) is invalid + + self.log.info("Test manual pruning with block indices") + self.manual_test(3, use_timestamp=False) + + self.log.info("Test manual pruning with timestamps") + self.manual_test(4, use_timestamp=True) + + self.log.info("Test wallet re-scan") + self.wallet_test() + + self.log.info("Done") + +if __name__ == '__main__': + PruneTest().main() diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py new file mode 100755 index 0000000000..6b7ab0f43e --- /dev/null +++ b/test/functional/feature_rbf.py @@ -0,0 +1,565 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the RBF code.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.script import * +from test_framework.mininode import * + +MAX_REPLACEMENT_LIMIT = 100 + +def txToHex(tx): + return bytes_to_hex_str(tx.serialize()) + +def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): + """Create a txout with a given amount and scriptPubKey + + Mines coins as needed. + + confirmed - txouts created will be confirmed in the blockchain; + unconfirmed otherwise. + """ + fee = 1*COIN + while node.getbalance() < satoshi_round((amount + fee)/COIN): + node.generate(100) + + new_addr = node.getnewaddress() + txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) + tx1 = node.getrawtransaction(txid, 1) + txid = int(txid, 16) + i = None + + for i, txout in enumerate(tx1['vout']): + if txout['scriptPubKey']['addresses'] == [new_addr]: + break + assert i is not None + + tx2 = CTransaction() + tx2.vin = [CTxIn(COutPoint(txid, i))] + tx2.vout = [CTxOut(amount, scriptPubKey)] + tx2.rehash() + + signed_tx = node.signrawtransaction(txToHex(tx2)) + + txid = node.sendrawtransaction(signed_tx['hex'], True) + + # If requested, ensure txouts are confirmed. + if confirmed: + mempool_size = len(node.getrawmempool()) + while mempool_size > 0: + node.generate(1) + new_size = len(node.getrawmempool()) + # Error out if we have something stuck in the mempool, as this + # would likely be a bug. 
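+            # (Each generate(1) should confirm at least one of the remaining
+            # transactions; without this check a transaction that can never
+            # be mined would make the loop spin forever.)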
+ assert(new_size < mempool_size) + mempool_size = new_size + + return COutPoint(int(txid, 16), 0) + +class ReplaceByFeeTest(BitcoinTestFramework): + + def set_test_params(self): + self.num_nodes = 2 + self.extra_args= [["-maxorphantx=1000", + "-whitelist=127.0.0.1", + "-limitancestorcount=50", + "-limitancestorsize=101", + "-limitdescendantcount=200", + "-limitdescendantsize=101"], + ["-mempoolreplacement=0"]] + + def run_test(self): + # Leave IBD + self.nodes[0].generate(1) + + make_utxo(self.nodes[0], 1*COIN) + + # Ensure nodes are synced + self.sync_all() + + self.log.info("Running test simple doublespend...") + self.test_simple_doublespend() + + self.log.info("Running test doublespend chain...") + self.test_doublespend_chain() + + self.log.info("Running test doublespend tree...") + self.test_doublespend_tree() + + self.log.info("Running test replacement feeperkb...") + self.test_replacement_feeperkb() + + self.log.info("Running test spends of conflicting outputs...") + self.test_spends_of_conflicting_outputs() + + self.log.info("Running test new unconfirmed inputs...") + self.test_new_unconfirmed_inputs() + + self.log.info("Running test too many replacements...") + self.test_too_many_replacements() + + self.log.info("Running test opt-in...") + self.test_opt_in() + + self.log.info("Running test RPC...") + self.test_rpc() + + self.log.info("Running test prioritised transactions...") + self.test_prioritised_transactions() + + self.log.info("Passed") + + def test_simple_doublespend(self): + """Simple doublespend""" + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + # make_utxo may have generated a bunch of blocks, so we need to sync + # before we can spend the coins generated, or else the resulting + # transactions might not be accepted by our peers. 
+ self.sync_all() + + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + self.sync_all() + + # Should fail because we haven't changed the fee + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] + tx1b_hex = txToHex(tx1b) + + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) + # This will raise an exception due to transaction replacement being disabled + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) + + # Extra 0.1 BTC fee + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] + tx1b_hex = txToHex(tx1b) + # Replacement still disabled even with "enough fee" + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) + # Works when enabled + tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) + + mempool = self.nodes[0].getrawmempool() + + assert (tx1a_txid not in mempool) + assert (tx1b_txid in mempool) + + assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) + + # Second node is running mempoolreplacement=0, will not replace originally-seen txn + mempool = self.nodes[1].getrawmempool() + assert tx1a_txid in mempool + assert tx1b_txid not in mempool + + def test_doublespend_chain(self): + """Doublespend of a long chain""" + + initial_nValue = 50*COIN + tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) + + prevout = tx0_outpoint + remaining_value = initial_nValue + chain_txids = [] + while remaining_value > 10*COIN: + remaining_value -= 1*COIN + tx = CTransaction() + tx.vin = [CTxIn(prevout, nSequence=0)] + tx.vout = [CTxOut(remaining_value, CScript([1]))] + tx_hex = txToHex(tx) + txid = self.nodes[0].sendrawtransaction(tx_hex, True) + chain_txids.append(txid) + prevout = COutPoint(int(txid, 16), 0) + + # Whether the double-spend is allowed is evaluated by including all + # child fees - 40 BTC - so this attempt is rejected. 
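+        # (Worked numbers: the chain starts from a 50 BTC output and each
+        # link pays 1 BTC in fee until about 10 BTC of value remains, so
+        # roughly 40 BTC of descendant fees must be outbid. dbl_tx keeps
+        # initial_nValue - 30 BTC as its output, i.e. pays only a 30 BTC
+        # fee, less than the 40 BTC required by BIP125's absolute-fee rule.)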
+ dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) + + # Accepted with sufficient fee + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + self.nodes[0].sendrawtransaction(dbl_tx_hex, True) + + mempool = self.nodes[0].getrawmempool() + for doublespent_txid in chain_txids: + assert(doublespent_txid not in mempool) + + def test_doublespend_tree(self): + """Doublespend of a big tree of transactions""" + + initial_nValue = 50*COIN + tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) + + def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): + if _total_txs is None: + _total_txs = [0] + if _total_txs[0] >= max_txs: + return + + txout_value = (initial_value - fee) // tree_width + if txout_value < fee: + return + + vout = [CTxOut(txout_value, CScript([i+1])) + for i in range(tree_width)] + tx = CTransaction() + tx.vin = [CTxIn(prevout, nSequence=0)] + tx.vout = vout + tx_hex = txToHex(tx) + + assert(len(tx.serialize()) < 100000) + txid = self.nodes[0].sendrawtransaction(tx_hex, True) + yield tx + _total_txs[0] += 1 + + txid = int(txid, 16) + + for i, txout in enumerate(tx.vout): + for x in branch(COutPoint(txid, i), txout_value, + max_txs, + tree_width=tree_width, fee=fee, + _total_txs=_total_txs): + yield x + + fee = int(0.0001*COIN) + n = MAX_REPLACEMENT_LIMIT + tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) + assert_equal(len(tree_txs), n) + + # Attempt double-spend, will fail because too little fee paid + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) + + # 1 BTC fee is enough + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + self.nodes[0].sendrawtransaction(dbl_tx_hex, True) + + mempool = self.nodes[0].getrawmempool() + + for tx in tree_txs: + tx.rehash() + assert (tx.hash not in mempool) + + # Try again, but with more total transactions than the "max txs + # double-spent at once" anti-DoS limit. 
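+        # (MAX_REPLACEMENT_LIMIT = 100 mirrors BIP125 rule 5: a replacement
+        # may not evict more than 100 mempool transactions, so both larger
+        # trees below are rejected with "too many potential replacements".)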
+ for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): + fee = int(0.0001*COIN) + tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) + tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) + assert_equal(len(tree_txs), n) + + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + # This will raise an exception + assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) + + for tx in tree_txs: + tx.rehash() + self.nodes[0].getrawtransaction(tx.hash) + + def test_replacement_feeperkb(self): + """Replacement requires fee-per-KB to be higher""" + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + self.nodes[0].sendrawtransaction(tx1a_hex, True) + + # Higher fee, but the fee per KB is much lower, so the replacement is + # rejected. + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] + tx1b_hex = txToHex(tx1b) + + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) + + def test_spends_of_conflicting_outputs(self): + """Replacements that spend conflicting tx outputs are rejected""" + utxo1 = make_utxo(self.nodes[0], int(1.2*COIN)) + utxo2 = make_utxo(self.nodes[0], 3*COIN) + + tx1a = CTransaction() + tx1a.vin = [CTxIn(utxo1, nSequence=0)] + tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + tx1a_txid = int(tx1a_txid, 16) + + # Direct spend an output of the transaction we're replacing. + tx2 = CTransaction() + tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] + tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) + tx2.vout = tx1a.vout + tx2_hex = txToHex(tx2) + + # This will raise an exception + assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) + + # Spend tx1a's output to test the indirect case. 
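+        # (The indirect case: tx1b spends tx1a's output, and tx2 below both
+        # conflicts with tx1a (via utxo1) and spends tx1b's output, so the
+        # replacement would spend an output of a transaction it is itself
+        # evicting, which BIP125 forbids.)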
+ tx1b = CTransaction() + tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] + tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1b_hex = txToHex(tx1b) + tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) + tx1b_txid = int(tx1b_txid, 16) + + tx2 = CTransaction() + tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), + CTxIn(COutPoint(tx1b_txid, 0))] + tx2.vout = tx1a.vout + tx2_hex = txToHex(tx2) + + # This will raise an exception + assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) + + def test_new_unconfirmed_inputs(self): + """Replacements that add new unconfirmed inputs are rejected""" + confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN)) + unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False) + + tx1 = CTransaction() + tx1.vin = [CTxIn(confirmed_utxo)] + tx1.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1_hex = txToHex(tx1) + self.nodes[0].sendrawtransaction(tx1_hex, True) + + tx2 = CTransaction() + tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] + tx2.vout = tx1.vout + tx2_hex = txToHex(tx2) + + # This will raise an exception + assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True) + + def test_too_many_replacements(self): + """Replacements that evict too many transactions are rejected""" + # Try directly replacing more than MAX_REPLACEMENT_LIMIT + # transactions + + # Start by creating a single transaction with many outputs + initial_nValue = 10*COIN + utxo = make_utxo(self.nodes[0], initial_nValue) + fee = int(0.0001*COIN) + split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) + + outputs = [] + for i in range(MAX_REPLACEMENT_LIMIT+1): + outputs.append(CTxOut(split_value, CScript([1]))) + + splitting_tx = CTransaction() + splitting_tx.vin = [CTxIn(utxo, nSequence=0)] + splitting_tx.vout = outputs + splitting_tx_hex = txToHex(splitting_tx) + + txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True) + txid = int(txid, 16) + + # Now spend each of those outputs individually + for i in range(MAX_REPLACEMENT_LIMIT+1): + tx_i = CTransaction() + tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] + tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))] + tx_i_hex = txToHex(tx_i) + self.nodes[0].sendrawtransaction(tx_i_hex, True) + + # Now create doublespend of the whole lot; should fail. 
+ # Need a big enough fee to cover all spending transactions and have + # a higher fee rate + double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) + inputs = [] + for i in range(MAX_REPLACEMENT_LIMIT+1): + inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) + double_tx = CTransaction() + double_tx.vin = inputs + double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] + double_tx_hex = txToHex(double_tx) + + # This will raise an exception + assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True) + + # If we remove an input, it should pass + double_tx = CTransaction() + double_tx.vin = inputs[0:-1] + double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] + double_tx_hex = txToHex(double_tx) + self.nodes[0].sendrawtransaction(double_tx_hex, True) + + def test_opt_in(self): + """Replacing should only work if orig tx opted in""" + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + # Create a non-opting in transaction + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + # Shouldn't be able to double-spend + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] + tx1b_hex = txToHex(tx1b) + + # This will raise an exception + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True) + + tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + # Create a different non-opting in transaction + tx2a = CTransaction() + tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] + tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx2a_hex = txToHex(tx2a) + tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) + + # Still shouldn't be able to double-spend + tx2b = CTransaction() + tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] + tx2b_hex = txToHex(tx2b) + + # This will raise an exception + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True) + + # Now create a new transaction that spends from tx1a and tx2a + # opt-in on one of the inputs + # Transaction should be replaceable on either input + + tx1a_txid = int(tx1a_txid, 16) + tx2a_txid = int(tx2a_txid, 16) + + tx3a = CTransaction() + tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), + CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] + tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] + tx3a_hex = txToHex(tx3a) + + self.nodes[0].sendrawtransaction(tx3a_hex, True) + + tx3b = CTransaction() + tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] + tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))] + tx3b_hex = txToHex(tx3b) + + tx3c = CTransaction() + tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] + tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))] + tx3c_hex = txToHex(tx3c) + + self.nodes[0].sendrawtransaction(tx3b_hex, True) + # If tx3b was accepted, tx3c won't look like a replacement, + # but make sure it is accepted anyway + self.nodes[0].sendrawtransaction(tx3c_hex, True) + + def test_prioritised_transactions(self): + # Ensure that fee deltas used via prioritisetransaction are + # correctly used by replacement logic + + # 1. 
Check that feeperkb uses modified fees + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + # Higher fee, but the actual fee per KB is much lower. + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] + tx1b_hex = txToHex(tx1b) + + # Verify tx1b cannot replace tx1a. + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) + + # Use prioritisetransaction to set tx1a's fee to 0. + self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN)) + + # Now tx1b should be able to replace tx1a + tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) + + assert(tx1b_txid in self.nodes[0].getrawmempool()) + + # 2. Check that absolute fee checks use modified fee. + tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + tx2a = CTransaction() + tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx2a_hex = txToHex(tx2a) + self.nodes[0].sendrawtransaction(tx2a_hex, True) + + # Lower fee, but we'll prioritise it + tx2b = CTransaction() + tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))] + tx2b.rehash() + tx2b_hex = txToHex(tx2b) + + # Verify tx2b cannot replace tx2a. + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True) + + # Now prioritise tx2b to have a higher modified fee + self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN)) + + # tx2b should now be accepted + tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) + + assert(tx2b_txid in self.nodes[0].getrawmempool()) + + def test_rpc(self): + us0 = self.nodes[0].listunspent()[0] + ins = [us0] + outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)} + rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True) + rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False) + json0 = self.nodes[0].decoderawtransaction(rawtx0) + json1 = self.nodes[0].decoderawtransaction(rawtx1) + assert_equal(json0["vin"][0]["sequence"], 4294967293) + assert_equal(json1["vin"][0]["sequence"], 4294967295) + + rawtx2 = self.nodes[0].createrawtransaction([], outs) + frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) + frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) + + json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) + json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) + assert_equal(json0["vin"][0]["sequence"], 4294967293) + assert_equal(json1["vin"][0]["sequence"], 4294967294) + +if __name__ == '__main__': + ReplaceByFeeTest().main() diff --git a/test/functional/feature_reindex.py b/test/functional/feature_reindex.py new file mode 100755 index 0000000000..ac67e6e9ba --- /dev/null +++ b/test/functional/feature_reindex.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test running bitcoind with -reindex and -reindex-chainstate options. + +- Start a single node and generate 3 blocks. +- Stop the node and restart it with -reindex. 
Verify that the node has reindexed up to block 3. +- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal +import time + +class ReindexTest(BitcoinTestFramework): + + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def reindex(self, justchainstate=False): + self.nodes[0].generate(3) + blockcount = self.nodes[0].getblockcount() + self.stop_nodes() + extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]] + self.start_nodes(extra_args) + while self.nodes[0].getblockcount() < blockcount: + time.sleep(0.1) + assert_equal(self.nodes[0].getblockcount(), blockcount) + self.log.info("Success") + + def run_test(self): + self.reindex(False) + self.reindex(True) + self.reindex(False) + self.reindex(True) + +if __name__ == '__main__': + ReindexTest().main() diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py new file mode 100755 index 0000000000..ba6373fa33 --- /dev/null +++ b/test/functional/feature_segwit.py @@ -0,0 +1,638 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the SegWit changeover logic.""" + +from test_framework.address import ( + key_to_p2sh_p2wpkh, + key_to_p2wpkh, + program_to_witness, + script_to_p2sh_p2wsh, + script_to_p2wsh, +) +from test_framework.blocktools import witness_script, send_to_witness +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex +from test_framework.address import script_to_p2sh, key_to_p2pkh +from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE +from io import BytesIO + +NODE_0 = 0 +NODE_2 = 2 +WIT_V0 = 0 +WIT_V1 = 1 + +def getutxo(txid): + utxo = {} + utxo["vout"] = 0 + utxo["txid"] = txid + return utxo + +def find_unspent(node, min_value): + for utxo in node.listunspent(): + if utxo['amount'] >= min_value: + return utxo + +class SegWitTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. 
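+        # (The -vbparams=segwit:0:999999999999 arguments below set the BIP9
+        # start time and timeout of the segwit deployment, so activation
+        # follows the ordinary versionbits signalling schedule on regtest
+        # rather than being active from genesis.)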
+ self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], + ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], + ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]] + + def setup_network(self): + super().setup_network() + connect_nodes(self.nodes[0], 2) + self.sync_all() + + def success_mine(self, node, txid, sign, redeem_script=""): + send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + block = node.generate(1) + assert_equal(len(node.getblock(block[0])["tx"]), 2) + sync_blocks(self.nodes) + + def skip_mine(self, node, txid, sign, redeem_script=""): + send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + block = node.generate(1) + assert_equal(len(node.getblock(block[0])["tx"]), 1) + sync_blocks(self.nodes) + + def fail_accept(self, node, error_msg, txid, sign, redeem_script=""): + assert_raises_rpc_error(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + + def fail_mine(self, node, txid, sign, redeem_script=""): + send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1) + sync_blocks(self.nodes) + + def run_test(self): + self.nodes[0].generate(161) #block 161 + + self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork") + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + tmpl = self.nodes[0].getblocktemplate({}) + assert(tmpl['sizelimit'] == 1000000) + assert('weightlimit' not in tmpl) + assert(tmpl['sigoplimit'] == 20000) + assert(tmpl['transactions'][0]['hash'] == txid) + assert(tmpl['transactions'][0]['sigops'] == 2) + tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) + assert(tmpl['sizelimit'] == 1000000) + assert('weightlimit' not in tmpl) + assert(tmpl['sigoplimit'] == 20000) + assert(tmpl['transactions'][0]['hash'] == txid) + assert(tmpl['transactions'][0]['sigops'] == 2) + self.nodes[0].generate(1) #block 162 + + balance_presetup = self.nodes[0].getbalance() + self.pubkey = [] + p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh + wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness + for i in range(3): + newaddress = self.nodes[i].getnewaddress() + self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"]) + multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG]) + p2sh_addr = self.nodes[i].addwitnessaddress(newaddress) + bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False) + p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address'] + bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address'] + assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1])) + assert_equal(bip173_addr, 
key_to_p2wpkh(self.pubkey[-1])) + assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript)) + assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript)) + p2sh_ids.append([]) + wit_ids.append([]) + for v in range(2): + p2sh_ids[i].append([]) + wit_ids[i].append([]) + + for i in range(5): + for n in range(3): + for v in range(2): + wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999"))) + p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999"))) + + self.nodes[0].generate(1) #block 163 + sync_blocks(self.nodes) + + # Make sure all nodes recognize the transactions as theirs + assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50) + assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999")) + assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999")) + + self.nodes[0].generate(260) #block 423 + sync_blocks(self.nodes) + + self.log.info("Verify default node can't accept any witness format txs before fork") + # unsigned, no scriptsig + self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False) + self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False) + self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False) + self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False) + # unsigned with redeem script + self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0])) + self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0])) + # signed + self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True) + self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True) + self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True) + self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True) + + self.log.info("Verify witness txs are skipped for mining before the fork") + self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424 + self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425 + self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426 + self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427 + + # TODO: An old node would see these txs without witnesses and be able to mine them + + self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork") + self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428 + self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429 + + self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid") + self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False) + self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False) + + self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork") + self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430 + self.success_mine(self.nodes[2], 
p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431 + + self.log.info("Verify previous witness txs skipped for mining can now be mined") + assert_equal(len(self.nodes[2].getrawmempool()), 4) + block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3) + sync_blocks(self.nodes) + assert_equal(len(self.nodes[2].getrawmempool()), 0) + segwit_tx_list = self.nodes[2].getblock(block[0])["tx"] + assert_equal(len(segwit_tx_list), 5) + + self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag") + assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False)) + assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False)) + for i in range(len(segwit_tx_list)): + tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) + assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i])) + assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i])) + assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) + assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) + assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness())) + + self.log.info("Verify witness txs without witness data are invalid after the fork") + self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False) + self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False) + self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2])) + self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2])) + + self.log.info("Verify default node can now use witness txs") + self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432 + self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433 + self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434 + self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435 + + self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork") + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) + assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data + assert(tmpl['weightlimit'] == 4000000) + assert(tmpl['sigoplimit'] == 80000) + assert(tmpl['transactions'][0]['txid'] == txid) + assert(tmpl['transactions'][0]['sigops'] == 8) + + self.nodes[0].generate(1) # Mine a block to clear the gbt cache + + self.log.info("Non-segwit miners are able to use GBT response after activation.") + # Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) -> + # tx2 (segwit input, paying to a non-segwit output) -> + # tx3 (non-segwit input, paying to a non-segwit output). + # tx1 is allowed to appear in the block, but no others. 
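+        # (Reasoning, assumed rather than asserted here: tx2 spends a segwit output
+        # and so carries witness data, which a template built without the "segwit"
+        # rule cannot represent; tx3 is excluded in turn because its parent tx2 is.)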
+ txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996")) + hex_tx = self.nodes[0].gettransaction(txid)['hex'] + tx = FromHex(CTransaction(), hex_tx) + assert(tx.wit.is_null()) # This should not be a segwit input + assert(txid1 in self.nodes[0].getrawmempool()) + + # Now create tx2, which will spend from txid1. + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b'')) + tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE]))) + tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex'] + txid2 = self.nodes[0].sendrawtransaction(tx2_hex) + tx = FromHex(CTransaction(), tx2_hex) + assert(not tx.wit.is_null()) + + # Now create tx3, which will spend from txid2 + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b"")) + tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee + tx.calc_sha256() + txid3 = self.nodes[0].sendrawtransaction(ToHex(tx)) + assert(tx.wit.is_null()) + assert(txid3 in self.nodes[0].getrawmempool()) + + # Now try calling getblocktemplate() without segwit support. + template = self.nodes[0].getblocktemplate() + + # Check that tx1 is the only transaction of the 3 in the template. + template_txids = [ t['txid'] for t in template['transactions'] ] + assert(txid2 not in template_txids and txid3 not in template_txids) + assert(txid1 in template_txids) + + # Check that running with segwit support results in all 3 being included. + template = self.nodes[0].getblocktemplate({"rules": ["segwit"]}) + template_txids = [ t['txid'] for t in template['transactions'] ] + assert(txid1 in template_txids) + assert(txid2 in template_txids) + assert(txid3 in template_txids) + + # Check that wtxid is properly reported in mempool entry + assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True)) + + # Mine a block to clear the gbt cache again. 
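+        # (Presumably getblocktemplate caches its last result until the tip changes,
+        # so generating a block here forces the next call to build a fresh template.)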
+ self.nodes[0].generate(1) + + self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent") + + # Some public keys to be used later + pubkeys = [ + "0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb + "02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97 + "04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV + "02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd + "036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66 + "0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K + "0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ + ] + + # Import a compressed key and an uncompressed key, generate some multisig addresses + self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn") + uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"] + self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR") + compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"] + assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False)) + assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True)) + + self.nodes[0].importpubkey(pubkeys[0]) + compressed_solvable_address = [key_to_p2pkh(pubkeys[0])] + self.nodes[0].importpubkey(pubkeys[1]) + compressed_solvable_address.append(key_to_p2pkh(pubkeys[1])) + self.nodes[0].importpubkey(pubkeys[2]) + uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])] + + spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress + spendable_after_importaddress = [] # These outputs should be seen after importaddress + solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable + unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress + solvable_anytime = [] # These outputs should be solvable after importpubkey + unseen_anytime = [] # These outputs should never be seen + + uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address']) + uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address']) + compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address']) + uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address']) + compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address']) + compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], 
compressed_solvable_address[1]])['address']) + unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"] + + # Test multisig_without_privkey + # We have 2 public keys without private keys, use addmultisigaddress to add to wallet. + # Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address. + + multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address'] + script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG]) + solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL])) + + for i in compressed_spendable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # bare and p2sh multisig with compressed keys should always be spendable + spendable_anytime.extend([bare, p2sh]) + # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress + spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH and P2PK with compressed keys should always be spendable + spendable_anytime.extend([p2pkh, p2pk]) + # P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress + spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + # P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable + spendable_anytime.extend([p2wpkh, p2sh_p2wpkh]) + + for i in uncompressed_spendable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # bare and p2sh multisig with uncompressed keys should always be spendable + spendable_anytime.extend([bare, p2sh]) + # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen + unseen_anytime.extend([p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH and P2PK with uncompressed keys should always be spendable + spendable_anytime.extend([p2pkh, p2pk]) + # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress + spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) + # Witness output types with uncompressed keys are never seen + unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + + for i in compressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + # Multisig without private is not seen after addmultisigaddress, but seen after importaddress + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen + solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh]) + # P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress + solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, 
p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + + for i in uncompressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress + solvable_after_importaddress.extend([bare, p2sh]) + # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen + unseen_anytime.extend([p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH and P2PK with uncompressed keys should always be seen + solvable_anytime.extend([p2pkh, p2pk]) + # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress + solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) + # Witness output types with uncompressed keys are never seen + unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + + op1 = CScript([OP_1]) + op0 = CScript([OP_0]) + # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V + unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)] + unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D") + unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG]) + unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)]) + p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL]) + p2wshop1 = CScript([OP_0, sha256(op1)]) + unsolvable_after_importaddress.append(unsolvablep2pkh) + unsolvable_after_importaddress.append(unsolvablep2wshp2pkh) + unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script + unsolvable_after_importaddress.append(p2wshop1) + unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided + unsolvable_after_importaddress.append(p2shop0) + + spendable_txid = [] + solvable_txid = [] + spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2)) + solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1)) + self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0) + + importlist = [] + for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + bare = hex_str_to_bytes(v['hex']) + importlist.append(bytes_to_hex_str(bare)) + importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)]))) + else: + pubkey = hex_str_to_bytes(v['pubkey']) + p2pk = CScript([pubkey, OP_CHECKSIG]) + p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG]) + importlist.append(bytes_to_hex_str(p2pk)) + importlist.append(bytes_to_hex_str(p2pkh)) + importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)]))) + importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)]))) + importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)]))) + + importlist.append(bytes_to_hex_str(unsolvablep2pkh)) + importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh)) + importlist.append(bytes_to_hex_str(op1)) + 
importlist.append(bytes_to_hex_str(p2wshop1)) + + for i in importlist: + # import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC + # exceptions and continue. + try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True) + + self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only + self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey + + spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2)) + solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1)) + self.mine_and_test_listunspent(unsolvable_after_importaddress, 1) + self.mine_and_test_listunspent(unseen_anytime, 0) + + # addwitnessaddress should refuse to return a witness address if an uncompressed key is used + # note that no witness address should be returned by unsolvable addresses + for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address: + assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i) + + # addwitnessaddress should return a witness addresses even if keys are not in the wallet + self.nodes[0].addwitnessaddress(multisig_without_privkey_address) + + for i in compressed_spendable_address + compressed_solvable_address: + witaddress = self.nodes[0].addwitnessaddress(i) + # addwitnessaddress should return the same address if it is a known P2SH-witness address + assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress)) + + spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2)) + solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1)) + self.mine_and_test_listunspent(unsolvable_after_importaddress, 1) + self.mine_and_test_listunspent(unseen_anytime, 0) + + # Repeat some tests. 
This time we don't add witness scripts with importaddress + # Import a compressed key and an uncompressed key, generate some multisig addresses + self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH") + uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"] + self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw") + compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"] + + self.nodes[0].importpubkey(pubkeys[5]) + compressed_solvable_address = [key_to_p2pkh(pubkeys[5])] + self.nodes[0].importpubkey(pubkeys[6]) + uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])] + + spendable_after_addwitnessaddress = [] # These outputs should be seen after importaddress + solvable_after_addwitnessaddress=[] # These outputs should be seen after importaddress but not spendable + unseen_anytime = [] # These outputs should never be seen + solvable_anytime = [] # These outputs should be solvable after importpubkey + unseen_anytime = [] # These outputs should never be seen + + uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address']) + uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address']) + compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address']) + uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address']) + compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address']) + + premature_witaddress = [] + + for i in compressed_spendable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress + spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) + premature_witaddress.append(script_to_p2sh(p2wsh)) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # P2WPKH, P2SH_P2WPKH are always spendable + spendable_anytime.extend([p2wpkh, p2sh_p2wpkh]) + + for i in uncompressed_spendable_address + uncompressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen + unseen_anytime.extend([p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen + unseen_anytime.extend([p2wpkh, p2sh_p2wpkh]) + + for i in compressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + # P2WSH multisig without private key are seen after addwitnessaddress + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) + premature_witaddress.append(script_to_p2sh(p2wsh)) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, 
p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # P2SH_P2PK, P2SH_P2PKH with compressed keys are always solvable + solvable_anytime.extend([p2wpkh, p2sh_p2wpkh]) + + self.mine_and_test_listunspent(spendable_anytime, 2) + self.mine_and_test_listunspent(solvable_anytime, 1) + self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0) + + # addwitnessaddress should refuse to return a witness address if an uncompressed key is used + # note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress + # premature_witaddress are not accepted until the script is added with addwitnessaddress first + for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress: + # This will raise an exception + assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i) + + # after importaddress it should pass addwitnessaddress + v = self.nodes[0].validateaddress(compressed_solvable_address[1]) + self.nodes[0].importaddress(v['hex'],"",False,True) + for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress: + witaddress = self.nodes[0].addwitnessaddress(i) + assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress)) + + spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2)) + solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1)) + self.mine_and_test_listunspent(unseen_anytime, 0) + + # Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works + v1_addr = program_to_witness(1, [3,5]) + v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1}) + v1_decoded = self.nodes[1].decoderawtransaction(v1_tx) + assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr) + assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305") + + # Check that spendable outputs are really spendable + self.create_and_mine_tx_from_txids(spendable_txid) + + # import all the private keys so solvable addresses become spendable + self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb") + self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97") + self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV") + self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd") + self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66") + self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K") + self.create_and_mine_tx_from_txids(solvable_txid) + + # Test that importing native P2WPKH/P2WSH scripts works + for use_p2wsh in [False, True]: + if use_p2wsh: + scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a" + transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000" + else: + scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87" + transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000" + + self.nodes[1].importaddress(scriptPubKey, "", False) + rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex'] + rawtxfund = self.nodes[1].signrawtransaction(rawtxfund)["hex"] 
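+            # The template pays to the script imported watch-only above;
+            # fundrawtransaction supplied inputs and change from node1's wallet, so
+            # the signed tx should broadcast, and the watch-only import is what makes
+            # it visible to gettransaction/listtransactions below.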
+ txid = self.nodes[1].sendrawtransaction(rawtxfund) + + assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) + assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) + + # Assert it is properly saved + self.stop_node(1) + self.start_node(1) + assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) + assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) + + def mine_and_test_listunspent(self, script_list, ismine): + utxo = find_unspent(self.nodes[0], 50) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout']))) + for i in script_list: + tx.vout.append(CTxOut(10000000, i)) + tx.rehash() + signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] + txid = self.nodes[0].sendrawtransaction(signresults, True) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + watchcount = 0 + spendcount = 0 + for i in self.nodes[0].listunspent(): + if (i['txid'] == txid): + watchcount += 1 + if (i['spendable'] == True): + spendcount += 1 + if (ismine == 2): + assert_equal(spendcount, len(script_list)) + elif (ismine == 1): + assert_equal(watchcount, len(script_list)) + assert_equal(spendcount, 0) + else: + assert_equal(watchcount, 0) + return txid + + def p2sh_address_to_script(self,v): + bare = CScript(hex_str_to_bytes(v['hex'])) + p2sh = CScript(hex_str_to_bytes(v['scriptPubKey'])) + p2wsh = CScript([OP_0, sha256(bare)]) + p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL]) + return([bare, p2sh, p2wsh, p2sh_p2wsh]) + + def p2pkh_address_to_script(self,v): + pubkey = hex_str_to_bytes(v['pubkey']) + p2wpkh = CScript([OP_0, hash160(pubkey)]) + p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL]) + p2pk = CScript([pubkey, OP_CHECKSIG]) + p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey'])) + p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL]) + p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL]) + p2wsh_p2pk = CScript([OP_0, sha256(p2pk)]) + p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)]) + p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL]) + p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL]) + return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] + + def create_and_mine_tx_from_txids(self, txids, success = True): + tx = CTransaction() + for i in txids: + txtmp = CTransaction() + txraw = self.nodes[0].getrawtransaction(i) + f = BytesIO(hex_str_to_bytes(txraw)) + txtmp.deserialize(f) + for j in range(len(txtmp.vout)): + tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j))) + tx.vout.append(CTxOut(0, CScript())) + tx.rehash() + signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] + self.nodes[0].sendrawtransaction(signresults, True) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + +if __name__ == '__main__': + SegWitTest().main() diff --git a/test/functional/feature_uacomment.py b/test/functional/feature_uacomment.py new file mode 100755 index 0000000000..0b2c64ab69 --- /dev/null +++ b/test/functional/feature_uacomment.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the -uacomment option.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class UacommentTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def run_test(self): + self.log.info("test multiple -uacomment") + test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1] + assert_equal(test_uacomment, "(testnode0)") + + self.restart_node(0, ["-uacomment=foo"]) + foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1] + assert_equal(foo_uacomment, "(testnode0; foo)") + + self.log.info("test -uacomment max length") + self.stop_node(0) + expected = "Total length of network version string (286) exceeds maximum length (256). Reduce the number or size of uacomments." + self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected) + + self.log.info("test -uacomment unsafe characters") + for unsafe_char in ['/', ':', '(', ')']: + expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters" + self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected) + +if __name__ == '__main__': + UacommentTest().main() diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py new file mode 100755 index 0000000000..0bfe94622f --- /dev/null +++ b/test/functional/feature_versionbits_warning.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test version bits warning system. + +Generate chains with block versions that appear to be signalling unknown +soft-forks, and test that warning alerts are generated. +""" + +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +import re +from test_framework.blocktools import create_block, create_coinbase + +VB_PERIOD = 144 # versionbits period length for regtest +VB_THRESHOLD = 108 # versionbits activation threshold for regtest +VB_TOP_BITS = 0x20000000 +VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment + +WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect" +WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT) +VB_PATTERN = re.compile("^Warning.*versionbit") + +class TestNode(P2PInterface): + def on_inv(self, message): + pass + +class VersionBitsWarningTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def setup_network(self): + self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") + # Open and close to create zero-length file + with open(self.alert_filename, 'w', encoding='utf8') as _: + pass + self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]] + self.setup_nodes() + + # Send numblocks blocks via peer with nVersionToUse set. 
+    def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
+        tip = self.nodes[0].getbestblockhash()
+        height = self.nodes[0].getblockcount()
+        block_time = self.nodes[0].getblockheader(tip)["time"]+1
+        tip = int(tip, 16)
+
+        for _ in range(numblocks):
+            block = create_block(tip, create_coinbase(height+1), block_time)
+            block.nVersion = nVersionToUse
+            block.solve()
+            peer.send_message(msg_block(block))
+            block_time += 1
+            height += 1
+            tip = block.sha256
+        peer.sync_with_ping()
+
+    def test_versionbits_in_alert_file(self):
+        with open(self.alert_filename, 'r', encoding='utf8') as f:
+            alert_text = f.read()
+        assert(VB_PATTERN.match(alert_text))
+
+    def run_test(self):
+        # Setup the p2p connection and start up the network thread.
+        self.nodes[0].add_p2p_connection(TestNode())
+
+        network_thread_start()
+
+        # Test logic begins here
+        self.nodes[0].p2p.wait_for_verack()
+
+        # 1. Have the node mine one period worth of blocks
+        self.nodes[0].generate(VB_PERIOD)
+
+        # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
+        # blocks signaling some unknown bit.
+        nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
+        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD-1, nVersion)
+
+        # Fill rest of period with regular version blocks.
+        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
+        # Check that we're not getting any versionbit-related errors in
+        # get*info()
+        assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["warnings"]))
+        assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
+
+        # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
+        # some unknown bit
+        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD, nVersion)
+        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
+        # Might not get a versionbits-related alert yet, as we should
+        # have gotten a different alert due to more than 51/100 blocks
+        # being of unexpected version.
+        # Check that get*info() shows some kind of error.
+        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
+
+        # Mine a period worth of expected blocks so the generic block-version warning
+        # is cleared, and restart the node. This should move the versionbit state
+        # to ACTIVE.
+        self.nodes[0].generate(VB_PERIOD)
+        self.stop_nodes()
+        # Empty out the alert file
+        with open(self.alert_filename, 'w', encoding='utf8') as _:
+            pass
+        self.start_nodes()
+
+        # Connecting one block should be enough to generate an error.
+        self.nodes[0].generate(1)
+        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
+        self.stop_nodes()
+        self.test_versionbits_in_alert_file()
+
+        # Test framework expects the node to still be running...
+        self.start_nodes()
+
+if __name__ == '__main__':
+    VersionBitsWarningTest().main()
diff --git a/test/functional/maxuploadtarget.py b/test/functional/maxuploadtarget.py
deleted file mode 100755
index 45336ee801..0000000000
--- a/test/functional/maxuploadtarget.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test behavior of -maxuploadtarget.
-
-* Verify that getdata requests for old blocks (>1week) are dropped
-if uploadtarget has been reached.
-* Verify that getdata requests for recent blocks are respected even
-if uploadtarget has been reached.
-* Verify that the upload counters are reset after 24 hours.
-""" -from collections import defaultdict -import time - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class TestNode(P2PInterface): - def __init__(self): - super().__init__() - self.block_receive_map = defaultdict(int) - - def on_inv(self, message): - pass - - def on_block(self, message): - message.block.calc_sha256() - self.block_receive_map[message.block.sha256] += 1 - -class MaxUploadTest(BitcoinTestFramework): - - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [["-maxuploadtarget=800", "-blockmaxsize=999000"]] - - # Cache for utxos, as the listunspent may take a long time later in the test - self.utxo_cache = [] - - def run_test(self): - # Before we connect anything, we first set the time on the node - # to be in the past, otherwise things break because the CNode - # time counters can't be reset backward after initialization - old_time = int(time.time() - 2*60*60*24*7) - self.nodes[0].setmocktime(old_time) - - # Generate some old blocks - self.nodes[0].generate(130) - - # p2p_conns[0] will only request old blocks - # p2p_conns[1] will only request new blocks - # p2p_conns[2] will test resetting the counters - p2p_conns = [] - - for _ in range(3): - p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode())) - - network_thread_start() - for p2pc in p2p_conns: - p2pc.wait_for_verack() - - # Test logic begins here - - # Now mine a big block - mine_large_block(self.nodes[0], self.utxo_cache) - - # Store the hash; we'll request this later - big_old_block = self.nodes[0].getbestblockhash() - old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] - big_old_block = int(big_old_block, 16) - - # Advance to two days ago - self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24) - - # Mine one more block, so that the prior block looks old - mine_large_block(self.nodes[0], self.utxo_cache) - - # We'll be requesting this new block too - big_new_block = self.nodes[0].getbestblockhash() - big_new_block = int(big_new_block, 16) - - # p2p_conns[0] will test what happens if we just keep requesting the - # the same big old block too many times (expect: disconnect) - - getdata_request = msg_getdata() - getdata_request.inv.append(CInv(2, big_old_block)) - - max_bytes_per_day = 800*1024*1024 - daily_buffer = 144 * 4000000 - max_bytes_available = max_bytes_per_day - daily_buffer - success_count = max_bytes_available // old_block_size - - # 576MB will be reserved for relaying new blocks, so expect this to - # succeed for ~235 tries. - for i in range(success_count): - p2p_conns[0].send_message(getdata_request) - p2p_conns[0].sync_with_ping() - assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1) - - assert_equal(len(self.nodes[0].getpeerinfo()), 3) - # At most a couple more tries should succeed (depending on how long - # the test has been running so far). - for i in range(3): - p2p_conns[0].send_message(getdata_request) - p2p_conns[0].wait_for_disconnect() - assert_equal(len(self.nodes[0].getpeerinfo()), 2) - self.log.info("Peer 0 disconnected after downloading old block too many times") - - # Requesting the current block on p2p_conns[1] should succeed indefinitely, - # even when over the max upload target. 
- # We'll try 800 times - getdata_request.inv = [CInv(2, big_new_block)] - for i in range(800): - p2p_conns[1].send_message(getdata_request) - p2p_conns[1].sync_with_ping() - assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1) - - self.log.info("Peer 1 able to repeatedly download new block") - - # But if p2p_conns[1] tries for an old block, it gets disconnected too. - getdata_request.inv = [CInv(2, big_old_block)] - p2p_conns[1].send_message(getdata_request) - p2p_conns[1].wait_for_disconnect() - assert_equal(len(self.nodes[0].getpeerinfo()), 1) - - self.log.info("Peer 1 disconnected after trying to download old block") - - self.log.info("Advancing system time on node to clear counters...") - - # If we advance the time by 24 hours, then the counters should reset, - # and p2p_conns[2] should be able to retrieve the old block. - self.nodes[0].setmocktime(int(time.time())) - p2p_conns[2].sync_with_ping() - p2p_conns[2].send_message(getdata_request) - p2p_conns[2].sync_with_ping() - assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) - - self.log.info("Peer 2 able to download old block") - - self.nodes[0].disconnect_p2ps() - - #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 - self.log.info("Restarting nodes with -whitelist=127.0.0.1") - self.stop_node(0) - self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) - - # Reconnect to self.nodes[0] - self.nodes[0].add_p2p_connection(TestNode()) - - network_thread_start() - self.nodes[0].p2p.wait_for_verack() - - #retrieve 20 blocks which should be enough to break the 1MB limit - getdata_request.inv = [CInv(2, big_new_block)] - for i in range(20): - self.nodes[0].p2p.send_message(getdata_request) - self.nodes[0].p2p.sync_with_ping() - assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1) - - getdata_request.inv = [CInv(2, big_old_block)] - self.nodes[0].p2p.send_and_ping(getdata_request) - assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist - - self.log.info("Peer still connected after trying to download old block (whitelisted)") - -if __name__ == '__main__': - MaxUploadTest().main() diff --git a/test/functional/minchainwork.py b/test/functional/minchainwork.py deleted file mode 100755 index 90a3de0e0d..0000000000 --- a/test/functional/minchainwork.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test logic for setting nMinimumChainWork on command line. - -Nodes don't consider themselves out of "initial block download" until -their active chain has more work than nMinimumChainWork. - -Nodes don't download blocks from a peer unless the peer's best known block -has more work than nMinimumChainWork. - -While in initial block download, nodes won't relay blocks to their peers, so -test that this parameter functions as intended by verifying that block relay -only succeeds past a given node once its nMinimumChainWork has been exceeded. 
-""" - -import time - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import connect_nodes, assert_equal - -# 2 hashes required per regtest block (with no difficulty adjustment) -REGTEST_WORK_PER_BLOCK = 2 - -class MinimumChainWorkTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - - self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] - self.node_min_work = [0, 101, 101] - - def setup_network(self): - # This test relies on the chain setup being: - # node0 <- node1 <- node2 - # Before leaving IBD, nodes prefer to download blocks from outbound - # peers, so ensure that we're mining on an outbound peer and testing - # block relay to inbound peers. - self.setup_nodes() - for i in range(self.num_nodes-1): - connect_nodes(self.nodes[i+1], i) - - def run_test(self): - # Start building a chain on node0. node2 shouldn't be able to sync until node1's - # minchainwork is exceeded - starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work - self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1]) - - starting_blockcount = self.nodes[2].getblockcount() - - num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) - self.log.info("Generating %d blocks on node0", num_blocks_to_generate) - hashes = self.nodes[0].generate(num_blocks_to_generate) - - self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork']) - - # Sleep a few seconds and verify that node2 didn't get any new blocks - # or headers. We sleep, rather than sync_blocks(node0, node1) because - # it's reasonable either way for node1 to get the blocks, or not get - # them (since they're below node1's minchainwork). - time.sleep(3) - - self.log.info("Verifying node 2 has no more blocks than before") - self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) - # Node2 shouldn't have any new headers yet, because node1 should not - # have relayed anything. - assert_equal(len(self.nodes[2].getchaintips()), 1) - assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) - - assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash() - assert_equal(self.nodes[2].getblockcount(), starting_blockcount) - - self.log.info("Generating one more block") - self.nodes[0].generate(1) - - self.log.info("Verifying nodes are all synced") - - # Because nodes in regtest are all manual connections (eg using - # addnode), node1 should not have disconnected node0. If not for that, - # we'd expect node1 to have disconnected node0 for serving an - # insufficient work chain, in which case we'd need to reconnect them to - # continue the test. - - self.sync_all() - self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) - -if __name__ == '__main__': - MinimumChainWorkTest().main() diff --git a/test/functional/notifications.py b/test/functional/notifications.py deleted file mode 100755 index 980bef5fc8..0000000000 --- a/test/functional/notifications.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the -alertnotify, -blocknotify and -walletnotify options.""" -import os - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, wait_until, connect_nodes_bi - -class NotificationsTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.setup_clean_chain = True - - def setup_network(self): - self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") - self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt") - self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt") - - # -alertnotify and -blocknotify on node0, walletnotify on node1 - self.extra_args = [["-blockversion=2", - "-alertnotify=echo %%s >> %s" % self.alert_filename, - "-blocknotify=echo %%s >> %s" % self.block_filename], - ["-blockversion=211", - "-rescan", - "-walletnotify=echo %%s >> %s" % self.tx_filename]] - super().setup_network() - - def run_test(self): - self.log.info("test -blocknotify") - block_count = 10 - blocks = self.nodes[1].generate(block_count) - - # wait at most 10 seconds for expected file size before reading the content - wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10) - - # file content should equal the generated blocks hashes - with open(self.block_filename, 'r') as f: - assert_equal(sorted(blocks), sorted(f.read().splitlines())) - - self.log.info("test -walletnotify") - # wait at most 10 seconds for expected file size before reading the content - wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) - - # file content should equal the generated transaction hashes - txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) - with open(self.tx_filename, 'r') as f: - assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) - os.remove(self.tx_filename) - - self.log.info("test -walletnotify after rescan") - # restart node to rescan to force wallet notifications - self.restart_node(1) - connect_nodes_bi(self.nodes, 0, 1) - - wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) - - # file content should equal the generated transaction hashes - txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) - with open(self.tx_filename, 'r') as f: - assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) - - # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st. 
- self.log.info("test -alertnotify") - self.nodes[1].generate(41) - self.sync_all() - - # Give bitcoind 10 seconds to write the alert notification - wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10) - - with open(self.alert_filename, 'r', encoding='utf8') as f: - alert_text = f.read() - - # Mine more up-version blocks, should not get more alerts: - self.nodes[1].generate(2) - self.sync_all() - - with open(self.alert_filename, 'r', encoding='utf8') as f: - alert_text2 = f.read() - - self.log.info("-alertnotify should not continue notifying for more unknown version blocks") - assert_equal(alert_text, alert_text2) - -if __name__ == '__main__': - NotificationsTest().main() diff --git a/test/functional/nulldummy.py b/test/functional/nulldummy.py deleted file mode 100755 index e4f413cc2a..0000000000 --- a/test/functional/nulldummy.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test NULLDUMMY softfork. - -Connect to a single node. -Generate 2 blocks (save the coinbases for later). -Generate 427 more blocks. -[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. -[Policy] Check that non-NULLDUMMY transactions are rejected before activation. -[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. -[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment -from test_framework.script import CScript -from io import BytesIO -import time - -NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" - -def trueDummy(tx): - scriptSig = CScript(tx.vin[0].scriptSig) - newscript = [] - for i in scriptSig: - if (len(newscript) == 0): - assert(len(i) == 0) - newscript.append(b'\x51') - else: - newscript.append(i) - tx.vin[0].scriptSig = CScript(newscript) - tx.rehash() - -class NULLDUMMYTest(BitcoinTestFramework): - - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through - # normal segwit activation here (and don't use the default always-on behaviour). 
- self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]] - - def run_test(self): - self.address = self.nodes[0].getnewaddress() - self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])['address'] - self.wit_address = self.nodes[0].addwitnessaddress(self.address) - self.wit_ms_address = self.nodes[0].addmultisigaddress(1, [self.address], '', 'p2sh-segwit')['address'] - - network_thread_start() - self.coinbase_blocks = self.nodes[0].generate(2) # Block 2 - coinbase_txid = [] - for i in self.coinbase_blocks: - coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) - self.nodes[0].generate(427) # Block 429 - self.lastblockhash = self.nodes[0].getbestblockhash() - self.tip = int("0x" + self.lastblockhash, 0) - self.lastblockheight = 429 - self.lastblocktime = int(time.time()) + 429 - - self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") - test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)] - txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True) - test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48)) - txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True) - test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49)) - txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True) - self.block_submit(self.nodes[0], test1txs, False, True) - - self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") - test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47) - trueDummy(test2tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True) - - self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") - self.block_submit(self.nodes[0], [test2tx], False, True) - - self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation") - test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46) - test6txs=[CTransaction(test4tx)] - trueDummy(test4tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True) - self.block_submit(self.nodes[0], [test4tx]) - - self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation") - test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48) - test6txs.append(CTransaction(test5tx)) - test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01' - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True) - self.block_submit(self.nodes[0], [test5tx], True) - - self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]") - for i in test6txs: - self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True) - self.block_submit(self.nodes[0], test6txs, True, True) - - - def create_transaction(self, node, txid, to_address, amount): - inputs = [{ "txid" : txid, "vout" : 0}] - outputs = { 
to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - signresult = node.signrawtransaction(rawtx) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(signresult['hex'])) - tx.deserialize(f) - return tx - - - def block_submit(self, node, txs, witness = False, accept = False): - block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1) - block.nVersion = 4 - for tx in txs: - tx.rehash() - block.vtx.append(tx) - block.hashMerkleRoot = block.calc_merkle_root() - witness and add_witness_commitment(block) - block.rehash() - block.solve() - node.submitblock(bytes_to_hex_str(block.serialize(True))) - if (accept): - assert_equal(node.getbestblockhash(), block.hash) - self.tip = block.sha256 - self.lastblockhash = block.hash - self.lastblocktime += 1 - self.lastblockheight += 1 - else: - assert_equal(node.getbestblockhash(), self.lastblockhash) - -if __name__ == '__main__': - NULLDUMMYTest().main() diff --git a/test/functional/p2p-fullblocktest.py b/test/functional/p2p-fullblocktest.py deleted file mode 100755 index fe9bbda14b..0000000000 --- a/test/functional/p2p-fullblocktest.py +++ /dev/null @@ -1,1293 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test block processing. - -This reimplements tests from the bitcoinj/FullBlockTestGenerator used -by the pull-tester. - -We use the testing framework in which we expect a particular answer from -each test. -""" - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.comptool import TestManager, TestInstance, RejectResult -from test_framework.blocktools import * -import time -from test_framework.key import CECKey -from test_framework.script import * -from test_framework.mininode import network_thread_start -import struct - -class PreviousSpendableOutput(): - def __init__(self, tx = CTransaction(), n = -1): - self.tx = tx - self.n = n # the output we're spending - -# Use this class for tests that require behavior other than normal "mininode" behavior. -# For now, it is used to serialize a bloated varint (b64). -class CBrokenBlock(CBlock): - def __init__(self, header=None): - super(CBrokenBlock, self).__init__(header) - - def initialize(self, base_block): - self.vtx = copy.deepcopy(base_block.vtx) - self.hashMerkleRoot = self.calc_merkle_root() - - def serialize(self, with_witness=False): - r = b"" - r += super(CBlock, self).serialize() - r += struct.pack(" b1 (0) -> b2 (1) - block(1, spend=out[0]) - save_spendable_output() - yield accepted() - - block(2, spend=out[1]) - yield accepted() - save_spendable_output() - - # so fork like this: - # - # genesis -> b1 (0) -> b2 (1) - # \-> b3 (1) - # - # Nothing should happen at this point. We saw b2 first so it takes priority. - tip(1) - b3 = block(3, spend=out[1]) - txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0) - yield rejected() - - - # Now we add another block to make the alternative chain longer. - # - # genesis -> b1 (0) -> b2 (1) - # \-> b3 (1) -> b4 (2) - block(4, spend=out[2]) - yield accepted() - - - # ... and back to the first chain. 
- # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b3 (1) -> b4 (2) - tip(2) - block(5, spend=out[2]) - save_spendable_output() - yield rejected() - - block(6, spend=out[3]) - yield accepted() - - # Try to create a fork that double-spends - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b7 (2) -> b8 (4) - # \-> b3 (1) -> b4 (2) - tip(5) - block(7, spend=out[2]) - yield rejected() - - block(8, spend=out[4]) - yield rejected() - - # Try to create a block that has too much fee - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b9 (4) - # \-> b3 (1) -> b4 (2) - tip(6) - block(9, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) - - # Create a fork that ends in a block with too much fee (the one that causes the reorg) - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b10 (3) -> b11 (4) - # \-> b3 (1) -> b4 (2) - tip(5) - block(10, spend=out[3]) - yield rejected() - - block(11, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) - - - # Try again, but with a valid fork first - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b14 (5) - # (b12 added last) - # \-> b3 (1) -> b4 (2) - tip(5) - b12 = block(12, spend=out[3]) - save_spendable_output() - b13 = block(13, spend=out[4]) - # Deliver the block header for b12, and the block b13. - # b13 should be accepted but the tip won't advance until b12 is delivered. - yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) - - save_spendable_output() - # b14 is invalid, but the node won't know that until it tries to connect - # Tip still can't advance because b12 is missing - block(14, spend=out[5], additional_coinbase_value=1) - yield rejected() - - yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13. 
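The next group of blocks probes the MAX_BLOCK_SIGOPS limit. Under legacy accounting, CHECKSIG-family opcodes cost one sigop each while CHECKMULTISIG-family opcodes cost a flat 20, and each block here already carries one sigop from its coinbase. A small standalone sketch of the edge-case arithmetic these blocks rely on, with the regtest constant inlined:

    MAX_BLOCK_SIGOPS = 20000

    # b15-style edge: 19999 CHECKSIGs in the spend script plus the one
    # coinbase sigop lands exactly on the limit; one more (b16) exceeds it.
    assert (MAX_BLOCK_SIGOPS - 1) + 1 == MAX_BLOCK_SIGOPS

    # b31-style edge: 999 CHECKMULTISIGs (flat 20 sigops each) plus 19
    # trailing CHECKSIGs plus the coinbase sigop also lands on the limit.
    assert 999 * 20 + 19 + 1 == MAX_BLOCK_SIGOPS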
- - # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) - # \-> b3 (1) -> b4 (2) - - # Test that a block with a lot of checksigs is okay - lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1)) - tip(13) - block(15, spend=out[5], script=lots_of_checksigs) - yield accepted() - save_spendable_output() - - - # Test that a block with too many checksigs is rejected - too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) - block(16, spend=out[6], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # Attempt to spend a transaction created on a different fork - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) - # \-> b3 (1) -> b4 (2) - tip(15) - block(17, spend=txout_b3) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # Attempt to spend a transaction created on a different fork (on a fork this time) - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) - # \-> b18 (b3.vtx[1]) -> b19 (6) - # \-> b3 (1) -> b4 (2) - tip(13) - block(18, spend=txout_b3) - yield rejected() - - block(19, spend=out[6]) - yield rejected() - - # Attempt to spend a coinbase at depth too low - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) - # \-> b3 (1) -> b4 (2) - tip(15) - block(20, spend=out[7]) - yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) - - # Attempt to spend a coinbase at depth too low (on a fork this time) - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) - # \-> b21 (6) -> b22 (5) - # \-> b3 (1) -> b4 (2) - tip(13) - block(21, spend=out[6]) - yield rejected() - - block(22, spend=out[5]) - yield rejected() - - # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) - # \-> b24 (6) -> b25 (7) - # \-> b3 (1) -> b4 (2) - tip(15) - b23 = block(23, spend=out[6]) - tx = CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) - tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) - b23 = update_block(23, [tx]) - # Make sure the math above worked out to produce a max-sized block - assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE) - yield accepted() - save_spendable_output() - - # Make the next block one byte bigger and check that it fails - tip(15) - b24 = block(24, spend=out[6]) - script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69 - script_output = CScript([b'\x00' * (script_length+1)]) - tx.vout = [CTxOut(0, script_output)] - b24 = update_block(24, [tx]) - assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1) - yield rejected(RejectResult(16, b'bad-blk-length')) - - block(25, spend=out[7]) - yield rejected() - - # Create blocks with a coinbase input script size out of range - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) - # \-> ... (6) -> ... 
(7) - # \-> b3 (1) -> b4 (2) - tip(15) - b26 = block(26, spend=out[6]) - b26.vtx[0].vin[0].scriptSig = b'\x00' - b26.vtx[0].rehash() - # update_block causes the merkle root to get updated, even with no new - # transactions, and updates the required state. - b26 = update_block(26, []) - yield rejected(RejectResult(16, b'bad-cb-length')) - - # Extend the b26 chain to make sure bitcoind isn't accepting b26 - block(27, spend=out[7]) - yield rejected(False) - - # Now try a too-large-coinbase script - tip(15) - b28 = block(28, spend=out[6]) - b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 - b28.vtx[0].rehash() - b28 = update_block(28, []) - yield rejected(RejectResult(16, b'bad-cb-length')) - - # Extend the b28 chain to make sure bitcoind isn't accepting b28 - block(29, spend=out[7]) - yield rejected(False) - - # b30 has a max-sized coinbase scriptSig. - tip(23) - b30 = block(30) - b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 - b30.vtx[0].rehash() - b30 = update_block(30, []) - yield accepted() - save_spendable_output() - - # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY - # - # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) - # \-> b36 (11) - # \-> b34 (10) - # \-> b32 (9) - # - - # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end. - lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - b31 = block(31, spend=out[8], script=lots_of_multisigs) - assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS) - yield accepted() - save_spendable_output() - - # this goes over the limit because the coinbase has one sigop - too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20)) - b32 = block(32, spend=out[9], script=too_many_multisigs) - assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # CHECKMULTISIGVERIFY - tip(31) - lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - block(33, spend=out[9], script=lots_of_multisigs) - yield accepted() - save_spendable_output() - - too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20)) - block(34, spend=out[10], script=too_many_multisigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # CHECKSIGVERIFY - tip(33) - lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1)) - b35 = block(35, spend=out[10], script=lots_of_checksigs) - yield accepted() - save_spendable_output() - - too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS)) - block(36, spend=out[11], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # Check spending of a transaction in a block which failed to connect - # - # b6 (3) - # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) - # \-> b37 (11) - # \-> b38 (11/37) - # - - # save 37's spendable output, but then double-spend out11 to invalidate the block - tip(35) - b37 = block(37, spend=out[11]) - txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) - tx = create_and_sign_tx(out[11].tx, out[11].n, 0) - b37 = update_block(37, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid - tip(35) - block(38, spend=txout_b37) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - 
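The P2SH section that follows builds scriptPubKeys of the form OP_HASH160 <20-byte hash> OP_EQUAL, where the hash is HASH160, i.e. RIPEMD160 of SHA256. A self-contained sketch of that construction using plain hashlib (assuming your hashlib build provides ripemd160; the redeem script bytes are a placeholder, not the test's 6-sigop script):

    import hashlib

    def hash160(data):
        # HASH160 = RIPEMD160(SHA256(data)), the digest a P2SH output commits to.
        return hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest()

    OP_HASH160, OP_EQUAL = 0xa9, 0x87
    redeem_script = b'\x51'          # placeholder redeem script (OP_1)
    p2sh_script = bytes([OP_HASH160, 20]) + hash160(redeem_script) + bytes([OP_EQUAL])
    assert len(p2sh_script) == 23    # 1 opcode + 1 push byte + 20 bytes + 1 opcode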
# Check P2SH SigOp counting - # - # - # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12) - # \-> b40 (12) - # - # b39 - create some P2SH outputs that will require 6 sigops to spend: - # - # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG - # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL - # - tip(35) - b39 = block(39) - b39_outputs = 0 - b39_sigops_per_output = 6 - - # Build the redeem script, hash it, use hash to create the p2sh script - redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG]) - redeem_script_hash = hash160(redeem_script) - p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) - - # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE - # This must be signed because it is spending a coinbase - spend = out[11] - tx = create_tx(spend.tx, spend.n, 1, p2sh_script) - tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) - self.sign_tx(tx, spend.tx, spend.n) - tx.rehash() - b39 = update_block(39, [tx]) - b39_outputs += 1 - - # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE - tx_new = None - tx_last = tx - total_size=len(b39.serialize()) - while(total_size < MAX_BLOCK_BASE_SIZE): - tx_new = create_tx(tx_last, 1, 1, p2sh_script) - tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) - tx_new.rehash() - total_size += len(tx_new.serialize()) - if total_size >= MAX_BLOCK_BASE_SIZE: - break - b39.vtx.append(tx_new) # add tx to block - tx_last = tx_new - b39_outputs += 1 - - b39 = update_block(39, []) - yield accepted() - save_spendable_output() - - - # Test sigops in P2SH redeem scripts - # - # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops. - # The first tx has one sigop and then at the end we add 2 more to put us just over the max. - # - # b41 does the same, less one, so it has the maximum sigops permitted. 
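To make the accounting in the b40/b41 code below easier to follow, here is the same arithmetic in isolation. The starting sigop count is a hypothetical stand-in; in the test it is measured from the block with get_legacy_sigopcount_block:

    MAX_BLOCK_SIGOPS = 20000
    b39_sigops_per_output = 6

    sigops = 2       # hypothetical sigops already in b40 before the P2SH spends
    numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
    # b40 then adds a final tx with just enough CHECKSIGs to overshoot by one;
    # b41 uses one fewer and is accepted.
    b40_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
    assert numTxes * b39_sigops_per_output + sigops + b40_fill == MAX_BLOCK_SIGOPS + 1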
- # - tip(39) - b40 = block(40, spend=out[12]) - sigops = get_legacy_sigopcount_block(b40) - numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output - assert_equal(numTxes <= b39_outputs, True) - - lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) - new_txs = [] - for i in range(1, numTxes+1): - tx = CTransaction() - tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) - tx.vin.append(CTxIn(lastOutpoint, b'')) - # second input is corresponding P2SH output from b39 - tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) - # Note: must pass the redeem_script (not p2sh_script) to the signature hash function - (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL) - sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL])) - scriptSig = CScript([sig, redeem_script]) - - tx.vin[1].scriptSig = scriptSig - tx.rehash() - new_txs.append(tx) - lastOutpoint = COutPoint(tx.sha256, 0) - - b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1 - tx = CTransaction() - tx.vin.append(CTxIn(lastOutpoint, b'')) - tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) - tx.rehash() - new_txs.append(tx) - update_block(40, new_txs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - # same as b40, but one less sigop - tip(39) - block(41, spend=None) - update_block(41, b40.vtx[1:-1]) - b41_sigops_to_fill = b40_sigops_to_fill - 1 - tx = CTransaction() - tx.vin.append(CTxIn(lastOutpoint, b'')) - tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) - tx.rehash() - update_block(41, [tx]) - yield accepted() - - # Fork off of b39 to create a constant base again - # - # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) - # \-> b41 (12) - # - tip(39) - block(42, spend=out[12]) - yield rejected() - save_spendable_output() - - block(43, spend=out[13]) - yield accepted() - save_spendable_output() - - - # Test a number of really invalid scenarios - # - # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) - # \-> ??? (15) - - # The next few blocks are going to be created "by hand" since they'll do funky things, such as having - # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works. 
- height = self.block_heights[self.tip.sha256] + 1 - coinbase = create_coinbase(height, self.coinbase_pubkey) - b44 = CBlock() - b44.nTime = self.tip.nTime + 1 - b44.hashPrevBlock = self.tip.sha256 - b44.nBits = 0x207fffff - b44.vtx.append(coinbase) - b44.hashMerkleRoot = b44.calc_merkle_root() - b44.solve() - self.tip = b44 - self.block_heights[b44.sha256] = height - self.blocks[44] = b44 - yield accepted() - - # A block with a non-coinbase as the first tx - non_coinbase = create_tx(out[15].tx, out[15].n, 1) - b45 = CBlock() - b45.nTime = self.tip.nTime + 1 - b45.hashPrevBlock = self.tip.sha256 - b45.nBits = 0x207fffff - b45.vtx.append(non_coinbase) - b45.hashMerkleRoot = b45.calc_merkle_root() - b45.calc_sha256() - b45.solve() - self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1 - self.tip = b45 - self.blocks[45] = b45 - yield rejected(RejectResult(16, b'bad-cb-missing')) - - # A block with no txns - tip(44) - b46 = CBlock() - b46.nTime = b44.nTime+1 - b46.hashPrevBlock = b44.sha256 - b46.nBits = 0x207fffff - b46.vtx = [] - b46.hashMerkleRoot = 0 - b46.solve() - self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1 - self.tip = b46 - assert 46 not in self.blocks - self.blocks[46] = b46 - s = ser_uint256(b46.hashMerkleRoot) - yield rejected(RejectResult(16, b'bad-blk-length')) - - # A block with invalid work - tip(44) - b47 = block(47, solve=False) - target = uint256_from_compact(b47.nBits) - while b47.sha256 < target: #changed > to < - b47.nNonce += 1 - b47.rehash() - yield rejected(RejectResult(16, b'high-hash')) - - # A block with timestamp > 2 hrs in the future - tip(44) - b48 = block(48, solve=False) - b48.nTime = int(time.time()) + 60 * 60 * 3 - b48.solve() - yield rejected(RejectResult(16, b'time-too-new')) - - # A block with an invalid merkle hash - tip(44) - b49 = block(49) - b49.hashMerkleRoot += 1 - b49.solve() - yield rejected(RejectResult(16, b'bad-txnmrklroot')) - - # A block with an incorrect POW limit - tip(44) - b50 = block(50) - b50.nBits = b50.nBits - 1 - b50.solve() - yield rejected(RejectResult(16, b'bad-diffbits')) - - # A block with two coinbase txns - tip(44) - b51 = block(51) - cb2 = create_coinbase(51, self.coinbase_pubkey) - b51 = update_block(51, [cb2]) - yield rejected(RejectResult(16, b'bad-cb-multiple')) - - # A block w/ duplicate txns - # Note: txns have to be in the right position in the merkle tree to trigger this error - tip(44) - b52 = block(52, spend=out[15]) - tx = create_tx(b52.vtx[1], 0, 1) - b52 = update_block(52, [tx, tx]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - - # Test block timestamps - # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) - # \-> b54 (15) - # - tip(43) - block(53, spend=out[14]) - yield rejected() # rejected since b44 is at same height - save_spendable_output() - - # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast) - b54 = block(54, spend=out[15]) - b54.nTime = b35.nTime - 1 - b54.solve() - yield rejected(RejectResult(16, b'time-too-old')) - - # valid timestamp - tip(53) - b55 = block(55, spend=out[15]) - b55.nTime = b35.nTime - update_block(55, []) - yield accepted() - save_spendable_output() - - - # Test CVE-2012-2459 - # - # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) - # \-> b57 (16) - # \-> b56p2 (16) - # \-> b56 (16) - # - # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without - # affecting the merkle root of a block, while still invalidating 
it. - # See: src/consensus/merkle.h - # - # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. - # Result: OK - # - # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle - # root but duplicate transactions. - # Result: Fails - # - # b57p2 has six transactions in its merkle tree: - # - coinbase, tx, tx1, tx2, tx3, tx4 - # Merkle root calculation will duplicate as necessary. - # Result: OK. - # - # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches - # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates - # that the error was caught early, avoiding a DOS vulnerability.) - - # b57 - a good block with 2 txs, don't submit until end - tip(55) - b57 = block(57) - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - b57 = update_block(57, [tx, tx1]) - - # b56 - copy b57, add a duplicate tx - tip(55) - b56 = copy.deepcopy(b57) - self.blocks[56] = b56 - assert_equal(len(b56.vtx),3) - b56 = update_block(56, [tx1]) - assert_equal(b56.hash, b57.hash) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - - # b57p2 - a good block with 6 tx'es, don't submit until end - tip(55) - b57p2 = block("57p2") - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - tx2 = create_tx(tx1, 0, 1) - tx3 = create_tx(tx2, 0, 1) - tx4 = create_tx(tx3, 0, 1) - b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) - - # b56p2 - copy b57p2, duplicate two non-consecutive tx's - tip(55) - b56p2 = copy.deepcopy(b57p2) - self.blocks["b56p2"] = b56p2 - assert_equal(b56p2.hash, b57p2.hash) - assert_equal(len(b56p2.vtx),6) - b56p2 = update_block("b56p2", [tx3, tx4]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - - tip("57p2") - yield accepted() - - tip(57) - yield rejected() #rejected because 57p2 seen first - save_spendable_output() - - # Test a few invalid tx types - # - # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> ??? (17) - # - - # tx with prevout.n out of range - tip(57) - b58 = block(58, spend=out[17]) - tx = CTransaction() - assert(len(out[17].tx.vout) < 42) - tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) - tx.vout.append(CTxOut(0, b"")) - tx.calc_sha256() - b58 = update_block(58, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # tx with output value > input value out of range - tip(57) - b59 = block(59) - tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN) - b59 = update_block(59, [tx]) - yield rejected(RejectResult(16, b'bad-txns-in-belowout')) - - # reset to good chain - tip(57) - b60 = block(60, spend=out[17]) - yield accepted() - save_spendable_output() - - # Test BIP30 - # - # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> b61 (18) - # - # Blocks are not allowed to contain a transaction whose id matches that of an earlier, - # not-fully-spent transaction in the same chain. To test, make identical coinbases; - # the second one should be rejected. 
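Before the BIP30 code below, note that the merkle malleability exercised by b56/b57 above is easy to reproduce in isolation. A standalone sketch using double-SHA256 and Bitcoin's duplicate-the-last-hash rule for odd-length levels (stand-in txids, not real transactions):

    import hashlib

    def dsha256(b):
        return hashlib.sha256(hashlib.sha256(b).digest()).digest()

    def merkle_root(hashes):
        # Bitcoin's merkle tree duplicates the last hash of an odd-length
        # level, which is exactly what makes CVE-2012-2459 possible.
        while len(hashes) > 1:
            if len(hashes) % 2:
                hashes.append(hashes[-1])
            hashes = [dsha256(hashes[i] + hashes[i + 1]) for i in range(0, len(hashes), 2)]
        return hashes[0]

    txids = [dsha256(bytes([i])) for i in range(3)]   # stand-in txids
    # A block listing [a, b, c] and a malleated block listing [a, b, c, c]
    # produce the same root, so the block hash cannot distinguish them.
    assert merkle_root(list(txids)) == merkle_root(txids + [txids[-1]])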
- # - tip(60) - b61 = block(61, spend=out[18]) - b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases - b61.vtx[0].rehash() - b61 = update_block(61, []) - assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) - yield rejected(RejectResult(16, b'bad-txns-BIP30')) - - - # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) - # - # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> b62 (18) - # - tip(60) - b62 = block(62) - tx = CTransaction() - tx.nLockTime = 0xffffffff #this locktime is non-final - assert(out[18].n < len(out[18].tx.vout)) - tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence - tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) - assert(tx.vin[0].nSequence < 0xffffffff) - tx.calc_sha256() - b62 = update_block(62, [tx]) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - - - # Test a non-final coinbase is also rejected - # - # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> b63 (-) - # - tip(60) - b63 = block(63) - b63.vtx[0].nLockTime = 0xffffffff - b63.vtx[0].vin[0].nSequence = 0xDEADBEEF - b63.vtx[0].rehash() - b63 = update_block(63, []) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - - - # This checks that a block with a bloated VARINT between the block_header and the array of tx such that - # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint, - # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not - # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. - # - # What matters is that the receiving node should not reject the bloated block, and then reject the canonical - # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
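Concretely, the "bloated varint" is a non-canonical CompactSize encoding of the block's transaction count, which is why the b64a assertion below expects exactly 8 extra bytes. A sketch of the two encodings (canonical helper mirrors Bitcoin's CompactSize rules; the bloated form matches what CBrokenBlock writes):

    import struct

    def ser_compact_size(n):
        # Canonical CompactSize: use the shortest form that fits.
        if n < 253:
            return struct.pack("B", n)
        elif n <= 0xffff:
            return b'\xfd' + struct.pack("<H", n)
        elif n <= 0xffffffff:
            return b'\xfe' + struct.pack("<I", n)
        return b'\xff' + struct.pack("<Q", n)

    def bloated_compact_size(n):
        # Non-canonical: force the 9-byte 0xff form even for tiny values.
        return b'\xff' + struct.pack("<Q", n)

    assert len(ser_compact_size(2)) == 1
    assert len(bloated_compact_size(2)) == 9   # 8 bytes larger on the wire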
- # - # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) - # \ - # b64a (18) - # b64a is a bloated block (non-canonical varint) - # b64 is a good block (same as b64 but w/ canonical varint) - # - tip(60) - regular_block = block("64a", spend=out[18]) - - # make it a "broken_block," with non-canonical serialization - b64a = CBrokenBlock(regular_block) - b64a.initialize(regular_block) - self.blocks["64a"] = b64a - self.tip = b64a - tx = CTransaction() - - # use canonical serialization to calculate size - script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) - tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) - b64a = update_block("64a", [tx]) - assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8) - yield TestInstance([[self.tip, None]]) - - # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore - self.test.block_store.erase(b64a.sha256) - - tip(60) - b64 = CBlock(b64a) - b64.vtx = copy.deepcopy(b64a.vtx) - assert_equal(b64.hash, b64a.hash) - assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE) - self.blocks[64] = b64 - update_block(64, []) - yield accepted() - save_spendable_output() - - # Spend an output created in the block itself - # - # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) - # - tip(64) - block(65) - tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 0) - update_block(65, [tx1, tx2]) - yield accepted() - save_spendable_output() - - # Attempt to spend an output created later in the same block - # - # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) - # \-> b66 (20) - tip(65) - block(66) - tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 1) - update_block(66, [tx2, tx1]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # Attempt to double-spend a transaction created in a block - # - # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) - # \-> b67 (20) - # - # - tip(65) - block(67) - tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) - tx2 = create_and_sign_tx(tx1, 0, 1) - tx3 = create_and_sign_tx(tx1, 0, 2) - update_block(67, [tx1, tx2, tx3]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # More tests of block subsidy - # - # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) - # \-> b68 (20) - # - # b68 - coinbase with an extra 10 satoshis, - # creates a tx that has 9 satoshis from out[20] go to fees - # this fails because the coinbase is trying to claim 1 satoshi too much in fees - # - # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee - # this succeeds - # - tip(65) - block(68, additional_coinbase_value=10) - tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9) - update_block(68, [tx]) - yield rejected(RejectResult(16, b'bad-cb-amount')) - - tip(65) - b69 = block(69, additional_coinbase_value=10) - tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10) - update_block(69, [tx]) - yield accepted() - save_spendable_output() - - # Test spending the outpoint of a non-existent transaction - # - # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 
(18) -> b65 (19) -> b69 (20) - # \-> b70 (21) - # - tip(69) - block(70, spend=out[21]) - bogus_tx = CTransaction() - bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) - tx.vout.append(CTxOut(1, b"")) - update_block(70, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - - # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) - # - # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) - # \-> b71 (21) - # - # b72 is a good block. - # b71 is a copy of b72, but re-adds one of b72's transactions. Due to merkle-root duplication it has the same hash as b72. - # - tip(69) - b72 = block(72) - tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) - tx2 = create_and_sign_tx(tx1, 0, 1) - b72 = update_block(72, [tx1, tx2]) # now tip is 72 - b71 = copy.deepcopy(b72) - b71.vtx.append(tx2) # add duplicate tx2 - self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69 - self.blocks[71] = b71 - - assert_equal(len(b71.vtx), 4) - assert_equal(len(b72.vtx), 3) - assert_equal(b72.sha256, b71.sha256) - - tip(71) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - tip(72) - yield accepted() - save_spendable_output() - - - # Test some invalid scripts and MAX_BLOCK_SIGOPS - # - # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) - # \-> b** (22) - # - - # b73 - tx with excessive sigops that are placed after an excessively large script element. - # The purpose of the test is to make sure those sigops are counted. - # - # script is a bytearray of size 20,526 - # - # bytearray[0-19,998] : OP_CHECKSIG - # bytearray[19,999] : OP_PUSHDATA4 - # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format) - # bytearray[20,004-20,524]: unread data (script_element) - # bytearray[20,525] : OP_CHECKSIG (this puts us over the limit) - # - tip(72) - b73 = block(73) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1 - a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4 - - element_size = MAX_SCRIPT_ELEMENT_SIZE + 1 - a[MAX_BLOCK_SIGOPS] = element_size % 256 - a[MAX_BLOCK_SIGOPS+1] = element_size // 256 - a[MAX_BLOCK_SIGOPS+2] = 0 - a[MAX_BLOCK_SIGOPS+3] = 0 - - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b73 = update_block(73, [tx]) - assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - # b74/75 - if we push an invalid script element, all previous sigops are counted, - # but sigops after the element are not counted. - # - # The invalid script element is one whose push_data length field claims that a - # very large amount of data follows (0xfffffffe bytes for b74, 0xffffffff for b75), - # while we only provide a much smaller number. These bytes are CHECKSIGs, so they - # would cause b75 to fail for excessive sigops, if those bytes were counted. 
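A sketch of push-aware sigop scanning under the rule just described. This is a simplification for illustration, not the framework's get_legacy_sigopcount: it counts only OP_CHECKSIG and ignores the flat-20 multisig weighting:

    OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4 = 0x4c, 0x4d, 0x4e
    OP_CHECKSIG = 0xac

    def count_sigops_push_aware(script):
        # Walk opcodes, skipping pushed data. If a push claims more bytes
        # than remain, scanning stops there, which is why the CHECKSIG bytes
        # hidden behind the bogus PUSHDATA4 in b74/b75 are never counted.
        i, count = 0, 0
        while i < len(script):
            op = script[i]; i += 1
            if op < OP_PUSHDATA1:
                i += op                       # direct push of `op` bytes
            elif op == OP_PUSHDATA1:
                if i + 1 > len(script): break
                i += 1 + script[i]
            elif op == OP_PUSHDATA2:
                if i + 2 > len(script): break
                i += 2 + int.from_bytes(script[i:i+2], 'little')
            elif op == OP_PUSHDATA4:
                if i + 4 > len(script): break
                i += 4 + int.from_bytes(script[i:i+4], 'little')
            elif op == OP_CHECKSIG:
                count += 1
        return count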
- # - # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element - # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element - # - # - tip(72) - b74 = block(74) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 - a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS] = 0x4e - a[MAX_BLOCK_SIGOPS+1] = 0xfe - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - a[MAX_BLOCK_SIGOPS+4] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b74 = update_block(74, [tx]) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - tip(72) - b75 = block(75) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 - a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e - a[MAX_BLOCK_SIGOPS] = 0xff - a[MAX_BLOCK_SIGOPS+1] = 0xff - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b75 = update_block(75, [tx]) - yield accepted() - save_spendable_output() - - # Check that if we push an element filled with CHECKSIGs, they are not counted - tip(75) - b76 = block(76) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 - a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs - tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) - b76 = update_block(76, [tx]) - yield accepted() - save_spendable_output() - - # Test transaction resurrection - # - # -> b77 (24) -> b78 (25) -> b79 (26) - # \-> b80 (25) -> b81 (26) -> b82 (27) - # - # b78 creates a tx, which is spent in b79. After b82, both should be in mempool - # - # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the - # rather obscure reason that the Python signature code does not distinguish between - # Low-S and High-S values (whereas the bitcoin code has custom code which does so); - # as a result of which, the odds are 50% that the python code will use the right - # value and the transaction will be accepted into the mempool. Until we modify the - # test framework to support low-S signing, we are out of luck. - # - # To get around this issue, we construct transactions which are not signed and which - # spend to OP_TRUE. If the standard-ness rules change, this test would need to be - # updated. (Perhaps to spend to a P2SH OP_TRUE script) - # - tip(76) - block(77) - tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN) - update_block(77, [tx77]) - yield accepted() - save_spendable_output() - - block(78) - tx78 = create_tx(tx77, 0, 9*COIN) - update_block(78, [tx78]) - yield accepted() - - block(79) - tx79 = create_tx(tx78, 0, 8*COIN) - update_block(79, [tx79]) - yield accepted() - - # mempool should be empty - assert_equal(len(self.nodes[0].getrawmempool()), 0) - - tip(77) - block(80, spend=out[25]) - yield rejected() - save_spendable_output() - - block(81, spend=out[26]) - yield rejected() # other chain is same length - save_spendable_output() - - block(82, spend=out[27]) - yield accepted() # now this chain is longer, triggers re-org - save_spendable_output() - - # now check that tx78 and tx79 have been put back into the peer's mempool - mempool = self.nodes[0].getrawmempool() - assert_equal(len(mempool), 2) - assert(tx78.hash in mempool) - assert(tx79.hash in mempool) - - - # Test invalid opcodes in dead execution paths. 
- # - # -> b81 (26) -> b82 (27) -> b83 (28) - # - block(83) - op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] - script = CScript(op_codes) - tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) - - tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) - tx2.vin[0].scriptSig = CScript([OP_FALSE]) - tx2.rehash() - - update_block(83, [tx1, tx2]) - yield accepted() - save_spendable_output() - - - # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) - # - # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) - # \-> b85 (29) -> b86 (30) \-> b89a (32) - # - # - block(84) - tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.calc_sha256() - self.sign_tx(tx1, out[29].tx, out[29].n) - tx1.rehash() - tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) - tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) - tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) - tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) - - update_block(84, [tx1,tx2,tx3,tx4,tx5]) - yield accepted() - save_spendable_output() - - tip(83) - block(85, spend=out[29]) - yield rejected() - - block(86, spend=out[30]) - yield accepted() - - tip(84) - block(87, spend=out[30]) - yield rejected() - save_spendable_output() - - block(88, spend=out[31]) - yield accepted() - save_spendable_output() - - # trying to spend the OP_RETURN output is rejected - block("89a", spend=out[32]) - tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) - update_block("89a", [tx]) - yield rejected() - - - # Test re-org of a week's worth of blocks (1088 blocks) - # This test takes a minute or two and can be accomplished in memory - # - if self.options.runbarelyexpensive: - tip(88) - LARGE_REORG_SIZE = 1088 - test1 = TestInstance(sync_every_block=False) - spend=out[32] - for i in range(89, LARGE_REORG_SIZE + 89): - b = block(i, spend) - tx = CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) - tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) - b = update_block(i, [tx]) - assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) - test1.blocks_and_transactions.append([self.tip, True]) - save_spendable_output() - spend = get_spendable_output() - - yield test1 - chain1_tip = i - - # now create alt chain of same length - tip(88) - test2 = TestInstance(sync_every_block=False) - for i in range(89, LARGE_REORG_SIZE + 89): - block("alt"+str(i)) - test2.blocks_and_transactions.append([self.tip, False]) - yield test2 - - # extend alt chain to trigger re-org - block("alt" + str(chain1_tip + 1)) - yield accepted() - - # ... 
and re-org back to the first chain - tip(chain1_tip) - block(chain1_tip + 1) - yield rejected() - block(chain1_tip + 2) - yield accepted() - - chain1_tip += 2 - - - -if __name__ == '__main__': - FullBlockTest().main() diff --git a/test/functional/p2p-versionbits-warning.py b/test/functional/p2p-versionbits-warning.py deleted file mode 100755 index 0bfe94622f..0000000000 --- a/test/functional/p2p-versionbits-warning.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test version bits warning system. - -Generate chains with block versions that appear to be signalling unknown -soft-forks, and test that warning alerts are generated. -""" - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -import re -from test_framework.blocktools import create_block, create_coinbase - -VB_PERIOD = 144 # versionbits period length for regtest -VB_THRESHOLD = 108 # versionbits activation threshold for regtest -VB_TOP_BITS = 0x20000000 -VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment - -WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect" -WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT) -VB_PATTERN = re.compile("^Warning.*versionbit") - -class TestNode(P2PInterface): - def on_inv(self, message): - pass - -class VersionBitsWarningTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def setup_network(self): - self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") - # Open and close to create zero-length file - with open(self.alert_filename, 'w', encoding='utf8') as _: - pass - self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]] - self.setup_nodes() - - # Send numblocks blocks via peer with nVersionToUse set. - def send_blocks_with_version(self, peer, numblocks, nVersionToUse): - tip = self.nodes[0].getbestblockhash() - height = self.nodes[0].getblockcount() - block_time = self.nodes[0].getblockheader(tip)["time"]+1 - tip = int(tip, 16) - - for _ in range(numblocks): - block = create_block(tip, create_coinbase(height+1), block_time) - block.nVersion = nVersionToUse - block.solve() - peer.send_message(msg_block(block)) - block_time += 1 - height += 1 - tip = block.sha256 - peer.sync_with_ping() - - def test_versionbits_in_alert_file(self): - with open(self.alert_filename, 'r', encoding='utf8') as f: - alert_text = f.read() - assert(VB_PATTERN.match(alert_text)) - - def run_test(self): - # Setup the p2p connection and start up the network thread. - self.nodes[0].add_p2p_connection(TestNode()) - - network_thread_start() - - # Test logic begins here - self.nodes[0].p2p.wait_for_verack() - - # 1. Have the node mine one period worth of blocks - self.nodes[0].generate(VB_PERIOD) - - # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD - # blocks signaling some unknown bit. 
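For reference, the signaling check that the node applies to these block versions can be sketched as follows (BIP9 semantics; the helper name is illustrative, and the constants match those defined at the top of this file):

    VB_TOP_BITS = 0x20000000
    VB_UNKNOWN_BIT = 27

    def signals_bit(nVersion, bit):
        # BIP9: the top three version bits must be 001, and the deployment's
        # bit must be set, for a block to count toward the threshold.
        return (nVersion & 0xE0000000) == VB_TOP_BITS and bool(nVersion & (1 << bit))

    assert signals_bit(VB_TOP_BITS | (1 << VB_UNKNOWN_BIT), VB_UNKNOWN_BIT)
    assert not signals_bit(0x1fffffff, VB_UNKNOWN_BIT)   # old-style version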
- nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT) - self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD-1, nVersion) - - # Fill rest of period with regular version blocks - self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1) - # Check that we're not getting any versionbit-related errors in - # get*info() - assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["warnings"])) - assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"])) - - # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling - # some unknown bit - self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD, nVersion) - self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD) - # Might not get a versionbits-related alert yet, as we should - # have gotten a different alert due to more than 51/100 blocks - # being of unexpected version. - # Check that get*info() shows some kind of error. - assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["warnings"]) - assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"]) - - # Mine a period worth of expected blocks so the generic block-version warning - # is cleared, and restart the node. This should move the versionbit state - # to ACTIVE. - self.nodes[0].generate(VB_PERIOD) - self.stop_nodes() - # Empty out the alert file - with open(self.alert_filename, 'w', encoding='utf8') as _: - pass - self.start_nodes() - - # Connecting one block should be enough to generate an error. - self.nodes[0].generate(1) - assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["warnings"]) - assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"]) - self.stop_nodes() - self.test_versionbits_in_alert_file() - - # Test framework expects the node to still be running... - self.start_nodes() - -if __name__ == '__main__': - VersionBitsWarningTest().main() diff --git a/test/functional/proxy_test.py b/test/functional/proxy_test.py deleted file mode 100755 index 2eb1be47a5..0000000000 --- a/test/functional/proxy_test.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test bitcoind with different proxy configuration. - -Test plan: -- Start bitcoinds with different proxy configurations -- Use addnode to initiate connections -- Verify that proxies are connected to, and the right connection command is given -- Proxy configurations to test on bitcoind side: - - `-proxy` (proxy everything) - - `-onion` (proxy just onions) - - `-proxyrandomize` Circuit randomization -- Proxy configurations to test on proxy side: - - support no authentication (other proxy) - - support no authentication + user/pass authentication (Tor) - - proxy on IPv6 - -- Create various proxies (as threads) -- Create bitcoinds that connect to them -- Manipulate the bitcoinds using addnode (onetry) and observe effects - -addnode connect to IPv4 -addnode connect to IPv6 -addnode connect to onion -addnode connect to generic DNS name -""" - -import socket -import os - -from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - PORT_MIN, - PORT_RANGE, - assert_equal, -) -from test_framework.netutil import test_ipv6_local - -RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports - -class ProxyTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - - def setup_nodes(self): - self.have_ipv6 = test_ipv6_local() - # Create two proxies on different ports - # ... one unauthenticated - self.conf1 = Socks5Configuration() - self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) - self.conf1.unauth = True - self.conf1.auth = False - # ... 
one supporting authenticated and unauthenticated (Tor) - self.conf2 = Socks5Configuration() - self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) - self.conf2.unauth = True - self.conf2.auth = True - if self.have_ipv6: - # ... one on IPv6 with similar configuration - self.conf3 = Socks5Configuration() - self.conf3.af = socket.AF_INET6 - self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) - self.conf3.unauth = True - self.conf3.auth = True - else: - self.log.warning("Testing without local IPv6 support") - - self.serv1 = Socks5Server(self.conf1) - self.serv1.start() - self.serv2 = Socks5Server(self.conf2) - self.serv2.start() - if self.have_ipv6: - self.serv3 = Socks5Server(self.conf3) - self.serv3.start() - - # Note: proxies are not used to connect to local nodes - # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost - args = [ - ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], - ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], - ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], - [] - ] - if self.have_ipv6: - args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion'] - self.add_nodes(self.num_nodes, extra_args=args) - self.start_nodes() - - def node_test(self, node, proxies, auth, test_onion=True): - rv = [] - # Test: outgoing IPv4 connection through node - node.addnode("15.61.23.23:1234", "onetry") - cmd = proxies[0].queue.get() - assert(isinstance(cmd, Socks5Command)) - # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"15.61.23.23") - assert_equal(cmd.port, 1234) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - if self.have_ipv6: - # Test: outgoing IPv6 connection through node - node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") - cmd = proxies[1].queue.get() - assert(isinstance(cmd, Socks5Command)) - # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") - assert_equal(cmd.port, 5443) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - if test_onion: - # Test: outgoing onion connection through node - node.addnode("bitcoinostk4e4re.onion:8333", "onetry") - cmd = proxies[2].queue.get() - assert(isinstance(cmd, Socks5Command)) - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") - assert_equal(cmd.port, 8333) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - # Test: outgoing DNS name connection through node - node.addnode("node.noumenon:8333", "onetry") - cmd = proxies[3].queue.get() - assert(isinstance(cmd, Socks5Command)) - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"node.noumenon") - assert_equal(cmd.port, 8333) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - return rv - - def run_test(self): - # basic -proxy - self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) - - # -proxy plus -onion - 
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) - - # -proxy plus -onion, -proxyrandomize - rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) - # Check that credentials as used for -proxyrandomize connections are unique - credentials = set((x.username,x.password) for x in rv) - assert_equal(len(credentials), len(rv)) - - if self.have_ipv6: - # proxy on IPv6 localhost - self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) - - def networks_dict(d): - r = {} - for x in d['networks']: - r[x['name']] = x - return r - - # test RPC getnetworkinfo - n0 = networks_dict(self.nodes[0].getnetworkinfo()) - for net in ['ipv4','ipv6','onion']: - assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) - assert_equal(n0[net]['proxy_randomize_credentials'], True) - assert_equal(n0['onion']['reachable'], True) - - n1 = networks_dict(self.nodes[1].getnetworkinfo()) - for net in ['ipv4','ipv6']: - assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) - assert_equal(n1[net]['proxy_randomize_credentials'], False) - assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) - assert_equal(n1['onion']['proxy_randomize_credentials'], False) - assert_equal(n1['onion']['reachable'], True) - - n2 = networks_dict(self.nodes[2].getnetworkinfo()) - for net in ['ipv4','ipv6','onion']: - assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) - assert_equal(n2[net]['proxy_randomize_credentials'], True) - assert_equal(n2['onion']['reachable'], True) - - if self.have_ipv6: - n3 = networks_dict(self.nodes[3].getnetworkinfo()) - for net in ['ipv4','ipv6']: - assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) - assert_equal(n3[net]['proxy_randomize_credentials'], False) - assert_equal(n3['onion']['reachable'], False) - -if __name__ == '__main__': - ProxyTest().main() - diff --git a/test/functional/pruning.py b/test/functional/pruning.py deleted file mode 100755 index 49ad7f838c..0000000000 --- a/test/functional/pruning.py +++ /dev/null @@ -1,454 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the pruning code. - -WARNING: -This test uses 4GB of disk space. -This test takes 30 mins or more (up to 2 hours) -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -import time -import os - -MIN_BLOCKS_TO_KEEP = 288 - -# Rescans start at the earliest block up to 2 hours before a key timestamp, so -# the manual prune RPC avoids pruning blocks in the same window to be -# compatible with pruning based on key creation time. -TIMESTAMP_WINDOW = 2 * 60 * 60 - - -def calc_usage(blockdir): - return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) - -class PruneTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 6 - - # Create nodes 0 and 1 to mine. - # Create node 2 to test pruning. 
- self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ] - # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) - # Create nodes 5 to test wallet in prune mode, but do not connect - self.extra_args = [self.full_node_default_args, - self.full_node_default_args, - ["-maxreceivebuffer=20000", "-prune=550"], - ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], - ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], - ["-prune=550"]] - - def setup_network(self): - self.setup_nodes() - - self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/" - - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[2], 0) - connect_nodes(self.nodes[0], 3) - connect_nodes(self.nodes[0], 4) - sync_blocks(self.nodes[0:5]) - - def setup_nodes(self): - self.add_nodes(self.num_nodes, self.extra_args, timewait=900) - self.start_nodes() - - def create_big_chain(self): - # Start by creating some coinbases we can spend later - self.nodes[1].generate(200) - sync_blocks(self.nodes[0:2]) - self.nodes[0].generate(150) - # Then mine enough full blocks to create more than 550MiB of data - for i in range(645): - mine_large_block(self.nodes[0], self.utxo_cache_0) - - sync_blocks(self.nodes[0:5]) - - def test_height_min(self): - if not os.path.isfile(self.prunedir+"blk00000.dat"): - raise AssertionError("blk00000.dat is missing, pruning too early") - self.log.info("Success") - self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir)) - self.log.info("Mining 25 more blocks should cause the first block file to be pruned") - # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this - for i in range(25): - mine_large_block(self.nodes[0], self.utxo_cache_0) - - waitstart = time.time() - while os.path.isfile(self.prunedir+"blk00000.dat"): - time.sleep(0.1) - if time.time() - waitstart > 30: - raise AssertionError("blk00000.dat not pruned when it should be") - - self.log.info("Success") - usage = calc_usage(self.prunedir) - self.log.info("Usage should be below target: %d" % usage) - if (usage > 550): - raise AssertionError("Pruning target not being met") - - def create_chain_with_staleblocks(self): - # Create stale blocks in manageable sized chunks - self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") - - for j in range(12): - # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain - # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects - # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine - self.stop_node(0) - self.start_node(0, extra_args=self.full_node_default_args) - # Mine 24 blocks in node 1 - for i in range(24): - if j == 0: - mine_large_block(self.nodes[1], self.utxo_cache_1) - else: - # Add node1's wallet transactions back to the mempool, to - # avoid the mined blocks from being too small. 
-                    self.nodes[1].resendwallettransactions()
-                    self.nodes[1].generate(1) # txs already in the mempool from previous disconnects
-
-            # Reorg back with 25 block chain from node 0
-            for i in range(25):
-                mine_large_block(self.nodes[0], self.utxo_cache_0)
-
-            # Create connections in this order so both nodes can see the reorg at the same time
-            connect_nodes(self.nodes[1], 0)
-            connect_nodes(self.nodes[2], 0)
-            sync_blocks(self.nodes[0:3])
-
-        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
-
-    def reorg_test(self):
-        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip.
-        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain.
-        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster).
-        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks).
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
-
-        height = self.nodes[1].getblockcount()
-        self.log.info("Current block height: %d" % height)
-
-        invalidheight = height - 287
-        badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
-        self.nodes[1].invalidateblock(badhash)
-
-        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want.
-        # So invalidate that fork as well, until we're on the same chain as nodes 0/2 (but at an ancestor 288 blocks ago).
-        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
-        curhash = self.nodes[1].getblockhash(invalidheight - 1)
-        while curhash != mainchainhash:
-            self.nodes[1].invalidateblock(curhash)
-            curhash = self.nodes[1].getblockhash(invalidheight - 1)
-
-        assert(self.nodes[1].getblockcount() == invalidheight - 1)
-        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
-
-        # Reboot node 1 to clear those giant txs from its mempool
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
-
-        self.log.info("Generating new longer chain of 300 more blocks")
-        self.nodes[1].generate(300)
-
-        self.log.info("Reconnect nodes")
-        connect_nodes(self.nodes[0], 1)
-        connect_nodes(self.nodes[2], 1)
-        sync_blocks(self.nodes[0:3], timeout=120)
-
-        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
-
-        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
-
-        # Get node 0's wallet transactions back into its mempool, so that the
-        # mined blocks aren't too small.
-        self.nodes[0].resendwallettransactions()
-
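
# Splitting one big generate(220) into 22 calls of generate(10), as in the loop
# below, keeps each individual RPC comfortably under the client timeout. The
# batching logic in isolation (node.generate in the trailing comment is the
# test node's own RPC, shown only as usage):
def batched(total, batch_size):
    """Yield chunk sizes that sum to total, each at most batch_size."""
    while total > 0:
        step = min(batch_size, total)
        yield step
        total -= step

assert list(batched(220, 10)) == [10] * 22
# for n in batched(220, 10): node.generate(n)
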
-        for i in range(22):
-            # This can be slow, so do this in multiple RPC calls to avoid
-            # RPC timeouts.
-            self.nodes[0].generate(10) # node 0 has many large txs in its mempool from the disconnects
-        sync_blocks(self.nodes[0:3], timeout=300)
-
-        usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
-
-        return invalidheight, badhash
-
-    def reorg_back(self):
-        # Verify that a block on the old main chain fork has been pruned away
-        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
-        self.log.info("Will need to redownload block %d" % self.forkheight)
-
-        # Verify that we have enough history to reorg back to the fork point.
-        # Although this is more than 288 blocks, because this chain was written more recently
-        # and only its other 299 small and 220 large blocks are in the block files after it,
-        # it's expected to still be retained.
-        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
-
-        first_reorg_height = self.nodes[2].getblockcount()
-        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
-        self.nodes[2].invalidateblock(curchainhash)
-        goalbestheight = self.mainchainheight
-        goalbesthash = self.mainchainhash2
-
-        # As of 0.10, the current block download logic is not able to reorg to the original chain created in
-        # create_chain_with_staleblocks() because it doesn't know of any peer that's on that chain from which to
-        # redownload its missing blocks.
-        # Invalidate the reorg_test chain in node 0 as well; it can successfully switch to the original chain
-        # because it has all the block data.
-        # However, it must mine enough blocks to have a more-work chain than the reorg_test chain in order
-        # to trigger node 2's block download logic.
-        # At this point node 2 is within 288 blocks of the fork point, so it will preserve its ability to reorg.
-        if self.nodes[2].getblockcount() < self.mainchainheight:
-            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
-            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. 
Blocks needed: %d" % blocks_to_mine) - self.nodes[0].invalidateblock(curchainhash) - assert(self.nodes[0].getblockcount() == self.mainchainheight) - assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) - goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] - goalbestheight = first_reorg_height + 1 - - self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") - waitstart = time.time() - while self.nodes[2].getblockcount() < goalbestheight: - time.sleep(0.1) - if time.time() - waitstart > 900: - raise AssertionError("Node 2 didn't reorg to proper height") - assert(self.nodes[2].getbestblockhash() == goalbesthash) - # Verify we can now have the data for a block previously pruned - assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) - - def manual_test(self, node_number, use_timestamp): - # at this point, node has 995 blocks and has not yet run in prune mode - self.start_node(node_number) - node = self.nodes[node_number] - assert_equal(node.getblockcount(), 995) - assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) - - # now re-start in manual pruning mode - self.stop_node(node_number) - self.start_node(node_number, extra_args=["-prune=1"]) - node = self.nodes[node_number] - assert_equal(node.getblockcount(), 995) - - def height(index): - if use_timestamp: - return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW - else: - return index - - def prune(index, expected_ret=None): - ret = node.pruneblockchain(height(index)) - # Check the return value. When use_timestamp is True, just check - # that the return value is less than or equal to the expected - # value, because when more than one block is generated per second, - # a timestamp will not be granular enough to uniquely identify an - # individual block. - if expected_ret is None: - expected_ret = index - if use_timestamp: - assert_greater_than(ret, 0) - assert_greater_than(expected_ret + 1, ret) - else: - assert_equal(ret, expected_ret) - - def has_block(index): - return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) - - # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000) - assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) - - # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) - node.generate(6) - assert_equal(node.getblockchaininfo()["blocks"], 1001) - - # negative heights should raise an exception - assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) - - # height=100 too low to prune first block file so this is a no-op - prune(100) - if not has_block(0): - raise AssertionError("blk00000.dat is missing when should still be there") - - # Does nothing - node.pruneblockchain(height(0)) - if not has_block(0): - raise AssertionError("blk00000.dat is missing when should still be there") - - # height=500 should prune first file - prune(500) - if has_block(0): - raise AssertionError("blk00000.dat is still there, should be pruned by now") - if not has_block(1): - raise AssertionError("blk00001.dat is missing when should still be there") - - # height=650 should prune second file - prune(650) - if has_block(1): - raise AssertionError("blk00001.dat is still there, should be pruned by now") - - # height=1000 should not prune anything more, because tip-288 is in blk00002.dat. 
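
# Worked numbers for the prune() call below: with the tip at height 1001 and
# MIN_BLOCKS_TO_KEEP = 288, the node refuses to prune past 1001 - 288 = 713,
# whatever larger target is requested. A sketch of that clamp (actual file
# removal happens at block-file granularity, which this ignores):
def max_prunable_height(tip_height, requested_height, min_blocks_to_keep=288):
    return min(requested_height, tip_height - min_blocks_to_keep)

assert max_prunable_height(1001, 1000) == 713  # == 1001 - MIN_BLOCKS_TO_KEEP
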
- prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) - if not has_block(2): - raise AssertionError("blk00002.dat is still there, should be pruned by now") - - # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat) - node.generate(288) - prune(1000) - if has_block(2): - raise AssertionError("blk00002.dat is still there, should be pruned by now") - if has_block(3): - raise AssertionError("blk00003.dat is still there, should be pruned by now") - - # stop node, start back up with auto-prune at 550MB, make sure still runs - self.stop_node(node_number) - self.start_node(node_number, extra_args=["-prune=550"]) - - self.log.info("Success") - - def wallet_test(self): - # check that the pruning node's wallet is still in good shape - self.log.info("Stop and start pruning node to trigger wallet rescan") - self.stop_node(2) - self.start_node(2, extra_args=["-prune=550"]) - self.log.info("Success") - - # check that wallet loads successfully when restarting a pruned node after IBD. - # this was reported to fail in #7494. - self.log.info("Syncing node 5 to test wallet") - connect_nodes(self.nodes[0], 5) - nds = [self.nodes[0], self.nodes[5]] - sync_blocks(nds, wait=5, timeout=300) - self.stop_node(5) #stop and start to trigger rescan - self.start_node(5, extra_args=["-prune=550"]) - self.log.info("Success") - - def run_test(self): - self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") - self.log.info("Mining a big blockchain of 995 blocks") - - # Determine default relay fee - self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] - - # Cache for utxos, as the listunspent may take a long time later in the test - self.utxo_cache_0 = [] - self.utxo_cache_1 = [] - - self.create_big_chain() - # Chain diagram key: - # * blocks on main chain - # +,&,$,@ blocks on other forks - # X invalidated block - # N1 Node 1 - # - # Start by mining a simple chain that all nodes have - # N0=N1=N2 **...*(995) - - # stop manual-pruning node with 995 blocks - self.stop_node(3) - self.stop_node(4) - - self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight") - self.test_height_min() - # Extend this chain past the PruneAfterHeight - # N0=N1=N2 **...*(1020) - - self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate") - self.create_chain_with_staleblocks() - # Disconnect N0 - # And mine a 24 block chain on N1 and a separate 25 block chain on N0 - # N1=N2 **...*+...+(1044) - # N0 **...**...**(1045) - # - # reconnect nodes causing reorg on N1 and N2 - # N1=N2 **...*(1020) *...**(1045) - # \ - # +...+(1044) - # - # repeat this process until you have 12 stale forks hanging off the - # main chain on N1 and N2 - # N0 *************************...***************************(1320) - # - # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) - # \ \ \ - # +...+(1044) &.. $...$(1319) - - # Save some current chain state for later use - self.mainchainheight = self.nodes[2].getblockcount() #1320 - self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) - - self.log.info("Check that we can survive a 288 block reorg still") - (self.forkheight,self.forkhash) = self.reorg_test() #(1033, ) - # Now create a 288 block reorg by mining a longer chain on N1 - # First disconnect N1 - # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain - # N1 **...*(1020) **...**(1032)X.. - # \ - # ++...+(1031)X.. 
- # - # Now mine 300 more blocks on N1 - # N1 **...*(1020) **...**(1032) @@...@(1332) - # \ \ - # \ X... - # \ \ - # ++...+(1031)X.. .. - # - # Reconnect nodes and mine 220 more blocks on N1 - # N1 **...*(1020) **...**(1032) @@...@@@(1552) - # \ \ - # \ X... - # \ \ - # ++...+(1031)X.. .. - # - # N2 **...*(1020) **...**(1032) @@...@@@(1552) - # \ \ - # \ *...**(1320) - # \ \ - # ++...++(1044) .. - # - # N0 ********************(1032) @@...@@@(1552) - # \ - # *...**(1320) - - self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg") - self.reorg_back() - # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) - # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to - # original main chain (*), but will require redownload of some blocks - # In order to have a peer we think we can download from, must also perform this invalidation - # on N0 and mine a new longest chain to trigger. - # Final result: - # N0 ********************(1032) **...****(1553) - # \ - # X@...@@@(1552) - # - # N2 **...*(1020) **...**(1032) **...****(1553) - # \ \ - # \ X@...@@@(1552) - # \ - # +.. - # - # N1 doesn't change because 1033 on main chain (*) is invalid - - self.log.info("Test manual pruning with block indices") - self.manual_test(3, use_timestamp=False) - - self.log.info("Test manual pruning with timestamps") - self.manual_test(4, use_timestamp=True) - - self.log.info("Test wallet re-scan") - self.wallet_test() - - self.log.info("Done") - -if __name__ == '__main__': - PruneTest().main() diff --git a/test/functional/reindex.py b/test/functional/reindex.py deleted file mode 100755 index ac67e6e9ba..0000000000 --- a/test/functional/reindex.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test running bitcoind with -reindex and -reindex-chainstate options. - -- Start a single node and generate 3 blocks. -- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3. -- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3. 
-""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal -import time - -class ReindexTest(BitcoinTestFramework): - - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def reindex(self, justchainstate=False): - self.nodes[0].generate(3) - blockcount = self.nodes[0].getblockcount() - self.stop_nodes() - extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]] - self.start_nodes(extra_args) - while self.nodes[0].getblockcount() < blockcount: - time.sleep(0.1) - assert_equal(self.nodes[0].getblockcount(), blockcount) - self.log.info("Success") - - def run_test(self): - self.reindex(False) - self.reindex(True) - self.reindex(False) - self.reindex(True) - -if __name__ == '__main__': - ReindexTest().main() diff --git a/test/functional/replace-by-fee.py b/test/functional/replace-by-fee.py deleted file mode 100755 index 6b7ab0f43e..0000000000 --- a/test/functional/replace-by-fee.py +++ /dev/null @@ -1,565 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the RBF code.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.script import * -from test_framework.mininode import * - -MAX_REPLACEMENT_LIMIT = 100 - -def txToHex(tx): - return bytes_to_hex_str(tx.serialize()) - -def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): - """Create a txout with a given amount and scriptPubKey - - Mines coins as needed. - - confirmed - txouts created will be confirmed in the blockchain; - unconfirmed otherwise. - """ - fee = 1*COIN - while node.getbalance() < satoshi_round((amount + fee)/COIN): - node.generate(100) - - new_addr = node.getnewaddress() - txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) - tx1 = node.getrawtransaction(txid, 1) - txid = int(txid, 16) - i = None - - for i, txout in enumerate(tx1['vout']): - if txout['scriptPubKey']['addresses'] == [new_addr]: - break - assert i is not None - - tx2 = CTransaction() - tx2.vin = [CTxIn(COutPoint(txid, i))] - tx2.vout = [CTxOut(amount, scriptPubKey)] - tx2.rehash() - - signed_tx = node.signrawtransaction(txToHex(tx2)) - - txid = node.sendrawtransaction(signed_tx['hex'], True) - - # If requested, ensure txouts are confirmed. - if confirmed: - mempool_size = len(node.getrawmempool()) - while mempool_size > 0: - node.generate(1) - new_size = len(node.getrawmempool()) - # Error out if we have something stuck in the mempool, as this - # would likely be a bug. 
- assert(new_size < mempool_size) - mempool_size = new_size - - return COutPoint(int(txid, 16), 0) - -class ReplaceByFeeTest(BitcoinTestFramework): - - def set_test_params(self): - self.num_nodes = 2 - self.extra_args= [["-maxorphantx=1000", - "-whitelist=127.0.0.1", - "-limitancestorcount=50", - "-limitancestorsize=101", - "-limitdescendantcount=200", - "-limitdescendantsize=101"], - ["-mempoolreplacement=0"]] - - def run_test(self): - # Leave IBD - self.nodes[0].generate(1) - - make_utxo(self.nodes[0], 1*COIN) - - # Ensure nodes are synced - self.sync_all() - - self.log.info("Running test simple doublespend...") - self.test_simple_doublespend() - - self.log.info("Running test doublespend chain...") - self.test_doublespend_chain() - - self.log.info("Running test doublespend tree...") - self.test_doublespend_tree() - - self.log.info("Running test replacement feeperkb...") - self.test_replacement_feeperkb() - - self.log.info("Running test spends of conflicting outputs...") - self.test_spends_of_conflicting_outputs() - - self.log.info("Running test new unconfirmed inputs...") - self.test_new_unconfirmed_inputs() - - self.log.info("Running test too many replacements...") - self.test_too_many_replacements() - - self.log.info("Running test opt-in...") - self.test_opt_in() - - self.log.info("Running test RPC...") - self.test_rpc() - - self.log.info("Running test prioritised transactions...") - self.test_prioritised_transactions() - - self.log.info("Passed") - - def test_simple_doublespend(self): - """Simple doublespend""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - # make_utxo may have generated a bunch of blocks, so we need to sync - # before we can spend the coins generated, or else the resulting - # transactions might not be accepted by our peers. 
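
# The replaceable transactions below opt in to BIP125 by setting nSequence=0 on
# their inputs: a transaction signals replaceability if any input's nSequence is
# below 0xfffffffe. That rule in isolation:
def signals_rbf(input_sequences):
    return any(seq < 0xfffffffe for seq in input_sequences)

assert signals_rbf([0])                        # explicit opt-in, as used below
assert not signals_rbf([0xffffffff])           # final, no opt-in
assert not signals_rbf([0xfffffffe])           # still no opt-in
assert signals_rbf([0xffffffff, 0xfffffffd])   # one opting input is enough
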
- self.sync_all() - - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - self.sync_all() - - # Should fail because we haven't changed the fee - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] - tx1b_hex = txToHex(tx1b) - - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) - # This will raise an exception due to transaction replacement being disabled - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) - - # Extra 0.1 BTC fee - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx1b_hex = txToHex(tx1b) - # Replacement still disabled even with "enough fee" - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) - # Works when enabled - tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) - - mempool = self.nodes[0].getrawmempool() - - assert (tx1a_txid not in mempool) - assert (tx1b_txid in mempool) - - assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) - - # Second node is running mempoolreplacement=0, will not replace originally-seen txn - mempool = self.nodes[1].getrawmempool() - assert tx1a_txid in mempool - assert tx1b_txid not in mempool - - def test_doublespend_chain(self): - """Doublespend of a long chain""" - - initial_nValue = 50*COIN - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - - prevout = tx0_outpoint - remaining_value = initial_nValue - chain_txids = [] - while remaining_value > 10*COIN: - remaining_value -= 1*COIN - tx = CTransaction() - tx.vin = [CTxIn(prevout, nSequence=0)] - tx.vout = [CTxOut(remaining_value, CScript([1]))] - tx_hex = txToHex(tx) - txid = self.nodes[0].sendrawtransaction(tx_hex, True) - chain_txids.append(txid) - prevout = COutPoint(int(txid, 16), 0) - - # Whether the double-spend is allowed is evaluated by including all - # child fees - 40 BTC - so this attempt is rejected. 
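
# Worked numbers for the rejection below, in whole BTC: the chain built above
# contains 40 transactions paying 1 BTC fee each, so a conflicting transaction
# must pay more than the 40 BTC it would evict (the bandwidth component of the
# rule is omitted in this sketch). The first attempt keeps 20 of the 50 BTC
# input, a 30 BTC fee: rejected. The second keeps 1 BTC (49 BTC fee): accepted.
def pays_for_evicted(replacement_fee, evicted_fees):
    return replacement_fee > evicted_fees

assert not pays_for_evicted(30, 40)
assert pays_for_evicted(49, 40)
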
- dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) - - # Accepted with sufficient fee - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - self.nodes[0].sendrawtransaction(dbl_tx_hex, True) - - mempool = self.nodes[0].getrawmempool() - for doublespent_txid in chain_txids: - assert(doublespent_txid not in mempool) - - def test_doublespend_tree(self): - """Doublespend of a big tree of transactions""" - - initial_nValue = 50*COIN - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - - def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): - if _total_txs is None: - _total_txs = [0] - if _total_txs[0] >= max_txs: - return - - txout_value = (initial_value - fee) // tree_width - if txout_value < fee: - return - - vout = [CTxOut(txout_value, CScript([i+1])) - for i in range(tree_width)] - tx = CTransaction() - tx.vin = [CTxIn(prevout, nSequence=0)] - tx.vout = vout - tx_hex = txToHex(tx) - - assert(len(tx.serialize()) < 100000) - txid = self.nodes[0].sendrawtransaction(tx_hex, True) - yield tx - _total_txs[0] += 1 - - txid = int(txid, 16) - - for i, txout in enumerate(tx.vout): - for x in branch(COutPoint(txid, i), txout_value, - max_txs, - tree_width=tree_width, fee=fee, - _total_txs=_total_txs): - yield x - - fee = int(0.0001*COIN) - n = MAX_REPLACEMENT_LIMIT - tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) - assert_equal(len(tree_txs), n) - - # Attempt double-spend, will fail because too little fee paid - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) - - # 1 BTC fee is enough - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - self.nodes[0].sendrawtransaction(dbl_tx_hex, True) - - mempool = self.nodes[0].getrawmempool() - - for tx in tree_txs: - tx.rehash() - assert (tx.hash not in mempool) - - # Try again, but with more total transactions than the "max txs - # double-spent at once" anti-DoS limit. 
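
# branch() above fans out tree_width children per transaction and stops once
# max_txs transactions exist. The same counting logic as a pure function, so the
# MAX_REPLACEMENT_LIMIT arithmetic can be checked without a node:
def count_tree_txs(initial_value, fee, tree_width, max_txs):
    count, level = 0, [initial_value]
    while level and count < max_txs:
        nxt = []
        for value in level:
            if count >= max_txs:
                break
            child_value = (value - fee) // tree_width
            if child_value < fee:
                continue
            count += 1
            nxt.extend([child_value] * tree_width)
        level = nxt
    return count

COIN = 100000000
assert count_tree_txs(50 * COIN, int(0.0001 * COIN), 5, 100) == 100
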
- for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): - fee = int(0.0001*COIN) - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) - assert_equal(len(tree_txs), n) - - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - # This will raise an exception - assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) - - for tx in tree_txs: - tx.rehash() - self.nodes[0].getrawtransaction(tx.hash) - - def test_replacement_feeperkb(self): - """Replacement requires fee-per-KB to be higher""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - self.nodes[0].sendrawtransaction(tx1a_hex, True) - - # Higher fee, but the fee per KB is much lower, so the replacement is - # rejected. - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] - tx1b_hex = txToHex(tx1b) - - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) - - def test_spends_of_conflicting_outputs(self): - """Replacements that spend conflicting tx outputs are rejected""" - utxo1 = make_utxo(self.nodes[0], int(1.2*COIN)) - utxo2 = make_utxo(self.nodes[0], 3*COIN) - - tx1a = CTransaction() - tx1a.vin = [CTxIn(utxo1, nSequence=0)] - tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - tx1a_txid = int(tx1a_txid, 16) - - # Direct spend an output of the transaction we're replacing. - tx2 = CTransaction() - tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] - tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) - tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) - - # This will raise an exception - assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) - - # Spend tx1a's output to test the indirect case. 
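
# The rule exercised in this test: a replacement may not spend an output created
# by any transaction it would evict, since that output disappears with the
# eviction. The check in isolation, with txids as plain strings:
def spends_conflicting(replacement_inputs, evicted_txids):
    return any(txid in evicted_txids for txid, _vout in replacement_inputs)

assert spends_conflicting([("utxo1", 0), ("tx1a", 0)], {"tx1a"})       # rejected
assert not spends_conflicting([("utxo1", 0), ("utxo2", 0)], {"tx1a"})  # fine
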
- tx1b = CTransaction() - tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] - tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1b_hex = txToHex(tx1b) - tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) - tx1b_txid = int(tx1b_txid, 16) - - tx2 = CTransaction() - tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), - CTxIn(COutPoint(tx1b_txid, 0))] - tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) - - # This will raise an exception - assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) - - def test_new_unconfirmed_inputs(self): - """Replacements that add new unconfirmed inputs are rejected""" - confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN)) - unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False) - - tx1 = CTransaction() - tx1.vin = [CTxIn(confirmed_utxo)] - tx1.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1_hex = txToHex(tx1) - self.nodes[0].sendrawtransaction(tx1_hex, True) - - tx2 = CTransaction() - tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] - tx2.vout = tx1.vout - tx2_hex = txToHex(tx2) - - # This will raise an exception - assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True) - - def test_too_many_replacements(self): - """Replacements that evict too many transactions are rejected""" - # Try directly replacing more than MAX_REPLACEMENT_LIMIT - # transactions - - # Start by creating a single transaction with many outputs - initial_nValue = 10*COIN - utxo = make_utxo(self.nodes[0], initial_nValue) - fee = int(0.0001*COIN) - split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) - - outputs = [] - for i in range(MAX_REPLACEMENT_LIMIT+1): - outputs.append(CTxOut(split_value, CScript([1]))) - - splitting_tx = CTransaction() - splitting_tx.vin = [CTxIn(utxo, nSequence=0)] - splitting_tx.vout = outputs - splitting_tx_hex = txToHex(splitting_tx) - - txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True) - txid = int(txid, 16) - - # Now spend each of those outputs individually - for i in range(MAX_REPLACEMENT_LIMIT+1): - tx_i = CTransaction() - tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] - tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))] - tx_i_hex = txToHex(tx_i) - self.nodes[0].sendrawtransaction(tx_i_hex, True) - - # Now create doublespend of the whole lot; should fail. 
- # Need a big enough fee to cover all spending transactions and have - # a higher fee rate - double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) - inputs = [] - for i in range(MAX_REPLACEMENT_LIMIT+1): - inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) - double_tx = CTransaction() - double_tx.vin = inputs - double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) - - # This will raise an exception - assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True) - - # If we remove an input, it should pass - double_tx = CTransaction() - double_tx.vin = inputs[0:-1] - double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) - self.nodes[0].sendrawtransaction(double_tx_hex, True) - - def test_opt_in(self): - """Replacing should only work if orig tx opted in""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - # Create a non-opting in transaction - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - # Shouldn't be able to double-spend - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx1b_hex = txToHex(tx1b) - - # This will raise an exception - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True) - - tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - # Create a different non-opting in transaction - tx2a = CTransaction() - tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] - tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx2a_hex = txToHex(tx2a) - tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) - - # Still shouldn't be able to double-spend - tx2b = CTransaction() - tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] - tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx2b_hex = txToHex(tx2b) - - # This will raise an exception - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True) - - # Now create a new transaction that spends from tx1a and tx2a - # opt-in on one of the inputs - # Transaction should be replaceable on either input - - tx1a_txid = int(tx1a_txid, 16) - tx2a_txid = int(tx2a_txid, 16) - - tx3a = CTransaction() - tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), - CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] - tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] - tx3a_hex = txToHex(tx3a) - - self.nodes[0].sendrawtransaction(tx3a_hex, True) - - tx3b = CTransaction() - tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] - tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))] - tx3b_hex = txToHex(tx3b) - - tx3c = CTransaction() - tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] - tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))] - tx3c_hex = txToHex(tx3c) - - self.nodes[0].sendrawtransaction(tx3b_hex, True) - # If tx3b was accepted, tx3c won't look like a replacement, - # but make sure it is accepted anyway - self.nodes[0].sendrawtransaction(tx3c_hex, True) - - def test_prioritised_transactions(self): - # Ensure that fee deltas used via prioritisetransaction are - # correctly used by replacement logic - - # 1. 
Check that feeperkb uses modified fees - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - # Higher fee, but the actual fee per KB is much lower. - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] - tx1b_hex = txToHex(tx1b) - - # Verify tx1b cannot replace tx1a. - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) - - # Use prioritisetransaction to set tx1a's fee to 0. - self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN)) - - # Now tx1b should be able to replace tx1a - tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) - - assert(tx1b_txid in self.nodes[0].getrawmempool()) - - # 2. Check that absolute fee checks use modified fee. - tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - tx2a = CTransaction() - tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] - tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx2a_hex = txToHex(tx2a) - self.nodes[0].sendrawtransaction(tx2a_hex, True) - - # Lower fee, but we'll prioritise it - tx2b = CTransaction() - tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] - tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))] - tx2b.rehash() - tx2b_hex = txToHex(tx2b) - - # Verify tx2b cannot replace tx2a. - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True) - - # Now prioritise tx2b to have a higher modified fee - self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN)) - - # tx2b should now be accepted - tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) - - assert(tx2b_txid in self.nodes[0].getrawmempool()) - - def test_rpc(self): - us0 = self.nodes[0].listunspent()[0] - ins = [us0] - outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)} - rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True) - rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False) - json0 = self.nodes[0].decoderawtransaction(rawtx0) - json1 = self.nodes[0].decoderawtransaction(rawtx1) - assert_equal(json0["vin"][0]["sequence"], 4294967293) - assert_equal(json1["vin"][0]["sequence"], 4294967295) - - rawtx2 = self.nodes[0].createrawtransaction([], outs) - frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) - frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) - - json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) - json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) - assert_equal(json0["vin"][0]["sequence"], 4294967293) - assert_equal(json1["vin"][0]["sequence"], 4294967294) - -if __name__ == '__main__': - ReplaceByFeeTest().main() diff --git a/test/functional/segwit.py b/test/functional/segwit.py deleted file mode 100755 index ba6373fa33..0000000000 --- a/test/functional/segwit.py +++ /dev/null @@ -1,638 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
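
# The sequence values asserted in test_rpc() above, decoded: 4294967295 is
# 0xffffffff (final), 4294967294 is 0xfffffffe (non-replaceable but
# locktime-enabled, what fundrawtransaction uses for replaceable=False), and
# 4294967293 is 0xfffffffd, the largest value that still signals BIP125
# replaceability:
def describe_sequence(seq):
    if seq == 0xffffffff:
        return "final"
    if seq == 0xfffffffe:
        return "opt-out, locktime enabled"
    return "signals BIP125 replaceability"

assert describe_sequence(4294967293) == "signals BIP125 replaceability"
assert describe_sequence(4294967294) == "opt-out, locktime enabled"
assert describe_sequence(4294967295) == "final"
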
-"""Test the SegWit changeover logic.""" - -from test_framework.address import ( - key_to_p2sh_p2wpkh, - key_to_p2wpkh, - program_to_witness, - script_to_p2sh_p2wsh, - script_to_p2wsh, -) -from test_framework.blocktools import witness_script, send_to_witness -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex -from test_framework.address import script_to_p2sh, key_to_p2pkh -from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE -from io import BytesIO - -NODE_0 = 0 -NODE_2 = 2 -WIT_V0 = 0 -WIT_V1 = 1 - -def getutxo(txid): - utxo = {} - utxo["vout"] = 0 - utxo["txid"] = txid - return utxo - -def find_unspent(node, min_value): - for utxo in node.listunspent(): - if utxo['amount'] >= min_value: - return utxo - -class SegWitTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. - self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], - ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], - ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]] - - def setup_network(self): - super().setup_network() - connect_nodes(self.nodes[0], 2) - self.sync_all() - - def success_mine(self, node, txid, sign, redeem_script=""): - send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - block = node.generate(1) - assert_equal(len(node.getblock(block[0])["tx"]), 2) - sync_blocks(self.nodes) - - def skip_mine(self, node, txid, sign, redeem_script=""): - send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - block = node.generate(1) - assert_equal(len(node.getblock(block[0])["tx"]), 1) - sync_blocks(self.nodes) - - def fail_accept(self, node, error_msg, txid, sign, redeem_script=""): - assert_raises_rpc_error(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - - def fail_mine(self, node, txid, sign, redeem_script=""): - send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1) - sync_blocks(self.nodes) - - def run_test(self): - self.nodes[0].generate(161) #block 161 - - self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork") - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) - tmpl = self.nodes[0].getblocktemplate({}) - assert(tmpl['sizelimit'] == 1000000) - assert('weightlimit' not in tmpl) - assert(tmpl['sigoplimit'] == 20000) - assert(tmpl['transactions'][0]['hash'] == txid) - assert(tmpl['transactions'][0]['sigops'] == 2) - tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) - assert(tmpl['sizelimit'] == 1000000) - assert('weightlimit' not in 
tmpl) - assert(tmpl['sigoplimit'] == 20000) - assert(tmpl['transactions'][0]['hash'] == txid) - assert(tmpl['transactions'][0]['sigops'] == 2) - self.nodes[0].generate(1) #block 162 - - balance_presetup = self.nodes[0].getbalance() - self.pubkey = [] - p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh - wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness - for i in range(3): - newaddress = self.nodes[i].getnewaddress() - self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"]) - multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG]) - p2sh_addr = self.nodes[i].addwitnessaddress(newaddress) - bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False) - p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address'] - bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address'] - assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1])) - assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1])) - assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript)) - assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript)) - p2sh_ids.append([]) - wit_ids.append([]) - for v in range(2): - p2sh_ids[i].append([]) - wit_ids[i].append([]) - - for i in range(5): - for n in range(3): - for v in range(2): - wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999"))) - p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999"))) - - self.nodes[0].generate(1) #block 163 - sync_blocks(self.nodes) - - # Make sure all nodes recognize the transactions as theirs - assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50) - assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999")) - assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999")) - - self.nodes[0].generate(260) #block 423 - sync_blocks(self.nodes) - - self.log.info("Verify default node can't accept any witness format txs before fork") - # unsigned, no scriptsig - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False) - # unsigned with redeem script - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0])) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0])) - # signed - self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True) - self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True) - self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True) - self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True) - - self.log.info("Verify witness txs are skipped for mining before the fork") - self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) 
#block 424 - self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425 - self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426 - self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427 - - # TODO: An old node would see these txs without witnesses and be able to mine them - - self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork") - self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428 - self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429 - - self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid") - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False) - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False) - - self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork") - self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430 - self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431 - - self.log.info("Verify previous witness txs skipped for mining can now be mined") - assert_equal(len(self.nodes[2].getrawmempool()), 4) - block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3) - sync_blocks(self.nodes) - assert_equal(len(self.nodes[2].getrawmempool()), 0) - segwit_tx_list = self.nodes[2].getblock(block[0])["tx"] - assert_equal(len(segwit_tx_list), 5) - - self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag") - assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False)) - assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False)) - for i in range(len(segwit_tx_list)): - tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) - assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i])) - assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i])) - assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) - assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) - assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness())) - - self.log.info("Verify witness txs without witness data are invalid after the fork") - self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False) - self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False) - self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2])) - self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2])) - - self.log.info("Verify default node can now use witness txs") - self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432 - self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433 - self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434 - self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435 - - 
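
# Why 432 is the first block with the new rules: regtest versionbits periods are
# 144 blocks, and with this test's vbparams the deployment walks
# DEFINED -> STARTED -> LOCKED_IN -> ACTIVE one period at a time, so enforcement
# begins at 3 * 144 = 432. The arithmetic:
REGTEST_VB_PERIOD = 144

def first_enforced_height(periods_until_active=3, period=REGTEST_VB_PERIOD):
    return periods_until_active * period

assert first_enforced_height() == 432  # matches the "432 = 144 * 3" comment above
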
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork") - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) - tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) - assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data - assert(tmpl['weightlimit'] == 4000000) - assert(tmpl['sigoplimit'] == 80000) - assert(tmpl['transactions'][0]['txid'] == txid) - assert(tmpl['transactions'][0]['sigops'] == 8) - - self.nodes[0].generate(1) # Mine a block to clear the gbt cache - - self.log.info("Non-segwit miners are able to use GBT response after activation.") - # Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) -> - # tx2 (segwit input, paying to a non-segwit output) -> - # tx3 (non-segwit input, paying to a non-segwit output). - # tx1 is allowed to appear in the block, but no others. - txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996")) - hex_tx = self.nodes[0].gettransaction(txid)['hex'] - tx = FromHex(CTransaction(), hex_tx) - assert(tx.wit.is_null()) # This should not be a segwit input - assert(txid1 in self.nodes[0].getrawmempool()) - - # Now create tx2, which will spend from txid1. - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b'')) - tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE]))) - tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex'] - txid2 = self.nodes[0].sendrawtransaction(tx2_hex) - tx = FromHex(CTransaction(), tx2_hex) - assert(not tx.wit.is_null()) - - # Now create tx3, which will spend from txid2 - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b"")) - tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee - tx.calc_sha256() - txid3 = self.nodes[0].sendrawtransaction(ToHex(tx)) - assert(tx.wit.is_null()) - assert(txid3 in self.nodes[0].getrawmempool()) - - # Now try calling getblocktemplate() without segwit support. - template = self.nodes[0].getblocktemplate() - - # Check that tx1 is the only transaction of the 3 in the template. - template_txids = [ t['txid'] for t in template['transactions'] ] - assert(txid2 not in template_txids and txid3 not in template_txids) - assert(txid1 in template_txids) - - # Check that running with segwit support results in all 3 being included. - template = self.nodes[0].getblocktemplate({"rules": ["segwit"]}) - template_txids = [ t['txid'] for t in template['transactions'] ] - assert(txid1 in template_txids) - assert(txid2 in template_txids) - assert(txid3 in template_txids) - - # Check that wtxid is properly reported in mempool entry - assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True)) - - # Mine a block to clear the gbt cache again. 
- self.nodes[0].generate(1) - - self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent") - - # Some public keys to be used later - pubkeys = [ - "0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb - "02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97 - "04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV - "02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd - "036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66 - "0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K - "0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ - ] - - # Import a compressed key and an uncompressed key, generate some multisig addresses - self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn") - uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"] - self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR") - compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"] - assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False)) - assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True)) - - self.nodes[0].importpubkey(pubkeys[0]) - compressed_solvable_address = [key_to_p2pkh(pubkeys[0])] - self.nodes[0].importpubkey(pubkeys[1]) - compressed_solvable_address.append(key_to_p2pkh(pubkeys[1])) - self.nodes[0].importpubkey(pubkeys[2]) - uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])] - - spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress - spendable_after_importaddress = [] # These outputs should be seen after importaddress - solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable - unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress - solvable_anytime = [] # These outputs should be solvable after importpubkey - unseen_anytime = [] # These outputs should never be seen - - uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address']) - uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address']) - compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address']) - uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address']) - compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address']) - compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], 
compressed_solvable_address[1]])['address']) - unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"] - - # Test multisig_without_privkey - # We have 2 public keys without private keys, use addmultisigaddress to add to wallet. - # Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address. - - multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address'] - script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG]) - solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL])) - - for i in compressed_spendable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - # bare and p2sh multisig with compressed keys should always be spendable - spendable_anytime.extend([bare, p2sh]) - # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress - spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh]) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # normal P2PKH and P2PK with compressed keys should always be spendable - spendable_anytime.extend([p2pkh, p2pk]) - # P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress - spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) - # P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable - spendable_anytime.extend([p2wpkh, p2sh_p2wpkh]) - - for i in uncompressed_spendable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - # bare and p2sh multisig with uncompressed keys should always be spendable - spendable_anytime.extend([bare, p2sh]) - # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen - unseen_anytime.extend([p2wsh, p2sh_p2wsh]) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # normal P2PKH and P2PK with uncompressed keys should always be spendable - spendable_anytime.extend([p2pkh, p2pk]) - # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress - spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) - # Witness output types with uncompressed keys are never seen - unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) - - for i in compressed_solvable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - # Multisig without private is not seen after addmultisigaddress, but seen after importaddress - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh]) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen - solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh]) - # P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress - solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, 
p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) - - for i in uncompressed_solvable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress - solvable_after_importaddress.extend([bare, p2sh]) - # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen - unseen_anytime.extend([p2wsh, p2sh_p2wsh]) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # normal P2PKH and P2PK with uncompressed keys should always be seen - solvable_anytime.extend([p2pkh, p2pk]) - # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress - solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) - # Witness output types with uncompressed keys are never seen - unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) - - op1 = CScript([OP_1]) - op0 = CScript([OP_0]) - # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V - unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)] - unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D") - unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG]) - unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)]) - p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL]) - p2wshop1 = CScript([OP_0, sha256(op1)]) - unsolvable_after_importaddress.append(unsolvablep2pkh) - unsolvable_after_importaddress.append(unsolvablep2wshp2pkh) - unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script - unsolvable_after_importaddress.append(p2wshop1) - unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided - unsolvable_after_importaddress.append(p2shop0) - - spendable_txid = [] - solvable_txid = [] - spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2)) - solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1)) - self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0) - - importlist = [] - for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - bare = hex_str_to_bytes(v['hex']) - importlist.append(bytes_to_hex_str(bare)) - importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)]))) - else: - pubkey = hex_str_to_bytes(v['pubkey']) - p2pk = CScript([pubkey, OP_CHECKSIG]) - p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG]) - importlist.append(bytes_to_hex_str(p2pk)) - importlist.append(bytes_to_hex_str(p2pkh)) - importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)]))) - importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)]))) - importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)]))) - - importlist.append(bytes_to_hex_str(unsolvablep2pkh)) - importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh)) - importlist.append(bytes_to_hex_str(op1)) - 
importlist.append(bytes_to_hex_str(p2wshop1)) - - for i in importlist: - # import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC - # exceptions and continue. - try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True) - - self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only - self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey - - spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2)) - solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1)) - self.mine_and_test_listunspent(unsolvable_after_importaddress, 1) - self.mine_and_test_listunspent(unseen_anytime, 0) - - # addwitnessaddress should refuse to return a witness address if an uncompressed key is used - # note that no witness address should be returned by unsolvable addresses - for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address: - assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i) - - # addwitnessaddress should return a witness addresses even if keys are not in the wallet - self.nodes[0].addwitnessaddress(multisig_without_privkey_address) - - for i in compressed_spendable_address + compressed_solvable_address: - witaddress = self.nodes[0].addwitnessaddress(i) - # addwitnessaddress should return the same address if it is a known P2SH-witness address - assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress)) - - spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2)) - solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1)) - self.mine_and_test_listunspent(unsolvable_after_importaddress, 1) - self.mine_and_test_listunspent(unseen_anytime, 0) - - # Repeat some tests. 
This time we don't add witness scripts with importaddress - # Import a compressed key and an uncompressed key, generate some multisig addresses - self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH") - uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"] - self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw") - compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"] - - self.nodes[0].importpubkey(pubkeys[5]) - compressed_solvable_address = [key_to_p2pkh(pubkeys[5])] - self.nodes[0].importpubkey(pubkeys[6]) - uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])] - - spendable_after_addwitnessaddress = [] # These outputs should be spendable after addwitnessaddress - solvable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress but not spendable - solvable_anytime = [] # These outputs should be solvable after importpubkey - unseen_anytime = [] # These outputs should never be seen - - uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address']) - uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address']) - compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address']) - uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address']) - compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address']) - - premature_witaddress = [] - - for i in compressed_spendable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress - spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) - premature_witaddress.append(script_to_p2sh(p2wsh)) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # P2WPKH, P2SH_P2WPKH are always spendable - spendable_anytime.extend([p2wpkh, p2sh_p2wpkh]) - - for i in uncompressed_spendable_address + uncompressed_solvable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen - unseen_anytime.extend([p2wsh, p2sh_p2wsh]) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen - unseen_anytime.extend([p2wpkh, p2sh_p2wpkh]) - - for i in compressed_solvable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - # P2WSH multisig without private keys is seen after addwitnessaddress - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) - premature_witaddress.append(script_to_p2sh(p2wsh)) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk,
p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # P2WPKH and P2SH_P2WPKH with compressed keys are always solvable - solvable_anytime.extend([p2wpkh, p2sh_p2wpkh]) - - self.mine_and_test_listunspent(spendable_anytime, 2) - self.mine_and_test_listunspent(solvable_anytime, 1) - self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0) - - # addwitnessaddress should refuse to return a witness address if an uncompressed key is used - # note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress - # premature_witaddress entries are not accepted until the script is added with addwitnessaddress first - for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress: - # This will raise an exception - assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i) - - # after importaddress it should pass addwitnessaddress - v = self.nodes[0].validateaddress(compressed_solvable_address[1]) - self.nodes[0].importaddress(v['hex'],"",False,True) - for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress: - witaddress = self.nodes[0].addwitnessaddress(i) - assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress)) - - spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2)) - solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1)) - self.mine_and_test_listunspent(unseen_anytime, 0) - - # Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works - v1_addr = program_to_witness(1, [3,5]) - v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1}) - v1_decoded = self.nodes[1].decoderawtransaction(v1_tx) - assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr) - assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305") - - # Check that spendable outputs are really spendable - self.create_and_mine_tx_from_txids(spendable_txid) - - # import all the private keys so solvable addresses become spendable - self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb") - self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97") - self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV") - self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd") - self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66") - self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K") - self.create_and_mine_tx_from_txids(solvable_txid) - - # Test that importing native P2WPKH/P2WSH scripts works - for use_p2wsh in [False, True]: - if use_p2wsh: - scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a" - transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000" - else: - scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87" - transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000" - - self.nodes[1].importaddress(scriptPubKey, "", False) - rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex'] - rawtxfund = self.nodes[1].signrawtransaction(rawtxfund)["hex"]
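- # The scriptPubKey imported above is watch-only, but fundrawtransaction - # can still select wallet-owned inputs and signrawtransaction signs them, - # so the wallet should see and report the broadcast transaction below.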
- txid = self.nodes[1].sendrawtransaction(rawtxfund) - - assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) - assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) - - # Assert it is properly saved - self.stop_node(1) - self.start_node(1) - assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) - assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) - - def mine_and_test_listunspent(self, script_list, ismine): - utxo = find_unspent(self.nodes[0], 50) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout']))) - for i in script_list: - tx.vout.append(CTxOut(10000000, i)) - tx.rehash() - signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] - txid = self.nodes[0].sendrawtransaction(signresults, True) - self.nodes[0].generate(1) - sync_blocks(self.nodes) - watchcount = 0 - spendcount = 0 - for i in self.nodes[0].listunspent(): - if (i['txid'] == txid): - watchcount += 1 - if (i['spendable'] == True): - spendcount += 1 - if (ismine == 2): - assert_equal(spendcount, len(script_list)) - elif (ismine == 1): - assert_equal(watchcount, len(script_list)) - assert_equal(spendcount, 0) - else: - assert_equal(watchcount, 0) - return txid - - def p2sh_address_to_script(self,v): - bare = CScript(hex_str_to_bytes(v['hex'])) - p2sh = CScript(hex_str_to_bytes(v['scriptPubKey'])) - p2wsh = CScript([OP_0, sha256(bare)]) - p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL]) - return([bare, p2sh, p2wsh, p2sh_p2wsh]) - - def p2pkh_address_to_script(self,v): - pubkey = hex_str_to_bytes(v['pubkey']) - p2wpkh = CScript([OP_0, hash160(pubkey)]) - p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL]) - p2pk = CScript([pubkey, OP_CHECKSIG]) - p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey'])) - p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL]) - p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL]) - p2wsh_p2pk = CScript([OP_0, sha256(p2pk)]) - p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)]) - p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL]) - p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL]) - return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] - - def create_and_mine_tx_from_txids(self, txids, success = True): - tx = CTransaction() - for i in txids: - txtmp = CTransaction() - txraw = self.nodes[0].getrawtransaction(i) - f = BytesIO(hex_str_to_bytes(txraw)) - txtmp.deserialize(f) - for j in range(len(txtmp.vout)): - tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j))) - tx.vout.append(CTxOut(0, CScript())) - tx.rehash() - signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] - self.nodes[0].sendrawtransaction(signresults, True) - self.nodes[0].generate(1) - sync_blocks(self.nodes) - - -if __name__ == '__main__': - SegWitTest().main() diff --git a/test/functional/smartfees.py b/test/functional/smartfees.py deleted file mode 100755 index 68453e50f4..0000000000 --- a/test/functional/smartfees.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test fee estimation code.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE -from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN - -# Construct 2 trivial P2SH's and the ScriptSigs that spend them -# So we can create many transactions without needing to spend -# time signing. -redeem_script_1 = CScript([OP_1, OP_DROP]) -redeem_script_2 = CScript([OP_2, OP_DROP]) -P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL]) -P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL]) - -# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2 -SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])] - -global log - -def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment): - """ - Create and send a transaction with a random fee. - The transaction pays to a trivial P2SH script, and assumes that its inputs - are of the same form. - The function takes a list of confirmed outputs and unconfirmed outputs - and attempts to use the confirmed list first for its inputs. - It adds the newly created outputs to the unconfirmed list. - Returns (raw transaction, fee) - """ - # It's best to exponentially distribute our random fees - # because the buckets are exponentially spaced. - # Exponentially distributed from 1-128 * fee_increment - rand_fee = float(fee_increment)*(1.1892**random.randint(0,28)) - # Total fee ranges from min_fee to min_fee + 127*fee_increment - fee = min_fee - fee_increment + satoshi_round(rand_fee) - tx = CTransaction() - total_in = Decimal("0.00000000") - while total_in <= (amount + fee) and len(conflist) > 0: - t = conflist.pop(0) - total_in += t["amount"] - tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) - if total_in <= amount + fee: - while total_in <= (amount + fee) and len(unconflist) > 0: - t = unconflist.pop(0) - total_in += t["amount"] - tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) - if total_in <= amount + fee: - raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in)) - tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1)) - tx.vout.append(CTxOut(int(amount*COIN), P2SH_2)) - # These transactions don't need to be signed, but we still have to insert - # the ScriptSig that will satisfy the ScriptPubKey. - for inp in tx.vin: - inp.scriptSig = SCRIPT_SIG[inp.prevout.n] - txid = from_node.sendrawtransaction(ToHex(tx), True) - unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee}) - unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount}) - - return (ToHex(tx), fee) - -def split_inputs(from_node, txins, txouts, initial_split = False): - """ - We need to generate a lot of inputs so we can generate a ton of transactions. - This function takes an input from txins, and creates and sends a transaction - which splits the value into 2 outputs which are appended to txouts. - Previously this was designed to be small inputs so they wouldn't have - a high coin age when the notion of priority still existed. 
- """ - prevtxout = txins.pop() - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b"")) - - half_change = satoshi_round(prevtxout["amount"]/2) - rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000") - tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1)) - tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2)) - - # If this is the initial split we actually need to sign the transaction - # Otherwise we just need to insert the proper ScriptSig - if (initial_split) : - completetx = from_node.signrawtransaction(ToHex(tx))["hex"] - else : - tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]] - completetx = ToHex(tx) - txid = from_node.sendrawtransaction(completetx, True) - txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change}) - txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change}) - -def check_estimates(node, fees_seen, max_invalid, print_estimates = True): - """ - This function calls estimatefee and verifies that the estimates - meet certain invariants. - """ - all_estimates = [ node.estimatefee(i) for i in range(1,26) ] - if print_estimates: - log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]]) - delta = 1.0e-6 # account for rounding error - last_e = max(fees_seen) - for e in [x for x in all_estimates if x >= 0]: - # Estimates should be within the bounds of what transactions fees actually were: - if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen): - raise AssertionError("Estimated fee (%f) out of range (%f,%f)" - %(float(e), min(fees_seen), max(fees_seen))) - # Estimates should be monotonically decreasing - if float(e)-delta > last_e: - raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms" - %(float(e),float(last_e))) - last_e = e - valid_estimate = False - invalid_estimates = 0 - for i,e in enumerate(all_estimates): # estimate is for i+1 - if e >= 0: - valid_estimate = True - if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n) - assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta) - - else: - invalid_estimates += 1 - - # estimatesmartfee should still be valid - approx_estimate = node.estimatesmartfee(i+1)["feerate"] - answer_found = node.estimatesmartfee(i+1)["blocks"] - assert(approx_estimate > 0) - assert(answer_found > i+1) - - # Once we're at a high enough confirmation count that we can give an estimate - # We should have estimates for all higher confirmation counts - if valid_estimate: - raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate") - - # Check on the expected number of different confirmation counts - # that we might not have valid estimates for - if invalid_estimates > max_invalid: - raise AssertionError("More than (%d) invalid estimates"%(max_invalid)) - return all_estimates - - -class EstimateFeeTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 3 - - def setup_network(self): - """ - We'll setup the network to have 3 nodes that all mine with different parameters. - But first we need to use one node to create a lot of outputs - which we will use to generate our transactions. 
- """ - self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"], - ["-blockmaxsize=17000", "-maxorphantx=1000", "-deprecatedrpc=estimatefee"], - ["-blockmaxsize=8000", "-maxorphantx=1000"]]) - # Use node0 to mine blocks for input splitting - # Node1 mines small blocks but that are bigger than the expected transaction rate. - # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes, - # (17k is room enough for 110 or so transactions) - # Node2 is a stingy miner, that - # produces too small blocks (room for only 55 or so transactions) - - - def transact_and_mine(self, numblocks, mining_node): - min_fee = Decimal("0.00001") - # We will now mine numblocks blocks generating on average 100 transactions between each block - # We shuffle our confirmed txout set before each set of transactions - # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible - # resorting to tx's that depend on the mempool when those run out - for i in range(numblocks): - random.shuffle(self.confutxo) - for j in range(random.randrange(100-50,100+50)): - from_index = random.randint(1,2) - (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo, - self.memutxo, Decimal("0.005"), min_fee, min_fee) - tx_kbytes = (len(txhex) // 2) / 1000.0 - self.fees_per_kb.append(float(fee)/tx_kbytes) - sync_mempools(self.nodes[0:3], wait=.1) - mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"] - sync_blocks(self.nodes[0:3], wait=.1) - # update which txouts are confirmed - newmem = [] - for utx in self.memutxo: - if utx["txid"] in mined: - self.confutxo.append(utx) - else: - newmem.append(utx) - self.memutxo = newmem - - def run_test(self): - self.log.info("This test is time consuming, please be patient") - self.log.info("Splitting inputs so we can generate tx's") - - # Make log handler available to helper functions - global log - log = self.log - - # Start node0 - self.start_node(0) - self.txouts = [] - self.txouts2 = [] - # Split a coinbase into two transaction puzzle outputs - split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True) - - # Mine - while (len(self.nodes[0].getrawmempool()) > 0): - self.nodes[0].generate(1) - - # Repeatedly split those 2 outputs, doubling twice for each rep - # Use txouts to monitor the available utxo, since these won't be tracked in wallet - reps = 0 - while (reps < 5): - #Double txouts to txouts2 - while (len(self.txouts)>0): - split_inputs(self.nodes[0], self.txouts, self.txouts2) - while (len(self.nodes[0].getrawmempool()) > 0): - self.nodes[0].generate(1) - #Double txouts2 to txouts - while (len(self.txouts2)>0): - split_inputs(self.nodes[0], self.txouts2, self.txouts) - while (len(self.nodes[0].getrawmempool()) > 0): - self.nodes[0].generate(1) - reps += 1 - self.log.info("Finished splitting") - - # Now we can connect the other nodes, didn't want to connect them earlier - # so the estimates would not be affected by the splitting transactions - self.start_node(1) - self.start_node(2) - connect_nodes(self.nodes[1], 0) - connect_nodes(self.nodes[0], 2) - connect_nodes(self.nodes[2], 1) - - self.sync_all() - - self.fees_per_kb = [] - self.memutxo = [] - self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting - self.log.info("Will output estimates for 1/2/3/6/15/25 blocks") - - for i in range(2): - self.log.info("Creating transactions and mining them with a block size that can't keep up") - # Create transactions and mine 10 small blocks with 
node 2, but create txs faster than we can mine - self.transact_and_mine(10, self.nodes[2]) - check_estimates(self.nodes[1], self.fees_per_kb, 14) - - self.log.info("Creating transactions and mining them at a block size that is just big enough") - # Generate transactions while mining 10 more blocks, this time with node1 - # which mines blocks with capacity just above the rate that transactions are being created - self.transact_and_mine(10, self.nodes[1]) - check_estimates(self.nodes[1], self.fees_per_kb, 2) - - # Finish by mining a normal-sized block: - while len(self.nodes[1].getrawmempool()) > 0: - self.nodes[1].generate(1) - - sync_blocks(self.nodes[0:3], wait=.1) - self.log.info("Final estimates after emptying mempools") - check_estimates(self.nodes[1], self.fees_per_kb, 2) - -if __name__ == '__main__': - EstimateFeeTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index b8b6ee98bf..6aad0f9b9d 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -58,10 +58,10 @@ BASE_SCRIPTS= [ 'wallet-hd.py', 'walletbackup.py', # vv Tests less than 5m vv - 'p2p-fullblocktest.py', + 'feature_block.py', 'fundrawtransaction.py', 'p2p-compactblocks.py', - 'segwit.py', + 'feature_segwit.py', # vv Tests less than 2m vv 'wallet.py', 'wallet-accounts.py', @@ -76,10 +76,10 @@ BASE_SCRIPTS= [ 'merkle_blocks.py', 'receivedby.py', 'abandonconflict.py', - 'bip68-112-113-p2p.py', + 'feature_csv_activation.py', 'rawtransactions.py', 'address_types.py', - 'reindex.py', + 'feature_reindex.py', # vv Tests less than 30s vv 'keypool-topup.py', 'zmq_test.py', @@ -97,7 +97,7 @@ BASE_SCRIPTS= [ 'multiwallet.py --usecli', 'httpbasics.py', 'multi_rpc.py', - 'proxy_test.py', + 'feature_proxy.py', 'signrawtransactions.py', 'disconnect_ban.py', 'decodescript.py', @@ -110,11 +110,11 @@ BASE_SCRIPTS= [ 'prioritise_transaction.py', 'invalidblockrequest.py', 'invalidtxrequest.py', - 'p2p-versionbits-warning.py', + 'feature_versionbits_warning.py', 'preciousblock.py', 'importprunedfunds.py', 'signmessages.py', - 'nulldummy.py', + 'feature_nulldummy.py', 'import-rescan.py', 'mining.py', 'bumpfee.py', @@ -122,17 +122,17 @@ BASE_SCRIPTS= [ 'listsinceblock.py', 'p2p-leaktests.py', 'wallet-encryption.py', - 'bipdersig-p2p.py', - 'bip65-cltv-p2p.py', + 'feature_dersig.py', + 'feature_cltv.py', 'uptime.py', 'resendwallettransactions.py', - 'minchainwork.py', + 'feature_minchainwork.py', 'p2p-fingerprint.py', - 'uacomment.py', + 'feature_uacomment.py', 'p2p-acceptblock.py', 'feature_logging.py', 'node_network_limited.py', - 'conf_args.py', + 'feature_config_args.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] @@ -140,29 +140,29 @@ BASE_SCRIPTS= [ EXTENDED_SCRIPTS = [ # These tests are not run by the travis build process. 
# Longest test should go first, to favor running tests in parallel - 'pruning.py', + 'feature_pruning.py', # vv Tests less than 20m vv - 'smartfees.py', + 'feature_fee_estimation.py', # vv Tests less than 5m vv - 'maxuploadtarget.py', + 'feature_maxuploadtarget.py', 'mempool_packages.py', - 'dbcrash.py', + 'feature_dbcrash.py', # vv Tests less than 2m vv - 'bip68-sequence.py', + 'feature_bip68_sequence.py', 'getblocktemplate_longpoll.py', 'p2p-timeouts.py', # vv Tests less than 60s vv - 'bip9-softforks.py', + 'feature_bip9_softforks.py', 'p2p-feefilter.py', 'rpcbind_test.py', # vv Tests less than 30s vv - 'assumevalid.py', + 'feature_assumevalid.py', 'example_test.py', 'txn_doublespend.py', 'txn_clone.py --mineblock', - 'notifications.py', + 'feature_notifications.py', 'invalidateblock.py', - 'replace-by-fee.py', + 'feature_rbf.py', ] # Place EXTENDED_SCRIPTS first since it has the 3 longest running tests @@ -474,7 +474,7 @@ class TestResult(): def check_script_prefixes(): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" - EXPECTED_VIOLATION_COUNT = 77 + EXPECTED_VIOLATION_COUNT = 60 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming diff --git a/test/functional/uacomment.py b/test/functional/uacomment.py deleted file mode 100755 index 0b2c64ab69..0000000000 --- a/test/functional/uacomment.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the -uacomment option.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class UacommentTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - - def run_test(self): - self.log.info("test multiple -uacomment") - test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1] - assert_equal(test_uacomment, "(testnode0)") - - self.restart_node(0, ["-uacomment=foo"]) - foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1] - assert_equal(foo_uacomment, "(testnode0; foo)") - - self.log.info("test -uacomment max length") - self.stop_node(0) - expected = "Total length of network version string (286) exceeds maximum length (256). Reduce the number or size of uacomments." - self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected) - - self.log.info("test -uacomment unsafe characters") - for unsafe_char in ['/', ':', '(', ')']: - expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters" - self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected) - -if __name__ == '__main__': - UacommentTest().main() -- cgit v1.2.3 From 90600bc7db2a8047c93bc10d403e862141ada770 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Thu, 25 Jan 2018 09:44:29 +1000 Subject: [tests] Rename wallet_* functional tests. 
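These renames are what let check_script_prefixes in test_runner.py tighten its expected violation count (77 down to 60 in the commit above). A rough sketch of the kind of prefix audit involved, with illustrative names only, not the runner's exact internals:

    import os

    # Illustrative prefix audit: count functional test scripts whose
    # filenames do not begin with an approved category prefix.
    ALLOWED_PREFIXES = ("example_", "feature_", "interface_", "mempool_",
                        "mining_", "p2p_", "rpc_", "wallet_")

    def count_violations(test_dir="test/functional"):
        return sum(1 for name in os.listdir(test_dir)
                   if name.endswith(".py")
                   and not name.startswith(ALLOWED_PREFIXES))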
--- test/functional/abandonconflict.py | 158 -------- test/functional/address_types.py | 294 -------------- test/functional/bumpfee.py | 300 -------------- test/functional/disablewallet.py | 34 -- test/functional/import-rescan.py | 187 --------- test/functional/importmulti.py | 451 --------------------- test/functional/importprunedfunds.py | 114 ------ test/functional/keypool-topup.py | 74 ---- test/functional/keypool.py | 84 ---- test/functional/listsinceblock.py | 280 ------------- test/functional/multiwallet.py | 133 ------ test/functional/receivedby.py | 120 ------ test/functional/resendwallettransactions.py | 29 -- test/functional/test_runner.py | 54 +-- test/functional/txn_clone.py | 166 -------- test/functional/txn_doublespend.py | 143 ------- test/functional/wallet-accounts.py | 206 ---------- test/functional/wallet-dump.py | 144 ------- test/functional/wallet-encryption.py | 80 ---- test/functional/wallet-hd.py | 122 ------ test/functional/wallet.py | 446 -------------------- test/functional/wallet_abandonconflict.py | 158 ++++++++ test/functional/wallet_accounts.py | 206 ++++++++++ test/functional/wallet_address_types.py | 294 ++++++++++++++ test/functional/wallet_backup.py | 205 ++++++++++ test/functional/wallet_basic.py | 446 ++++++++++++++++++++ test/functional/wallet_bumpfee.py | 300 ++++++++++++++ test/functional/wallet_disable.py | 34 ++ test/functional/wallet_dump.py | 144 +++++++ test/functional/wallet_encryption.py | 80 ++++ test/functional/wallet_hd.py | 122 ++++++ test/functional/wallet_import_rescan.py | 187 +++++++++ test/functional/wallet_importmulti.py | 451 +++++++++++++++++++++ test/functional/wallet_importprunedfunds.py | 114 ++++++ test/functional/wallet_keypool.py | 84 ++++ test/functional/wallet_keypool_topup.py | 74 ++++ test/functional/wallet_listreceivedby.py | 120 ++++++ test/functional/wallet_listsinceblock.py | 280 +++++++++++++ test/functional/wallet_multiwallet.py | 133 ++++++ test/functional/wallet_resendwallettransactions.py | 29 ++ test/functional/wallet_txn_clone.py | 166 ++++++++ test/functional/wallet_txn_doublespend.py | 143 +++++++ test/functional/wallet_zapwallettxes.py | 78 ++++ test/functional/walletbackup.py | 205 ---------- test/functional/zapwallettxes.py | 78 ---- 45 files changed, 3875 insertions(+), 3875 deletions(-) delete mode 100755 test/functional/abandonconflict.py delete mode 100755 test/functional/address_types.py delete mode 100755 test/functional/bumpfee.py delete mode 100755 test/functional/disablewallet.py delete mode 100755 test/functional/import-rescan.py delete mode 100755 test/functional/importmulti.py delete mode 100755 test/functional/importprunedfunds.py delete mode 100755 test/functional/keypool-topup.py delete mode 100755 test/functional/keypool.py delete mode 100755 test/functional/listsinceblock.py delete mode 100755 test/functional/multiwallet.py delete mode 100755 test/functional/receivedby.py delete mode 100755 test/functional/resendwallettransactions.py delete mode 100755 test/functional/txn_clone.py delete mode 100755 test/functional/txn_doublespend.py delete mode 100755 test/functional/wallet-accounts.py delete mode 100755 test/functional/wallet-dump.py delete mode 100755 test/functional/wallet-encryption.py delete mode 100755 test/functional/wallet-hd.py delete mode 100755 test/functional/wallet.py create mode 100755 test/functional/wallet_abandonconflict.py create mode 100755 test/functional/wallet_accounts.py create mode 100755 test/functional/wallet_address_types.py create mode 100755 
test/functional/wallet_backup.py create mode 100755 test/functional/wallet_basic.py create mode 100755 test/functional/wallet_bumpfee.py create mode 100755 test/functional/wallet_disable.py create mode 100755 test/functional/wallet_dump.py create mode 100755 test/functional/wallet_encryption.py create mode 100755 test/functional/wallet_hd.py create mode 100755 test/functional/wallet_import_rescan.py create mode 100755 test/functional/wallet_importmulti.py create mode 100755 test/functional/wallet_importprunedfunds.py create mode 100755 test/functional/wallet_keypool.py create mode 100755 test/functional/wallet_keypool_topup.py create mode 100755 test/functional/wallet_listreceivedby.py create mode 100755 test/functional/wallet_listsinceblock.py create mode 100755 test/functional/wallet_multiwallet.py create mode 100755 test/functional/wallet_resendwallettransactions.py create mode 100755 test/functional/wallet_txn_clone.py create mode 100755 test/functional/wallet_txn_doublespend.py create mode 100755 test/functional/wallet_zapwallettxes.py delete mode 100755 test/functional/walletbackup.py delete mode 100755 test/functional/zapwallettxes.py (limited to 'test') diff --git a/test/functional/abandonconflict.py b/test/functional/abandonconflict.py deleted file mode 100755 index 14964438af..0000000000 --- a/test/functional/abandonconflict.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the abandontransaction RPC. - - The abandontransaction RPC marks a transaction and all its in-wallet - descendants as abandoned which allows their inputs to be respent. It can be - used to replace "stuck" or evicted transactions. It only works on transactions - which are not included in a block and are not currently in the mempool. It has - no effect on transactions which are already conflicted or abandoned. 
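- - The test below builds a chain of dependent wallet transactions, then uses - -minrelaytxfee restarts to force them out of the node's mempool, so that - abandontransaction can be exercised without waiting for a real eviction.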
-""" -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class AbandonConflictTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.extra_args = [["-minrelaytxfee=0.00001"], []] - - def run_test(self): - self.nodes[1].generate(100) - sync_blocks(self.nodes) - balance = self.nodes[0].getbalance() - txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) - txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) - txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) - sync_mempools(self.nodes) - self.nodes[1].generate(1) - - sync_blocks(self.nodes) - newbalance = self.nodes[0].getbalance() - assert(balance - newbalance < Decimal("0.001")) #no more than fees lost - balance = newbalance - - # Disconnect nodes so node0's transactions don't get into node1's mempool - disconnect_nodes(self.nodes[0], 1) - - # Identify the 10btc outputs - nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10")) - nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10")) - nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10")) - - inputs =[] - # spend 10btc outputs from txA and txB - inputs.append({"txid":txA, "vout":nA}) - inputs.append({"txid":txB, "vout":nB}) - outputs = {} - - outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") - outputs[self.nodes[1].getnewaddress()] = Decimal("5") - signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) - txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) - - # Identify the 14.99998btc output - nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998")) - - #Create a child tx spending AB1 and C - inputs = [] - inputs.append({"txid":txAB1, "vout":nAB}) - inputs.append({"txid":txC, "vout":nC}) - outputs = {} - outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") - signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) - txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) - - # In mempool txs from self should increase balance from change - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996")) - balance = newbalance - - # Restart the node with a higher min relay fee so the parent tx is no longer in mempool - # TODO: redo with eviction - self.stop_node(0) - self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) - - # Verify txs no longer in either node's mempool - assert_equal(len(self.nodes[0].getrawmempool()), 0) - assert_equal(len(self.nodes[1].getrawmempool()), 0) - - # Not in mempool txs from self should only reduce balance - # inputs are still spent, but change not received - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("24.9996")) - # Unconfirmed received funds that are not in mempool, also shouldn't show - # up in unconfirmed balance - unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance() - assert_equal(unconfbalance, newbalance) - # Also shouldn't show up in listunspent - assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]) - balance = newbalance - - # Abandon original transaction and verify inputs are 
available again - # including that the child tx was also abandoned - self.nodes[0].abandontransaction(txAB1) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance + Decimal("30")) - balance = newbalance - - # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned - self.stop_node(0) - self.start_node(0, extra_args=["-minrelaytxfee=0.00001"]) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - assert_equal(self.nodes[0].getbalance(), balance) - - # But if it's received again then it is unabandoned - # And since now in mempool, the change is available - # But its child tx remains abandoned - self.nodes[0].sendrawtransaction(signed["hex"]) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998")) - balance = newbalance - - # Send child tx again so it's unabandoned - self.nodes[0].sendrawtransaction(signed2["hex"]) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) - balance = newbalance - - # Remove using high relay fee again - self.stop_node(0) - self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("24.9996")) - balance = newbalance - - # Create a double spend of AB1 by spending again from only A's 10 output - # Mine double spend from node 1 - inputs =[] - inputs.append({"txid":txA, "vout":nA}) - outputs = {} - outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999") - tx = self.nodes[0].createrawtransaction(inputs, outputs) - signed = self.nodes[0].signrawtransaction(tx) - self.nodes[1].sendrawtransaction(signed["hex"]) - self.nodes[1].generate(1) - - connect_nodes(self.nodes[0], 1) - sync_blocks(self.nodes) - - # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance + Decimal("20")) - balance = newbalance - - # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 - # Invalidate the block with the double spend and B's 10 BTC output should no longer be available - # We don't think C's should be available either - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - newbalance = self.nodes[0].getbalance() - #assert_equal(newbalance, balance - Decimal("10")) - self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") - self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315") - self.log.info(str(balance) + " -> " + str(newbalance) + " ?") - -if __name__ == '__main__': - AbandonConflictTest().main() diff --git a/test/functional/address_types.py b/test/functional/address_types.py deleted file mode 100755 index 38a3425214..0000000000 --- a/test/functional/address_types.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test that the wallet can send and receive using all combinations of address types.
- -There are 5 nodes-under-test: - - node0 uses legacy addresses - - node1 uses p2sh/segwit addresses - - node2 uses p2sh/segwit addresses and bech32 addresses for change - - node3 uses bech32 addresses - - node4 uses p2sh/segwit addresses for change - -node5 exists to generate new blocks. - -## Multisig address test - -Test that adding a multisig address with: - - an uncompressed pubkey always gives a legacy address - - only compressed pubkeys gives an `-addresstype` address - -## Sending to address types test - -A series of tests, iterating over node0-node4. In each iteration of the test, one node sends: - - 10/101th of its balance to itself (using getrawchangeaddress for single key addresses) - - 20/101th to the next node - - 30/101th to the node after that - - 40/101th to the remaining node - - 1/101th remains as fee+change - -Iterate over each node for single key addresses, and then over each node for -multisig addresses. - -Repeat test, but with explicit address_type parameters passed to getnewaddress -and getrawchangeaddress: - - node0 and node3 send to p2sh. - - node1 sends to bech32. - - node2 sends to legacy. - -As every node sends coins after receiving, this also -verifies that spending coins sent to all these address types works. - -## Change type test - -Test that the nodes generate the correct change address type: - - node0 always uses a legacy change address. - - node1 uses a bech32 address for change if any destination address is bech32. - - node2 always uses a bech32 address for change - - node3 always uses a bech32 address for change - - node4 always uses a p2sh/segwit output for change. -""" - -from decimal import Decimal -import itertools - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_greater_than, - assert_raises_rpc_error, - connect_nodes_bi, - sync_blocks, - sync_mempools, -) - -class AddressTypeTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 6 - self.extra_args = [ - ["-addresstype=legacy"], - ["-addresstype=p2sh-segwit"], - ["-addresstype=p2sh-segwit", "-changetype=bech32"], - ["-addresstype=bech32"], - ["-changetype=p2sh-segwit"], - [] - ] - - def setup_network(self): - self.setup_nodes() - - # Fully mesh-connect nodes for faster mempool sync - for i, j in itertools.product(range(self.num_nodes), repeat=2): - if i > j: - connect_nodes_bi(self.nodes, i, j) - self.sync_all() - - def get_balances(self, confirmed=True): - """Return a list of confirmed or unconfirmed balances.""" - if confirmed: - return [self.nodes[i].getbalance() for i in range(4)] - else: - return [self.nodes[i].getunconfirmedbalance() for i in range(4)] - - def test_address(self, node, address, multisig, typ): - """Run sanity checks on an address.""" - info = self.nodes[node].validateaddress(address) - assert(info['isvalid']) - if not multisig and typ == 'legacy': - # P2PKH - assert(not info['isscript']) - assert(not info['iswitness']) - assert('pubkey' in info) - elif not multisig and typ == 'p2sh-segwit': - # P2SH-P2WPKH - assert(info['isscript']) - assert(not info['iswitness']) - assert_equal(info['script'], 'witness_v0_keyhash') - assert('pubkey' in info) - elif not multisig and typ == 'bech32': - # P2WPKH - assert(not info['isscript']) - assert(info['iswitness']) - assert_equal(info['witness_version'], 0) - assert_equal(len(info['witness_program']), 40) - assert('pubkey' in info) - elif typ == 'legacy': - # P2SH-multisig - assert(info['isscript']) - assert_equal(info['script'],
'multisig') - assert(not info['iswitness']) - assert('pubkeys' in info) - elif typ == 'p2sh-segwit': - # P2SH-P2WSH-multisig - assert(info['isscript']) - assert_equal(info['script'], 'witness_v0_scripthash') - assert(not info['iswitness']) - assert(info['embedded']['isscript']) - assert_equal(info['embedded']['script'], 'multisig') - assert(info['embedded']['iswitness']) - assert_equal(info['embedded']['witness_version'], 0) - assert_equal(len(info['embedded']['witness_program']), 64) - assert('pubkeys' in info['embedded']) - elif typ == 'bech32': - # P2WSH-multisig - assert(info['isscript']) - assert_equal(info['script'], 'multisig') - assert(info['iswitness']) - assert_equal(info['witness_version'], 0) - assert_equal(len(info['witness_program']), 64) - assert('pubkeys' in info) - else: - # Unknown type - assert(False) - - def test_change_output_type(self, node_sender, destinations, expected_type): - txid = self.nodes[node_sender].sendmany(fromaccount="", amounts=dict.fromkeys(destinations, 0.001)) - raw_tx = self.nodes[node_sender].getrawtransaction(txid) - tx = self.nodes[node_sender].decoderawtransaction(raw_tx) - - # Make sure the transaction has change: - assert_equal(len(tx["vout"]), len(destinations) + 1) - - # Make sure the destinations are included, and remove them: - output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]] - change_addresses = [d for d in output_addresses if d not in destinations] - assert_equal(len(change_addresses), 1) - - self.log.debug("Check if change address " + change_addresses[0] + " is " + expected_type) - self.test_address(node_sender, change_addresses[0], multisig=False, typ=expected_type) - - def run_test(self): - # Mine 101 blocks on node5 to bring nodes out of IBD and make sure that - # no coinbases are maturing for the nodes-under-test during the test - self.nodes[5].generate(101) - sync_blocks(self.nodes) - - uncompressed_1 = "0496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858ee" - uncompressed_2 = "047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77" - compressed_1 = "0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52" - compressed_2 = "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073" - - # addmultisigaddress with at least 1 uncompressed key should return a legacy address. - for node in range(4): - self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, uncompressed_2])['address'], True, 'legacy') - self.test_address(node, self.nodes[node].addmultisigaddress(2, [compressed_1, uncompressed_2])['address'], True, 'legacy') - self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, compressed_2])['address'], True, 'legacy') - # addmultisigaddress with all compressed keys should return the appropriate address type (even when the keys are not ours). 
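- # (node0 runs with -addresstype=legacy, node1 and node2 with - # -addresstype=p2sh-segwit, and node3 with -addresstype=bech32, so the - # expected multisig type tracks each node's -addresstype.)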
- self.test_address(0, self.nodes[0].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'legacy') - self.test_address(1, self.nodes[1].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit') - self.test_address(2, self.nodes[2].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit') - self.test_address(3, self.nodes[3].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'bech32') - - for explicit_type, multisig, from_node in itertools.product([False, True], [False, True], range(4)): - address_type = None - if explicit_type and not multisig: - if from_node == 1: - address_type = 'bech32' - elif from_node == 0 or from_node == 3: - address_type = 'p2sh-segwit' - else: - address_type = 'legacy' - self.log.info("Sending from node {} ({}) with{} multisig using {}".format(from_node, self.extra_args[from_node], "" if multisig else "out", "default" if address_type is None else address_type)) - old_balances = self.get_balances() - self.log.debug("Old balances are {}".format(old_balances)) - to_send = (old_balances[from_node] / 101).quantize(Decimal("0.00000001")) - sends = {} - - self.log.debug("Prepare sends") - for n, to_node in enumerate(range(from_node, from_node + 4)): - to_node %= 4 - change = False - if not multisig: - if from_node == to_node: - # When sending non-multisig to self, use getrawchangeaddress - address = self.nodes[to_node].getrawchangeaddress(address_type=address_type) - change = True - else: - address = self.nodes[to_node].getnewaddress(address_type=address_type) - else: - addr1 = self.nodes[to_node].getnewaddress() - addr2 = self.nodes[to_node].getnewaddress() - address = self.nodes[to_node].addmultisigaddress(2, [addr1, addr2])['address'] - - # Do some sanity checking on the created address - if address_type is not None: - typ = address_type - elif to_node == 0: - typ = 'legacy' - elif to_node == 1 or (to_node == 2 and not change): - typ = 'p2sh-segwit' - else: - typ = 'bech32' - self.test_address(to_node, address, multisig, typ) - - # Output entry - sends[address] = to_send * 10 * (1 + n) - - self.log.debug("Sending: {}".format(sends)) - self.nodes[from_node].sendmany("", sends) - sync_mempools(self.nodes) - - unconf_balances = self.get_balances(False) - self.log.debug("Check unconfirmed balances: {}".format(unconf_balances)) - assert_equal(unconf_balances[from_node], 0) - for n, to_node in enumerate(range(from_node + 1, from_node + 4)): - to_node %= 4 - assert_equal(unconf_balances[to_node], to_send * 10 * (2 + n)) - - # node5 collects fee and block subsidy to keep accounting simple - self.nodes[5].generate(1) - sync_blocks(self.nodes) - - new_balances = self.get_balances() - self.log.debug("Check new balances: {}".format(new_balances)) - # We don't know what fee was set, so we can only check bounds on the balance of the sending node - assert_greater_than(new_balances[from_node], to_send * 10) - assert_greater_than(to_send * 11, new_balances[from_node]) - for n, to_node in enumerate(range(from_node + 1, from_node + 4)): - to_node %= 4 - assert_equal(new_balances[to_node], old_balances[to_node] + to_send * 10 * (2 + n)) - - # Get one p2sh/segwit address from node2 and two bech32 addresses from node3: - to_address_p2sh = self.nodes[2].getnewaddress() - to_address_bech32_1 = self.nodes[3].getnewaddress() - to_address_bech32_2 = self.nodes[3].getnewaddress() - - # Fund node 4: - self.nodes[5].sendtoaddress(self.nodes[4].getnewaddress(), Decimal("1")) - self.nodes[5].generate(1) - 
sync_blocks(self.nodes) - assert_equal(self.nodes[4].getbalance(), 1) - - self.log.info("Nodes with addresstype=legacy never use a P2WPKH change output") - self.test_change_output_type(0, [to_address_bech32_1], 'legacy') - - self.log.info("Nodes with addresstype=p2sh-segwit only use a P2WPKH change output if any destination address is bech32:") - self.test_change_output_type(1, [to_address_p2sh], 'p2sh-segwit') - self.test_change_output_type(1, [to_address_bech32_1], 'bech32') - self.test_change_output_type(1, [to_address_p2sh, to_address_bech32_1], 'bech32') - self.test_change_output_type(1, [to_address_bech32_1, to_address_bech32_2], 'bech32') - - self.log.info("Nodes with change_type=bech32 always use a P2WPKH change output:") - self.test_change_output_type(2, [to_address_bech32_1], 'bech32') - self.test_change_output_type(2, [to_address_p2sh], 'bech32') - - self.log.info("Nodes with addresstype=bech32 always use a P2WPKH change output (unless changetype is set otherwise):") - self.test_change_output_type(3, [to_address_bech32_1], 'bech32') - self.test_change_output_type(3, [to_address_p2sh], 'bech32') - - self.log.info('getrawchangeaddress defaults to addresstype if -changetype is not set and argument is absent') - self.test_address(3, self.nodes[3].getrawchangeaddress(), multisig=False, typ='bech32') - - self.log.info('getrawchangeaddress fails with invalid changetype argument') - assert_raises_rpc_error(-5, "Unknown address type 'bech23'", self.nodes[3].getrawchangeaddress, 'bech23') - - self.log.info("Nodes with changetype=p2sh-segwit never use a P2WPKH change output") - self.test_change_output_type(4, [to_address_bech32_1], 'p2sh-segwit') - self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit') - self.log.info("Except for getrawchangeaddress if specified:") - self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit') - self.test_address(4, self.nodes[4].getrawchangeaddress('bech32'), multisig=False, typ='bech32') - -if __name__ == '__main__': - AddressTypeTest().main() diff --git a/test/functional/bumpfee.py b/test/functional/bumpfee.py deleted file mode 100755 index 2cd4127854..0000000000 --- a/test/functional/bumpfee.py +++ /dev/null @@ -1,300 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the bumpfee RPC. - -Verifies that the bumpfee RPC creates replacement transactions successfully when -its preconditions are met, and returns appropriate errors in other cases. - -This module consists of around a dozen individual test cases implemented in the -top-level functions named as test_. The test functions -can be disabled or reordered if needed for debugging. If new test cases are -added in the future, they should try to follow the same convention and not -make assumptions about execution order. 
-""" - -from test_framework.blocktools import send_to_witness -from test_framework.test_framework import BitcoinTestFramework -from test_framework import blocktools -from test_framework.mininode import CTransaction -from test_framework.util import * - -import io - -# Sequence number that is BIP 125 opt-in and BIP 68-compliant -BIP125_SEQUENCE_NUMBER = 0xfffffffd - -WALLET_PASSPHRASE = "test" -WALLET_PASSPHRASE_TIMEOUT = 3600 - - -class BumpFeeTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.setup_clean_chain = True - self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)] - for i in range(self.num_nodes)] - - def run_test(self): - # Encrypt wallet for test_locked_wallet_fails test - self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE) - self.start_node(1) - self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT) - - connect_nodes_bi(self.nodes, 0, 1) - self.sync_all() - - peer_node, rbf_node = self.nodes - rbf_node_address = rbf_node.getnewaddress() - - # fund rbf node with 10 coins of 0.001 btc (100,000 satoshis) - self.log.info("Mining blocks...") - peer_node.generate(110) - self.sync_all() - for i in range(25): - peer_node.sendtoaddress(rbf_node_address, 0.001) - self.sync_all() - peer_node.generate(1) - self.sync_all() - assert_equal(rbf_node.getbalance(), Decimal("0.025")) - - self.log.info("Running tests") - dest_address = peer_node.getnewaddress() - test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address) - test_segwit_bumpfee_succeeds(rbf_node, dest_address) - test_nonrbf_bumpfee_fails(peer_node, dest_address) - test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address) - test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address) - test_small_output_fails(rbf_node, dest_address) - test_dust_to_fee(rbf_node, dest_address) - test_settxfee(rbf_node, dest_address) - test_rebumping(rbf_node, dest_address) - test_rebumping_not_replaceable(rbf_node, dest_address) - test_unconfirmed_not_spendable(rbf_node, rbf_node_address) - test_bumpfee_metadata(rbf_node, dest_address) - test_locked_wallet_fails(rbf_node, dest_address) - self.log.info("Success") - - -def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address): - rbfid = spend_one_input(rbf_node, dest_address) - rbftx = rbf_node.gettransaction(rbfid) - sync_mempools((rbf_node, peer_node)) - assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool() - bumped_tx = rbf_node.bumpfee(rbfid) - assert_equal(bumped_tx["errors"], []) - assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0 - # check that bumped_tx propagates, original tx was evicted and has a wallet conflict - sync_mempools((rbf_node, peer_node)) - assert bumped_tx["txid"] in rbf_node.getrawmempool() - assert bumped_tx["txid"] in peer_node.getrawmempool() - assert rbfid not in rbf_node.getrawmempool() - assert rbfid not in peer_node.getrawmempool() - oldwtx = rbf_node.gettransaction(rbfid) - assert len(oldwtx["walletconflicts"]) > 0 - # check wallet transaction replaces and replaced_by values - bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"]) - assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"]) - assert_equal(bumpedwtx["replaces_txid"], rbfid) - - -def test_segwit_bumpfee_succeeds(rbf_node, dest_address): - # Create a transaction with segwit output, then create an RBF transaction - # which spends it, and make sure bumpfee can be called on it. 
- - segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001")) - segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress()) - rbf_node.addwitnessaddress(segwit_out["address"]) - segwitid = send_to_witness( - use_p2wsh=False, - node=rbf_node, - utxo=segwit_in, - pubkey=segwit_out["pubkey"], - encode_p2sh=False, - amount=Decimal("0.0009"), - sign=True) - - rbfraw = rbf_node.createrawtransaction([{ - 'txid': segwitid, - 'vout': 0, - "sequence": BIP125_SEQUENCE_NUMBER - }], {dest_address: Decimal("0.0005"), - rbf_node.getrawchangeaddress(): Decimal("0.0003")}) - rbfsigned = rbf_node.signrawtransaction(rbfraw) - rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"]) - assert rbfid in rbf_node.getrawmempool() - - bumped_tx = rbf_node.bumpfee(rbfid) - assert bumped_tx["txid"] in rbf_node.getrawmempool() - assert rbfid not in rbf_node.getrawmempool() - - -def test_nonrbf_bumpfee_fails(peer_node, dest_address): - # cannot replace a non RBF transaction (from node which did not enable RBF) - not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000")) - assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid) - - -def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address): - # cannot bump fee unless the tx has only inputs that we own. - # here, the rbftx has a peer_node coin and then adds a rbf_node input - # Note that this test depends upon the RPC code checking input ownership prior to change outputs - # (since it can't use fundrawtransaction, it lacks a proper change output) - utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)] - inputs = [{ - "txid": utxo["txid"], - "vout": utxo["vout"], - "address": utxo["address"], - "sequence": BIP125_SEQUENCE_NUMBER - } for utxo in utxos] - output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001") - rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val}) - signedtx = rbf_node.signrawtransaction(rawtx) - signedtx = peer_node.signrawtransaction(signedtx["hex"]) - rbfid = rbf_node.sendrawtransaction(signedtx["hex"]) - assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet", - rbf_node.bumpfee, rbfid) - - -def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address): - # cannot bump fee if the transaction has a descendant - # parent is send-to-self, so we don't have to check which output is change when creating the child tx - parent_id = spend_one_input(rbf_node, rbf_node_address) - tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000}) - tx = rbf_node.signrawtransaction(tx) - rbf_node.sendrawtransaction(tx["hex"]) - assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id) - - -def test_small_output_fails(rbf_node, dest_address): - # cannot bump fee with a too-small output - rbfid = spend_one_input(rbf_node, dest_address) - rbf_node.bumpfee(rbfid, {"totalFee": 50000}) - - rbfid = spend_one_input(rbf_node, dest_address) - assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001}) - - -def test_dust_to_fee(rbf_node, dest_address): - # check that if output is reduced to dust, it will be converted to fee - # the bumped tx sets fee=49,900, but it converts to 50,000 - rbfid = spend_one_input(rbf_node, dest_address) - fulltx = rbf_node.getrawtransaction(rbfid, 1) - bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900}) - full_bumped_tx = 
rbf_node.getrawtransaction(bumped_tx["txid"], 1) - assert_equal(bumped_tx["fee"], Decimal("0.00050000")) - assert_equal(len(fulltx["vout"]), 2) - assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated - - -def test_settxfee(rbf_node, dest_address): - # check that bumpfee reacts correctly to the use of settxfee (paytxfee) - rbfid = spend_one_input(rbf_node, dest_address) - requested_feerate = Decimal("0.00025000") - rbf_node.settxfee(requested_feerate) - bumped_tx = rbf_node.bumpfee(rbfid) - actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"] - # Assert that the difference between the requested feerate and the actual - # feerate of the bumped transaction is small. - assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate)) - rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee - - -def test_rebumping(rbf_node, dest_address): - # check that re-bumping the original tx fails, but bumping the bumper succeeds - rbfid = spend_one_input(rbf_node, dest_address) - bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000}) - assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000}) - rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000}) - - -def test_rebumping_not_replaceable(rbf_node, dest_address): - # check that re-bumping a non-replaceable bump tx fails - rbfid = spend_one_input(rbf_node, dest_address) - bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False}) - assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"], - {"totalFee": 20000}) - - -def test_unconfirmed_not_spendable(rbf_node, rbf_node_address): - # check that unconfirmed outputs from bumped transactions are not spendable - rbfid = spend_one_input(rbf_node, rbf_node_address) - rbftx = rbf_node.gettransaction(rbfid)["hex"] - assert rbfid in rbf_node.getrawmempool() - bumpid = rbf_node.bumpfee(rbfid)["txid"] - assert bumpid in rbf_node.getrawmempool() - assert rbfid not in rbf_node.getrawmempool() - - # check that outputs from the bump transaction are not spendable - # due to the replaces_txid check in CWallet::AvailableCoins - assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], []) - - # submit a block with the rbf tx to clear the bump tx out of the mempool, - # then call abandon to make sure the wallet doesn't attempt to resubmit the - # bump tx, then invalidate the block so the rbf tx will be put back in the - # mempool. this makes it possible to check whether the rbf tx outputs are - # spendable before the rbf tx is confirmed. 
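    # (Editor's sketch, not in the original file: the mempool contents the
    # following steps are expected to produce, given the setup above.)
    #   start:                       {bumpid}   (bump tx replaced the rbf tx)
    #   after submit_block_with_tx:  {}         (rbf tx confirmed; bump tx conflicted out)
    #   after abandontransaction:    {}         (wallet will not resubmit the bump tx)
    #   after invalidateblock:       {rbfid}    (rbf tx returns to the mempool)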
- block = submit_block_with_tx(rbf_node, rbftx) - rbf_node.abandontransaction(bumpid) - rbf_node.invalidateblock(block.hash) - assert bumpid not in rbf_node.getrawmempool() - assert rbfid in rbf_node.getrawmempool() - - # check that outputs from the rbf tx are not spendable before the - # transaction is confirmed, due to the replaced_by_txid check in - # CWallet::AvailableCoins - assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], []) - - # check that the main output from the rbf tx is spendable after confirmed - rbf_node.generate(1) - assert_equal( - sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False) - if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1) - - -def test_bumpfee_metadata(rbf_node, dest_address): - rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value") - bumped_tx = rbf_node.bumpfee(rbfid) - bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"]) - assert_equal(bumped_wtx["comment"], "comment value") - assert_equal(bumped_wtx["to"], "to value") - - -def test_locked_wallet_fails(rbf_node, dest_address): - rbfid = spend_one_input(rbf_node, dest_address) - rbf_node.walletlock() - assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.", - rbf_node.bumpfee, rbfid) - - -def spend_one_input(node, dest_address): - tx_input = dict( - sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000"))) - rawtx = node.createrawtransaction( - [tx_input], {dest_address: Decimal("0.00050000"), - node.getrawchangeaddress(): Decimal("0.00049000")}) - signedtx = node.signrawtransaction(rawtx) - txid = node.sendrawtransaction(signedtx["hex"]) - return txid - - -def submit_block_with_tx(node, tx): - ctx = CTransaction() - ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx))) - - tip = node.getbestblockhash() - height = node.getblockcount() + 1 - block_time = node.getblockheader(tip)["mediantime"] + 1 - block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time) - block.vtx.append(ctx) - block.rehash() - block.hashMerkleRoot = block.calc_merkle_root() - blocktools.add_witness_commitment(block) - block.solve() - node.submitblock(bytes_to_hex_str(block.serialize(True))) - return block - - -if __name__ == "__main__": - BumpFeeTest().main() diff --git a/test/functional/disablewallet.py b/test/functional/disablewallet.py deleted file mode 100755 index b0627d88ac..0000000000 --- a/test/functional/disablewallet.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test a node with the -disablewallet option. - -- Test that validateaddress RPC works when running with -disablewallet -- Test that it is not possible to mine to an invalid address. 
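(A note on the hardcoded addresses used below: '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy' is a mainnet Base58 P2SH address, which is why it is rejected on regtest, while 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ' carries the testnet/regtest prefix and validates successfully.)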
-""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class DisableWalletTest (BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [["-disablewallet"]] - - def run_test (self): - # Make sure wallet is really disabled - assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo) - x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') - assert(x['isvalid'] == False) - x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') - assert(x['isvalid'] == True) - - # Checking mining to an address without a wallet. Generating to a valid address should succeed - # but generating to an invalid address will fail. - self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') - assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') - -if __name__ == '__main__': - DisableWalletTest ().main () diff --git a/test/functional/import-rescan.py b/test/functional/import-rescan.py deleted file mode 100755 index d193a99d5b..0000000000 --- a/test/functional/import-rescan.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test wallet import RPCs. - -Test rescan behavior of importaddress, importpubkey, importprivkey, and -importmulti RPCs with different types of keys and rescan options. - -In the first part of the test, node 0 creates an address for each type of -import RPC call and sends BTC to it. Then other nodes import the addresses, -and the test makes listtransactions and getbalance calls to confirm that the -importing node either did or did not execute rescans picking up the send -transactions. - -In the second part of the test, node 0 sends more BTC to each address, and the -test makes more listtransactions and getbalance calls to confirm that the -importing nodes pick up the new transactions regardless of whether rescans -happened previously. 
-""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times) - -import collections -import enum -import itertools - -Call = enum.Enum("Call", "single multi") -Data = enum.Enum("Data", "address pub priv") -Rescan = enum.Enum("Rescan", "no yes late_timestamp") - - -class Variant(collections.namedtuple("Variant", "call data rescan prune")): - """Helper for importing one key and verifying scanned transactions.""" - - def try_rpc(self, func, *args, **kwargs): - if self.expect_disabled: - assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs) - else: - return func(*args, **kwargs) - - def do_import(self, timestamp): - """Call one key import RPC.""" - - if self.call == Call.single: - if self.data == Data.address: - response = self.try_rpc(self.node.importaddress, self.address["address"], self.label, - self.rescan == Rescan.yes) - elif self.data == Data.pub: - response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label, - self.rescan == Rescan.yes) - elif self.data == Data.priv: - response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes) - assert_equal(response, None) - - elif self.call == Call.multi: - response = self.node.importmulti([{ - "scriptPubKey": { - "address": self.address["address"] - }, - "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0), - "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [], - "keys": [self.key] if self.data == Data.priv else [], - "label": self.label, - "watchonly": self.data != Data.priv - }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)}) - assert_equal(response, [{"success": True}]) - - def check(self, txid=None, amount=None, confirmations=None): - """Verify that getbalance/listtransactions return expected values.""" - - balance = self.node.getbalance(self.label, 0, True) - assert_equal(balance, self.expected_balance) - - txs = self.node.listtransactions(self.label, 10000, 0, True) - assert_equal(len(txs), self.expected_txs) - - if txid is not None: - tx, = [tx for tx in txs if tx["txid"] == txid] - assert_equal(tx["account"], self.label) - assert_equal(tx["address"], self.address["address"]) - assert_equal(tx["amount"], amount) - assert_equal(tx["category"], "receive") - assert_equal(tx["label"], self.label) - assert_equal(tx["txid"], txid) - assert_equal(tx["confirmations"], confirmations) - assert_equal("trusted" not in tx, True) - # Verify the transaction is correctly marked watchonly depending on - # whether the transaction pays to an imported public key or - # imported private key. The test setup ensures that transaction - # inputs will not be from watchonly keys (important because - # involvesWatchonly will be true if either the transaction output - # or inputs are watchonly). - if self.data != Data.priv: - assert_equal(tx["involvesWatchonly"], True) - else: - assert_equal("involvesWatchonly" not in tx, True) - - -# List of Variants for each way a key or address could be imported. -IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))] - -# List of nodes to import keys to. Half the nodes will have pruning disabled, -# half will have it enabled. 
Different nodes will be used for imports that are -# expected to cause rescans, and imports that are not expected to cause -# rescans, in order to prevent rescans during later imports picking up -# transactions associated with earlier imports. This makes it easier to keep -# track of expected balances and transactions. -ImportNode = collections.namedtuple("ImportNode", "prune rescan") -IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)] - -# Rescans start at the earliest block up to 2 hours before the key timestamp. -TIMESTAMP_WINDOW = 2 * 60 * 60 - - -class ImportRescanTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 + len(IMPORT_NODES) - - def setup_network(self): - extra_args = [["-addresstype=legacy"] for _ in range(self.num_nodes)] - for i, import_node in enumerate(IMPORT_NODES, 2): - if import_node.prune: - extra_args[i] += ["-prune=1"] - - self.add_nodes(self.num_nodes, extra_args) - self.start_nodes() - for i in range(1, self.num_nodes): - connect_nodes(self.nodes[i], 0) - - def run_test(self): - # Create one transaction on node 0 with a unique amount and label for - # each possible type of wallet import RPC. - for i, variant in enumerate(IMPORT_VARIANTS): - variant.label = "label {} {}".format(i, variant) - variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label)) - variant.key = self.nodes[1].dumpprivkey(variant.address["address"]) - variant.initial_amount = 10 - (i + 1) / 4.0 - variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount) - - # Generate a block containing the initial transactions, then another - # block further in the future (past the rescan window). - self.nodes[0].generate(1) - assert_equal(self.nodes[0].getrawmempool(), []) - timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] - set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1) - self.nodes[0].generate(1) - sync_blocks(self.nodes) - - # For each variation of wallet key import, invoke the import RPC and - # check the results from getbalance and listtransactions. - for variant in IMPORT_VARIANTS: - variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single - expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled - variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))] - variant.do_import(timestamp) - if expect_rescan: - variant.expected_balance = variant.initial_amount - variant.expected_txs = 1 - variant.check(variant.initial_txid, variant.initial_amount, 2) - else: - variant.expected_balance = 0 - variant.expected_txs = 0 - variant.check() - - # Create new transactions sending to each address. - for i, variant in enumerate(IMPORT_VARIANTS): - variant.sent_amount = 10 - (2 * i + 1) / 8.0 - variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount) - - # Generate a block containing the new transactions. - self.nodes[0].generate(1) - assert_equal(self.nodes[0].getrawmempool(), []) - sync_blocks(self.nodes) - - # Check the latest results from getbalance and listtransactions. 
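    # (Editor's note, not in the original file: concretely, a variant whose
    # first import ran a rescan arrives here expecting
    #   expected_balance == initial_amount and expected_txs == 1,
    # and the loop below bumps those to initial_amount + sent_amount and 2;
    # a no-rescan variant goes from 0/0 to sent_amount/1, and a variant whose
    # import was disabled by pruning stays at 0/0.)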
- for variant in IMPORT_VARIANTS: - if not variant.expect_disabled: - variant.expected_balance += variant.sent_amount - variant.expected_txs += 1 - variant.check(variant.sent_txid, variant.sent_amount, 1) - else: - variant.check() - -if __name__ == "__main__": - ImportRescanTest().main() diff --git a/test/functional/importmulti.py b/test/functional/importmulti.py deleted file mode 100755 index be9be83839..0000000000 --- a/test/functional/importmulti.py +++ /dev/null @@ -1,451 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the importmulti RPC.""" -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class ImportMultiTest (BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]] - self.setup_clean_chain = True - - def setup_network(self): - self.setup_nodes() - - def run_test (self): - self.log.info("Mining blocks...") - self.nodes[0].generate(1) - self.nodes[1].generate(1) - timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] - - node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - - #Check only one address - assert_equal(node0_address1['ismine'], True) - - #Node 1 sync test - assert_equal(self.nodes[1].getblockcount(),1) - - #Address Test - before import - address_info = self.nodes[1].validateaddress(node0_address1['address']) - assert_equal(address_info['iswatchonly'], False) - assert_equal(address_info['ismine'], False) - - - # RPC importmulti ----------------------------------------------- - - # Bitcoin Address - self.log.info("Should import an address") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - watchonly_address = address['address'] - watchonly_timestamp = timestamp - - self.log.info("Should not import an invalid address") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": "not valid address", - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Invalid address') - - # ScriptPubKey + internal - self.log.info("Should import a scriptPubKey with internal flag") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "internal": True - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - - # ScriptPubKey + !internal - self.log.info("Should not import a scriptPubKey without internal flag") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": 
address['scriptPubKey'], - "timestamp": "now", - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - - # Address + Public key + !Internal - self.log.info("Should import an address with public key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "pubkeys": [ address['pubkey'] ] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - - - # ScriptPubKey + Public key + internal - self.log.info("Should import a scriptPubKey with internal and with public key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - request = [{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "pubkeys": [ address['pubkey'] ], - "internal": True - }] - result = self.nodes[1].importmulti(request) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - - # ScriptPubKey + Public key + !internal - self.log.info("Should not import a scriptPubKey without internal and with public key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - request = [{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "pubkeys": [ address['pubkey'] ] - }] - result = self.nodes[1].importmulti(request) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - # Address + Private key + !watchonly - self.log.info("Should import an address with private key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], True) - assert_equal(address_assert['timestamp'], timestamp) - - self.log.info("Should not import an address with private key if is already imported") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -4) - assert_equal(result[0]['error']['message'], 'The wallet already 
contains the private key for this address or script') - - # Address + Private key + watchonly - self.log.info("Should not import an address with private key and with watchonly") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ], - "watchonly": True - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - # ScriptPubKey + Private key + internal - self.log.info("Should import a scriptPubKey with internal and with private key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ], - "internal": True - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], True) - assert_equal(address_assert['timestamp'], timestamp) - - # ScriptPubKey + Private key + !internal - self.log.info("Should not import a scriptPubKey without internal and with private key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address['address']) ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - - # P2SH address - sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) - self.nodes[1].generate(100) - transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) - self.nodes[1].generate(1) - timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] - - self.log.info("Should import a p2sh") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) - assert_equal(address_assert['isscript'], True) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['timestamp'], timestamp) - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] - assert_equal(p2shunspent['spendable'], False) - assert_equal(p2shunspent['solvable'], 
False) - - - # P2SH + Redeem script - sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) - self.nodes[1].generate(100) - transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) - self.nodes[1].generate(1) - timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] - - self.log.info("Should import a p2sh with respective redeem script") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "redeemscript": multi_sig_script['redeemScript'] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) - assert_equal(address_assert['timestamp'], timestamp) - - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] - assert_equal(p2shunspent['spendable'], False) - assert_equal(p2shunspent['solvable'], True) - - - # P2SH + Redeem script + Private Keys + !Watchonly - sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) - self.nodes[1].generate(100) - transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) - self.nodes[1].generate(1) - timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] - - self.log.info("Should import a p2sh with respective redeem script and private keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "redeemscript": multi_sig_script['redeemScript'], - "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])] - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) - assert_equal(address_assert['timestamp'], timestamp) - - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] - assert_equal(p2shunspent['spendable'], False) - assert_equal(p2shunspent['solvable'], True) - - # P2SH + Redeem script + Private Keys + Watchonly - sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) - self.nodes[1].generate(100) - transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) - self.nodes[1].generate(1) - timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] - - self.log.info("Should import a p2sh with respective redeem script and private keys") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": multi_sig_script['address'] - }, - "timestamp": "now", - "redeemscript": 
multi_sig_script['redeemScript'], - "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])], - "watchonly": True - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -8) - assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') - - - # Address + Public key + !Internal + Wrong pubkey - self.log.info("Should not import an address with a wrong public key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "pubkeys": [ address2['pubkey'] ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Consistency check failed') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - - # ScriptPubKey + Public key + internal + Wrong pubkey - self.log.info("Should not import a scriptPubKey with internal and with a wrong public key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - request = [{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "pubkeys": [ address2['pubkey'] ], - "internal": True - }] - result = self.nodes[1].importmulti(request) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Consistency check failed') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - - # Address + Private key + !watchonly + Wrong private key - self.log.info("Should not import an address with a wrong private key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": address['address'] - }, - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address2['address']) ] - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - assert_equal(result[0]['error']['message'], 'Consistency check failed') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - - # ScriptPubKey + Private key + internal + Wrong private key - self.log.info("Should not import a scriptPubKey with internal and with a wrong private key") - address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) - result = self.nodes[1].importmulti([{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "now", - "keys": [ self.nodes[0].dumpprivkey(address2['address']) ], - "internal": True - }]) - assert_equal(result[0]['success'], False) - assert_equal(result[0]['error']['code'], -5) - 
assert_equal(result[0]['error']['message'], 'Consistency check failed') - address_assert = self.nodes[1].validateaddress(address['address']) - assert_equal(address_assert['iswatchonly'], False) - assert_equal(address_assert['ismine'], False) - assert_equal('timestamp' in address_assert, False) - - - # Importing existing watch only address with new timestamp should replace saved timestamp. - assert_greater_than(timestamp, watchonly_timestamp) - self.log.info("Should replace previously saved watch only timestamp.") - result = self.nodes[1].importmulti([{ - "scriptPubKey": { - "address": watchonly_address, - }, - "timestamp": "now", - }]) - assert_equal(result[0]['success'], True) - address_assert = self.nodes[1].validateaddress(watchonly_address) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], timestamp) - watchonly_timestamp = timestamp - - - # restart nodes to check for proper serialization/deserialization of watch only address - self.stop_nodes() - self.start_nodes() - address_assert = self.nodes[1].validateaddress(watchonly_address) - assert_equal(address_assert['iswatchonly'], True) - assert_equal(address_assert['ismine'], False) - assert_equal(address_assert['timestamp'], watchonly_timestamp) - - # Bad or missing timestamps - self.log.info("Should throw on invalid or missing timestamp values") - assert_raises_rpc_error(-3, 'Missing required timestamp field for key', - self.nodes[1].importmulti, [{ - "scriptPubKey": address['scriptPubKey'], - }]) - assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string', - self.nodes[1].importmulti, [{ - "scriptPubKey": address['scriptPubKey'], - "timestamp": "", - }]) - - -if __name__ == '__main__': - ImportMultiTest ().main () diff --git a/test/functional/importprunedfunds.py b/test/functional/importprunedfunds.py deleted file mode 100755 index 6b2919b5ae..0000000000 --- a/test/functional/importprunedfunds.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
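Before the next file: the importprunedfunds flow it tests can be summarized in a few lines. A hedged sketch (editor's addition; the helper name is hypothetical, but the RPC calls and signatures match the test body that follows):

    def import_pruned_roundtrip(node0, node1, address, txid):
        # Illustrative only: import one confirmed transaction into node1
        # using just the raw transaction and its merkle proof.
        rawtx = node0.gettransaction(txid)['hex']     # serialized transaction
        proof = node0.gettxoutproof([txid])           # merkle proof of inclusion
        node1.importaddress(address, "watch", False)  # watch-only, no rescan
        node1.importprunedfunds(rawtx, proof)         # credit funds from the proof
        return node1.getbalance("*", 0, True)         # include watch-only funds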
-"""Test the importprunedfunds and removeprunedfunds RPCs.""" -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class ImportPrunedFundsTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - - def run_test(self): - self.log.info("Mining blocks...") - self.nodes[0].generate(101) - - self.sync_all() - - # address - address1 = self.nodes[0].getnewaddress() - # pubkey - address2 = self.nodes[0].getnewaddress() - # privkey - address3 = self.nodes[0].getnewaddress() - address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey - - #Check only one address - address_info = self.nodes[0].validateaddress(address1) - assert_equal(address_info['ismine'], True) - - self.sync_all() - - #Node 1 sync test - assert_equal(self.nodes[1].getblockcount(),101) - - #Address Test - before import - address_info = self.nodes[1].validateaddress(address1) - assert_equal(address_info['iswatchonly'], False) - assert_equal(address_info['ismine'], False) - - address_info = self.nodes[1].validateaddress(address2) - assert_equal(address_info['iswatchonly'], False) - assert_equal(address_info['ismine'], False) - - address_info = self.nodes[1].validateaddress(address3) - assert_equal(address_info['iswatchonly'], False) - assert_equal(address_info['ismine'], False) - - #Send funds to self - txnid1 = self.nodes[0].sendtoaddress(address1, 0.1) - self.nodes[0].generate(1) - rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex'] - proof1 = self.nodes[0].gettxoutproof([txnid1]) - - txnid2 = self.nodes[0].sendtoaddress(address2, 0.05) - self.nodes[0].generate(1) - rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex'] - proof2 = self.nodes[0].gettxoutproof([txnid2]) - - txnid3 = self.nodes[0].sendtoaddress(address3, 0.025) - self.nodes[0].generate(1) - rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex'] - proof3 = self.nodes[0].gettxoutproof([txnid3]) - - self.sync_all() - - #Import with no affiliated address - assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1) - - balance1 = self.nodes[1].getbalance("", 0, True) - assert_equal(balance1, Decimal(0)) - - #Import with affiliated address with no rescan - self.nodes[1].importaddress(address2, "add2", False) - self.nodes[1].importprunedfunds(rawtxn2, proof2) - balance2 = self.nodes[1].getbalance("add2", 0, True) - assert_equal(balance2, Decimal('0.05')) - - #Import with private key with no rescan - self.nodes[1].importprivkey(privkey=address3_privkey, label="add3", rescan=False) - self.nodes[1].importprunedfunds(rawtxn3, proof3) - balance3 = self.nodes[1].getbalance("add3", 0, False) - assert_equal(balance3, Decimal('0.025')) - balance3 = self.nodes[1].getbalance("*", 0, True) - assert_equal(balance3, Decimal('0.075')) - - #Addresses Test - after import - address_info = self.nodes[1].validateaddress(address1) - assert_equal(address_info['iswatchonly'], False) - assert_equal(address_info['ismine'], False) - address_info = self.nodes[1].validateaddress(address2) - assert_equal(address_info['iswatchonly'], True) - assert_equal(address_info['ismine'], False) - address_info = self.nodes[1].validateaddress(address3) - assert_equal(address_info['iswatchonly'], False) - assert_equal(address_info['ismine'], True) - - #Remove transactions - assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1) - - balance1 = self.nodes[1].getbalance("*", 0, True) - assert_equal(balance1, 
Decimal('0.075')) - - self.nodes[1].removeprunedfunds(txnid2) - balance2 = self.nodes[1].getbalance("*", 0, True) - assert_equal(balance2, Decimal('0.025')) - - self.nodes[1].removeprunedfunds(txnid3) - balance3 = self.nodes[1].getbalance("*", 0, True) - assert_equal(balance3, Decimal('0.0')) - -if __name__ == '__main__': - ImportPrunedFundsTest().main() diff --git a/test/functional/keypool-topup.py b/test/functional/keypool-topup.py deleted file mode 100755 index e7af3c3987..0000000000 --- a/test/functional/keypool-topup.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test HD Wallet keypool restore function. - -Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks. - -- Start node1, shutdown and backup wallet. -- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110. -- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1. -- connect node1 to node0. Verify that they sync and node1 receives its funds.""" -import shutil - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - connect_nodes_bi, - sync_blocks, -) - -class KeypoolRestoreTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - self.extra_args = [[], ['-keypool=100', '-keypoolmin=20']] - - def run_test(self): - self.tmpdir = self.options.tmpdir - self.nodes[0].generate(101) - - self.log.info("Make backup of wallet") - - self.stop_node(1) - - shutil.copyfile(self.tmpdir + "/node1/regtest/wallets/wallet.dat", self.tmpdir + "/wallet.bak") - self.start_node(1, self.extra_args[1]) - connect_nodes_bi(self.nodes, 0, 1) - - self.log.info("Generate keys for wallet") - - for _ in range(90): - addr_oldpool = self.nodes[1].getnewaddress() - for _ in range(20): - addr_extpool = self.nodes[1].getnewaddress() - - self.log.info("Send funds to wallet") - - self.nodes[0].sendtoaddress(addr_oldpool, 10) - self.nodes[0].generate(1) - self.nodes[0].sendtoaddress(addr_extpool, 5) - self.nodes[0].generate(1) - sync_blocks(self.nodes) - - self.log.info("Restart node with wallet backup") - - self.stop_node(1) - - shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallets/wallet.dat") - - self.log.info("Verify keypool is restored and balance is correct") - - self.start_node(1, self.extra_args[1]) - connect_nodes_bi(self.nodes, 0, 1) - self.sync_all() - - assert_equal(self.nodes[1].getbalance(), 15) - assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive") - - # Check that we have marked all keys up to the used keypool key as used - assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/110'") - -if __name__ == '__main__': - KeypoolRestoreTest().main() diff --git a/test/functional/keypool.py b/test/functional/keypool.py deleted file mode 100755 index 45a5eed8ec..0000000000 --- a/test/functional/keypool.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
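To spell out the arithmetic behind the final hdkeypath assertion in the keypool restore test above: the wallet hands out 90 + 20 = 110 external keys, occupying derivation indices 0 through 109 on the m/0'/0'/i' path, so once the restored wallet has topped up past the last used key, the next fresh address must come from index 110, hence hdkeypath m/0'/0'/110'.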
-"""Test the wallet keypool and interaction with wallet encryption/locking.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class KeyPoolTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - - def run_test(self): - nodes = self.nodes - addr_before_encrypting = nodes[0].getnewaddress() - addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting) - wallet_info_old = nodes[0].getwalletinfo() - assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid']) - - # Encrypt wallet and wait to terminate - nodes[0].node_encrypt_wallet('test') - # Restart node 0 - self.start_node(0) - # Keep creating keys - addr = nodes[0].getnewaddress() - addr_data = nodes[0].validateaddress(addr) - wallet_info = nodes[0].getwalletinfo() - assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid']) - assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid']) - assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) - - # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min) - nodes[0].walletpassphrase('test', 12000) - nodes[0].keypoolrefill(6) - nodes[0].walletlock() - wi = nodes[0].getwalletinfo() - assert_equal(wi['keypoolsize_hd_internal'], 6) - assert_equal(wi['keypoolsize'], 6) - - # drain the internal keys - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - addr = set() - # the next one should fail - assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress) - - # drain the external keys - addr.add(nodes[0].getnewaddress()) - addr.add(nodes[0].getnewaddress()) - addr.add(nodes[0].getnewaddress()) - addr.add(nodes[0].getnewaddress()) - addr.add(nodes[0].getnewaddress()) - addr.add(nodes[0].getnewaddress()) - assert(len(addr) == 6) - # the next one should fail - assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) - - # refill keypool with three new addresses - nodes[0].walletpassphrase('test', 1) - nodes[0].keypoolrefill(3) - - # test walletpassphrase timeout - time.sleep(1.1) - assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0) - - # drain them by mining - nodes[0].generate(1) - nodes[0].generate(1) - nodes[0].generate(1) - assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1) - - nodes[0].walletpassphrase('test', 100) - nodes[0].keypoolrefill(100) - wi = nodes[0].getwalletinfo() - assert_equal(wi['keypoolsize_hd_internal'], 100) - assert_equal(wi['keypoolsize'], 100) - -if __name__ == '__main__': - KeyPoolTest().main() diff --git a/test/functional/listsinceblock.py b/test/functional/listsinceblock.py deleted file mode 100755 index 67e7744bf8..0000000000 --- a/test/functional/listsinceblock.py +++ /dev/null @@ -1,280 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the listsincelast RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error - -class ListSinceBlockTest (BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = True - - def run_test(self): - self.nodes[2].generate(101) - self.sync_all() - - self.test_no_blockhash() - self.test_invalid_blockhash() - self.test_reorg() - self.test_double_spend() - self.test_double_send() - - def test_no_blockhash(self): - txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) - blockhash, = self.nodes[2].generate(1) - self.sync_all() - - txs = self.nodes[0].listtransactions() - assert_array_result(txs, {"txid": txid}, { - "category": "receive", - "amount": 1, - "blockhash": blockhash, - "confirmations": 1, - }) - assert_equal( - self.nodes[0].listsinceblock(), - {"lastblock": blockhash, - "removed": [], - "transactions": txs}) - assert_equal( - self.nodes[0].listsinceblock(""), - {"lastblock": blockhash, - "removed": [], - "transactions": txs}) - - def test_invalid_blockhash(self): - assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, - "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") - assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, - "0000000000000000000000000000000000000000000000000000000000000000") - assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, - "invalid-hex") - - def test_reorg(self): - ''' - `listsinceblock` did not behave correctly when handed a block that was - no longer in the main chain: - - ab0 - / \ - aa1 [tx0] bb1 - | | - aa2 bb2 - | | - aa3 bb3 - | - bb4 - - Consider a client that has only seen block `aa3` above. It asks the node - to `listsinceblock aa3`. But at some point prior the main chain switched - to the bb chain. - - Previously: listsinceblock would find height=4 for block aa3 and compare - this to height=5 for the tip of the chain (bb4). It would then return - results restricted to bb3-bb4. - - Now: listsinceblock finds the fork at ab0 and returns results in the - range bb1-bb4. - - This test only checks that [tx0] is present. - ''' - - # Split network into two - self.split_network() - - # send to nodes[0] from nodes[2] - senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) - - # generate on both sides - lastblockhash = self.nodes[1].generate(6)[5] - self.nodes[2].generate(7) - self.log.info('lastblockhash=%s' % (lastblockhash)) - - self.sync_all([self.nodes[:2], self.nodes[2:]]) - - self.join_network() - - # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0] - lsbres = self.nodes[0].listsinceblock(lastblockhash) - found = False - for tx in lsbres['transactions']: - if tx['txid'] == senttx: - found = True - break - assert found - - def test_double_spend(self): - ''' - This tests the case where the same UTXO is spent twice on two separate - blocks as part of a reorg. - - ab0 - / \ - aa1 [tx1] bb1 [tx2] - | | - aa2 bb2 - | | - aa3 bb3 - | - bb4 - - Problematic case: - - 1. User 1 receives BTC in tx1 from utxo1 in block aa1. - 2. User 2 receives BTC in tx2 from utxo1 (same) in block bb1 - 3. User 1 sees 2 confirmations at block aa3. - 4. Reorg into bb chain. - 5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now - invalidated. 
- - Currently the solution to this is to detect that a reorg'd block is - asked for in listsinceblock, and to iterate back over existing blocks up - until the fork point, and to include all transactions that relate to the - node wallet. - ''' - - self.sync_all() - - # Split network into two - self.split_network() - - # share utxo between nodes[1] and nodes[2] - utxos = self.nodes[2].listunspent() - utxo = utxos[0] - privkey = self.nodes[2].dumpprivkey(utxo['address']) - self.nodes[1].importprivkey(privkey) - - # send from nodes[1] using utxo to nodes[0] - change = '%.8f' % (float(utxo['amount']) - 1.0003) - recipientDict = { - self.nodes[0].getnewaddress(): 1, - self.nodes[1].getnewaddress(): change, - } - utxoDicts = [{ - 'txid': utxo['txid'], - 'vout': utxo['vout'], - }] - txid1 = self.nodes[1].sendrawtransaction( - self.nodes[1].signrawtransaction( - self.nodes[1].createrawtransaction(utxoDicts, recipientDict))['hex']) - - # send from nodes[2] using utxo to nodes[3] - recipientDict2 = { - self.nodes[3].getnewaddress(): 1, - self.nodes[2].getnewaddress(): change, - } - self.nodes[2].sendrawtransaction( - self.nodes[2].signrawtransaction( - self.nodes[2].createrawtransaction(utxoDicts, recipientDict2))['hex']) - - # generate on both sides - lastblockhash = self.nodes[1].generate(3)[2] - self.nodes[2].generate(4) - - self.join_network() - - self.sync_all() - - # gettransaction should work for txid1 - assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1" - - # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0] - lsbres = self.nodes[0].listsinceblock(lastblockhash) - assert any(tx['txid'] == txid1 for tx in lsbres['removed']) - - # but it should not include 'removed' if include_removed=false - lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False) - assert 'removed' not in lsbres2 - - def test_double_send(self): - ''' - This tests the case where the same transaction is submitted twice on two - separate blocks as part of a reorg. The former will vanish and the - latter will appear as the true transaction (with confirmations dropping - as a result). - - ab0 - / \ - aa1 [tx1] bb1 - | | - aa2 bb2 - | | - aa3 bb3 [tx1] - | - bb4 - - Asserted: - - 1. tx1 is listed in listsinceblock. - 2. It is included in 'removed' as it was removed, even though it is now - present in a different block. - 3. It is listed with a confirmations count of 2 (bb3, bb4), not - 3 (aa1, aa2, aa3). 
- ''' - - self.sync_all() - - # Split network into two - self.split_network() - - # create and sign a transaction - utxos = self.nodes[2].listunspent() - utxo = utxos[0] - change = '%.8f' % (float(utxo['amount']) - 1.0003) - recipientDict = { - self.nodes[0].getnewaddress(): 1, - self.nodes[2].getnewaddress(): change, - } - utxoDicts = [{ - 'txid': utxo['txid'], - 'vout': utxo['vout'], - }] - signedtxres = self.nodes[2].signrawtransaction( - self.nodes[2].createrawtransaction(utxoDicts, recipientDict)) - assert signedtxres['complete'] - - signedtx = signedtxres['hex'] - - # send from nodes[1]; this will end up in aa1 - txid1 = self.nodes[1].sendrawtransaction(signedtx) - - # generate bb1-bb2 on right side - self.nodes[2].generate(2) - - # send from nodes[2]; this will end up in bb3 - txid2 = self.nodes[2].sendrawtransaction(signedtx) - - assert_equal(txid1, txid2) - - # generate on both sides - lastblockhash = self.nodes[1].generate(3)[2] - self.nodes[2].generate(2) - - self.join_network() - - self.sync_all() - - # gettransaction should work for txid1 - self.nodes[0].gettransaction(txid1) - - # listsinceblock(lastblockhash) should now include txid1 in transactions - # as well as in removed - lsbres = self.nodes[0].listsinceblock(lastblockhash) - assert any(tx['txid'] == txid1 for tx in lsbres['transactions']) - assert any(tx['txid'] == txid1 for tx in lsbres['removed']) - - # find transaction and ensure confirmations is valid - for tx in lsbres['transactions']: - if tx['txid'] == txid1: - assert_equal(tx['confirmations'], 2) - - # the same check for the removed array; confirmations should STILL be 2 - for tx in lsbres['removed']: - if tx['txid'] == txid1: - assert_equal(tx['confirmations'], 2) - -if __name__ == '__main__': - ListSinceBlockTest().main() diff --git a/test/functional/multiwallet.py b/test/functional/multiwallet.py deleted file mode 100755 index b07e451667..0000000000 --- a/test/functional/multiwallet.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test multiwallet. 
- -Verify that a bitcoind node can load multiple wallet files -""" -import os -import shutil - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_raises_rpc_error - -class MultiWalletTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - self.extra_args = [['-wallet=w1', '-wallet=w2', '-wallet=w3', '-wallet=w'], []] - self.supports_cli = True - - def run_test(self): - node = self.nodes[0] - - data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p) - wallet_dir = lambda *p: data_dir('wallets', *p) - wallet = lambda name: node.get_wallet_rpc(name) - - assert_equal(set(node.listwallets()), {"w1", "w2", "w3", "w"}) - - self.stop_nodes() - - self.assert_start_raises_init_error(0, ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist') - self.assert_start_raises_init_error(0, ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir()) - self.assert_start_raises_init_error(0, ['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir()) - - # should not initialize if there are duplicate wallets - self.assert_start_raises_init_error(0, ['-wallet=w1', '-wallet=w1'], 'Error loading wallet w1. Duplicate -wallet filename specified.') - - # should not initialize if wallet file is a directory - os.mkdir(wallet_dir('w11')) - self.assert_start_raises_init_error(0, ['-wallet=w11'], 'Error loading wallet w11. -wallet filename must be a regular file.') - - # should not initialize if one wallet is a copy of another - shutil.copyfile(wallet_dir('w2'), wallet_dir('w22')) - self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'], 'duplicates fileid') - - # should not initialize if wallet file is a symlink - os.symlink(wallet_dir('w1'), wallet_dir('w12')) - self.assert_start_raises_init_error(0, ['-wallet=w12'], 'Error loading wallet w12. 
-wallet filename must be a regular file.') - - # should not initialize if the specified walletdir does not exist - self.assert_start_raises_init_error(0, ['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist') - # should not initialize if the specified walletdir is not a directory - not_a_dir = wallet_dir('notadir') - open(not_a_dir, 'a').close() - self.assert_start_raises_init_error(0, ['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory') - - # if wallets/ doesn't exist, datadir should be the default wallet dir - wallet_dir2 = data_dir('walletdir') - os.rename(wallet_dir(), wallet_dir2) - self.start_node(0, ['-wallet=w4', '-wallet=w5']) - assert_equal(set(node.listwallets()), {"w4", "w5"}) - w5 = wallet("w5") - w5.generate(1) - - # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded - os.rename(wallet_dir2, wallet_dir()) - self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()]) - assert_equal(set(node.listwallets()), {"w4", "w5"}) - w5 = wallet("w5") - w5_info = w5.getwalletinfo() - assert_equal(w5_info['immature_balance'], 50) - - competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir') - os.mkdir(competing_wallet_dir) - self.restart_node(0, ['-walletdir='+competing_wallet_dir]) - self.assert_start_raises_init_error(1, ['-walletdir='+competing_wallet_dir], 'Error initializing wallet database environment') - - self.restart_node(0, self.extra_args[0]) - - w1 = wallet("w1") - w2 = wallet("w2") - w3 = wallet("w3") - w4 = wallet("w") - wallet_bad = wallet("bad") - - w1.generate(1) - - # accessing invalid wallet fails - assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo) - - # accessing wallet RPC without using wallet endpoint fails - assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo) - - # check w1 wallet balance - w1_info = w1.getwalletinfo() - assert_equal(w1_info['immature_balance'], 50) - w1_name = w1_info['walletname'] - assert_equal(w1_name, "w1") - - # check w2 wallet balance - w2_info = w2.getwalletinfo() - assert_equal(w2_info['immature_balance'], 0) - w2_name = w2_info['walletname'] - assert_equal(w2_name, "w2") - - w3_name = w3.getwalletinfo()['walletname'] - assert_equal(w3_name, "w3") - - w4_name = w4.getwalletinfo()['walletname'] - assert_equal(w4_name, "w") - - w1.generate(101) - assert_equal(w1.getbalance(), 100) - assert_equal(w2.getbalance(), 0) - assert_equal(w3.getbalance(), 0) - assert_equal(w4.getbalance(), 0) - - w1.sendtoaddress(w2.getnewaddress(), 1) - w1.sendtoaddress(w3.getnewaddress(), 2) - w1.sendtoaddress(w4.getnewaddress(), 3) - w1.generate(1) - assert_equal(w2.getbalance(), 1) - assert_equal(w3.getbalance(), 2) - assert_equal(w4.getbalance(), 3) - - batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()]) - assert_equal(batch[0]["result"]["chain"], "regtest") - assert_equal(batch[1]["result"]["walletname"], "w1") - -if __name__ == '__main__': - MultiWalletTest().main() diff --git a/test/functional/receivedby.py b/test/functional/receivedby.py deleted file mode 100755 index 1f2b3c8aa7..0000000000 --- a/test/functional/receivedby.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the listreceivedbyaddress RPC.""" -from decimal import Decimal - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import (assert_array_result, - assert_equal, - assert_raises_rpc_error, - ) - -class ReceivedByTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - - def run_test(self): - # Generate block to get out of IBD - self.nodes[0].generate(1) - - self.log.info("listreceivedbyaddress Test") - - # Send from node 0 to 1 - addr = self.nodes[1].getnewaddress() - txid = self.nodes[0].sendtoaddress(addr, 0.1) - self.sync_all() - - # Check not listed in listreceivedbyaddress because has 0 confirmations - assert_array_result(self.nodes[1].listreceivedbyaddress(), - {"address": addr}, - {}, - True) - # Bury Tx under 10 block so it will be returned by listreceivedbyaddress - self.nodes[1].generate(10) - self.sync_all() - assert_array_result(self.nodes[1].listreceivedbyaddress(), - {"address": addr}, - {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) - # With min confidence < 10 - assert_array_result(self.nodes[1].listreceivedbyaddress(5), - {"address": addr}, - {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) - # With min confidence > 10, should not find Tx - assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True) - - # Empty Tx - addr = self.nodes[1].getnewaddress() - assert_array_result(self.nodes[1].listreceivedbyaddress(0, True), - {"address": addr}, - {"address": addr, "account": "", "amount": 0, "confirmations": 0, "txids": []}) - - self.log.info("getreceivedbyaddress Test") - - # Send from node 0 to 1 - addr = self.nodes[1].getnewaddress() - txid = self.nodes[0].sendtoaddress(addr, 0.1) - self.sync_all() - - # Check balance is 0 because of 0 confirmations - balance = self.nodes[1].getreceivedbyaddress(addr) - assert_equal(balance, Decimal("0.0")) - - # Check balance is 0.1 - balance = self.nodes[1].getreceivedbyaddress(addr, 0) - assert_equal(balance, Decimal("0.1")) - - # Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress - self.nodes[1].generate(10) - self.sync_all() - balance = self.nodes[1].getreceivedbyaddress(addr) - assert_equal(balance, Decimal("0.1")) - - # Trying to getreceivedby for an address the wallet doesn't own should return an error - assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr) - - self.log.info("listreceivedbyaccount + getreceivedbyaccount Test") - - # set pre-state - addrArr = self.nodes[1].getnewaddress() - account = self.nodes[1].getaccount(addrArr) - received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount() if r["account"] == account][0] - balance_by_account = self.nodes[1].getreceivedbyaccount(account) - - txid = self.nodes[0].sendtoaddress(addr, 0.1) - self.sync_all() - - # listreceivedbyaccount should return received_by_account_json because of 0 confirmations - assert_array_result(self.nodes[1].listreceivedbyaccount(), - {"account": account}, - received_by_account_json) - - # getreceivedbyaddress should return same balance because of 0 confirmations - balance = self.nodes[1].getreceivedbyaccount(account) - assert_equal(balance, balance_by_account) - - self.nodes[1].generate(10) - self.sync_all() - # listreceivedbyaccount should return updated account balance - assert_array_result(self.nodes[1].listreceivedbyaccount(), - {"account": account}, - 
{"account": received_by_account_json["account"], "amount": (received_by_account_json["amount"] + Decimal("0.1"))}) - - # getreceivedbyaddress should return updates balance - balance = self.nodes[1].getreceivedbyaccount(account) - assert_equal(balance, balance_by_account + Decimal("0.1")) - - # Create a new account named "mynewaccount" that has a 0 balance - self.nodes[1].getaccountaddress("mynewaccount") - received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount(0, True) if r["account"] == "mynewaccount"][0] - - # Test includeempty of listreceivedbyaccount - assert_equal(received_by_account_json["amount"], Decimal("0.0")) - - # Test getreceivedbyaccount for 0 amount accounts - balance = self.nodes[1].getreceivedbyaccount("mynewaccount") - assert_equal(balance, Decimal("0.0")) - -if __name__ == '__main__': - ReceivedByTest().main() diff --git a/test/functional/resendwallettransactions.py b/test/functional/resendwallettransactions.py deleted file mode 100755 index d959bb4c38..0000000000 --- a/test/functional/resendwallettransactions.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test resendwallettransactions RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_raises_rpc_error - -class ResendWalletTransactionsTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['--walletbroadcast=false']] - - def run_test(self): - # Should raise RPC_WALLET_ERROR (-4) if walletbroadcast is disabled. - assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions) - - # Should return an empty array if there aren't unconfirmed wallet transactions. - self.stop_node(0) - self.start_node(0, extra_args=[]) - assert_equal(self.nodes[0].resendwallettransactions(), []) - - # Should return an array with the unconfirmed wallet transaction. - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) - assert_equal(self.nodes[0].resendwallettransactions(), [txid]) - -if __name__ == '__main__': - ResendWalletTransactionsTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 6aad0f9b9d..edc1fed8cb 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -55,46 +55,46 @@ TEST_EXIT_SKIPPED = 77 BASE_SCRIPTS= [ # Scripts that are run by the travis build process. 
# Longest test should go first, to favor running tests in parallel - 'wallet-hd.py', - 'walletbackup.py', + 'wallet_hd.py', + 'wallet_backup.py', # vv Tests less than 5m vv 'feature_block.py', 'fundrawtransaction.py', 'p2p-compactblocks.py', 'feature_segwit.py', # vv Tests less than 2m vv - 'wallet.py', - 'wallet-accounts.py', + 'wallet_basic.py', + 'wallet_accounts.py', 'p2p-segwit.py', - 'wallet-dump.py', + 'wallet_dump.py', 'listtransactions.py', # vv Tests less than 60s vv 'sendheaders.py', - 'zapwallettxes.py', - 'importmulti.py', + 'wallet_zapwallettxes.py', + 'wallet_importmulti.py', 'mempool_limit.py', 'merkle_blocks.py', - 'receivedby.py', - 'abandonconflict.py', + 'wallet_listreceivedby.py', + 'wallet_abandonconflict.py', 'feature_csv_activation.py', 'rawtransactions.py', - 'address_types.py', + 'wallet_address_types.py', 'feature_reindex.py', # vv Tests less than 30s vv - 'keypool-topup.py', + 'wallet_keypool_topup.py', 'zmq_test.py', 'bitcoin_cli.py', 'mempool_resurrect_test.py', - 'txn_doublespend.py --mineblock', - 'txn_clone.py', - 'txn_clone.py --segwit', + 'wallet_txn_doublespend.py --mineblock', + 'wallet_txn_clone.py', + 'wallet_txn_clone.py --segwit', 'getchaintips.py', 'rest.py', 'mempool_spendcoinbase.py', 'mempool_reorg.py', 'mempool_persist.py', - 'multiwallet.py', - 'multiwallet.py --usecli', + 'wallet_multiwallet.py', + 'wallet_multiwallet.py --usecli', 'httpbasics.py', 'multi_rpc.py', 'feature_proxy.py', @@ -103,29 +103,29 @@ BASE_SCRIPTS= [ 'decodescript.py', 'blockchain.py', 'deprecated_rpc.py', - 'disablewallet.py', + 'wallet_disable.py', 'net.py', - 'keypool.py', + 'wallet_keypool.py', 'p2p-mempool.py', 'prioritise_transaction.py', 'invalidblockrequest.py', 'invalidtxrequest.py', 'feature_versionbits_warning.py', 'preciousblock.py', - 'importprunedfunds.py', + 'wallet_importprunedfunds.py', 'signmessages.py', 'feature_nulldummy.py', - 'import-rescan.py', + 'wallet_import_rescan.py', 'mining.py', - 'bumpfee.py', + 'wallet_bumpfee.py', 'rpcnamedargs.py', - 'listsinceblock.py', + 'wallet_listsinceblock.py', 'p2p-leaktests.py', - 'wallet-encryption.py', + 'wallet_encryption.py', 'feature_dersig.py', 'feature_cltv.py', 'uptime.py', - 'resendwallettransactions.py', + 'wallet_resendwallettransactions.py', 'feature_minchainwork.py', 'p2p-fingerprint.py', 'feature_uacomment.py', @@ -158,8 +158,8 @@ EXTENDED_SCRIPTS = [ # vv Tests less than 30s vv 'feature_assumevalid.py', 'example_test.py', - 'txn_doublespend.py', - 'txn_clone.py --mineblock', + 'wallet_txn_doublespend.py', + 'wallet_txn_clone.py --mineblock', 'feature_notifications.py', 'invalidateblock.py', 'feature_rbf.py', @@ -474,7 +474,7 @@ class TestResult(): def check_script_prefixes(): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" - EXPECTED_VIOLATION_COUNT = 60 + EXPECTED_VIOLATION_COUNT = 37 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming diff --git a/test/functional/txn_clone.py b/test/functional/txn_clone.py deleted file mode 100755 index ce26d6e0ee..0000000000 --- a/test/functional/txn_clone.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
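The EXPECTED_VIOLATION_COUNT change in the test_runner.py hunk above is driven by a naming check on test script file names. A rough sketch of the idea follows; the prefix list here is assumed for illustration and is not copied from test_runner.py:

    # Sketch: count test scripts whose file names lack an allowed prefix.
    # ALLOWED_PREFIXES is an assumed, illustrative set, not the real list.
    ALLOWED_PREFIXES = ("example_", "feature_", "mempool_", "mining_",
                        "p2p_", "rpc_", "wallet_")

    def count_violations(scripts):
        # Entries may carry arguments, e.g. 'wallet_txn_clone.py --segwit',
        # so strip everything after the first space before checking.
        return sum(1 for s in scripts
                   if not s.split()[0].startswith(ALLOWED_PREFIXES))

    assert count_violations(['wallet_hd.py', 'getchaintips.py']) == 1

As scripts are renamed to conform, as in this patch, the count of non-conforming scripts drops, here from 60 to 37.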
-"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class TxnMallTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - - def add_options(self, parser): - parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true", - help="Test double-spend of 1-confirmed transaction") - parser.add_option("--segwit", dest="segwit", default=False, action="store_true", - help="Test behaviour with SegWit txn (which should fail") - - def setup_network(self): - # Start with split network: - super(TxnMallTest, self).setup_network() - disconnect_nodes(self.nodes[1], 2) - disconnect_nodes(self.nodes[2], 1) - - def run_test(self): - if self.options.segwit: - output_type="p2sh-segwit" - else: - output_type="legacy" - - # All nodes should start with 1,250 BTC: - starting_balance = 1250 - for i in range(4): - assert_equal(self.nodes[i].getbalance(), starting_balance) - self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress! - - # Assign coins to foo and bar accounts: - self.nodes[0].settxfee(.001) - - node0_address_foo = self.nodes[0].getnewaddress("foo", output_type) - fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) - fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) - - node0_address_bar = self.nodes[0].getnewaddress("bar", output_type) - fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) - fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) - - assert_equal(self.nodes[0].getbalance(""), - starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) - - # Coins are sent to node1_address - node1_address = self.nodes[1].getnewaddress("from0") - - # Send tx1, and another transaction tx2 that won't be cloned - txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) - txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) - - # Construct a clone of tx1, to be malleated - rawtx1 = self.nodes[0].getrawtransaction(txid1,1) - clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}] - clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"], - rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]} - clone_locktime = rawtx1["locktime"] - clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime) - - # createrawtransaction randomizes the order of its outputs, so swap them if necessary. - # output 0 is at version+#inputs+input+sigstub+sequence+#outputs - # 40 BTC serialized is 00286bee00000000 - pos0 = 2*(4+1+36+1+4+1) - hex40 = "00286bee00000000" - output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0) - if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or - rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40): - output0 = clone_raw[pos0 : pos0 + output_len] - output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len] - clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:] - - # Use a different signature hash type to sign. This creates an equivalent but malleated clone. 
- # Don't send the clone anywhere yet - tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY") - assert_equal(tx1_clone["complete"], True) - - # Have node0 mine a block, if requested: - if (self.options.mine_block): - self.nodes[0].generate(1) - sync_blocks(self.nodes[0:2]) - - tx1 = self.nodes[0].gettransaction(txid1) - tx2 = self.nodes[0].gettransaction(txid2) - - # Node0's balance should be starting balance, plus 50BTC for another - # matured block, minus tx1 and tx2 amounts, and minus transaction fees: - expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] - if self.options.mine_block: expected += 50 - expected += tx1["amount"] + tx1["fee"] - expected += tx2["amount"] + tx2["fee"] - assert_equal(self.nodes[0].getbalance(), expected) - - # foo and bar accounts should be debited: - assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"]) - assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) - - if self.options.mine_block: - assert_equal(tx1["confirmations"], 1) - assert_equal(tx2["confirmations"], 1) - # Node1's "from0" balance should be both transaction amounts: - assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"])) - else: - assert_equal(tx1["confirmations"], 0) - assert_equal(tx2["confirmations"], 0) - - # Send clone and its parent to miner - self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) - txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"]) - if self.options.segwit: - assert_equal(txid1, txid1_clone) - return - - # ... mine a block... - self.nodes[2].generate(1) - - # Reconnect the split network, and sync chain: - connect_nodes(self.nodes[1], 2) - self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) - self.nodes[2].sendrawtransaction(tx2["hex"]) - self.nodes[2].generate(1) # Mine another block to make sure we sync - sync_blocks(self.nodes) - - # Re-fetch transaction info: - tx1 = self.nodes[0].gettransaction(txid1) - tx1_clone = self.nodes[0].gettransaction(txid1_clone) - tx2 = self.nodes[0].gettransaction(txid2) - - # Verify expected confirmations - assert_equal(tx1["confirmations"], -2) - assert_equal(tx1_clone["confirmations"], 2) - assert_equal(tx2["confirmations"], 1) - - # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured, - # less possible orphaned matured subsidy - expected += 100 - if (self.options.mine_block): - expected -= 50 - assert_equal(self.nodes[0].getbalance(), expected) - assert_equal(self.nodes[0].getbalance("*", 0), expected) - - # Check node0's individual account balances. 
- # "foo" should have been debited by the equivalent clone of tx1 - assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"]) - # "bar" should have been debited by (possibly unconfirmed) tx2 - assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) - # "" should have starting balance, less funding txes, plus subsidies - assert_equal(self.nodes[0].getbalance("", 0), starting_balance - - 1219 - + fund_foo_tx["fee"] - - 29 - + fund_bar_tx["fee"] - + 100) - - # Node1's "from0" account balance - assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"])) - -if __name__ == '__main__': - TxnMallTest().main() - diff --git a/test/functional/txn_doublespend.py b/test/functional/txn_doublespend.py deleted file mode 100755 index 01129f3817..0000000000 --- a/test/functional/txn_doublespend.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the wallet accounts properly when there is a double-spend conflict.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class TxnMallTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - - def add_options(self, parser): - parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true", - help="Test double-spend of 1-confirmed transaction") - - def setup_network(self): - # Start with split network: - super().setup_network() - disconnect_nodes(self.nodes[1], 2) - disconnect_nodes(self.nodes[2], 1) - - def run_test(self): - # All nodes should start with 1,250 BTC: - starting_balance = 1250 - for i in range(4): - assert_equal(self.nodes[i].getbalance(), starting_balance) - self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress! 
- - # Assign coins to foo and bar accounts: - node0_address_foo = self.nodes[0].getnewaddress("foo") - fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) - fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) - - node0_address_bar = self.nodes[0].getnewaddress("bar") - fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) - fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) - - assert_equal(self.nodes[0].getbalance(""), - starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) - - # Coins are sent to node1_address - node1_address = self.nodes[1].getnewaddress("from0") - - # First: use raw transaction API to send 1240 BTC to node1_address, - # but don't broadcast: - doublespend_fee = Decimal('-.02') - rawtx_input_0 = {} - rawtx_input_0["txid"] = fund_foo_txid - rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219) - rawtx_input_1 = {} - rawtx_input_1["txid"] = fund_bar_txid - rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29) - inputs = [rawtx_input_0, rawtx_input_1] - change_address = self.nodes[0].getnewaddress() - outputs = {} - outputs[node1_address] = 1240 - outputs[change_address] = 1248 - 1240 + doublespend_fee - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - doublespend = self.nodes[0].signrawtransaction(rawtx) - assert_equal(doublespend["complete"], True) - - # Create two spends using 1 50 BTC coin each - txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) - txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) - - # Have node0 mine a block: - if (self.options.mine_block): - self.nodes[0].generate(1) - sync_blocks(self.nodes[0:2]) - - tx1 = self.nodes[0].gettransaction(txid1) - tx2 = self.nodes[0].gettransaction(txid2) - - # Node0's balance should be starting balance, plus 50BTC for another - # matured block, minus 40, minus 20, and minus transaction fees: - expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] - if self.options.mine_block: expected += 50 - expected += tx1["amount"] + tx1["fee"] - expected += tx2["amount"] + tx2["fee"] - assert_equal(self.nodes[0].getbalance(), expected) - - # foo and bar accounts should be debited: - assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"]) - assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"]) - - if self.options.mine_block: - assert_equal(tx1["confirmations"], 1) - assert_equal(tx2["confirmations"], 1) - # Node1's "from0" balance should be both transaction amounts: - assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"])) - else: - assert_equal(tx1["confirmations"], 0) - assert_equal(tx2["confirmations"], 0) - - # Now give doublespend and its parents to miner: - self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) - self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) - doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"]) - # ... mine a block... 
- self.nodes[2].generate(1) - - # Reconnect the split network, and sync chain: - connect_nodes(self.nodes[1], 2) - self.nodes[2].generate(1) # Mine another block to make sure we sync - sync_blocks(self.nodes) - assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2) - - # Re-fetch transaction info: - tx1 = self.nodes[0].gettransaction(txid1) - tx2 = self.nodes[0].gettransaction(txid2) - - # Both transactions should be conflicted - assert_equal(tx1["confirmations"], -2) - assert_equal(tx2["confirmations"], -2) - - # Node0's total balance should be starting balance, plus 100BTC for - # two more matured blocks, minus 1240 for the double-spend, plus fees (which are - # negative): - expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee - assert_equal(self.nodes[0].getbalance(), expected) - assert_equal(self.nodes[0].getbalance("*"), expected) - - # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies + - # fees (which are negative) - assert_equal(self.nodes[0].getbalance("foo"), 1219) - assert_equal(self.nodes[0].getbalance("bar"), 29) - assert_equal(self.nodes[0].getbalance(""), starting_balance - -1219 - - 29 - -1240 - + 100 - + fund_foo_tx["fee"] - + fund_bar_tx["fee"] - + doublespend_fee) - - # Node1's "from0" account balance should be just the doublespend: - assert_equal(self.nodes[1].getbalance("from0"), 1240) - -if __name__ == '__main__': - TxnMallTest().main() - diff --git a/test/functional/wallet-accounts.py b/test/functional/wallet-accounts.py deleted file mode 100755 index ecd1cfc82b..0000000000 --- a/test/functional/wallet-accounts.py +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test account RPCs. - -RPCs tested are: - - getaccountaddress - - getaddressesbyaccount - - listaddressgroupings - - setaccount - - sendfrom (with account arguments) - - move (with account arguments) -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class WalletAccountsTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [[]] - - def run_test(self): - node = self.nodes[0] - # Check that there's no UTXO on any of the nodes - assert_equal(len(node.listunspent()), 0) - - # Note each time we call generate, all generated coins go into - # the same address, so we call twice to get two addresses w/50 each - node.generate(1) - node.generate(101) - assert_equal(node.getbalance(), 100) - - # there should be 2 address groups - # each with 1 address with a balance of 50 Bitcoins - address_groups = node.listaddressgroupings() - assert_equal(len(address_groups), 2) - # the addresses aren't linked now, but will be after we send to the - # common address - linked_addresses = set() - for address_group in address_groups: - assert_equal(len(address_group), 1) - assert_equal(len(address_group[0]), 2) - assert_equal(address_group[0][1], 50) - linked_addresses.add(address_group[0][0]) - - # send 50 from each address to a third address not in this wallet - # There's some fee that will come back to us when the miner reward - # matures. 
- common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr" - txid = node.sendmany( - fromaccount="", - amounts={common_address: 100}, - subtractfeefrom=[common_address], - minconf=1, - ) - tx_details = node.gettransaction(txid) - fee = -tx_details['details'][0]['fee'] - # there should be 1 address group, with the previously - # unlinked addresses now linked (they both have 0 balance) - address_groups = node.listaddressgroupings() - assert_equal(len(address_groups), 1) - assert_equal(len(address_groups[0]), 2) - assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses) - assert_equal([a[1] for a in address_groups[0]], [0, 0]) - - node.generate(1) - - # we want to reset so that the "" account has what's expected. - # otherwise we're off by exactly the fee amount as that's mined - # and matures in the next 100 blocks - node.sendfrom("", common_address, fee) - amount_to_send = 1.0 - - # Create accounts and make sure subsequent account API calls - # recognize the account/address associations. - accounts = [Account(name) for name in ("a", "b", "c", "d", "e")] - for account in accounts: - account.add_receive_address(node.getaccountaddress(account.name)) - account.verify(node) - - # Send a transaction to each account, and make sure this forces - # getaccountaddress to generate a new receiving address. - for account in accounts: - node.sendtoaddress(account.receive_address, amount_to_send) - account.add_receive_address(node.getaccountaddress(account.name)) - account.verify(node) - - # Check the amounts received. - node.generate(1) - for account in accounts: - assert_equal( - node.getreceivedbyaddress(account.addresses[0]), amount_to_send) - assert_equal(node.getreceivedbyaccount(account.name), amount_to_send) - - # Check that sendfrom account reduces listaccounts balances. - for i, account in enumerate(accounts): - to_account = accounts[(i+1) % len(accounts)] - node.sendfrom(account.name, to_account.receive_address, amount_to_send) - node.generate(1) - for account in accounts: - account.add_receive_address(node.getaccountaddress(account.name)) - account.verify(node) - assert_equal(node.getreceivedbyaccount(account.name), 2) - node.move(account.name, "", node.getbalance(account.name)) - account.verify(node) - node.generate(101) - expected_account_balances = {"": 5200} - for account in accounts: - expected_account_balances[account.name] = 0 - assert_equal(node.listaccounts(), expected_account_balances) - assert_equal(node.getbalance(""), 5200) - - # Check that setaccount can assign an account to a new unused address. - for account in accounts: - address = node.getaccountaddress("") - node.setaccount(address, account.name) - account.add_address(address) - account.verify(node) - assert(address not in node.getaddressesbyaccount("")) - - # Check that addmultisigaddress can assign accounts. - for account in accounts: - addresses = [] - for x in range(10): - addresses.append(node.getnewaddress()) - multisig_address = node.addmultisigaddress(5, addresses, account.name)['address'] - account.add_address(multisig_address) - account.verify(node) - node.sendfrom("", multisig_address, 50) - node.generate(101) - for account in accounts: - assert_equal(node.getbalance(account.name), 50) - - # Check that setaccount can change the account of an address from a - # different account. - change_account(node, accounts[0].addresses[0], accounts[0], accounts[1]) - - # Check that setaccount can change the account of an address which - # is the receiving address of a different account. 
- change_account(node, accounts[0].receive_address, accounts[0], accounts[1]) - - # Check that setaccount can set the account of an address already - # in the account. This is a no-op. - change_account(node, accounts[2].addresses[0], accounts[2], accounts[2]) - - # Check that setaccount can set the account of an address which is - # already the receiving address of the account. It would probably make - # sense for this to be a no-op, but right now it resets the receiving - # address, causing getaccountaddress to return a brand new address. - change_account(node, accounts[2].receive_address, accounts[2], accounts[2]) - -class Account: - def __init__(self, name): - # Account name - self.name = name - # Current receiving address associated with this account. - self.receive_address = None - # List of all addresses assigned with this account - self.addresses = [] - - def add_address(self, address): - assert_equal(address not in self.addresses, True) - self.addresses.append(address) - - def add_receive_address(self, address): - self.add_address(address) - self.receive_address = address - - def verify(self, node): - if self.receive_address is not None: - assert self.receive_address in self.addresses - assert_equal(node.getaccountaddress(self.name), self.receive_address) - - for address in self.addresses: - assert_equal(node.getaccount(address), self.name) - - assert_equal( - set(node.getaddressesbyaccount(self.name)), set(self.addresses)) - - -def change_account(node, address, old_account, new_account): - assert_equal(address in old_account.addresses, True) - node.setaccount(address, new_account.name) - - old_account.addresses.remove(address) - new_account.add_address(address) - - # Calling setaccount on an address which was previously the receiving - # address of a different account should reset the receiving address of - # the old account, causing getaccountaddress to return a brand new - # address. - if address == old_account.receive_address: - new_address = node.getaccountaddress(old_account.name) - assert_equal(new_address not in old_account.addresses, True) - assert_equal(new_address not in new_account.addresses, True) - old_account.add_receive_address(new_address) - - old_account.verify(node) - new_account.verify(node) - - -if __name__ == '__main__': - WalletAccountsTest().main() diff --git a/test/functional/wallet-dump.py b/test/functional/wallet-dump.py deleted file mode 100755 index 77f90ffb81..0000000000 --- a/test/functional/wallet-dump.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the dumpwallet RPC.""" - -import os - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error) - - -def read_dump(file_name, addrs, script_addrs, hd_master_addr_old): - """ - Read the given dump, count the addrs that match, count change and reserve. 
- Also check that the old hd_master is inactive - """ - with open(file_name, encoding='utf8') as inputfile: - found_addr = 0 - found_script_addr = 0 - found_addr_chg = 0 - found_addr_rsv = 0 - hd_master_addr_ret = None - for line in inputfile: - # only read non comment lines - if line[0] != "#" and len(line) > 10: - # split out some data - key_label, comment = line.split("#") - # key = key_label.split(" ")[0] - keytype = key_label.split(" ")[2] - if len(comment) > 1: - addr_keypath = comment.split(" addr=")[1] - addr = addr_keypath.split(" ")[0] - keypath = None - if keytype == "inactivehdmaster=1": - # ensure the old master is still available - assert(hd_master_addr_old == addr) - elif keytype == "hdmaster=1": - # ensure we have generated a new hd master key - assert(hd_master_addr_old != addr) - hd_master_addr_ret = addr - elif keytype == "script=1": - # scripts don't have keypaths - keypath = None - else: - keypath = addr_keypath.rstrip().split("hdkeypath=")[1] - - # count key types - for addrObj in addrs: - if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=": - found_addr += 1 - break - elif keytype == "change=1": - found_addr_chg += 1 - break - elif keytype == "reserve=1": - found_addr_rsv += 1 - break - - # count scripts - for script_addr in script_addrs: - if script_addr == addr.rstrip() and keytype == "script=1": - found_script_addr += 1 - break - - return found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret - - -class WalletDumpTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [["-keypool=90", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]] - - def setup_network(self, split=False): - # Use 1 minute timeout because the initial getnewaddress RPC can take - # longer than the default 30 seconds due to an expensive - # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in - # the test often takes even longer. 
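For reference, read_dump() above assumes non-comment dump lines of roughly the following shape; the key, timestamp, and address in this sketch are invented placeholders, not real dumpwallet output:

    # Parse one hypothetical dump line the way read_dump() does.
    line = "cTprivkey... 2017-01-01T00:00:01Z label= # addr=mvXYZ hdkeypath=m/0'/0'/0'\n"
    key_label, comment = line.split("#")
    assert key_label.split(" ")[2] == "label="          # the keytype token
    addr_keypath = comment.split(" addr=")[1]
    assert addr_keypath.split(" ")[0] == "mvXYZ"        # the address
    assert addr_keypath.rstrip().split("hdkeypath=")[1] == "m/0'/0'/0'"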
- self.add_nodes(self.num_nodes, self.extra_args, timewait=60) - self.start_nodes() - - def run_test(self): - tmpdir = self.options.tmpdir - - # generate 20 addresses to compare against the dump - test_addr_count = 20 - addrs = [] - for i in range(0,test_addr_count): - addr = self.nodes[0].getnewaddress() - vaddr = self.nodes[0].validateaddress(addr) # required to get hd keypath - addrs.append(vaddr) - # Should be a no-op: - self.nodes[0].keypoolrefill() - - # Test scripts dump by adding a P2SH witness and a 1-of-1 multisig address - witness_addr = self.nodes[0].addwitnessaddress(addrs[0]["address"], True) - multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"] - script_addrs = [witness_addr, multisig_addr] - - # dump unencrypted wallet - result = self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump") - assert_equal(result['filename'], os.path.abspath(tmpdir + "/node0/wallet.unencrypted.dump")) - - found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \ - read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, script_addrs, None) - assert_equal(found_addr, test_addr_count) # all keys must be in the dump - assert_equal(found_script_addr, 2) # all scripts must be in the dump - assert_equal(found_addr_chg, 50) # 50 blocks were mined - assert_equal(found_addr_rsv, 90*2) # 90 keys plus 100% internal keys - - # encrypt wallet, restart, unlock and dump - self.nodes[0].node_encrypt_wallet('test') - self.start_node(0) - self.nodes[0].walletpassphrase('test', 10) - # Should be a no-op: - self.nodes[0].keypoolrefill() - self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump") - - found_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \ - read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, script_addrs, hd_master_addr_unenc) - assert_equal(found_addr, test_addr_count) - assert_equal(found_script_addr, 2) - assert_equal(found_addr_chg, 90*2 + 50) # old reserve keys are marked as change now - assert_equal(found_addr_rsv, 90*2) - - # Overwriting should fail - assert_raises_rpc_error(-8, "already exists", self.nodes[0].dumpwallet, tmpdir + "/node0/wallet.unencrypted.dump") - - # Restart node with new wallet, and test importwallet - self.stop_node(0) - self.start_node(0, ['-wallet=w2']) - - # Make sure the address is not IsMine before import - result = self.nodes[0].validateaddress(multisig_addr) - assert(result['ismine'] == False) - - self.nodes[0].importwallet(os.path.abspath(tmpdir + "/node0/wallet.unencrypted.dump")) - - # Now check IsMine is true - result = self.nodes[0].validateaddress(multisig_addr) - assert(result['ismine'] == True) - -if __name__ == '__main__': - WalletDumpTest().main() diff --git a/test/functional/wallet-encryption.py b/test/functional/wallet-encryption.py deleted file mode 100755 index 3c927ee484..0000000000 --- a/test/functional/wallet-encryption.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test Wallet encryption""" - -import time - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_raises_rpc_error, - assert_greater_than, - assert_greater_than_or_equal, -) - -class WalletEncryptionTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def run_test(self): - passphrase = "WalletPassphrase" - passphrase2 = "SecondWalletPassphrase" - - # Make sure the wallet isn't encrypted first - address = self.nodes[0].getnewaddress() - privkey = self.nodes[0].dumpprivkey(address) - assert_equal(privkey[:1], "c") - assert_equal(len(privkey), 52) - - # Encrypt the wallet - self.nodes[0].node_encrypt_wallet(passphrase) - self.start_node(0) - - # Test that the wallet is encrypted - assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address) - - # Check that walletpassphrase works - self.nodes[0].walletpassphrase(passphrase, 2) - assert_equal(privkey, self.nodes[0].dumpprivkey(address)) - - # Check that the timeout is right - time.sleep(2) - assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address) - - # Test wrong passphrase - assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10) - - # Test walletlock - self.nodes[0].walletpassphrase(passphrase, 84600) - assert_equal(privkey, self.nodes[0].dumpprivkey(address)) - self.nodes[0].walletlock() - assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address) - - # Test passphrase changes - self.nodes[0].walletpassphrasechange(passphrase, passphrase2) - assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10) - self.nodes[0].walletpassphrase(passphrase2, 10) - assert_equal(privkey, self.nodes[0].dumpprivkey(address)) - self.nodes[0].walletlock() - - # Test timeout bounds - assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10) - # Check the timeout - # Check a time less than the limit - expected_time = int(time.time()) + (1 << 30) - 600 - self.nodes[0].walletpassphrase(passphrase2, (1 << 30) - 600) - actual_time = self.nodes[0].getwalletinfo()['unlocked_until'] - assert_greater_than_or_equal(actual_time, expected_time) - assert_greater_than(expected_time + 5, actual_time) # 5 second buffer - # Check a time greater than the limit - expected_time = int(time.time()) + (1 << 30) - 1 - self.nodes[0].walletpassphrase(passphrase2, (1 << 33)) - actual_time = self.nodes[0].getwalletinfo()['unlocked_until'] - assert_greater_than_or_equal(actual_time, expected_time) - assert_greater_than(expected_time + 5, actual_time) # 5 second buffer - -if __name__ == '__main__': - WalletEncryptionTest().main() diff --git a/test/functional/wallet-hd.py b/test/functional/wallet-hd.py deleted file mode 100755 index 9f0e9acb47..0000000000 --- a/test/functional/wallet-hd.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test Hierarchical Deterministic wallet function.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - connect_nodes_bi, -) -import shutil -import os - -class WalletHDTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - self.extra_args = [[], ['-keypool=0']] - - def run_test (self): - tmpdir = self.options.tmpdir - - # Make sure can't switch off usehd after wallet creation - self.stop_node(1) - self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet') - self.start_node(1) - connect_nodes_bi(self.nodes, 0, 1) - - # Make sure we use hd, keep masterkeyid - masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid'] - assert_equal(len(masterkeyid), 40) - - # create an internal key - change_addr = self.nodes[1].getrawchangeaddress() - change_addrV= self.nodes[1].validateaddress(change_addr) - assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key - - # Import a non-HD private key in the HD wallet - non_hd_add = self.nodes[0].getnewaddress() - self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add)) - - # This should be enough to keep the master key and the non-HD key - self.nodes[1].backupwallet(tmpdir + "/hd.bak") - #self.nodes[1].dumpwallet(tmpdir + "/hd.dump") - - # Derive some HD addresses and remember the last - # Also send funds to each add - self.nodes[0].generate(101) - hd_add = None - num_hd_adds = 300 - for i in range(num_hd_adds): - hd_add = self.nodes[1].getnewaddress() - hd_info = self.nodes[1].validateaddress(hd_add) - assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'") - assert_equal(hd_info["hdmasterkeyid"], masterkeyid) - self.nodes[0].sendtoaddress(hd_add, 1) - self.nodes[0].generate(1) - self.nodes[0].sendtoaddress(non_hd_add, 1) - self.nodes[0].generate(1) - - # create an internal key (again) - change_addr = self.nodes[1].getrawchangeaddress() - change_addrV= self.nodes[1].validateaddress(change_addr) - assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key - - self.sync_all() - assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) - - self.log.info("Restore backup ...") - self.stop_node(1) - # we need to delete the complete regtest directory - # otherwise node1 would auto-recover all funds in flag the keypool keys as used - shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks")) - shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate")) - shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat")) - self.start_node(1) - - # Assert that derivation is deterministic - hd_add_2 = None - for _ in range(num_hd_adds): - hd_add_2 = self.nodes[1].getnewaddress() - hd_info_2 = self.nodes[1].validateaddress(hd_add_2) - assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_)+"'") - assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid) - assert_equal(hd_add, hd_add_2) - connect_nodes_bi(self.nodes, 0, 1) - self.sync_all() - - # Needs rescan - self.stop_node(1) - self.start_node(1, extra_args=self.extra_args[1] + ['-rescan']) - assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) - - # Try a RPC based rescan - self.stop_node(1) - shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks")) - shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate")) - shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallet.dat")) - self.start_node(1, extra_args=self.extra_args[1]) - 
connect_nodes_bi(self.nodes, 0, 1) - self.sync_all() - out = self.nodes[1].rescanblockchain(0, 1) - assert_equal(out['start_height'], 0) - assert_equal(out['stop_height'], 1) - out = self.nodes[1].rescanblockchain() - assert_equal(out['start_height'], 0) - assert_equal(out['stop_height'], self.nodes[1].getblockcount()) - assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) - - # send a tx and make sure its using the internal chain for the changeoutput - txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) - outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout'] - keypath = "" - for out in outs: - if out['value'] != 1: - keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath'] - - assert_equal(keypath[0:7], "m/0'/1'") - -if __name__ == '__main__': - WalletHDTest().main () diff --git a/test/functional/wallet.py b/test/functional/wallet.py deleted file mode 100755 index a90dbc8adf..0000000000 --- a/test/functional/wallet.py +++ /dev/null @@ -1,446 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the wallet.""" -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class WalletTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = True - - def setup_network(self): - self.add_nodes(4) - self.start_node(0) - self.start_node(1) - self.start_node(2) - connect_nodes_bi(self.nodes,0,1) - connect_nodes_bi(self.nodes,1,2) - connect_nodes_bi(self.nodes,0,2) - self.sync_all([self.nodes[0:3]]) - - def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size): - """Return curr_balance after asserting the fee was in range""" - fee = balance_with_fee - curr_balance - assert_fee_amount(fee, tx_size, fee_per_byte * 1000) - return curr_balance - - def get_vsize(self, txn): - return self.nodes[0].decoderawtransaction(txn)['vsize'] - - def run_test(self): - # Check that there's no UTXO on none of the nodes - assert_equal(len(self.nodes[0].listunspent()), 0) - assert_equal(len(self.nodes[1].listunspent()), 0) - assert_equal(len(self.nodes[2].listunspent()), 0) - - self.log.info("Mining blocks...") - - self.nodes[0].generate(1) - - walletinfo = self.nodes[0].getwalletinfo() - assert_equal(walletinfo['immature_balance'], 50) - assert_equal(walletinfo['balance'], 0) - - self.sync_all([self.nodes[0:3]]) - self.nodes[1].generate(101) - self.sync_all([self.nodes[0:3]]) - - assert_equal(self.nodes[0].getbalance(), 50) - assert_equal(self.nodes[1].getbalance(), 50) - assert_equal(self.nodes[2].getbalance(), 0) - - # Check that only first and second nodes have UTXOs - utxos = self.nodes[0].listunspent() - assert_equal(len(utxos), 1) - assert_equal(len(self.nodes[1].listunspent()), 1) - assert_equal(len(self.nodes[2].listunspent()), 0) - - self.log.info("test gettxout") - confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"] - # First, outputs that are unspent both in the chain and in the - # mempool should appear with or without include_mempool - txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False) - assert_equal(txout['value'], 50) - txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True) - assert_equal(txout['value'], 50) - - # Send 21 BTC 
from 0 to 2 using sendtoaddress call. - # Locked memory should use at least 32 bytes to sign each transaction - self.log.info("test getmemoryinfo") - memory_before = self.nodes[0].getmemoryinfo() - self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11) - mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10) - memory_after = self.nodes[0].getmemoryinfo() - assert(memory_before['locked']['used'] + 64 <= memory_after['locked']['used']) - - self.log.info("test gettxout (second part)") - # utxo spent in mempool should be visible if you exclude mempool - # but invisible if you include mempool - txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False) - assert_equal(txout['value'], 50) - txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True) - assert txout is None - # new utxo from mempool should be invisible if you exclude mempool - # but visible if you include mempool - txout = self.nodes[0].gettxout(mempool_txid, 0, False) - assert txout is None - txout1 = self.nodes[0].gettxout(mempool_txid, 0, True) - txout2 = self.nodes[0].gettxout(mempool_txid, 1, True) - # note the mempool tx will have randomly assigned indices - # but 10 will go to node2 and the rest will go to node0 - balance = self.nodes[0].getbalance() - assert_equal(set([txout1['value'], txout2['value']]), set([10, balance])) - walletinfo = self.nodes[0].getwalletinfo() - assert_equal(walletinfo['immature_balance'], 0) - - # Have node0 mine a block, thus it will collect its own fee. - self.nodes[0].generate(1) - self.sync_all([self.nodes[0:3]]) - - # Exercise locking of unspent outputs - unspent_0 = self.nodes[2].listunspent()[0] - unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]} - assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0]) - self.nodes[2].lockunspent(False, [unspent_0]) - assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0]) - assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20) - assert_equal([unspent_0], self.nodes[2].listlockunspent()) - self.nodes[2].lockunspent(True, [unspent_0]) - assert_equal(len(self.nodes[2].listlockunspent()), 0) - assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction", - self.nodes[2].lockunspent, False, - [{"txid": "0000000000000000000000000000000000", "vout": 0}]) - assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds", - self.nodes[2].lockunspent, False, - [{"txid": unspent_0["txid"], "vout": 999}]) - - # Have node1 generate 100 blocks (so node0 can recover the fee) - self.nodes[1].generate(100) - self.sync_all([self.nodes[0:3]]) - - # node0 should end up with 100 btc in block rewards plus fees, but - # minus the 21 plus fees sent to node2 - assert_equal(self.nodes[0].getbalance(), 100-21) - assert_equal(self.nodes[2].getbalance(), 21) - - # Node0 should have two unspent outputs. 
- # Create a couple of transactions to send them to node2, submit them through - # node1, and make sure both node0 and node2 pick them up properly: - node0utxos = self.nodes[0].listunspent(1) - assert_equal(len(node0utxos), 2) - - # create both transactions - txns_to_send = [] - for utxo in node0utxos: - inputs = [] - outputs = {} - inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]}) - outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3 - raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) - txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx)) - - # Have node 1 (miner) send the transactions - self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True) - self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True) - - # Have node1 mine a block to confirm transactions: - self.nodes[1].generate(1) - self.sync_all([self.nodes[0:3]]) - - assert_equal(self.nodes[0].getbalance(), 0) - assert_equal(self.nodes[2].getbalance(), 94) - assert_equal(self.nodes[2].getbalance("from1"), 94-21) - - # Verify that a spent output cannot be locked anymore - spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]} - assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0]) - - # Send 10 BTC normal - address = self.nodes[0].getnewaddress("test") - fee_per_byte = Decimal('0.001') / 1000 - self.nodes[2].settxfee(fee_per_byte * 1000) - txid = self.nodes[2].sendtoaddress(address, 10, "", "", False) - self.nodes[2].generate(1) - self.sync_all([self.nodes[0:3]]) - node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) - assert_equal(self.nodes[0].getbalance(), Decimal('10')) - - # Send 10 BTC with subtract fee from amount - txid = self.nodes[2].sendtoaddress(address, 10, "", "", True) - self.nodes[2].generate(1) - self.sync_all([self.nodes[0:3]]) - node_2_bal -= Decimal('10') - assert_equal(self.nodes[2].getbalance(), node_2_bal) - node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) - - # Sendmany 10 BTC - txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", []) - self.nodes[2].generate(1) - self.sync_all([self.nodes[0:3]]) - node_0_bal += Decimal('10') - node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) - assert_equal(self.nodes[0].getbalance(), node_0_bal) - - # Sendmany 10 BTC with subtract fee from amount - txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address]) - self.nodes[2].generate(1) - self.sync_all([self.nodes[0:3]]) - node_2_bal -= Decimal('10') - assert_equal(self.nodes[2].getbalance(), node_2_bal) - node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) - - # Test ResendWalletTransactions: - # Create a couple of transactions, then start up a fourth - # node (nodes[3]) and ask nodes[0] to rebroadcast. - # EXPECT: nodes[3] should have those transactions in its mempool. 
- txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) - txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) - sync_mempools(self.nodes[0:2]) - - self.start_node(3) - connect_nodes_bi(self.nodes, 0, 3) - sync_blocks(self.nodes) - - relayed = self.nodes[0].resendwallettransactions() - assert_equal(set(relayed), {txid1, txid2}) - sync_mempools(self.nodes) - - assert(txid1 in self.nodes[3].getrawmempool()) - - # Exercise balance RPCs - assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1) - assert_equal(self.nodes[0].getunconfirmedbalance(), 1) - - # check if we can list a zero-value tx as available coins - # 1. create a raw tx - # 2. edit the hex to change one output to 0.0 - # 3. sign and send - # 4. check if the recipient (node0) can list the zero-value tx - usp = self.nodes[1].listunspent() - inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}] - outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11} - - rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") # zero out the 11.11 BTC output (1111000000 sat = 0x423883C0, serialized little-endian as "c0833842...") - decRawTx = self.nodes[1].decoderawtransaction(rawTx) - signedRawTx = self.nodes[1].signrawtransaction(rawTx) - decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex']) - zeroValueTxid = decRawTx['txid'] - self.nodes[1].sendrawtransaction(signedRawTx['hex']) - - self.sync_all() - self.nodes[1].generate(1) # mine a block - self.sync_all() - - unspentTxs = self.nodes[0].listunspent() # zero-value tx must be in listunspent output - found = False - for uTx in unspentTxs: - if uTx['txid'] == zeroValueTxid: - found = True - assert_equal(uTx['amount'], Decimal('0')) - assert(found) - - # do some -walletbroadcast tests - self.stop_nodes() - self.start_node(0, ["-walletbroadcast=0"]) - self.start_node(1, ["-walletbroadcast=0"]) - self.start_node(2, ["-walletbroadcast=0"]) - connect_nodes_bi(self.nodes,0,1) - connect_nodes_bi(self.nodes,1,2) - connect_nodes_bi(self.nodes,0,2) - self.sync_all([self.nodes[0:3]]) - - txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2) - txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) - self.nodes[1].generate(1) # mine a block; the tx should not be in it - self.sync_all([self.nodes[0:3]]) - assert_equal(self.nodes[2].getbalance(), node_2_bal) # should not change because the tx was not broadcast - - # now broadcast from another node, mine a block, sync, and check the balance - self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex']) - self.nodes[1].generate(1) - self.sync_all([self.nodes[0:3]]) - node_2_bal += 2 - txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) - assert_equal(self.nodes[2].getbalance(), node_2_bal) - - # create another tx - txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2) - - # restart the nodes with -walletbroadcast=1 - self.stop_nodes() - self.start_node(0) - self.start_node(1) - self.start_node(2) - connect_nodes_bi(self.nodes,0,1) - connect_nodes_bi(self.nodes,1,2) - connect_nodes_bi(self.nodes,0,2) - sync_blocks(self.nodes[0:3]) - - self.nodes[0].generate(1) - sync_blocks(self.nodes[0:3]) - node_2_bal += 2 - - # the tx should be added to the balance because after restarting the nodes it should be broadcast - assert_equal(self.nodes[2].getbalance(), node_2_bal) - - # send a tx with value in a string (PR#6380 +) - txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2") - txObj = self.nodes[0].gettransaction(txId) -
assert_equal(txObj['amount'], Decimal('-2')) - - txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001") - txObj = self.nodes[0].gettransaction(txId) - assert_equal(txObj['amount'], Decimal('-0.0001')) - - #check if JSON parser can handle scientific notation in strings - txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4") - txObj = self.nodes[0].gettransaction(txId) - assert_equal(txObj['amount'], Decimal('-0.0001')) - - # This will raise an exception because the amount type is wrong - assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4") - - # This will raise an exception since generate does not accept a string - assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2") - - # Import address and private key to check correct behavior of spendable unspents - # 1. Send some coins to generate new UTXO - address_to_import = self.nodes[2].getnewaddress() - txid = self.nodes[0].sendtoaddress(address_to_import, 1) - self.nodes[0].generate(1) - self.sync_all([self.nodes[0:3]]) - - # 2. Import address from node2 to node1 - self.nodes[1].importaddress(address_to_import) - - # 3. Validate that the imported address is watch-only on node1 - assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"]) - - # 4. Check that the unspents after import are not spendable - assert_array_result(self.nodes[1].listunspent(), - {"address": address_to_import}, - {"spendable": False}) - - # 5. Import private key of the previously imported address on node1 - priv_key = self.nodes[2].dumpprivkey(address_to_import) - self.nodes[1].importprivkey(priv_key) - - # 6. Check that the unspents are now spendable on node1 - assert_array_result(self.nodes[1].listunspent(), - {"address": address_to_import}, - {"spendable": True}) - - # Mine a block from node0 to an address from node1 - cbAddr = self.nodes[1].getnewaddress() - blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0] - cbTxId = self.nodes[0].getblock(blkHash)['tx'][0] - self.sync_all([self.nodes[0:3]]) - - # Check that the txid and balance is found by node1 - self.nodes[1].gettransaction(cbTxId) - - # check if wallet or blockchain maintenance changes the balance - self.sync_all([self.nodes[0:3]]) - blocks = self.nodes[0].generate(2) - self.sync_all([self.nodes[0:3]]) - balance_nodes = [self.nodes[i].getbalance() for i in range(3)] - block_count = self.nodes[0].getblockcount() - - # Check modes: - # - True: unicode escaped as \u.... 
- # - False: unicode directly as UTF-8 - for mode in [True, False]: - self.nodes[0].ensure_ascii = mode - # unicode check: Basic Multilingual Plane, Supplementary Plane respectively - for s in [u'рыба', u'𝅘𝅥𝅯']: - addr = self.nodes[0].getaccountaddress(s) - label = self.nodes[0].getaccount(addr) - assert_equal(label, s) - assert(s in self.nodes[0].listaccounts().keys()) - self.nodes[0].ensure_ascii = True # restore to default - - # maintenance tests - maintenance = [ - '-rescan', - '-reindex', - '-zapwallettxes=1', - '-zapwallettxes=2', - # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463 - # '-salvagewallet', - ] - chainlimit = 6 - for m in maintenance: - self.log.info("check " + m) - self.stop_nodes() - # set lower ancestor limit for later - self.start_node(0, [m, "-limitancestorcount="+str(chainlimit)]) - self.start_node(1, [m, "-limitancestorcount="+str(chainlimit)]) - self.start_node(2, [m, "-limitancestorcount="+str(chainlimit)]) - while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]: - # reindex will leave rpc warm up "early"; Wait for it to finish - time.sleep(0.1) - assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)]) - - # Exercise listsinceblock with the last two blocks - coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0]) - assert_equal(coinbase_tx_1["lastblock"], blocks[1]) - assert_equal(len(coinbase_tx_1["transactions"]), 1) - assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1]) - assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0) - - # ==Check that wallet prefers to use coins that don't exceed mempool limits ===== - - # Get all non-zero utxos together - chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()] - singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True) - self.nodes[0].generate(1) - node0_balance = self.nodes[0].getbalance() - # Split into two chains - rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')}) - signedtx = self.nodes[0].signrawtransaction(rawtx) - singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"]) - self.nodes[0].generate(1) - - # Make a long chain of unconfirmed payments without hitting mempool limit - # Each tx we make leaves only one output of change on a chain 1 longer - # Since the amount to send is always much less than the outputs, we only ever need one output - # So we should be able to generate exactly chainlimit txs for each original output - sending_addr = self.nodes[1].getnewaddress() - txid_list = [] - for i in range(chainlimit*2): - txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))) - assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2) - assert_equal(len(txid_list), chainlimit*2) - - # Without walletrejectlongchains, we will still generate a txid - # The tx will be stored in the wallet but not accepted to the mempool - extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')) - assert(extra_txid not in self.nodes[0].getrawmempool()) - assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]) - self.nodes[0].abandontransaction(extra_txid) - total_txs = len(self.nodes[0].listtransactions("*",99999)) - - # Try with walletrejectlongchains - # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf - 
self.stop_node(0) - self.start_node(0, extra_args=["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)]) - - # wait for loadmempool - timeout = 10 - while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2): - time.sleep(0.5) - timeout -= 0.5 - assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2) - - node0_balance = self.nodes[0].getbalance() - # With walletrejectlongchains we will not create the tx and store it in our wallet. - assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01')) - - # Verify nothing new in wallet - assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999))) - -if __name__ == '__main__': - WalletTest().main() diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py new file mode 100755 index 0000000000..14964438af --- /dev/null +++ b/test/functional/wallet_abandonconflict.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the abandontransaction RPC. + + The abandontransaction RPC marks a transaction and all its in-wallet + descendants as abandoned which allows their inputs to be respent. It can be + used to replace "stuck" or evicted transactions. It only works on transactions + which are not included in a block and are not currently in the mempool. It has + no effect on transactions which are already conflicted or abandoned. +""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class AbandonConflictTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.extra_args = [["-minrelaytxfee=0.00001"], []] + + def run_test(self): + self.nodes[1].generate(100) + sync_blocks(self.nodes) + balance = self.nodes[0].getbalance() + txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) + txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) + txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) + sync_mempools(self.nodes) + self.nodes[1].generate(1) + + sync_blocks(self.nodes) + newbalance = self.nodes[0].getbalance() + assert(balance - newbalance < Decimal("0.001")) #no more than fees lost + balance = newbalance + + # Disconnect nodes so node0's transactions don't get into node1's mempool + disconnect_nodes(self.nodes[0], 1) + + # Identify the 10btc outputs + nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10")) + nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10")) + nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10")) + + inputs =[] + # spend 10btc outputs from txA and txB + inputs.append({"txid":txA, "vout":nA}) + inputs.append({"txid":txB, "vout":nB}) + outputs = {} + + outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") + outputs[self.nodes[1].getnewaddress()] = Decimal("5") + signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) + txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) + + # Identify the 14.99998btc output + nAB = next(i for i, vout in 
enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
+
+        #Create a child tx spending AB1 and C
+        inputs = []
+        inputs.append({"txid":txAB1, "vout":nAB})
+        inputs.append({"txid":txC, "vout":nC})
+        outputs = {}
+        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
+        signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
+        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
+
+        # In mempool txs from self should increase balance from change
+        newbalance = self.nodes[0].getbalance()
+        assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
+        balance = newbalance
+
+        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
+        # TODO: redo with eviction
+        self.stop_node(0)
+        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
+
+        # Verify txs no longer in either node's mempool
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+        assert_equal(len(self.nodes[1].getrawmempool()), 0)
+
+        # Not in mempool txs from self should only reduce balance
+        # inputs are still spent, but change not received
+        newbalance = self.nodes[0].getbalance()
+        assert_equal(newbalance, balance - Decimal("24.9996"))
+        # Unconfirmed received funds that are not in mempool, also shouldn't show
+        # up in unconfirmed balance
+        unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
+        assert_equal(unconfbalance, newbalance)
+        # Also shouldn't show up in listunspent
+        assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
+        balance = newbalance
+
+        # Abandon original transaction and verify inputs are available again
+        # including that the child tx was also abandoned
+        self.nodes[0].abandontransaction(txAB1)
+        newbalance = self.nodes[0].getbalance()
+        assert_equal(newbalance, balance + Decimal("30"))
+        balance = newbalance
+
+        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
+        self.stop_node(0)
+        self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+        assert_equal(self.nodes[0].getbalance(), balance)
+
+        # But if it's received again then it is unabandoned
+        # And since now in mempool, the change is available
+        # But its child tx remains abandoned
+        self.nodes[0].sendrawtransaction(signed["hex"])
+        newbalance = self.nodes[0].getbalance()
+        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
+        balance = newbalance
+
+        # Send child tx again so it's unabandoned
+        self.nodes[0].sendrawtransaction(signed2["hex"])
+        newbalance = self.nodes[0].getbalance()
+        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
+        balance = newbalance
+
+        # Remove using high relay fee again
+        self.stop_node(0)
+        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+        newbalance = self.nodes[0].getbalance()
+        assert_equal(newbalance, balance - Decimal("24.9996"))
+        balance = newbalance
+
+        # Create a double spend of AB1 by spending again from only A's 10 output
+        # Mine double spend from node 1
+        inputs =[]
+        inputs.append({"txid":txA, "vout":nA})
+        outputs = {}
+        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
+        tx = self.nodes[0].createrawtransaction(inputs, outputs)
+        signed = self.nodes[0].signrawtransaction(tx)
+        self.nodes[1].sendrawtransaction(signed["hex"])
+        self.nodes[1].generate(1)
+
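+        # Once the nodes reconnect and sync below, node0 sees the mined double
+        # spend of txA's output, which marks AB1 (and its child ABC2) as
+        # conflicted, freeing B's and C's 10 BTC outputs for spending again.
+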
connect_nodes(self.nodes[0], 1) + sync_blocks(self.nodes) + + # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted + newbalance = self.nodes[0].getbalance() + assert_equal(newbalance, balance + Decimal("20")) + balance = newbalance + + # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 + # Invalidate the block with the double spend and B's 10 BTC output should no longer be available + # Don't think C's should either + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + newbalance = self.nodes[0].getbalance() + #assert_equal(newbalance, balance - Decimal("10")) + self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") + self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315") + self.log.info(str(balance) + " -> " + str(newbalance) + " ?") + +if __name__ == '__main__': + AbandonConflictTest().main() diff --git a/test/functional/wallet_accounts.py b/test/functional/wallet_accounts.py new file mode 100755 index 0000000000..ecd1cfc82b --- /dev/null +++ b/test/functional/wallet_accounts.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test account RPCs. + +RPCs tested are: + - getaccountaddress + - getaddressesbyaccount + - listaddressgroupings + - setaccount + - sendfrom (with account arguments) + - move (with account arguments) +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class WalletAccountsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [[]] + + def run_test(self): + node = self.nodes[0] + # Check that there's no UTXO on any of the nodes + assert_equal(len(node.listunspent()), 0) + + # Note each time we call generate, all generated coins go into + # the same address, so we call twice to get two addresses w/50 each + node.generate(1) + node.generate(101) + assert_equal(node.getbalance(), 100) + + # there should be 2 address groups + # each with 1 address with a balance of 50 Bitcoins + address_groups = node.listaddressgroupings() + assert_equal(len(address_groups), 2) + # the addresses aren't linked now, but will be after we send to the + # common address + linked_addresses = set() + for address_group in address_groups: + assert_equal(len(address_group), 1) + assert_equal(len(address_group[0]), 2) + assert_equal(address_group[0][1], 50) + linked_addresses.add(address_group[0][0]) + + # send 50 from each address to a third address not in this wallet + # There's some fee that will come back to us when the miner reward + # matures. 
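+        # common_address below is a static regtest address that is not in this
+        # wallet; subtractfeefrom lets the full 100 BTC balance be swept, with
+        # the fee taken out of the received amount.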
+ common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr" + txid = node.sendmany( + fromaccount="", + amounts={common_address: 100}, + subtractfeefrom=[common_address], + minconf=1, + ) + tx_details = node.gettransaction(txid) + fee = -tx_details['details'][0]['fee'] + # there should be 1 address group, with the previously + # unlinked addresses now linked (they both have 0 balance) + address_groups = node.listaddressgroupings() + assert_equal(len(address_groups), 1) + assert_equal(len(address_groups[0]), 2) + assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses) + assert_equal([a[1] for a in address_groups[0]], [0, 0]) + + node.generate(1) + + # we want to reset so that the "" account has what's expected. + # otherwise we're off by exactly the fee amount as that's mined + # and matures in the next 100 blocks + node.sendfrom("", common_address, fee) + amount_to_send = 1.0 + + # Create accounts and make sure subsequent account API calls + # recognize the account/address associations. + accounts = [Account(name) for name in ("a", "b", "c", "d", "e")] + for account in accounts: + account.add_receive_address(node.getaccountaddress(account.name)) + account.verify(node) + + # Send a transaction to each account, and make sure this forces + # getaccountaddress to generate a new receiving address. + for account in accounts: + node.sendtoaddress(account.receive_address, amount_to_send) + account.add_receive_address(node.getaccountaddress(account.name)) + account.verify(node) + + # Check the amounts received. + node.generate(1) + for account in accounts: + assert_equal( + node.getreceivedbyaddress(account.addresses[0]), amount_to_send) + assert_equal(node.getreceivedbyaccount(account.name), amount_to_send) + + # Check that sendfrom account reduces listaccounts balances. + for i, account in enumerate(accounts): + to_account = accounts[(i+1) % len(accounts)] + node.sendfrom(account.name, to_account.receive_address, amount_to_send) + node.generate(1) + for account in accounts: + account.add_receive_address(node.getaccountaddress(account.name)) + account.verify(node) + assert_equal(node.getreceivedbyaccount(account.name), 2) + node.move(account.name, "", node.getbalance(account.name)) + account.verify(node) + node.generate(101) + expected_account_balances = {"": 5200} + for account in accounts: + expected_account_balances[account.name] = 0 + assert_equal(node.listaccounts(), expected_account_balances) + assert_equal(node.getbalance(""), 5200) + + # Check that setaccount can assign an account to a new unused address. + for account in accounts: + address = node.getaccountaddress("") + node.setaccount(address, account.name) + account.add_address(address) + account.verify(node) + assert(address not in node.getaddressesbyaccount("")) + + # Check that addmultisigaddress can assign accounts. + for account in accounts: + addresses = [] + for x in range(10): + addresses.append(node.getnewaddress()) + multisig_address = node.addmultisigaddress(5, addresses, account.name)['address'] + account.add_address(multisig_address) + account.verify(node) + node.sendfrom("", multisig_address, 50) + node.generate(101) + for account in accounts: + assert_equal(node.getbalance(account.name), 50) + + # Check that setaccount can change the account of an address from a + # different account. + change_account(node, accounts[0].addresses[0], accounts[0], accounts[1]) + + # Check that setaccount can change the account of an address which + # is the receiving address of a different account. 
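+        # (change_account, defined below, also verifies that the old account is
+        # handed a brand new receiving address afterwards.)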
+        change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
+
+        # Check that setaccount can set the account of an address already
+        # in the account. This is a no-op.
+        change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
+
+        # Check that setaccount can set the account of an address which is
+        # already the receiving address of the account. It would probably make
+        # sense for this to be a no-op, but right now it resets the receiving
+        # address, causing getaccountaddress to return a brand new address.
+        change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
+
+class Account:
+    def __init__(self, name):
+        # Account name
+        self.name = name
+        # Current receiving address associated with this account.
+        self.receive_address = None
+        # List of all addresses assigned to this account
+        self.addresses = []
+
+    def add_address(self, address):
+        assert_equal(address not in self.addresses, True)
+        self.addresses.append(address)
+
+    def add_receive_address(self, address):
+        self.add_address(address)
+        self.receive_address = address
+
+    def verify(self, node):
+        if self.receive_address is not None:
+            assert self.receive_address in self.addresses
+            assert_equal(node.getaccountaddress(self.name), self.receive_address)
+
+        for address in self.addresses:
+            assert_equal(node.getaccount(address), self.name)
+
+        assert_equal(
+            set(node.getaddressesbyaccount(self.name)), set(self.addresses))
+
+
+def change_account(node, address, old_account, new_account):
+    assert_equal(address in old_account.addresses, True)
+    node.setaccount(address, new_account.name)
+
+    old_account.addresses.remove(address)
+    new_account.add_address(address)
+
+    # Calling setaccount on an address which was previously the receiving
+    # address of a different account should reset the receiving address of
+    # the old account, causing getaccountaddress to return a brand new
+    # address.
+    if address == old_account.receive_address:
+        new_address = node.getaccountaddress(old_account.name)
+        assert_equal(new_address not in old_account.addresses, True)
+        assert_equal(new_address not in new_account.addresses, True)
+        old_account.add_receive_address(new_address)
+
+    old_account.verify(node)
+    new_account.verify(node)
+
+
+if __name__ == '__main__':
+    WalletAccountsTest().main()
diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py
new file mode 100755
index 0000000000..38a3425214
--- /dev/null
+++ b/test/functional/wallet_address_types.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test that the wallet can send and receive using all combinations of address types.

+There are 5 nodes-under-test:
+    - node0 uses legacy addresses
+    - node1 uses p2sh/segwit addresses
+    - node2 uses p2sh/segwit addresses and bech32 addresses for change
+    - node3 uses bech32 addresses
+    - node4 uses a p2sh/segwit address for change
+
+node5 exists to generate new blocks.
+
+## Multisig address test
+
+Test that adding a multisig address with:
+    - an uncompressed pubkey always gives a legacy address
+    - only compressed pubkeys gives an `-addresstype` address
+
+## Sending to address types test
+
+A series of tests, iterating over node0-node4.
+
In each iteration of the test, one node sends:
+    - 10/101th of its balance to itself (using getrawchangeaddress for single key addresses)
+    - 20/101th to the next node
+    - 30/101th to the node after that
+    - 40/101th to the remaining node
+    - 1/101th remains as fee+change
+
+Iterate over each node for single key addresses, and then over each node for
+multisig addresses.
+
+Repeat test, but with explicit address_type parameters passed to getnewaddress
+and getrawchangeaddress:
+    - node0 and node3 send to p2sh.
+    - node1 sends to bech32.
+    - node2 sends to legacy.
+
+As every node sends coins after receiving, this also
+verifies that spending coins sent to all these address types works.
+
+## Change type test
+
+Test that the nodes generate the correct change address type:
+    - node0 always uses a legacy change address.
+    - node1 uses a bech32 address for change if any destination address is bech32.
+    - node2 always uses a bech32 address for change.
+    - node3 always uses a bech32 address for change.
+    - node4 always uses a p2sh/segwit output for change.
+"""
+
+from decimal import Decimal
+import itertools
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_equal,
+    assert_greater_than,
+    assert_raises_rpc_error,
+    connect_nodes_bi,
+    sync_blocks,
+    sync_mempools,
+)
+
+class AddressTypeTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 6
+        self.extra_args = [
+            ["-addresstype=legacy"],
+            ["-addresstype=p2sh-segwit"],
+            ["-addresstype=p2sh-segwit", "-changetype=bech32"],
+            ["-addresstype=bech32"],
+            ["-changetype=p2sh-segwit"],
+            []
+        ]
+
+    def setup_network(self):
+        self.setup_nodes()
+
+        # Fully mesh-connect nodes for faster mempool sync
+        for i, j in itertools.product(range(self.num_nodes), repeat=2):
+            if i > j:
+                connect_nodes_bi(self.nodes, i, j)
+        self.sync_all()
+
+    def get_balances(self, confirmed=True):
+        """Return a list of confirmed or unconfirmed balances."""
+        if confirmed:
+            return [self.nodes[i].getbalance() for i in range(4)]
+        else:
+            return [self.nodes[i].getunconfirmedbalance() for i in range(4)]
+
+    def test_address(self, node, address, multisig, typ):
+        """Run sanity checks on an address."""
+        info = self.nodes[node].validateaddress(address)
+        assert(info['isvalid'])
+        if not multisig and typ == 'legacy':
+            # P2PKH
+            assert(not info['isscript'])
+            assert(not info['iswitness'])
+            assert('pubkey' in info)
+        elif not multisig and typ == 'p2sh-segwit':
+            # P2SH-P2WPKH
+            assert(info['isscript'])
+            assert(not info['iswitness'])
+            assert_equal(info['script'], 'witness_v0_keyhash')
+            assert('pubkey' in info)
+        elif not multisig and typ == 'bech32':
+            # P2WPKH
+            assert(not info['isscript'])
+            assert(info['iswitness'])
+            assert_equal(info['witness_version'], 0)
+            assert_equal(len(info['witness_program']), 40)
+            assert('pubkey' in info)
+        elif typ == 'legacy':
+            # P2SH-multisig
+            assert(info['isscript'])
+            assert_equal(info['script'], 'multisig')
+            assert(not info['iswitness'])
+            assert('pubkeys' in info)
+        elif typ == 'p2sh-segwit':
+            # P2SH-P2WSH-multisig
+            assert(info['isscript'])
+            assert_equal(info['script'], 'witness_v0_scripthash')
+            assert(not info['iswitness'])
+            assert(info['embedded']['isscript'])
+            assert_equal(info['embedded']['script'], 'multisig')
+            assert(info['embedded']['iswitness'])
+            assert_equal(info['embedded']['witness_version'], 0)
+            assert_equal(len(info['embedded']['witness_program']), 64)
+            assert('pubkeys' in info['embedded'])
+        elif typ == 'bech32':
+            # P2WSH-multisig
+
assert(info['isscript']) + assert_equal(info['script'], 'multisig') + assert(info['iswitness']) + assert_equal(info['witness_version'], 0) + assert_equal(len(info['witness_program']), 64) + assert('pubkeys' in info) + else: + # Unknown type + assert(False) + + def test_change_output_type(self, node_sender, destinations, expected_type): + txid = self.nodes[node_sender].sendmany(fromaccount="", amounts=dict.fromkeys(destinations, 0.001)) + raw_tx = self.nodes[node_sender].getrawtransaction(txid) + tx = self.nodes[node_sender].decoderawtransaction(raw_tx) + + # Make sure the transaction has change: + assert_equal(len(tx["vout"]), len(destinations) + 1) + + # Make sure the destinations are included, and remove them: + output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]] + change_addresses = [d for d in output_addresses if d not in destinations] + assert_equal(len(change_addresses), 1) + + self.log.debug("Check if change address " + change_addresses[0] + " is " + expected_type) + self.test_address(node_sender, change_addresses[0], multisig=False, typ=expected_type) + + def run_test(self): + # Mine 101 blocks on node5 to bring nodes out of IBD and make sure that + # no coinbases are maturing for the nodes-under-test during the test + self.nodes[5].generate(101) + sync_blocks(self.nodes) + + uncompressed_1 = "0496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858ee" + uncompressed_2 = "047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77" + compressed_1 = "0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52" + compressed_2 = "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073" + + # addmultisigaddress with at least 1 uncompressed key should return a legacy address. + for node in range(4): + self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, uncompressed_2])['address'], True, 'legacy') + self.test_address(node, self.nodes[node].addmultisigaddress(2, [compressed_1, uncompressed_2])['address'], True, 'legacy') + self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, compressed_2])['address'], True, 'legacy') + # addmultisigaddress with all compressed keys should return the appropriate address type (even when the keys are not ours). 
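+        # Expected mapping, given the -addresstype args above: node0 (legacy)
+        # gets P2SH multisig, node1/node2 (p2sh-segwit) get P2SH-P2WSH, and
+        # node3 (bech32) gets P2WSH.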
+ self.test_address(0, self.nodes[0].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'legacy') + self.test_address(1, self.nodes[1].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit') + self.test_address(2, self.nodes[2].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit') + self.test_address(3, self.nodes[3].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'bech32') + + for explicit_type, multisig, from_node in itertools.product([False, True], [False, True], range(4)): + address_type = None + if explicit_type and not multisig: + if from_node == 1: + address_type = 'bech32' + elif from_node == 0 or from_node == 3: + address_type = 'p2sh-segwit' + else: + address_type = 'legacy' + self.log.info("Sending from node {} ({}) with{} multisig using {}".format(from_node, self.extra_args[from_node], "" if multisig else "out", "default" if address_type is None else address_type)) + old_balances = self.get_balances() + self.log.debug("Old balances are {}".format(old_balances)) + to_send = (old_balances[from_node] / 101).quantize(Decimal("0.00000001")) + sends = {} + + self.log.debug("Prepare sends") + for n, to_node in enumerate(range(from_node, from_node + 4)): + to_node %= 4 + change = False + if not multisig: + if from_node == to_node: + # When sending non-multisig to self, use getrawchangeaddress + address = self.nodes[to_node].getrawchangeaddress(address_type=address_type) + change = True + else: + address = self.nodes[to_node].getnewaddress(address_type=address_type) + else: + addr1 = self.nodes[to_node].getnewaddress() + addr2 = self.nodes[to_node].getnewaddress() + address = self.nodes[to_node].addmultisigaddress(2, [addr1, addr2])['address'] + + # Do some sanity checking on the created address + if address_type is not None: + typ = address_type + elif to_node == 0: + typ = 'legacy' + elif to_node == 1 or (to_node == 2 and not change): + typ = 'p2sh-segwit' + else: + typ = 'bech32' + self.test_address(to_node, address, multisig, typ) + + # Output entry + sends[address] = to_send * 10 * (1 + n) + + self.log.debug("Sending: {}".format(sends)) + self.nodes[from_node].sendmany("", sends) + sync_mempools(self.nodes) + + unconf_balances = self.get_balances(False) + self.log.debug("Check unconfirmed balances: {}".format(unconf_balances)) + assert_equal(unconf_balances[from_node], 0) + for n, to_node in enumerate(range(from_node + 1, from_node + 4)): + to_node %= 4 + assert_equal(unconf_balances[to_node], to_send * 10 * (2 + n)) + + # node5 collects fee and block subsidy to keep accounting simple + self.nodes[5].generate(1) + sync_blocks(self.nodes) + + new_balances = self.get_balances() + self.log.debug("Check new balances: {}".format(new_balances)) + # We don't know what fee was set, so we can only check bounds on the balance of the sending node + assert_greater_than(new_balances[from_node], to_send * 10) + assert_greater_than(to_send * 11, new_balances[from_node]) + for n, to_node in enumerate(range(from_node + 1, from_node + 4)): + to_node %= 4 + assert_equal(new_balances[to_node], old_balances[to_node] + to_send * 10 * (2 + n)) + + # Get one p2sh/segwit address from node2 and two bech32 addresses from node3: + to_address_p2sh = self.nodes[2].getnewaddress() + to_address_bech32_1 = self.nodes[3].getnewaddress() + to_address_bech32_2 = self.nodes[3].getnewaddress() + + # Fund node 4: + self.nodes[5].sendtoaddress(self.nodes[4].getnewaddress(), Decimal("1")) + self.nodes[5].generate(1) + 
sync_blocks(self.nodes) + assert_equal(self.nodes[4].getbalance(), 1) + + self.log.info("Nodes with addresstype=legacy never use a P2WPKH change output") + self.test_change_output_type(0, [to_address_bech32_1], 'legacy') + + self.log.info("Nodes with addresstype=p2sh-segwit only use a P2WPKH change output if any destination address is bech32:") + self.test_change_output_type(1, [to_address_p2sh], 'p2sh-segwit') + self.test_change_output_type(1, [to_address_bech32_1], 'bech32') + self.test_change_output_type(1, [to_address_p2sh, to_address_bech32_1], 'bech32') + self.test_change_output_type(1, [to_address_bech32_1, to_address_bech32_2], 'bech32') + + self.log.info("Nodes with change_type=bech32 always use a P2WPKH change output:") + self.test_change_output_type(2, [to_address_bech32_1], 'bech32') + self.test_change_output_type(2, [to_address_p2sh], 'bech32') + + self.log.info("Nodes with addresstype=bech32 always use a P2WPKH change output (unless changetype is set otherwise):") + self.test_change_output_type(3, [to_address_bech32_1], 'bech32') + self.test_change_output_type(3, [to_address_p2sh], 'bech32') + + self.log.info('getrawchangeaddress defaults to addresstype if -changetype is not set and argument is absent') + self.test_address(3, self.nodes[3].getrawchangeaddress(), multisig=False, typ='bech32') + + self.log.info('getrawchangeaddress fails with invalid changetype argument') + assert_raises_rpc_error(-5, "Unknown address type 'bech23'", self.nodes[3].getrawchangeaddress, 'bech23') + + self.log.info("Nodes with changetype=p2sh-segwit never use a P2WPKH change output") + self.test_change_output_type(4, [to_address_bech32_1], 'p2sh-segwit') + self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit') + self.log.info("Except for getrawchangeaddress if specified:") + self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit') + self.test_address(4, self.nodes[4].getrawchangeaddress('bech32'), multisig=False, typ='bech32') + +if __name__ == '__main__': + AddressTypeTest().main() diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py new file mode 100755 index 0000000000..b4be7debb5 --- /dev/null +++ b/test/functional/wallet_backup.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the wallet backup features. + +Test case is: +4 nodes. 1 2 and 3 send transactions between each other, +fourth node is a miner. +1 2 3 each mine a block to start, then +Miner creates 100 blocks so 1 2 3 each have 50 mature +coins to spend. +Then 5 iterations of 1/2/3 sending coins amongst +themselves to get transactions in the wallets, +and the miner mining one block. + +Wallets are backed up using dumpwallet/backupwallet. +Then 5 more iterations of transactions and mining a block. + +Miner then generates 101 more blocks, so any +transaction fees paid mature. + +Sanity check: + Sum(1,2,3,4 balances) == 114*50 + +1/2/3 are shutdown, and their wallets erased. +Then restore using wallet.dat backup. And +confirm 1/2/3/4 balances are same as before. + +Shutdown again, restore using importwallet, +and confirm again balances are correct. 
+""" +from random import randint +import shutil + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class WalletBackupTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + self.setup_clean_chain = True + # nodes 1, 2,3 are spenders, let's give them a keypool=100 + self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []] + + def setup_network(self, split=False): + self.setup_nodes() + connect_nodes(self.nodes[0], 3) + connect_nodes(self.nodes[1], 3) + connect_nodes(self.nodes[2], 3) + connect_nodes(self.nodes[2], 0) + self.sync_all() + + def one_send(self, from_node, to_address): + if (randint(1,2) == 1): + amount = Decimal(randint(1,10)) / Decimal(10) + self.nodes[from_node].sendtoaddress(to_address, amount) + + def do_one_round(self): + a0 = self.nodes[0].getnewaddress() + a1 = self.nodes[1].getnewaddress() + a2 = self.nodes[2].getnewaddress() + + self.one_send(0, a1) + self.one_send(0, a2) + self.one_send(1, a0) + self.one_send(1, a2) + self.one_send(2, a0) + self.one_send(2, a1) + + # Have the miner (node3) mine a block. + # Must sync mempools before mining. + sync_mempools(self.nodes) + self.nodes[3].generate(1) + sync_blocks(self.nodes) + + # As above, this mirrors the original bash test. + def start_three(self): + self.start_node(0) + self.start_node(1) + self.start_node(2) + connect_nodes(self.nodes[0], 3) + connect_nodes(self.nodes[1], 3) + connect_nodes(self.nodes[2], 3) + connect_nodes(self.nodes[2], 0) + + def stop_three(self): + self.stop_node(0) + self.stop_node(1) + self.stop_node(2) + + def erase_three(self): + os.remove(self.options.tmpdir + "/node0/regtest/wallets/wallet.dat") + os.remove(self.options.tmpdir + "/node1/regtest/wallets/wallet.dat") + os.remove(self.options.tmpdir + "/node2/regtest/wallets/wallet.dat") + + def run_test(self): + self.log.info("Generating initial blockchain") + self.nodes[0].generate(1) + sync_blocks(self.nodes) + self.nodes[1].generate(1) + sync_blocks(self.nodes) + self.nodes[2].generate(1) + sync_blocks(self.nodes) + self.nodes[3].generate(100) + sync_blocks(self.nodes) + + assert_equal(self.nodes[0].getbalance(), 50) + assert_equal(self.nodes[1].getbalance(), 50) + assert_equal(self.nodes[2].getbalance(), 50) + assert_equal(self.nodes[3].getbalance(), 0) + + self.log.info("Creating transactions") + # Five rounds of sending each other transactions. + for i in range(5): + self.do_one_round() + + self.log.info("Backing up") + tmpdir = self.options.tmpdir + self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak") + self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump") + self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak") + self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump") + self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak") + self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump") + + self.log.info("More transactions") + for i in range(5): + self.do_one_round() + + # Generate 101 more blocks, so any fees paid mature + self.nodes[3].generate(101) + self.sync_all() + + balance0 = self.nodes[0].getbalance() + balance1 = self.nodes[1].getbalance() + balance2 = self.nodes[2].getbalance() + balance3 = self.nodes[3].getbalance() + total = balance0 + balance1 + balance2 + balance3 + + # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.) + # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700. 
+ assert_equal(total, 5700) + + ## + # Test restoring spender wallets from backups + ## + self.log.info("Restoring using wallet.dat") + self.stop_three() + self.erase_three() + + # Start node2 with no chain + shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") + shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") + + # Restore wallets from backup + shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallets/wallet.dat") + shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallets/wallet.dat") + shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallets/wallet.dat") + + self.log.info("Re-starting nodes") + self.start_three() + sync_blocks(self.nodes) + + assert_equal(self.nodes[0].getbalance(), balance0) + assert_equal(self.nodes[1].getbalance(), balance1) + assert_equal(self.nodes[2].getbalance(), balance2) + + self.log.info("Restoring using dumped wallet") + self.stop_three() + self.erase_three() + + #start node2 with no chain + shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") + shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") + + self.start_three() + + assert_equal(self.nodes[0].getbalance(), 0) + assert_equal(self.nodes[1].getbalance(), 0) + assert_equal(self.nodes[2].getbalance(), 0) + + self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump") + self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump") + self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump") + + sync_blocks(self.nodes) + + assert_equal(self.nodes[0].getbalance(), balance0) + assert_equal(self.nodes[1].getbalance(), balance1) + assert_equal(self.nodes[2].getbalance(), balance2) + + # Backup to source wallet file must fail + sourcePaths = [ + tmpdir + "/node0/regtest/wallets/wallet.dat", + tmpdir + "/node0/./regtest/wallets/wallet.dat", + tmpdir + "/node0/regtest/wallets/", + tmpdir + "/node0/regtest/wallets"] + + for sourcePath in sourcePaths: + assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath) + + +if __name__ == '__main__': + WalletBackupTest().main() diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py new file mode 100755 index 0000000000..a90dbc8adf --- /dev/null +++ b/test/functional/wallet_basic.py @@ -0,0 +1,446 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the wallet.""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class WalletTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + self.setup_clean_chain = True + + def setup_network(self): + self.add_nodes(4) + self.start_node(0) + self.start_node(1) + self.start_node(2) + connect_nodes_bi(self.nodes,0,1) + connect_nodes_bi(self.nodes,1,2) + connect_nodes_bi(self.nodes,0,2) + self.sync_all([self.nodes[0:3]]) + + def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size): + """Return curr_balance after asserting the fee was in range""" + fee = balance_with_fee - curr_balance + assert_fee_amount(fee, tx_size, fee_per_byte * 1000) + return curr_balance + + def get_vsize(self, txn): + return self.nodes[0].decoderawtransaction(txn)['vsize'] + + def run_test(self): + # Check that there's no UTXO on none of the nodes + assert_equal(len(self.nodes[0].listunspent()), 0) + assert_equal(len(self.nodes[1].listunspent()), 0) + assert_equal(len(self.nodes[2].listunspent()), 0) + + self.log.info("Mining blocks...") + + self.nodes[0].generate(1) + + walletinfo = self.nodes[0].getwalletinfo() + assert_equal(walletinfo['immature_balance'], 50) + assert_equal(walletinfo['balance'], 0) + + self.sync_all([self.nodes[0:3]]) + self.nodes[1].generate(101) + self.sync_all([self.nodes[0:3]]) + + assert_equal(self.nodes[0].getbalance(), 50) + assert_equal(self.nodes[1].getbalance(), 50) + assert_equal(self.nodes[2].getbalance(), 0) + + # Check that only first and second nodes have UTXOs + utxos = self.nodes[0].listunspent() + assert_equal(len(utxos), 1) + assert_equal(len(self.nodes[1].listunspent()), 1) + assert_equal(len(self.nodes[2].listunspent()), 0) + + self.log.info("test gettxout") + confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"] + # First, outputs that are unspent both in the chain and in the + # mempool should appear with or without include_mempool + txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False) + assert_equal(txout['value'], 50) + txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True) + assert_equal(txout['value'], 50) + + # Send 21 BTC from 0 to 2 using sendtoaddress call. 
+ # Locked memory should use at least 32 bytes to sign each transaction + self.log.info("test getmemoryinfo") + memory_before = self.nodes[0].getmemoryinfo() + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11) + mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10) + memory_after = self.nodes[0].getmemoryinfo() + assert(memory_before['locked']['used'] + 64 <= memory_after['locked']['used']) + + self.log.info("test gettxout (second part)") + # utxo spent in mempool should be visible if you exclude mempool + # but invisible if you include mempool + txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False) + assert_equal(txout['value'], 50) + txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True) + assert txout is None + # new utxo from mempool should be invisible if you exclude mempool + # but visible if you include mempool + txout = self.nodes[0].gettxout(mempool_txid, 0, False) + assert txout is None + txout1 = self.nodes[0].gettxout(mempool_txid, 0, True) + txout2 = self.nodes[0].gettxout(mempool_txid, 1, True) + # note the mempool tx will have randomly assigned indices + # but 10 will go to node2 and the rest will go to node0 + balance = self.nodes[0].getbalance() + assert_equal(set([txout1['value'], txout2['value']]), set([10, balance])) + walletinfo = self.nodes[0].getwalletinfo() + assert_equal(walletinfo['immature_balance'], 0) + + # Have node0 mine a block, thus it will collect its own fee. + self.nodes[0].generate(1) + self.sync_all([self.nodes[0:3]]) + + # Exercise locking of unspent outputs + unspent_0 = self.nodes[2].listunspent()[0] + unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]} + assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0]) + self.nodes[2].lockunspent(False, [unspent_0]) + assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0]) + assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20) + assert_equal([unspent_0], self.nodes[2].listlockunspent()) + self.nodes[2].lockunspent(True, [unspent_0]) + assert_equal(len(self.nodes[2].listlockunspent()), 0) + assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction", + self.nodes[2].lockunspent, False, + [{"txid": "0000000000000000000000000000000000", "vout": 0}]) + assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds", + self.nodes[2].lockunspent, False, + [{"txid": unspent_0["txid"], "vout": 999}]) + + # Have node1 generate 100 blocks (so node0 can recover the fee) + self.nodes[1].generate(100) + self.sync_all([self.nodes[0:3]]) + + # node0 should end up with 100 btc in block rewards plus fees, but + # minus the 21 plus fees sent to node2 + assert_equal(self.nodes[0].getbalance(), 100-21) + assert_equal(self.nodes[2].getbalance(), 21) + + # Node0 should have two unspent outputs. 
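+        # (roughly: the change left over from the two sends above plus the
+        # newly matured coinbase from the block node0 mined)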
+ # Create a couple of transactions to send them to node2, submit them through + # node1, and make sure both node0 and node2 pick them up properly: + node0utxos = self.nodes[0].listunspent(1) + assert_equal(len(node0utxos), 2) + + # create both transactions + txns_to_send = [] + for utxo in node0utxos: + inputs = [] + outputs = {} + inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]}) + outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3 + raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) + txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx)) + + # Have node 1 (miner) send the transactions + self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True) + self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True) + + # Have node1 mine a block to confirm transactions: + self.nodes[1].generate(1) + self.sync_all([self.nodes[0:3]]) + + assert_equal(self.nodes[0].getbalance(), 0) + assert_equal(self.nodes[2].getbalance(), 94) + assert_equal(self.nodes[2].getbalance("from1"), 94-21) + + # Verify that a spent output cannot be locked anymore + spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]} + assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0]) + + # Send 10 BTC normal + address = self.nodes[0].getnewaddress("test") + fee_per_byte = Decimal('0.001') / 1000 + self.nodes[2].settxfee(fee_per_byte * 1000) + txid = self.nodes[2].sendtoaddress(address, 10, "", "", False) + self.nodes[2].generate(1) + self.sync_all([self.nodes[0:3]]) + node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) + assert_equal(self.nodes[0].getbalance(), Decimal('10')) + + # Send 10 BTC with subtract fee from amount + txid = self.nodes[2].sendtoaddress(address, 10, "", "", True) + self.nodes[2].generate(1) + self.sync_all([self.nodes[0:3]]) + node_2_bal -= Decimal('10') + assert_equal(self.nodes[2].getbalance(), node_2_bal) + node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) + + # Sendmany 10 BTC + txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", []) + self.nodes[2].generate(1) + self.sync_all([self.nodes[0:3]]) + node_0_bal += Decimal('10') + node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) + assert_equal(self.nodes[0].getbalance(), node_0_bal) + + # Sendmany 10 BTC with subtract fee from amount + txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address]) + self.nodes[2].generate(1) + self.sync_all([self.nodes[0:3]]) + node_2_bal -= Decimal('10') + assert_equal(self.nodes[2].getbalance(), node_2_bal) + node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].getrawtransaction(txid))) + + # Test ResendWalletTransactions: + # Create a couple of transactions, then start up a fourth + # node (nodes[3]) and ask nodes[0] to rebroadcast. + # EXPECT: nodes[3] should have those transactions in its mempool. 
+        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
+        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        sync_mempools(self.nodes[0:2])
+
+        self.start_node(3)
+        connect_nodes_bi(self.nodes, 0, 3)
+        sync_blocks(self.nodes)
+
+        relayed = self.nodes[0].resendwallettransactions()
+        assert_equal(set(relayed), {txid1, txid2})
+        sync_mempools(self.nodes)
+
+        assert(txid1 in self.nodes[3].getrawmempool())
+
+        # Exercise balance rpcs
+        assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
+        assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
+
+        #check if we can list zero value tx as available coins
+        #1. create rawtx
+        #2. hex-changed one output to 0.0
+        #3. sign and send
+        #4. check if recipient (node0) can list the zero value tx
+        usp = self.nodes[1].listunspent()
+        inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
+        outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
+
+        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
+        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
+        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
+        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
+        zeroValueTxid= decRawTx['txid']
+        self.nodes[1].sendrawtransaction(signedRawTx['hex'])
+
+        self.sync_all()
+        self.nodes[1].generate(1) #mine a block
+        self.sync_all()
+
+        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspent output
+        found = False
+        for uTx in unspentTxs:
+            if uTx['txid'] == zeroValueTxid:
+                found = True
+                assert_equal(uTx['amount'], Decimal('0'))
+        assert(found)
+
+        #do some -walletbroadcast tests
+        self.stop_nodes()
+        self.start_node(0, ["-walletbroadcast=0"])
+        self.start_node(1, ["-walletbroadcast=0"])
+        self.start_node(2, ["-walletbroadcast=0"])
+        connect_nodes_bi(self.nodes,0,1)
+        connect_nodes_bi(self.nodes,1,2)
+        connect_nodes_bi(self.nodes,0,2)
+        self.sync_all([self.nodes[0:3]])
+
+        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
+        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
+        self.nodes[1].generate(1) #mine a block, tx should not be in there
+        self.sync_all([self.nodes[0:3]])
+        assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcast
+
+        #now broadcast from another node, mine a block, sync, and check the balance
+        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
+        self.nodes[1].generate(1)
+        self.sync_all([self.nodes[0:3]])
+        node_2_bal += 2
+        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
+        assert_equal(self.nodes[2].getbalance(), node_2_bal)
+
+        #create another tx
+        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
+
+        #restart the nodes with -walletbroadcast=1
+        self.stop_nodes()
+        self.start_node(0)
+        self.start_node(1)
+        self.start_node(2)
+        connect_nodes_bi(self.nodes,0,1)
+        connect_nodes_bi(self.nodes,1,2)
+        connect_nodes_bi(self.nodes,0,2)
+        sync_blocks(self.nodes[0:3])
+
+        self.nodes[0].generate(1)
+        sync_blocks(self.nodes[0:3])
+        node_2_bal += 2
+
+        #the tx should be added to the balance because after restarting the nodes it should be broadcast
+        assert_equal(self.nodes[2].getbalance(), node_2_bal)
+
+        #send a tx with value in a string (PR#6380 +)
+        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
+        txObj = self.nodes[0].gettransaction(txId)
+
assert_equal(txObj['amount'], Decimal('-2')) + + txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001") + txObj = self.nodes[0].gettransaction(txId) + assert_equal(txObj['amount'], Decimal('-0.0001')) + + #check if JSON parser can handle scientific notation in strings + txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4") + txObj = self.nodes[0].gettransaction(txId) + assert_equal(txObj['amount'], Decimal('-0.0001')) + + # This will raise an exception because the amount type is wrong + assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4") + + # This will raise an exception since generate does not accept a string + assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2") + + # Import address and private key to check correct behavior of spendable unspents + # 1. Send some coins to generate new UTXO + address_to_import = self.nodes[2].getnewaddress() + txid = self.nodes[0].sendtoaddress(address_to_import, 1) + self.nodes[0].generate(1) + self.sync_all([self.nodes[0:3]]) + + # 2. Import address from node2 to node1 + self.nodes[1].importaddress(address_to_import) + + # 3. Validate that the imported address is watch-only on node1 + assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"]) + + # 4. Check that the unspents after import are not spendable + assert_array_result(self.nodes[1].listunspent(), + {"address": address_to_import}, + {"spendable": False}) + + # 5. Import private key of the previously imported address on node1 + priv_key = self.nodes[2].dumpprivkey(address_to_import) + self.nodes[1].importprivkey(priv_key) + + # 6. Check that the unspents are now spendable on node1 + assert_array_result(self.nodes[1].listunspent(), + {"address": address_to_import}, + {"spendable": True}) + + # Mine a block from node0 to an address from node1 + cbAddr = self.nodes[1].getnewaddress() + blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0] + cbTxId = self.nodes[0].getblock(blkHash)['tx'][0] + self.sync_all([self.nodes[0:3]]) + + # Check that the txid and balance is found by node1 + self.nodes[1].gettransaction(cbTxId) + + # check if wallet or blockchain maintenance changes the balance + self.sync_all([self.nodes[0:3]]) + blocks = self.nodes[0].generate(2) + self.sync_all([self.nodes[0:3]]) + balance_nodes = [self.nodes[i].getbalance() for i in range(3)] + block_count = self.nodes[0].getblockcount() + + # Check modes: + # - True: unicode escaped as \u.... 
+ # - False: unicode directly as UTF-8 + for mode in [True, False]: + self.nodes[0].ensure_ascii = mode + # unicode check: Basic Multilingual Plane, Supplementary Plane respectively + for s in [u'рыба', u'𝅘𝅥𝅯']: + addr = self.nodes[0].getaccountaddress(s) + label = self.nodes[0].getaccount(addr) + assert_equal(label, s) + assert(s in self.nodes[0].listaccounts().keys()) + self.nodes[0].ensure_ascii = True # restore to default + + # maintenance tests + maintenance = [ + '-rescan', + '-reindex', + '-zapwallettxes=1', + '-zapwallettxes=2', + # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463 + # '-salvagewallet', + ] + chainlimit = 6 + for m in maintenance: + self.log.info("check " + m) + self.stop_nodes() + # set lower ancestor limit for later + self.start_node(0, [m, "-limitancestorcount="+str(chainlimit)]) + self.start_node(1, [m, "-limitancestorcount="+str(chainlimit)]) + self.start_node(2, [m, "-limitancestorcount="+str(chainlimit)]) + while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]: + # reindex will leave rpc warm up "early"; Wait for it to finish + time.sleep(0.1) + assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)]) + + # Exercise listsinceblock with the last two blocks + coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0]) + assert_equal(coinbase_tx_1["lastblock"], blocks[1]) + assert_equal(len(coinbase_tx_1["transactions"]), 1) + assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1]) + assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0) + + # ==Check that wallet prefers to use coins that don't exceed mempool limits ===== + + # Get all non-zero utxos together + chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()] + singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True) + self.nodes[0].generate(1) + node0_balance = self.nodes[0].getbalance() + # Split into two chains + rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')}) + signedtx = self.nodes[0].signrawtransaction(rawtx) + singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"]) + self.nodes[0].generate(1) + + # Make a long chain of unconfirmed payments without hitting mempool limit + # Each tx we make leaves only one output of change on a chain 1 longer + # Since the amount to send is always much less than the outputs, we only ever need one output + # So we should be able to generate exactly chainlimit txs for each original output + sending_addr = self.nodes[1].getnewaddress() + txid_list = [] + for i in range(chainlimit*2): + txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))) + assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2) + assert_equal(len(txid_list), chainlimit*2) + + # Without walletrejectlongchains, we will still generate a txid + # The tx will be stored in the wallet but not accepted to the mempool + extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')) + assert(extra_txid not in self.nodes[0].getrawmempool()) + assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]) + self.nodes[0].abandontransaction(extra_txid) + total_txs = len(self.nodes[0].listtransactions("*",99999)) + + # Try with walletrejectlongchains + # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf + 
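+        # With -walletrejectlongchains set, the wallet refuses to create a
+        # transaction whose unconfirmed ancestor chain would exceed
+        # -limitancestorcount, instead of creating it and leaving it stuck
+        # outside the mempool as above.
+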
+        self.stop_node(0)
+        self.start_node(0, extra_args=["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])
+
+        # wait for loadmempool
+        timeout = 10
+        while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
+            time.sleep(0.5)
+            timeout -= 0.5
+        assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)
+
+        node0_balance = self.nodes[0].getbalance()
+        # With walletrejectlongchains we will not create the tx, and nothing is stored in the wallet.
+        assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
+
+        # Verify nothing new in wallet
+        assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
+
+if __name__ == '__main__':
+    WalletTest().main()
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
new file mode 100755
index 0000000000..2cd4127854
--- /dev/null
+++ b/test/functional/wallet_bumpfee.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the bumpfee RPC.
+
+Verifies that the bumpfee RPC creates replacement transactions successfully when
+its preconditions are met, and returns appropriate errors in other cases.
+
+This module consists of around a dozen individual test cases implemented in the
+top-level functions named as test_<feature>. The test functions
+can be disabled or reordered if needed for debugging. If new test cases are
+added in the future, they should try to follow the same convention and not
+make assumptions about execution order.
+"""
+
+from test_framework.blocktools import send_to_witness
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework import blocktools
+from test_framework.mininode import CTransaction
+from test_framework.util import *
+
+import io
+
+# Sequence number that is BIP 125 opt-in and BIP 68-compliant
+BIP125_SEQUENCE_NUMBER = 0xfffffffd
+
+WALLET_PASSPHRASE = "test"
+WALLET_PASSPHRASE_TIMEOUT = 3600
+
+
+class BumpFeeTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 2
+        self.setup_clean_chain = True
+        self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
+                           for i in range(self.num_nodes)]
+
+    def run_test(self):
+        # Encrypt wallet for test_locked_wallet_fails test
+        self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
+        self.start_node(1)
+        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
+
+        connect_nodes_bi(self.nodes, 0, 1)
+        self.sync_all()
+
+        peer_node, rbf_node = self.nodes
+        rbf_node_address = rbf_node.getnewaddress()
+
+        # fund rbf node with 25 coins of 0.001 btc (100,000 satoshis)
+        self.log.info("Mining blocks...")
+        peer_node.generate(110)
+        self.sync_all()
+        for i in range(25):
+            peer_node.sendtoaddress(rbf_node_address, 0.001)
+        self.sync_all()
+        peer_node.generate(1)
+        self.sync_all()
+        assert_equal(rbf_node.getbalance(), Decimal("0.025"))
+
+        self.log.info("Running tests")
+        dest_address = peer_node.getnewaddress()
+        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
+        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
+        test_nonrbf_bumpfee_fails(peer_node, dest_address)
+        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
+        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
+ test_small_output_fails(rbf_node, dest_address) + test_dust_to_fee(rbf_node, dest_address) + test_settxfee(rbf_node, dest_address) + test_rebumping(rbf_node, dest_address) + test_rebumping_not_replaceable(rbf_node, dest_address) + test_unconfirmed_not_spendable(rbf_node, rbf_node_address) + test_bumpfee_metadata(rbf_node, dest_address) + test_locked_wallet_fails(rbf_node, dest_address) + self.log.info("Success") + + +def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address): + rbfid = spend_one_input(rbf_node, dest_address) + rbftx = rbf_node.gettransaction(rbfid) + sync_mempools((rbf_node, peer_node)) + assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool() + bumped_tx = rbf_node.bumpfee(rbfid) + assert_equal(bumped_tx["errors"], []) + assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0 + # check that bumped_tx propagates, original tx was evicted and has a wallet conflict + sync_mempools((rbf_node, peer_node)) + assert bumped_tx["txid"] in rbf_node.getrawmempool() + assert bumped_tx["txid"] in peer_node.getrawmempool() + assert rbfid not in rbf_node.getrawmempool() + assert rbfid not in peer_node.getrawmempool() + oldwtx = rbf_node.gettransaction(rbfid) + assert len(oldwtx["walletconflicts"]) > 0 + # check wallet transaction replaces and replaced_by values + bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"]) + assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"]) + assert_equal(bumpedwtx["replaces_txid"], rbfid) + + +def test_segwit_bumpfee_succeeds(rbf_node, dest_address): + # Create a transaction with segwit output, then create an RBF transaction + # which spends it, and make sure bumpfee can be called on it. + + segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001")) + segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress()) + rbf_node.addwitnessaddress(segwit_out["address"]) + segwitid = send_to_witness( + use_p2wsh=False, + node=rbf_node, + utxo=segwit_in, + pubkey=segwit_out["pubkey"], + encode_p2sh=False, + amount=Decimal("0.0009"), + sign=True) + + rbfraw = rbf_node.createrawtransaction([{ + 'txid': segwitid, + 'vout': 0, + "sequence": BIP125_SEQUENCE_NUMBER + }], {dest_address: Decimal("0.0005"), + rbf_node.getrawchangeaddress(): Decimal("0.0003")}) + rbfsigned = rbf_node.signrawtransaction(rbfraw) + rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"]) + assert rbfid in rbf_node.getrawmempool() + + bumped_tx = rbf_node.bumpfee(rbfid) + assert bumped_tx["txid"] in rbf_node.getrawmempool() + assert rbfid not in rbf_node.getrawmempool() + + +def test_nonrbf_bumpfee_fails(peer_node, dest_address): + # cannot replace a non RBF transaction (from node which did not enable RBF) + not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000")) + assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid) + + +def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address): + # cannot bump fee unless the tx has only inputs that we own. 
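+    # (bumpfee must be able to re-sign the replacement transaction, so every
+    # input has to belong to this wallet)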
+ # here, the rbftx has a peer_node coin and then adds a rbf_node input + # Note that this test depends upon the RPC code checking input ownership prior to change outputs + # (since it can't use fundrawtransaction, it lacks a proper change output) + utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)] + inputs = [{ + "txid": utxo["txid"], + "vout": utxo["vout"], + "address": utxo["address"], + "sequence": BIP125_SEQUENCE_NUMBER + } for utxo in utxos] + output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001") + rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val}) + signedtx = rbf_node.signrawtransaction(rawtx) + signedtx = peer_node.signrawtransaction(signedtx["hex"]) + rbfid = rbf_node.sendrawtransaction(signedtx["hex"]) + assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet", + rbf_node.bumpfee, rbfid) + + +def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address): + # cannot bump fee if the transaction has a descendant + # parent is send-to-self, so we don't have to check which output is change when creating the child tx + parent_id = spend_one_input(rbf_node, rbf_node_address) + tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000}) + tx = rbf_node.signrawtransaction(tx) + rbf_node.sendrawtransaction(tx["hex"]) + assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id) + + +def test_small_output_fails(rbf_node, dest_address): + # cannot bump fee with a too-small output + rbfid = spend_one_input(rbf_node, dest_address) + rbf_node.bumpfee(rbfid, {"totalFee": 50000}) + + rbfid = spend_one_input(rbf_node, dest_address) + assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001}) + + +def test_dust_to_fee(rbf_node, dest_address): + # check that if output is reduced to dust, it will be converted to fee + # the bumped tx sets fee=49,900, but it converts to 50,000 + rbfid = spend_one_input(rbf_node, dest_address) + fulltx = rbf_node.getrawtransaction(rbfid, 1) + bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900}) + full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1) + assert_equal(bumped_tx["fee"], Decimal("0.00050000")) + assert_equal(len(fulltx["vout"]), 2) + assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated + + +def test_settxfee(rbf_node, dest_address): + # check that bumpfee reacts correctly to the use of settxfee (paytxfee) + rbfid = spend_one_input(rbf_node, dest_address) + requested_feerate = Decimal("0.00025000") + rbf_node.settxfee(requested_feerate) + bumped_tx = rbf_node.bumpfee(rbfid) + actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"] + # Assert that the difference between the requested feerate and the actual + # feerate of the bumped transaction is small. 
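+    # (Worked example with made-up numbers: a bumped tx of 141 vbytes paying
+    # a fee of 0.00003525 BTC has an actual feerate of
+    # 0.00003525 * 1000 / 141 = 0.00025000 BTC/kB, exactly the requested rate.)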
+ assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate)) + rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee + + +def test_rebumping(rbf_node, dest_address): + # check that re-bumping the original tx fails, but bumping the bumper succeeds + rbfid = spend_one_input(rbf_node, dest_address) + bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000}) + assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000}) + rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000}) + + +def test_rebumping_not_replaceable(rbf_node, dest_address): + # check that re-bumping a non-replaceable bump tx fails + rbfid = spend_one_input(rbf_node, dest_address) + bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False}) + assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"], + {"totalFee": 20000}) + + +def test_unconfirmed_not_spendable(rbf_node, rbf_node_address): + # check that unconfirmed outputs from bumped transactions are not spendable + rbfid = spend_one_input(rbf_node, rbf_node_address) + rbftx = rbf_node.gettransaction(rbfid)["hex"] + assert rbfid in rbf_node.getrawmempool() + bumpid = rbf_node.bumpfee(rbfid)["txid"] + assert bumpid in rbf_node.getrawmempool() + assert rbfid not in rbf_node.getrawmempool() + + # check that outputs from the bump transaction are not spendable + # due to the replaces_txid check in CWallet::AvailableCoins + assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], []) + + # submit a block with the rbf tx to clear the bump tx out of the mempool, + # then call abandon to make sure the wallet doesn't attempt to resubmit the + # bump tx, then invalidate the block so the rbf tx will be put back in the + # mempool. this makes it possible to check whether the rbf tx outputs are + # spendable before the rbf tx is confirmed. 
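+    # (invalidateblock disconnects the block and returns its non-coinbase
+    # transactions, including the rbf tx, to the mempool; the abandoned bump
+    # tx stays out because the wallet no longer resubmits it)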
+ block = submit_block_with_tx(rbf_node, rbftx) + rbf_node.abandontransaction(bumpid) + rbf_node.invalidateblock(block.hash) + assert bumpid not in rbf_node.getrawmempool() + assert rbfid in rbf_node.getrawmempool() + + # check that outputs from the rbf tx are not spendable before the + # transaction is confirmed, due to the replaced_by_txid check in + # CWallet::AvailableCoins + assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], []) + + # check that the main output from the rbf tx is spendable after confirmed + rbf_node.generate(1) + assert_equal( + sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False) + if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1) + + +def test_bumpfee_metadata(rbf_node, dest_address): + rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value") + bumped_tx = rbf_node.bumpfee(rbfid) + bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"]) + assert_equal(bumped_wtx["comment"], "comment value") + assert_equal(bumped_wtx["to"], "to value") + + +def test_locked_wallet_fails(rbf_node, dest_address): + rbfid = spend_one_input(rbf_node, dest_address) + rbf_node.walletlock() + assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.", + rbf_node.bumpfee, rbfid) + + +def spend_one_input(node, dest_address): + tx_input = dict( + sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000"))) + rawtx = node.createrawtransaction( + [tx_input], {dest_address: Decimal("0.00050000"), + node.getrawchangeaddress(): Decimal("0.00049000")}) + signedtx = node.signrawtransaction(rawtx) + txid = node.sendrawtransaction(signedtx["hex"]) + return txid + + +def submit_block_with_tx(node, tx): + ctx = CTransaction() + ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx))) + + tip = node.getbestblockhash() + height = node.getblockcount() + 1 + block_time = node.getblockheader(tip)["mediantime"] + 1 + block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time) + block.vtx.append(ctx) + block.rehash() + block.hashMerkleRoot = block.calc_merkle_root() + blocktools.add_witness_commitment(block) + block.solve() + node.submitblock(bytes_to_hex_str(block.serialize(True))) + return block + + +if __name__ == "__main__": + BumpFeeTest().main() diff --git a/test/functional/wallet_disable.py b/test/functional/wallet_disable.py new file mode 100755 index 0000000000..b0627d88ac --- /dev/null +++ b/test/functional/wallet_disable.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test a node with the -disablewallet option. + +- Test that validateaddress RPC works when running with -disablewallet +- Test that it is not possible to mine to an invalid address. 
+""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class DisableWalletTest (BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [["-disablewallet"]] + + def run_test (self): + # Make sure wallet is really disabled + assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo) + x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') + assert(x['isvalid'] == False) + x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') + assert(x['isvalid'] == True) + + # Checking mining to an address without a wallet. Generating to a valid address should succeed + # but generating to an invalid address will fail. + self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') + assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') + +if __name__ == '__main__': + DisableWalletTest ().main () diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py new file mode 100755 index 0000000000..77f90ffb81 --- /dev/null +++ b/test/functional/wallet_dump.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the dumpwallet RPC.""" + +import os + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import (assert_equal, assert_raises_rpc_error) + + +def read_dump(file_name, addrs, script_addrs, hd_master_addr_old): + """ + Read the given dump, count the addrs that match, count change and reserve. 
+ Also check that the old hd_master is inactive + """ + with open(file_name, encoding='utf8') as inputfile: + found_addr = 0 + found_script_addr = 0 + found_addr_chg = 0 + found_addr_rsv = 0 + hd_master_addr_ret = None + for line in inputfile: + # only read non comment lines + if line[0] != "#" and len(line) > 10: + # split out some data + key_label, comment = line.split("#") + # key = key_label.split(" ")[0] + keytype = key_label.split(" ")[2] + if len(comment) > 1: + addr_keypath = comment.split(" addr=")[1] + addr = addr_keypath.split(" ")[0] + keypath = None + if keytype == "inactivehdmaster=1": + # ensure the old master is still available + assert(hd_master_addr_old == addr) + elif keytype == "hdmaster=1": + # ensure we have generated a new hd master key + assert(hd_master_addr_old != addr) + hd_master_addr_ret = addr + elif keytype == "script=1": + # scripts don't have keypaths + keypath = None + else: + keypath = addr_keypath.rstrip().split("hdkeypath=")[1] + + # count key types + for addrObj in addrs: + if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=": + found_addr += 1 + break + elif keytype == "change=1": + found_addr_chg += 1 + break + elif keytype == "reserve=1": + found_addr_rsv += 1 + break + + # count scripts + for script_addr in script_addrs: + if script_addr == addr.rstrip() and keytype == "script=1": + found_script_addr += 1 + break + + return found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret + + +class WalletDumpTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [["-keypool=90", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]] + + def setup_network(self, split=False): + # Use 1 minute timeout because the initial getnewaddress RPC can take + # longer than the default 30 seconds due to an expensive + # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in + # the test often takes even longer. 
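+        # (Note: set_test_params above passes -keypool=90, so the initial
+        # top-up generates 90 external plus 90 internal keys; the dump-count
+        # assertions in run_test rely on those numbers.)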
+        self.add_nodes(self.num_nodes, self.extra_args, timewait=60)
+        self.start_nodes()
+
+    def run_test (self):
+        tmpdir = self.options.tmpdir
+
+        # generate 20 addresses to compare against the dump
+        test_addr_count = 20
+        addrs = []
+        for i in range(0,test_addr_count):
+            addr = self.nodes[0].getnewaddress()
+            vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
+            addrs.append(vaddr)
+        # Should be a no-op:
+        self.nodes[0].keypoolrefill()
+
+        # Test scripts dump by adding a P2SH witness and a 1-of-1 multisig address
+        witness_addr = self.nodes[0].addwitnessaddress(addrs[0]["address"], True)
+        multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"]
+        script_addrs = [witness_addr, multisig_addr]
+
+        # dump unencrypted wallet
+        result = self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
+        assert_equal(result['filename'], os.path.abspath(tmpdir + "/node0/wallet.unencrypted.dump"))
+
+        found_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
+            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, script_addrs, None)
+        assert_equal(found_addr, test_addr_count) # all keys must be in the dump
+        assert_equal(found_script_addr, 2) # all scripts must be in the dump
+        assert_equal(found_addr_chg, 50) # 50 blocks were mined
+        assert_equal(found_addr_rsv, 90*2) # 90 external keys plus 90 internal keys
+
+        #encrypt wallet, restart, unlock and dump
+        self.nodes[0].node_encrypt_wallet('test')
+        self.start_node(0)
+        self.nodes[0].walletpassphrase('test', 10)
+        # Should be a no-op:
+        self.nodes[0].keypoolrefill()
+        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
+
+        found_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
+            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, script_addrs, hd_master_addr_unenc)
+        assert_equal(found_addr, test_addr_count)
+        assert_equal(found_script_addr, 2)
+        assert_equal(found_addr_chg, 90*2 + 50) # old reserve keys are marked as change now
+        assert_equal(found_addr_rsv, 90*2)
+
+        # Overwriting should fail
+        assert_raises_rpc_error(-8, "already exists", self.nodes[0].dumpwallet, tmpdir + "/node0/wallet.unencrypted.dump")
+
+        # Restart node with new wallet, and test importwallet
+        self.stop_node(0)
+        self.start_node(0, ['-wallet=w2'])
+
+        # Make sure the address is not IsMine before import
+        result = self.nodes[0].validateaddress(multisig_addr)
+        assert(result['ismine'] == False)
+
+        self.nodes[0].importwallet(os.path.abspath(tmpdir + "/node0/wallet.unencrypted.dump"))
+
+        # Now check IsMine is true
+        result = self.nodes[0].validateaddress(multisig_addr)
+        assert(result['ismine'] == True)
+
+if __name__ == '__main__':
+    WalletDumpTest().main ()
diff --git a/test/functional/wallet_encryption.py b/test/functional/wallet_encryption.py
new file mode 100755
index 0000000000..3c927ee484
--- /dev/null
+++ b/test/functional/wallet_encryption.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test Wallet encryption""" + +import time + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + assert_greater_than, + assert_greater_than_or_equal, +) + +class WalletEncryptionTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + passphrase = "WalletPassphrase" + passphrase2 = "SecondWalletPassphrase" + + # Make sure the wallet isn't encrypted first + address = self.nodes[0].getnewaddress() + privkey = self.nodes[0].dumpprivkey(address) + assert_equal(privkey[:1], "c") + assert_equal(len(privkey), 52) + + # Encrypt the wallet + self.nodes[0].node_encrypt_wallet(passphrase) + self.start_node(0) + + # Test that the wallet is encrypted + assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address) + + # Check that walletpassphrase works + self.nodes[0].walletpassphrase(passphrase, 2) + assert_equal(privkey, self.nodes[0].dumpprivkey(address)) + + # Check that the timeout is right + time.sleep(2) + assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address) + + # Test wrong passphrase + assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10) + + # Test walletlock + self.nodes[0].walletpassphrase(passphrase, 84600) + assert_equal(privkey, self.nodes[0].dumpprivkey(address)) + self.nodes[0].walletlock() + assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address) + + # Test passphrase changes + self.nodes[0].walletpassphrasechange(passphrase, passphrase2) + assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10) + self.nodes[0].walletpassphrase(passphrase2, 10) + assert_equal(privkey, self.nodes[0].dumpprivkey(address)) + self.nodes[0].walletlock() + + # Test timeout bounds + assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10) + # Check the timeout + # Check a time less than the limit + expected_time = int(time.time()) + (1 << 30) - 600 + self.nodes[0].walletpassphrase(passphrase2, (1 << 30) - 600) + actual_time = self.nodes[0].getwalletinfo()['unlocked_until'] + assert_greater_than_or_equal(actual_time, expected_time) + assert_greater_than(expected_time + 5, actual_time) # 5 second buffer + # Check a time greater than the limit + expected_time = int(time.time()) + (1 << 30) - 1 + self.nodes[0].walletpassphrase(passphrase2, (1 << 33)) + actual_time = self.nodes[0].getwalletinfo()['unlocked_until'] + assert_greater_than_or_equal(actual_time, expected_time) + assert_greater_than(expected_time + 5, actual_time) # 5 second buffer + +if __name__ == '__main__': + WalletEncryptionTest().main() diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py new file mode 100755 index 0000000000..9f0e9acb47 --- /dev/null +++ b/test/functional/wallet_hd.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test Hierarchical Deterministic wallet function.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + connect_nodes_bi, +) +import shutil +import os + +class WalletHDTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + self.extra_args = [[], ['-keypool=0']] + + def run_test (self): + tmpdir = self.options.tmpdir + + # Make sure can't switch off usehd after wallet creation + self.stop_node(1) + self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet') + self.start_node(1) + connect_nodes_bi(self.nodes, 0, 1) + + # Make sure we use hd, keep masterkeyid + masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid'] + assert_equal(len(masterkeyid), 40) + + # create an internal key + change_addr = self.nodes[1].getrawchangeaddress() + change_addrV= self.nodes[1].validateaddress(change_addr) + assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key + + # Import a non-HD private key in the HD wallet + non_hd_add = self.nodes[0].getnewaddress() + self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add)) + + # This should be enough to keep the master key and the non-HD key + self.nodes[1].backupwallet(tmpdir + "/hd.bak") + #self.nodes[1].dumpwallet(tmpdir + "/hd.dump") + + # Derive some HD addresses and remember the last + # Also send funds to each add + self.nodes[0].generate(101) + hd_add = None + num_hd_adds = 300 + for i in range(num_hd_adds): + hd_add = self.nodes[1].getnewaddress() + hd_info = self.nodes[1].validateaddress(hd_add) + assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'") + assert_equal(hd_info["hdmasterkeyid"], masterkeyid) + self.nodes[0].sendtoaddress(hd_add, 1) + self.nodes[0].generate(1) + self.nodes[0].sendtoaddress(non_hd_add, 1) + self.nodes[0].generate(1) + + # create an internal key (again) + change_addr = self.nodes[1].getrawchangeaddress() + change_addrV= self.nodes[1].validateaddress(change_addr) + assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key + + self.sync_all() + assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) + + self.log.info("Restore backup ...") + self.stop_node(1) + # we need to delete the complete regtest directory + # otherwise node1 would auto-recover all funds in flag the keypool keys as used + shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks")) + shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate")) + shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat")) + self.start_node(1) + + # Assert that derivation is deterministic + hd_add_2 = None + for _ in range(num_hd_adds): + hd_add_2 = self.nodes[1].getnewaddress() + hd_info_2 = self.nodes[1].validateaddress(hd_add_2) + assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_)+"'") + assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid) + assert_equal(hd_add, hd_add_2) + connect_nodes_bi(self.nodes, 0, 1) + self.sync_all() + + # Needs rescan + self.stop_node(1) + self.start_node(1, extra_args=self.extra_args[1] + ['-rescan']) + assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) + + # Try a RPC based rescan + self.stop_node(1) + shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks")) + shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate")) + shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallet.dat")) + self.start_node(1, extra_args=self.extra_args[1]) + 
+        connect_nodes_bi(self.nodes, 0, 1)
+        self.sync_all()
+        out = self.nodes[1].rescanblockchain(0, 1)
+        assert_equal(out['start_height'], 0)
+        assert_equal(out['stop_height'], 1)
+        out = self.nodes[1].rescanblockchain()
+        assert_equal(out['start_height'], 0)
+        assert_equal(out['stop_height'], self.nodes[1].getblockcount())
+        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
+
+        # send a tx and make sure it's using the internal chain for the change output
+        txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
+        keypath = ""
+        for out in outs:
+            if out['value'] != 1:
+                keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
+
+        assert_equal(keypath[0:7], "m/0'/1'")
+
+if __name__ == '__main__':
+    WalletHDTest().main ()
diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py
new file mode 100755
index 0000000000..d193a99d5b
--- /dev/null
+++ b/test/functional/wallet_import_rescan.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test wallet import RPCs.
+
+Test rescan behavior of importaddress, importpubkey, importprivkey, and
+importmulti RPCs with different types of keys and rescan options.
+
+In the first part of the test, node 1 creates an address for each type of
+import RPC call and node 0 sends BTC to it. Then other nodes import the
+addresses, and the test makes listtransactions and getbalance calls to confirm
+that the importing node either did or did not execute rescans picking up the
+send transactions.
+
+In the second part of the test, node 0 sends more BTC to each address, and the
+test makes more listtransactions and getbalance calls to confirm that the
+importing nodes pick up the new transactions regardless of whether rescans
+happened previously.
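+
+(Taken together, the variants below cover 2 call types x 3 data types x
+3 rescan modes, each with pruning on and off: 36 import variants in all.)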
+""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times) + +import collections +import enum +import itertools + +Call = enum.Enum("Call", "single multi") +Data = enum.Enum("Data", "address pub priv") +Rescan = enum.Enum("Rescan", "no yes late_timestamp") + + +class Variant(collections.namedtuple("Variant", "call data rescan prune")): + """Helper for importing one key and verifying scanned transactions.""" + + def try_rpc(self, func, *args, **kwargs): + if self.expect_disabled: + assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs) + else: + return func(*args, **kwargs) + + def do_import(self, timestamp): + """Call one key import RPC.""" + + if self.call == Call.single: + if self.data == Data.address: + response = self.try_rpc(self.node.importaddress, self.address["address"], self.label, + self.rescan == Rescan.yes) + elif self.data == Data.pub: + response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label, + self.rescan == Rescan.yes) + elif self.data == Data.priv: + response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes) + assert_equal(response, None) + + elif self.call == Call.multi: + response = self.node.importmulti([{ + "scriptPubKey": { + "address": self.address["address"] + }, + "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0), + "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [], + "keys": [self.key] if self.data == Data.priv else [], + "label": self.label, + "watchonly": self.data != Data.priv + }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)}) + assert_equal(response, [{"success": True}]) + + def check(self, txid=None, amount=None, confirmations=None): + """Verify that getbalance/listtransactions return expected values.""" + + balance = self.node.getbalance(self.label, 0, True) + assert_equal(balance, self.expected_balance) + + txs = self.node.listtransactions(self.label, 10000, 0, True) + assert_equal(len(txs), self.expected_txs) + + if txid is not None: + tx, = [tx for tx in txs if tx["txid"] == txid] + assert_equal(tx["account"], self.label) + assert_equal(tx["address"], self.address["address"]) + assert_equal(tx["amount"], amount) + assert_equal(tx["category"], "receive") + assert_equal(tx["label"], self.label) + assert_equal(tx["txid"], txid) + assert_equal(tx["confirmations"], confirmations) + assert_equal("trusted" not in tx, True) + # Verify the transaction is correctly marked watchonly depending on + # whether the transaction pays to an imported public key or + # imported private key. The test setup ensures that transaction + # inputs will not be from watchonly keys (important because + # involvesWatchonly will be true if either the transaction output + # or inputs are watchonly). + if self.data != Data.priv: + assert_equal(tx["involvesWatchonly"], True) + else: + assert_equal("involvesWatchonly" not in tx, True) + + +# List of Variants for each way a key or address could be imported. +IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))] + +# List of nodes to import keys to. Half the nodes will have pruning disabled, +# half will have it enabled. 
Different nodes will be used for imports that are +# expected to cause rescans, and imports that are not expected to cause +# rescans, in order to prevent rescans during later imports picking up +# transactions associated with earlier imports. This makes it easier to keep +# track of expected balances and transactions. +ImportNode = collections.namedtuple("ImportNode", "prune rescan") +IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)] + +# Rescans start at the earliest block up to 2 hours before the key timestamp. +TIMESTAMP_WINDOW = 2 * 60 * 60 + + +class ImportRescanTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + len(IMPORT_NODES) + + def setup_network(self): + extra_args = [["-addresstype=legacy"] for _ in range(self.num_nodes)] + for i, import_node in enumerate(IMPORT_NODES, 2): + if import_node.prune: + extra_args[i] += ["-prune=1"] + + self.add_nodes(self.num_nodes, extra_args) + self.start_nodes() + for i in range(1, self.num_nodes): + connect_nodes(self.nodes[i], 0) + + def run_test(self): + # Create one transaction on node 0 with a unique amount and label for + # each possible type of wallet import RPC. + for i, variant in enumerate(IMPORT_VARIANTS): + variant.label = "label {} {}".format(i, variant) + variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label)) + variant.key = self.nodes[1].dumpprivkey(variant.address["address"]) + variant.initial_amount = 10 - (i + 1) / 4.0 + variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount) + + # Generate a block containing the initial transactions, then another + # block further in the future (past the rescan window). + self.nodes[0].generate(1) + assert_equal(self.nodes[0].getrawmempool(), []) + timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + # For each variation of wallet key import, invoke the import RPC and + # check the results from getbalance and listtransactions. + for variant in IMPORT_VARIANTS: + variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single + expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled + variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))] + variant.do_import(timestamp) + if expect_rescan: + variant.expected_balance = variant.initial_amount + variant.expected_txs = 1 + variant.check(variant.initial_txid, variant.initial_amount, 2) + else: + variant.expected_balance = 0 + variant.expected_txs = 0 + variant.check() + + # Create new transactions sending to each address. + for i, variant in enumerate(IMPORT_VARIANTS): + variant.sent_amount = 10 - (2 * i + 1) / 8.0 + variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount) + + # Generate a block containing the new transactions. + self.nodes[0].generate(1) + assert_equal(self.nodes[0].getrawmempool(), []) + sync_blocks(self.nodes) + + # Check the latest results from getbalance and listtransactions. 
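+        # (Unless the import was disabled by pruning, each variant should now
+        # see its balance grow by sent_amount and its transaction count by one,
+        # whether or not the original import triggered a rescan.)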
+ for variant in IMPORT_VARIANTS: + if not variant.expect_disabled: + variant.expected_balance += variant.sent_amount + variant.expected_txs += 1 + variant.check(variant.sent_txid, variant.sent_amount, 1) + else: + variant.check() + +if __name__ == "__main__": + ImportRescanTest().main() diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py new file mode 100755 index 0000000000..be9be83839 --- /dev/null +++ b/test/functional/wallet_importmulti.py @@ -0,0 +1,451 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the importmulti RPC.""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class ImportMultiTest (BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]] + self.setup_clean_chain = True + + def setup_network(self): + self.setup_nodes() + + def run_test (self): + self.log.info("Mining blocks...") + self.nodes[0].generate(1) + self.nodes[1].generate(1) + timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] + + node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + + #Check only one address + assert_equal(node0_address1['ismine'], True) + + #Node 1 sync test + assert_equal(self.nodes[1].getblockcount(),1) + + #Address Test - before import + address_info = self.nodes[1].validateaddress(node0_address1['address']) + assert_equal(address_info['iswatchonly'], False) + assert_equal(address_info['ismine'], False) + + + # RPC importmulti ----------------------------------------------- + + # Bitcoin Address + self.log.info("Should import an address") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], True) + assert_equal(address_assert['ismine'], False) + assert_equal(address_assert['timestamp'], timestamp) + watchonly_address = address['address'] + watchonly_timestamp = timestamp + + self.log.info("Should not import an invalid address") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": "not valid address", + }, + "timestamp": "now", + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -5) + assert_equal(result[0]['error']['message'], 'Invalid address') + + # ScriptPubKey + internal + self.log.info("Should import a scriptPubKey with internal flag") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + "internal": True + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], True) + assert_equal(address_assert['ismine'], False) + assert_equal(address_assert['timestamp'], timestamp) + + # ScriptPubKey + !internal + self.log.info("Should not import a scriptPubKey without internal flag") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + 
"scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -8) + assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + + # Address + Public key + !Internal + self.log.info("Should import an address with public key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + "pubkeys": [ address['pubkey'] ] + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], True) + assert_equal(address_assert['ismine'], False) + assert_equal(address_assert['timestamp'], timestamp) + + + # ScriptPubKey + Public key + internal + self.log.info("Should import a scriptPubKey with internal and with public key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + request = [{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + "pubkeys": [ address['pubkey'] ], + "internal": True + }] + result = self.nodes[1].importmulti(request) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], True) + assert_equal(address_assert['ismine'], False) + assert_equal(address_assert['timestamp'], timestamp) + + # ScriptPubKey + Public key + !internal + self.log.info("Should not import a scriptPubKey without internal and with public key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + request = [{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + "pubkeys": [ address['pubkey'] ] + }] + result = self.nodes[1].importmulti(request) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -8) + assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + # Address + Private key + !watchonly + self.log.info("Should import an address with private key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address['address']) ] + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], True) + assert_equal(address_assert['timestamp'], timestamp) + + self.log.info("Should not import an address with private key if is already imported") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address['address']) ] + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -4) + assert_equal(result[0]['error']['message'], 'The 
wallet already contains the private key for this address or script') + + # Address + Private key + watchonly + self.log.info("Should not import an address with private key and with watchonly") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address['address']) ], + "watchonly": True + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -8) + assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + # ScriptPubKey + Private key + internal + self.log.info("Should import a scriptPubKey with internal and with private key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address['address']) ], + "internal": True + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], True) + assert_equal(address_assert['timestamp'], timestamp) + + # ScriptPubKey + Private key + !internal + self.log.info("Should not import a scriptPubKey without internal and with private key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address['address']) ] + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -8) + assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + + # P2SH address + sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + self.nodes[1].generate(100) + transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].generate(1) + timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] + + self.log.info("Should import a p2sh") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": multi_sig_script['address'] + }, + "timestamp": "now", + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) + assert_equal(address_assert['isscript'], True) + assert_equal(address_assert['iswatchonly'], True) + assert_equal(address_assert['timestamp'], timestamp) + p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + assert_equal(p2shunspent['spendable'], False) + 
assert_equal(p2shunspent['solvable'], False) + + + # P2SH + Redeem script + sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + self.nodes[1].generate(100) + transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].generate(1) + timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] + + self.log.info("Should import a p2sh with respective redeem script") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": multi_sig_script['address'] + }, + "timestamp": "now", + "redeemscript": multi_sig_script['redeemScript'] + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) + assert_equal(address_assert['timestamp'], timestamp) + + p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + assert_equal(p2shunspent['spendable'], False) + assert_equal(p2shunspent['solvable'], True) + + + # P2SH + Redeem script + Private Keys + !Watchonly + sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + self.nodes[1].generate(100) + transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].generate(1) + timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] + + self.log.info("Should import a p2sh with respective redeem script and private keys") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": multi_sig_script['address'] + }, + "timestamp": "now", + "redeemscript": multi_sig_script['redeemScript'], + "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])] + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) + assert_equal(address_assert['timestamp'], timestamp) + + p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + assert_equal(p2shunspent['spendable'], False) + assert_equal(p2shunspent['solvable'], True) + + # P2SH + Redeem script + Private Keys + Watchonly + sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) + self.nodes[1].generate(100) + transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].generate(1) + timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] + + self.log.info("Should import a p2sh with respective redeem script and private keys") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": multi_sig_script['address'] + }, + "timestamp": "now", + 
"redeemscript": multi_sig_script['redeemScript'], + "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])], + "watchonly": True + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -8) + assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') + + + # Address + Public key + !Internal + Wrong pubkey + self.log.info("Should not import an address with a wrong public key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + "pubkeys": [ address2['pubkey'] ] + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -5) + assert_equal(result[0]['error']['message'], 'Consistency check failed') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + + # ScriptPubKey + Public key + internal + Wrong pubkey + self.log.info("Should not import a scriptPubKey with internal and with a wrong public key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + request = [{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + "pubkeys": [ address2['pubkey'] ], + "internal": True + }] + result = self.nodes[1].importmulti(request) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -5) + assert_equal(result[0]['error']['message'], 'Consistency check failed') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + + # Address + Private key + !watchonly + Wrong private key + self.log.info("Should not import an address with a wrong private key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": address['address'] + }, + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address2['address']) ] + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -5) + assert_equal(result[0]['error']['message'], 'Consistency check failed') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + + # ScriptPubKey + Private key + internal + Wrong private key + self.log.info("Should not import a scriptPubKey with internal and with a wrong private key") + address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) + result = self.nodes[1].importmulti([{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "now", + "keys": [ self.nodes[0].dumpprivkey(address2['address']) ], + "internal": True + }]) + assert_equal(result[0]['success'], False) + assert_equal(result[0]['error']['code'], -5) + 
assert_equal(result[0]['error']['message'], 'Consistency check failed') + address_assert = self.nodes[1].validateaddress(address['address']) + assert_equal(address_assert['iswatchonly'], False) + assert_equal(address_assert['ismine'], False) + assert_equal('timestamp' in address_assert, False) + + + # Importing existing watch only address with new timestamp should replace saved timestamp. + assert_greater_than(timestamp, watchonly_timestamp) + self.log.info("Should replace previously saved watch only timestamp.") + result = self.nodes[1].importmulti([{ + "scriptPubKey": { + "address": watchonly_address, + }, + "timestamp": "now", + }]) + assert_equal(result[0]['success'], True) + address_assert = self.nodes[1].validateaddress(watchonly_address) + assert_equal(address_assert['iswatchonly'], True) + assert_equal(address_assert['ismine'], False) + assert_equal(address_assert['timestamp'], timestamp) + watchonly_timestamp = timestamp + + + # restart nodes to check for proper serialization/deserialization of watch only address + self.stop_nodes() + self.start_nodes() + address_assert = self.nodes[1].validateaddress(watchonly_address) + assert_equal(address_assert['iswatchonly'], True) + assert_equal(address_assert['ismine'], False) + assert_equal(address_assert['timestamp'], watchonly_timestamp) + + # Bad or missing timestamps + self.log.info("Should throw on invalid or missing timestamp values") + assert_raises_rpc_error(-3, 'Missing required timestamp field for key', + self.nodes[1].importmulti, [{ + "scriptPubKey": address['scriptPubKey'], + }]) + assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string', + self.nodes[1].importmulti, [{ + "scriptPubKey": address['scriptPubKey'], + "timestamp": "", + }]) + + +if __name__ == '__main__': + ImportMultiTest ().main () diff --git a/test/functional/wallet_importprunedfunds.py b/test/functional/wallet_importprunedfunds.py new file mode 100755 index 0000000000..6b2919b5ae --- /dev/null +++ b/test/functional/wallet_importprunedfunds.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the importprunedfunds and removeprunedfunds RPCs.""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class ImportPrunedFundsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + + def run_test(self): + self.log.info("Mining blocks...") + self.nodes[0].generate(101) + + self.sync_all() + + # address + address1 = self.nodes[0].getnewaddress() + # pubkey + address2 = self.nodes[0].getnewaddress() + # privkey + address3 = self.nodes[0].getnewaddress() + address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey + + #Check only one address + address_info = self.nodes[0].validateaddress(address1) + assert_equal(address_info['ismine'], True) + + self.sync_all() + + #Node 1 sync test + assert_equal(self.nodes[1].getblockcount(),101) + + #Address Test - before import + address_info = self.nodes[1].validateaddress(address1) + assert_equal(address_info['iswatchonly'], False) + assert_equal(address_info['ismine'], False) + + address_info = self.nodes[1].validateaddress(address2) + assert_equal(address_info['iswatchonly'], False) + assert_equal(address_info['ismine'], False) + + address_info = self.nodes[1].validateaddress(address3) + assert_equal(address_info['iswatchonly'], False) + assert_equal(address_info['ismine'], False) + + #Send funds to self + txnid1 = self.nodes[0].sendtoaddress(address1, 0.1) + self.nodes[0].generate(1) + rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex'] + proof1 = self.nodes[0].gettxoutproof([txnid1]) + + txnid2 = self.nodes[0].sendtoaddress(address2, 0.05) + self.nodes[0].generate(1) + rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex'] + proof2 = self.nodes[0].gettxoutproof([txnid2]) + + txnid3 = self.nodes[0].sendtoaddress(address3, 0.025) + self.nodes[0].generate(1) + rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex'] + proof3 = self.nodes[0].gettxoutproof([txnid3]) + + self.sync_all() + + #Import with no affiliated address + assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1) + + balance1 = self.nodes[1].getbalance("", 0, True) + assert_equal(balance1, Decimal(0)) + + #Import with affiliated address with no rescan + self.nodes[1].importaddress(address2, "add2", False) + self.nodes[1].importprunedfunds(rawtxn2, proof2) + balance2 = self.nodes[1].getbalance("add2", 0, True) + assert_equal(balance2, Decimal('0.05')) + + #Import with private key with no rescan + self.nodes[1].importprivkey(privkey=address3_privkey, label="add3", rescan=False) + self.nodes[1].importprunedfunds(rawtxn3, proof3) + balance3 = self.nodes[1].getbalance("add3", 0, False) + assert_equal(balance3, Decimal('0.025')) + balance3 = self.nodes[1].getbalance("*", 0, True) + assert_equal(balance3, Decimal('0.075')) + + #Addresses Test - after import + address_info = self.nodes[1].validateaddress(address1) + assert_equal(address_info['iswatchonly'], False) + assert_equal(address_info['ismine'], False) + address_info = self.nodes[1].validateaddress(address2) + assert_equal(address_info['iswatchonly'], True) + assert_equal(address_info['ismine'], False) + address_info = self.nodes[1].validateaddress(address3) + assert_equal(address_info['iswatchonly'], False) + assert_equal(address_info['ismine'], True) + + #Remove transactions + assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1) + + balance1 = self.nodes[1].getbalance("*", 0, True) + assert_equal(balance1, 
Decimal('0.075')) + + self.nodes[1].removeprunedfunds(txnid2) + balance2 = self.nodes[1].getbalance("*", 0, True) + assert_equal(balance2, Decimal('0.025')) + + self.nodes[1].removeprunedfunds(txnid3) + balance3 = self.nodes[1].getbalance("*", 0, True) + assert_equal(balance3, Decimal('0.0')) + +if __name__ == '__main__': + ImportPrunedFundsTest().main() diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py new file mode 100755 index 0000000000..45a5eed8ec --- /dev/null +++ b/test/functional/wallet_keypool.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the wallet keypool and interaction with wallet encryption/locking.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class KeyPoolTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def run_test(self): + nodes = self.nodes + addr_before_encrypting = nodes[0].getnewaddress() + addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting) + wallet_info_old = nodes[0].getwalletinfo() + assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid']) + + # Encrypt wallet and wait to terminate + nodes[0].node_encrypt_wallet('test') + # Restart node 0 + self.start_node(0) + # Keep creating keys + addr = nodes[0].getnewaddress() + addr_data = nodes[0].validateaddress(addr) + wallet_info = nodes[0].getwalletinfo() + assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid']) + assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid']) + assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) + + # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min) + nodes[0].walletpassphrase('test', 12000) + nodes[0].keypoolrefill(6) + nodes[0].walletlock() + wi = nodes[0].getwalletinfo() + assert_equal(wi['keypoolsize_hd_internal'], 6) + assert_equal(wi['keypoolsize'], 6) + + # drain the internal keys + nodes[0].getrawchangeaddress() + nodes[0].getrawchangeaddress() + nodes[0].getrawchangeaddress() + nodes[0].getrawchangeaddress() + nodes[0].getrawchangeaddress() + nodes[0].getrawchangeaddress() + addr = set() + # the next one should fail + assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress) + + # drain the external keys + addr.add(nodes[0].getnewaddress()) + addr.add(nodes[0].getnewaddress()) + addr.add(nodes[0].getnewaddress()) + addr.add(nodes[0].getnewaddress()) + addr.add(nodes[0].getnewaddress()) + addr.add(nodes[0].getnewaddress()) + assert(len(addr) == 6) + # the next one should fail + assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) + + # refill keypool with three new addresses + nodes[0].walletpassphrase('test', 1) + nodes[0].keypoolrefill(3) + + # test walletpassphrase timeout + time.sleep(1.1) + assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0) + + # drain them by mining + nodes[0].generate(1) + nodes[0].generate(1) + nodes[0].generate(1) + assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1) + + nodes[0].walletpassphrase('test', 100) + nodes[0].keypoolrefill(100) + wi = nodes[0].getwalletinfo() + assert_equal(wi['keypoolsize_hd_internal'], 100) + 
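+        # The repeated getnewaddress()/getrawchangeaddress() calls above amount to
+        # a drain loop; a reusable sketch (the exact exception type depends on the
+        # RPC wrapper in use, e.g. a JSONRPCException carrying code -12):
+        #
+        #   def drain_keypool(getter, limit=1000):
+        #       """Call `getter` until the keypool runs dry; return addresses seen."""
+        #       seen = set()
+        #       for _ in range(limit):
+        #           try:
+        #               seen.add(getter())
+        #           except Exception:  # -12: "Keypool ran out..."
+        #               return seen
+        #       raise AssertionError("keypool never ran out")
+        #
+        # e.g. drain_keypool(nodes[0].getrawchangeaddress) for the internal chain.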
assert_equal(wi['keypoolsize'], 100) + +if __name__ == '__main__': + KeyPoolTest().main() diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py new file mode 100755 index 0000000000..e7af3c3987 --- /dev/null +++ b/test/functional/wallet_keypool_topup.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test HD Wallet keypool restore function. + +Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks. + +- Start node1, shutdown and backup wallet. +- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110. +- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1. +- connect node1 to node0. Verify that they sync and node1 receives its funds.""" +import shutil + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + connect_nodes_bi, + sync_blocks, +) + +class KeypoolRestoreTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + self.extra_args = [[], ['-keypool=100', '-keypoolmin=20']] + + def run_test(self): + self.tmpdir = self.options.tmpdir + self.nodes[0].generate(101) + + self.log.info("Make backup of wallet") + + self.stop_node(1) + + shutil.copyfile(self.tmpdir + "/node1/regtest/wallets/wallet.dat", self.tmpdir + "/wallet.bak") + self.start_node(1, self.extra_args[1]) + connect_nodes_bi(self.nodes, 0, 1) + + self.log.info("Generate keys for wallet") + + for _ in range(90): + addr_oldpool = self.nodes[1].getnewaddress() + for _ in range(20): + addr_extpool = self.nodes[1].getnewaddress() + + self.log.info("Send funds to wallet") + + self.nodes[0].sendtoaddress(addr_oldpool, 10) + self.nodes[0].generate(1) + self.nodes[0].sendtoaddress(addr_extpool, 5) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + self.log.info("Restart node with wallet backup") + + self.stop_node(1) + + shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallets/wallet.dat") + + self.log.info("Verify keypool is restored and balance is correct") + + self.start_node(1, self.extra_args[1]) + connect_nodes_bi(self.nodes, 0, 1) + self.sync_all() + + assert_equal(self.nodes[1].getbalance(), 15) + assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive") + + # Check that we have marked all keys up to the used keypool key as used + assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/110'") + +if __name__ == '__main__': + KeypoolRestoreTest().main() diff --git a/test/functional/wallet_listreceivedby.py b/test/functional/wallet_listreceivedby.py new file mode 100755 index 0000000000..1f2b3c8aa7 --- /dev/null +++ b/test/functional/wallet_listreceivedby.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
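+# (Note on the API exercised below: the account-based calls - getaccount,
+# getaccountaddress, listreceivedbyaccount, getreceivedbyaccount - were the
+# supported interface when this test was written; they were later deprecated
+# upstream in favor of labels, so the assertions here pin the legacy behavior.)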
+"""Test the listreceivedbyaddress RPC.""" +from decimal import Decimal + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import (assert_array_result, + assert_equal, + assert_raises_rpc_error, + ) + +class ReceivedByTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + + def run_test(self): + # Generate block to get out of IBD + self.nodes[0].generate(1) + + self.log.info("listreceivedbyaddress Test") + + # Send from node 0 to 1 + addr = self.nodes[1].getnewaddress() + txid = self.nodes[0].sendtoaddress(addr, 0.1) + self.sync_all() + + # Check not listed in listreceivedbyaddress because has 0 confirmations + assert_array_result(self.nodes[1].listreceivedbyaddress(), + {"address": addr}, + {}, + True) + # Bury Tx under 10 block so it will be returned by listreceivedbyaddress + self.nodes[1].generate(10) + self.sync_all() + assert_array_result(self.nodes[1].listreceivedbyaddress(), + {"address": addr}, + {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) + # With min confidence < 10 + assert_array_result(self.nodes[1].listreceivedbyaddress(5), + {"address": addr}, + {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) + # With min confidence > 10, should not find Tx + assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True) + + # Empty Tx + addr = self.nodes[1].getnewaddress() + assert_array_result(self.nodes[1].listreceivedbyaddress(0, True), + {"address": addr}, + {"address": addr, "account": "", "amount": 0, "confirmations": 0, "txids": []}) + + self.log.info("getreceivedbyaddress Test") + + # Send from node 0 to 1 + addr = self.nodes[1].getnewaddress() + txid = self.nodes[0].sendtoaddress(addr, 0.1) + self.sync_all() + + # Check balance is 0 because of 0 confirmations + balance = self.nodes[1].getreceivedbyaddress(addr) + assert_equal(balance, Decimal("0.0")) + + # Check balance is 0.1 + balance = self.nodes[1].getreceivedbyaddress(addr, 0) + assert_equal(balance, Decimal("0.1")) + + # Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress + self.nodes[1].generate(10) + self.sync_all() + balance = self.nodes[1].getreceivedbyaddress(addr) + assert_equal(balance, Decimal("0.1")) + + # Trying to getreceivedby for an address the wallet doesn't own should return an error + assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr) + + self.log.info("listreceivedbyaccount + getreceivedbyaccount Test") + + # set pre-state + addrArr = self.nodes[1].getnewaddress() + account = self.nodes[1].getaccount(addrArr) + received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount() if r["account"] == account][0] + balance_by_account = self.nodes[1].getreceivedbyaccount(account) + + txid = self.nodes[0].sendtoaddress(addr, 0.1) + self.sync_all() + + # listreceivedbyaccount should return received_by_account_json because of 0 confirmations + assert_array_result(self.nodes[1].listreceivedbyaccount(), + {"account": account}, + received_by_account_json) + + # getreceivedbyaddress should return same balance because of 0 confirmations + balance = self.nodes[1].getreceivedbyaccount(account) + assert_equal(balance, balance_by_account) + + self.nodes[1].generate(10) + self.sync_all() + # listreceivedbyaccount should return updated account balance + assert_array_result(self.nodes[1].listreceivedbyaccount(), + {"account": account}, + 
{"account": received_by_account_json["account"], "amount": (received_by_account_json["amount"] + Decimal("0.1"))}) + + # getreceivedbyaddress should return updates balance + balance = self.nodes[1].getreceivedbyaccount(account) + assert_equal(balance, balance_by_account + Decimal("0.1")) + + # Create a new account named "mynewaccount" that has a 0 balance + self.nodes[1].getaccountaddress("mynewaccount") + received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount(0, True) if r["account"] == "mynewaccount"][0] + + # Test includeempty of listreceivedbyaccount + assert_equal(received_by_account_json["amount"], Decimal("0.0")) + + # Test getreceivedbyaccount for 0 amount accounts + balance = self.nodes[1].getreceivedbyaccount("mynewaccount") + assert_equal(balance, Decimal("0.0")) + +if __name__ == '__main__': + ReceivedByTest().main() diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py new file mode 100755 index 0000000000..67e7744bf8 --- /dev/null +++ b/test/functional/wallet_listsinceblock.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the listsincelast RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error + +class ListSinceBlockTest (BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + self.setup_clean_chain = True + + def run_test(self): + self.nodes[2].generate(101) + self.sync_all() + + self.test_no_blockhash() + self.test_invalid_blockhash() + self.test_reorg() + self.test_double_spend() + self.test_double_send() + + def test_no_blockhash(self): + txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) + blockhash, = self.nodes[2].generate(1) + self.sync_all() + + txs = self.nodes[0].listtransactions() + assert_array_result(txs, {"txid": txid}, { + "category": "receive", + "amount": 1, + "blockhash": blockhash, + "confirmations": 1, + }) + assert_equal( + self.nodes[0].listsinceblock(), + {"lastblock": blockhash, + "removed": [], + "transactions": txs}) + assert_equal( + self.nodes[0].listsinceblock(""), + {"lastblock": blockhash, + "removed": [], + "transactions": txs}) + + def test_invalid_blockhash(self): + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, + "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, + "0000000000000000000000000000000000000000000000000000000000000000") + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, + "invalid-hex") + + def test_reorg(self): + ''' + `listsinceblock` did not behave correctly when handed a block that was + no longer in the main chain: + + ab0 + / \ + aa1 [tx0] bb1 + | | + aa2 bb2 + | | + aa3 bb3 + | + bb4 + + Consider a client that has only seen block `aa3` above. It asks the node + to `listsinceblock aa3`. But at some point prior the main chain switched + to the bb chain. + + Previously: listsinceblock would find height=4 for block aa3 and compare + this to height=5 for the tip of the chain (bb4). It would then return + results restricted to bb3-bb4. + + Now: listsinceblock finds the fork at ab0 and returns results in the + range bb1-bb4. + + This test only checks that [tx0] is present. 
+ ''' + + # Split network into two + self.split_network() + + # send to nodes[0] from nodes[2] + senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) + + # generate on both sides + lastblockhash = self.nodes[1].generate(6)[5] + self.nodes[2].generate(7) + self.log.info('lastblockhash=%s' % (lastblockhash)) + + self.sync_all([self.nodes[:2], self.nodes[2:]]) + + self.join_network() + + # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0] + lsbres = self.nodes[0].listsinceblock(lastblockhash) + found = False + for tx in lsbres['transactions']: + if tx['txid'] == senttx: + found = True + break + assert found + + def test_double_spend(self): + ''' + This tests the case where the same UTXO is spent twice on two separate + blocks as part of a reorg. + + ab0 + / \ + aa1 [tx1] bb1 [tx2] + | | + aa2 bb2 + | | + aa3 bb3 + | + bb4 + + Problematic case: + + 1. User 1 receives BTC in tx1 from utxo1 in block aa1. + 2. User 2 receives BTC in tx2 from utxo1 (same) in block bb1 + 3. User 1 sees 2 confirmations at block aa3. + 4. Reorg into bb chain. + 5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now + invalidated. + + Currently the solution to this is to detect that a reorg'd block is + asked for in listsinceblock, and to iterate back over existing blocks up + until the fork point, and to include all transactions that relate to the + node wallet. + ''' + + self.sync_all() + + # Split network into two + self.split_network() + + # share utxo between nodes[1] and nodes[2] + utxos = self.nodes[2].listunspent() + utxo = utxos[0] + privkey = self.nodes[2].dumpprivkey(utxo['address']) + self.nodes[1].importprivkey(privkey) + + # send from nodes[1] using utxo to nodes[0] + change = '%.8f' % (float(utxo['amount']) - 1.0003) + recipientDict = { + self.nodes[0].getnewaddress(): 1, + self.nodes[1].getnewaddress(): change, + } + utxoDicts = [{ + 'txid': utxo['txid'], + 'vout': utxo['vout'], + }] + txid1 = self.nodes[1].sendrawtransaction( + self.nodes[1].signrawtransaction( + self.nodes[1].createrawtransaction(utxoDicts, recipientDict))['hex']) + + # send from nodes[2] using utxo to nodes[3] + recipientDict2 = { + self.nodes[3].getnewaddress(): 1, + self.nodes[2].getnewaddress(): change, + } + self.nodes[2].sendrawtransaction( + self.nodes[2].signrawtransaction( + self.nodes[2].createrawtransaction(utxoDicts, recipientDict2))['hex']) + + # generate on both sides + lastblockhash = self.nodes[1].generate(3)[2] + self.nodes[2].generate(4) + + self.join_network() + + self.sync_all() + + # gettransaction should work for txid1 + assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1" + + # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0] + lsbres = self.nodes[0].listsinceblock(lastblockhash) + assert any(tx['txid'] == txid1 for tx in lsbres['removed']) + + # but it should not include 'removed' if include_removed=false + lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False) + assert 'removed' not in lsbres2 + + def test_double_send(self): + ''' + This tests the case where the same transaction is submitted twice on two + separate blocks as part of a reorg. The former will vanish and the + latter will appear as the true transaction (with confirmations dropping + as a result). + + ab0 + / \ + aa1 [tx1] bb1 + | | + aa2 bb2 + | | + aa3 bb3 [tx1] + | + bb4 + + Asserted: + + 1. tx1 is listed in listsinceblock. + 2. 
It is included in 'removed' as it was removed, even though it is now + present in a different block. + 3. It is listed with a confirmations count of 2 (bb3, bb4), not + 3 (aa1, aa2, aa3). + ''' + + self.sync_all() + + # Split network into two + self.split_network() + + # create and sign a transaction + utxos = self.nodes[2].listunspent() + utxo = utxos[0] + change = '%.8f' % (float(utxo['amount']) - 1.0003) + recipientDict = { + self.nodes[0].getnewaddress(): 1, + self.nodes[2].getnewaddress(): change, + } + utxoDicts = [{ + 'txid': utxo['txid'], + 'vout': utxo['vout'], + }] + signedtxres = self.nodes[2].signrawtransaction( + self.nodes[2].createrawtransaction(utxoDicts, recipientDict)) + assert signedtxres['complete'] + + signedtx = signedtxres['hex'] + + # send from nodes[1]; this will end up in aa1 + txid1 = self.nodes[1].sendrawtransaction(signedtx) + + # generate bb1-bb2 on right side + self.nodes[2].generate(2) + + # send from nodes[2]; this will end up in bb3 + txid2 = self.nodes[2].sendrawtransaction(signedtx) + + assert_equal(txid1, txid2) + + # generate on both sides + lastblockhash = self.nodes[1].generate(3)[2] + self.nodes[2].generate(2) + + self.join_network() + + self.sync_all() + + # gettransaction should work for txid1 + self.nodes[0].gettransaction(txid1) + + # listsinceblock(lastblockhash) should now include txid1 in transactions + # as well as in removed + lsbres = self.nodes[0].listsinceblock(lastblockhash) + assert any(tx['txid'] == txid1 for tx in lsbres['transactions']) + assert any(tx['txid'] == txid1 for tx in lsbres['removed']) + + # find transaction and ensure confirmations is valid + for tx in lsbres['transactions']: + if tx['txid'] == txid1: + assert_equal(tx['confirmations'], 2) + + # the same check for the removed array; confirmations should STILL be 2 + for tx in lsbres['removed']: + if tx['txid'] == txid1: + assert_equal(tx['confirmations'], 2) + +if __name__ == '__main__': + ListSinceBlockTest().main() diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py new file mode 100755 index 0000000000..b07e451667 --- /dev/null +++ b/test/functional/wallet_multiwallet.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test multiwallet. 
+ +Verify that a bitcoind node can load multiple wallet files +""" +import os +import shutil + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_raises_rpc_error + +class MultiWalletTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + self.extra_args = [['-wallet=w1', '-wallet=w2', '-wallet=w3', '-wallet=w'], []] + self.supports_cli = True + + def run_test(self): + node = self.nodes[0] + + data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p) + wallet_dir = lambda *p: data_dir('wallets', *p) + wallet = lambda name: node.get_wallet_rpc(name) + + assert_equal(set(node.listwallets()), {"w1", "w2", "w3", "w"}) + + self.stop_nodes() + + self.assert_start_raises_init_error(0, ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist') + self.assert_start_raises_init_error(0, ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir()) + self.assert_start_raises_init_error(0, ['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir()) + + # should not initialize if there are duplicate wallets + self.assert_start_raises_init_error(0, ['-wallet=w1', '-wallet=w1'], 'Error loading wallet w1. Duplicate -wallet filename specified.') + + # should not initialize if wallet file is a directory + os.mkdir(wallet_dir('w11')) + self.assert_start_raises_init_error(0, ['-wallet=w11'], 'Error loading wallet w11. -wallet filename must be a regular file.') + + # should not initialize if one wallet is a copy of another + shutil.copyfile(wallet_dir('w2'), wallet_dir('w22')) + self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'], 'duplicates fileid') + + # should not initialize if wallet file is a symlink + os.symlink(wallet_dir('w1'), wallet_dir('w12')) + self.assert_start_raises_init_error(0, ['-wallet=w12'], 'Error loading wallet w12. 
-wallet filename must be a regular file.') + + # should not initialize if the specified walletdir does not exist + self.assert_start_raises_init_error(0, ['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist') + # should not initialize if the specified walletdir is not a directory + not_a_dir = wallet_dir('notadir') + open(not_a_dir, 'a').close() + self.assert_start_raises_init_error(0, ['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory') + + # if wallets/ doesn't exist, datadir should be the default wallet dir + wallet_dir2 = data_dir('walletdir') + os.rename(wallet_dir(), wallet_dir2) + self.start_node(0, ['-wallet=w4', '-wallet=w5']) + assert_equal(set(node.listwallets()), {"w4", "w5"}) + w5 = wallet("w5") + w5.generate(1) + + # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded + os.rename(wallet_dir2, wallet_dir()) + self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()]) + assert_equal(set(node.listwallets()), {"w4", "w5"}) + w5 = wallet("w5") + w5_info = w5.getwalletinfo() + assert_equal(w5_info['immature_balance'], 50) + + competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir') + os.mkdir(competing_wallet_dir) + self.restart_node(0, ['-walletdir='+competing_wallet_dir]) + self.assert_start_raises_init_error(1, ['-walletdir='+competing_wallet_dir], 'Error initializing wallet database environment') + + self.restart_node(0, self.extra_args[0]) + + w1 = wallet("w1") + w2 = wallet("w2") + w3 = wallet("w3") + w4 = wallet("w") + wallet_bad = wallet("bad") + + w1.generate(1) + + # accessing invalid wallet fails + assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo) + + # accessing wallet RPC without using wallet endpoint fails + assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo) + + # check w1 wallet balance + w1_info = w1.getwalletinfo() + assert_equal(w1_info['immature_balance'], 50) + w1_name = w1_info['walletname'] + assert_equal(w1_name, "w1") + + # check w2 wallet balance + w2_info = w2.getwalletinfo() + assert_equal(w2_info['immature_balance'], 0) + w2_name = w2_info['walletname'] + assert_equal(w2_name, "w2") + + w3_name = w3.getwalletinfo()['walletname'] + assert_equal(w3_name, "w3") + + w4_name = w4.getwalletinfo()['walletname'] + assert_equal(w4_name, "w") + + w1.generate(101) + assert_equal(w1.getbalance(), 100) + assert_equal(w2.getbalance(), 0) + assert_equal(w3.getbalance(), 0) + assert_equal(w4.getbalance(), 0) + + w1.sendtoaddress(w2.getnewaddress(), 1) + w1.sendtoaddress(w3.getnewaddress(), 2) + w1.sendtoaddress(w4.getnewaddress(), 3) + w1.generate(1) + assert_equal(w2.getbalance(), 1) + assert_equal(w3.getbalance(), 2) + assert_equal(w4.getbalance(), 3) + + batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()]) + assert_equal(batch[0]["result"]["chain"], "regtest") + assert_equal(batch[1]["result"]["walletname"], "w1") + +if __name__ == '__main__': + MultiWalletTest().main() diff --git a/test/functional/wallet_resendwallettransactions.py b/test/functional/wallet_resendwallettransactions.py new file mode 100755 index 0000000000..d959bb4c38 --- /dev/null +++ b/test/functional/wallet_resendwallettransactions.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or 
http://www.opensource.org/licenses/mit-license.php. +"""Test the resendwallettransactions RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_raises_rpc_error + +class ResendWalletTransactionsTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [['--walletbroadcast=false']] + + def run_test(self): + # Should raise RPC_WALLET_ERROR (-4) if walletbroadcast is disabled. + assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions) + + # Should return an empty array if there are no unconfirmed wallet transactions. + self.stop_node(0) + self.start_node(0, extra_args=[]) + assert_equal(self.nodes[0].resendwallettransactions(), []) + + # Should return an array with the unconfirmed wallet transaction. + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + assert_equal(self.nodes[0].resendwallettransactions(), [txid]) + +if __name__ == '__main__': + ResendWalletTransactionsTest().main() diff --git a/test/functional/wallet_txn_clone.py b/test/functional/wallet_txn_clone.py new file mode 100755 index 0000000000..ce26d6e0ee --- /dev/null +++ b/test/functional/wallet_txn_clone.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test that the wallet handles accounts properly when there are cloned transactions with malleated scriptsigs.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class TxnMallTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + + def add_options(self, parser): + parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true", + help="Test double-spend of 1-confirmed transaction") + parser.add_option("--segwit", dest="segwit", default=False, action="store_true", + help="Test behaviour with SegWit txn (which should fail)") + + def setup_network(self): + # Start with split network: + super(TxnMallTest, self).setup_network() + disconnect_nodes(self.nodes[1], 2) + disconnect_nodes(self.nodes[2], 1) + + def run_test(self): + if self.options.segwit: + output_type="p2sh-segwit" + else: + output_type="legacy" + + # All nodes should start with 1,250 BTC: + starting_balance = 1250 + for i in range(4): + assert_equal(self.nodes[i].getbalance(), starting_balance) + self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
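+        # (Why the --segwit mode "should fail" to produce a distinct clone: for
+        # p2sh-segwit inputs the signatures live in the witness, which is
+        # excluded from the txid, so re-signing with a different sighash type
+        # leaves the txid unchanged - the test asserts txid1 == txid1_clone and
+        # returns early further down.)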
+ + # Assign coins to foo and bar accounts: + self.nodes[0].settxfee(.001) + + node0_address_foo = self.nodes[0].getnewaddress("foo", output_type) + fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) + fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) + + node0_address_bar = self.nodes[0].getnewaddress("bar", output_type) + fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) + fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) + + assert_equal(self.nodes[0].getbalance(""), + starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) + + # Coins are sent to node1_address + node1_address = self.nodes[1].getnewaddress("from0") + + # Send tx1, and another transaction tx2 that won't be cloned + txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) + txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) + + # Construct a clone of tx1, to be malleated + rawtx1 = self.nodes[0].getrawtransaction(txid1,1) + clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}] + clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"], + rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]} + clone_locktime = rawtx1["locktime"] + clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime) + + # createrawtransaction randomizes the order of its outputs, so swap them if necessary. + # output 0 is at version+#inputs+input+sigstub+sequence+#outputs + # 40 BTC serialized is 00286bee00000000 + pos0 = 2*(4+1+36+1+4+1) + hex40 = "00286bee00000000" + output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0) + if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or + rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40): + output0 = clone_raw[pos0 : pos0 + output_len] + output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len] + clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:] + + # Use a different signature hash type to sign. This creates an equivalent but malleated clone. 
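+        # A worked check of the offset arithmetic above (a sketch; values follow
+        # the legacy pre-signing serialization): version(4) + vin count(1) +
+        # outpoint(36) + empty-scriptSig length byte(1) + sequence(4) +
+        # vout count(1) = 47 bytes, i.e. 94 hex characters before the first output:
+        #
+        #   >>> 2 * (4 + 1 + 36 + 1 + 4 + 1)
+        #   94
+        #   >>> (40 * 10**8).to_bytes(8, 'little').hex()  # 40 BTC as 8-byte LE sats
+        #   '00286bee00000000'
+        #
+        # And why a different sighash type malleates the transaction: the sighash
+        # flag is serialized as the final byte of each signature (0x01 for ALL,
+        # 0x81 for ALL|ANYONECANPAY), so the scriptSig bytes - and hence the
+        # txid - change while the signature still verifies and the outputs stay
+        # the same.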
+ # Don't send the clone anywhere yet + tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY") + assert_equal(tx1_clone["complete"], True) + + # Have node0 mine a block, if requested: + if (self.options.mine_block): + self.nodes[0].generate(1) + sync_blocks(self.nodes[0:2]) + + tx1 = self.nodes[0].gettransaction(txid1) + tx2 = self.nodes[0].gettransaction(txid2) + + # Node0's balance should be starting balance, plus 50BTC for another + # matured block, minus tx1 and tx2 amounts, and minus transaction fees: + expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] + if self.options.mine_block: expected += 50 + expected += tx1["amount"] + tx1["fee"] + expected += tx2["amount"] + tx2["fee"] + assert_equal(self.nodes[0].getbalance(), expected) + + # foo and bar accounts should be debited: + assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"]) + assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) + + if self.options.mine_block: + assert_equal(tx1["confirmations"], 1) + assert_equal(tx2["confirmations"], 1) + # Node1's "from0" balance should be both transaction amounts: + assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"])) + else: + assert_equal(tx1["confirmations"], 0) + assert_equal(tx2["confirmations"], 0) + + # Send clone and its parent to miner + self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) + txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"]) + if self.options.segwit: + assert_equal(txid1, txid1_clone) + return + + # ... mine a block... + self.nodes[2].generate(1) + + # Reconnect the split network, and sync chain: + connect_nodes(self.nodes[1], 2) + self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) + self.nodes[2].sendrawtransaction(tx2["hex"]) + self.nodes[2].generate(1) # Mine another block to make sure we sync + sync_blocks(self.nodes) + + # Re-fetch transaction info: + tx1 = self.nodes[0].gettransaction(txid1) + tx1_clone = self.nodes[0].gettransaction(txid1_clone) + tx2 = self.nodes[0].gettransaction(txid2) + + # Verify expected confirmations + assert_equal(tx1["confirmations"], -2) + assert_equal(tx1_clone["confirmations"], 2) + assert_equal(tx2["confirmations"], 1) + + # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured, + # less possible orphaned matured subsidy + expected += 100 + if (self.options.mine_block): + expected -= 50 + assert_equal(self.nodes[0].getbalance(), expected) + assert_equal(self.nodes[0].getbalance("*", 0), expected) + + # Check node0's individual account balances. 
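+        # (Sign conventions for the account assertions below: gettransaction
+        # reports "amount" and "fee" as negative values for sends, so
+        # 1219 + tx1["amount"] + tx1["fee"] nets the debit out of "foo". Likewise
+        # the -2 confirmations asserted above is Core's convention for a
+        # conflicted transaction whose conflicting clone is buried two blocks
+        # deep.)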
+ # "foo" should have been debited by the equivalent clone of tx1 + assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"]) + # "bar" should have been debited by (possibly unconfirmed) tx2 + assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) + # "" should have starting balance, less funding txes, plus subsidies + assert_equal(self.nodes[0].getbalance("", 0), starting_balance + - 1219 + + fund_foo_tx["fee"] + - 29 + + fund_bar_tx["fee"] + + 100) + + # Node1's "from0" account balance + assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"])) + +if __name__ == '__main__': + TxnMallTest().main() + diff --git a/test/functional/wallet_txn_doublespend.py b/test/functional/wallet_txn_doublespend.py new file mode 100755 index 0000000000..01129f3817 --- /dev/null +++ b/test/functional/wallet_txn_doublespend.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the wallet accounts properly when there is a double-spend conflict.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class TxnMallTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + + def add_options(self, parser): + parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true", + help="Test double-spend of 1-confirmed transaction") + + def setup_network(self): + # Start with split network: + super().setup_network() + disconnect_nodes(self.nodes[1], 2) + disconnect_nodes(self.nodes[2], 1) + + def run_test(self): + # All nodes should start with 1,250 BTC: + starting_balance = 1250 + for i in range(4): + assert_equal(self.nodes[i].getbalance(), starting_balance) + self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress! 
+ + # Assign coins to foo and bar accounts: + node0_address_foo = self.nodes[0].getnewaddress("foo") + fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) + fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) + + node0_address_bar = self.nodes[0].getnewaddress("bar") + fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) + fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) + + assert_equal(self.nodes[0].getbalance(""), + starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) + + # Coins are sent to node1_address + node1_address = self.nodes[1].getnewaddress("from0") + + # First: use raw transaction API to send 1240 BTC to node1_address, + # but don't broadcast: + doublespend_fee = Decimal('-.02') + rawtx_input_0 = {} + rawtx_input_0["txid"] = fund_foo_txid + rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219) + rawtx_input_1 = {} + rawtx_input_1["txid"] = fund_bar_txid + rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29) + inputs = [rawtx_input_0, rawtx_input_1] + change_address = self.nodes[0].getnewaddress() + outputs = {} + outputs[node1_address] = 1240 + outputs[change_address] = 1248 - 1240 + doublespend_fee + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + doublespend = self.nodes[0].signrawtransaction(rawtx) + assert_equal(doublespend["complete"], True) + + # Create two spends using 1 50 BTC coin each + txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) + txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) + + # Have node0 mine a block: + if (self.options.mine_block): + self.nodes[0].generate(1) + sync_blocks(self.nodes[0:2]) + + tx1 = self.nodes[0].gettransaction(txid1) + tx2 = self.nodes[0].gettransaction(txid2) + + # Node0's balance should be starting balance, plus 50BTC for another + # matured block, minus 40, minus 20, and minus transaction fees: + expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] + if self.options.mine_block: expected += 50 + expected += tx1["amount"] + tx1["fee"] + expected += tx2["amount"] + tx2["fee"] + assert_equal(self.nodes[0].getbalance(), expected) + + # foo and bar accounts should be debited: + assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"]) + assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"]) + + if self.options.mine_block: + assert_equal(tx1["confirmations"], 1) + assert_equal(tx2["confirmations"], 1) + # Node1's "from0" balance should be both transaction amounts: + assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"])) + else: + assert_equal(tx1["confirmations"], 0) + assert_equal(tx2["confirmations"], 0) + + # Now give doublespend and its parents to miner: + self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) + self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) + doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"]) + # ... mine a block... 
+ self.nodes[2].generate(1) + + # Reconnect the split network, and sync chain: + connect_nodes(self.nodes[1], 2) + self.nodes[2].generate(1) # Mine another block to make sure we sync + sync_blocks(self.nodes) + assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2) + + # Re-fetch transaction info: + tx1 = self.nodes[0].gettransaction(txid1) + tx2 = self.nodes[0].gettransaction(txid2) + + # Both transactions should be conflicted + assert_equal(tx1["confirmations"], -2) + assert_equal(tx2["confirmations"], -2) + + # Node0's total balance should be starting balance, plus 100BTC for + # two more matured blocks, minus 1240 for the double-spend, plus fees (which are + # negative): + expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee + assert_equal(self.nodes[0].getbalance(), expected) + assert_equal(self.nodes[0].getbalance("*"), expected) + + # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies + + # fees (which are negative) + assert_equal(self.nodes[0].getbalance("foo"), 1219) + assert_equal(self.nodes[0].getbalance("bar"), 29) + assert_equal(self.nodes[0].getbalance(""), starting_balance + -1219 + - 29 + -1240 + + 100 + + fund_foo_tx["fee"] + + fund_bar_tx["fee"] + + doublespend_fee) + + # Node1's "from0" account balance should be just the doublespend: + assert_equal(self.nodes[1].getbalance("from0"), 1240) + +if __name__ == '__main__': + TxnMallTest().main() + diff --git a/test/functional/wallet_zapwallettxes.py b/test/functional/wallet_zapwallettxes.py new file mode 100755 index 0000000000..08afb87894 --- /dev/null +++ b/test/functional/wallet_zapwallettxes.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the zapwallettxes functionality. + +- start two bitcoind nodes +- create two transactions on node 0 - one is confirmed and one is unconfirmed. +- restart node 0 and verify that both the confirmed and the unconfirmed + transactions are still available. +- restart node 0 with zapwallettxes and persistmempool, and verify that both + the confirmed and the unconfirmed transactions are still available. +- restart node 0 with just zapwallettxes and verify that the confirmed + transactions are still available, but that the unconfirmed transaction has + been zapped. +""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + wait_until, +) + +class ZapWalletTXesTest (BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + + def run_test(self): + self.log.info("Mining blocks...") + self.nodes[0].generate(1) + self.sync_all() + self.nodes[1].generate(100) + self.sync_all() + + # This transaction will be confirmed + txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10) + + self.nodes[0].generate(1) + self.sync_all() + + # This transaction will not be confirmed + txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20) + + # Confirmed and unconfirmed transactions are now in the wallet. + assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) + assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) + + # Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet. 
+        self.stop_node(0) +        self.start_node(0) + +        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) +        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) + +        # Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed +        # transaction is zapped from the wallet, but is re-added when the mempool is reloaded. +        self.stop_node(0) +        self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"]) + +        wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3) + +        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) +        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) + +        # Stop node0 and restart with zapwallettxes, but not persistmempool. +        # The unconfirmed transaction is zapped and is no longer in the wallet. +        self.stop_node(0) +        self.start_node(0, ["-zapwallettxes=2"]) + +        # tx1 is still available because it was confirmed +        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) + +        # This will raise an exception because the unconfirmed transaction has been zapped +        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2) + +if __name__ == '__main__': +    ZapWalletTXesTest().main() diff --git a/test/functional/walletbackup.py b/test/functional/walletbackup.py deleted file mode 100755 index b4be7debb5..0000000000 --- a/test/functional/walletbackup.py +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the wallet backup features. - -Test case is: -4 nodes. 1 2 and 3 send transactions between each other, -fourth node is a miner. -1 2 3 each mine a block to start, then -Miner creates 100 blocks so 1 2 3 each have 50 mature -coins to spend. -Then 5 iterations of 1/2/3 sending coins amongst -themselves to get transactions in the wallets, -and the miner mining one block. - -Wallets are backed up using dumpwallet/backupwallet. -Then 5 more iterations of transactions and mining a block. - -Miner then generates 101 more blocks, so any -transaction fees paid mature. - -Sanity check: - Sum(1,2,3,4 balances) == 114*50 - -1/2/3 are shutdown, and their wallets erased. -Then restore using wallet.dat backup. And -confirm 1/2/3/4 balances are same as before. - -Shutdown again, restore using importwallet, -and confirm again balances are correct.
-""" -from random import randint -import shutil - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class WalletBackupTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = True - # nodes 1, 2,3 are spenders, let's give them a keypool=100 - self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []] - - def setup_network(self, split=False): - self.setup_nodes() - connect_nodes(self.nodes[0], 3) - connect_nodes(self.nodes[1], 3) - connect_nodes(self.nodes[2], 3) - connect_nodes(self.nodes[2], 0) - self.sync_all() - - def one_send(self, from_node, to_address): - if (randint(1,2) == 1): - amount = Decimal(randint(1,10)) / Decimal(10) - self.nodes[from_node].sendtoaddress(to_address, amount) - - def do_one_round(self): - a0 = self.nodes[0].getnewaddress() - a1 = self.nodes[1].getnewaddress() - a2 = self.nodes[2].getnewaddress() - - self.one_send(0, a1) - self.one_send(0, a2) - self.one_send(1, a0) - self.one_send(1, a2) - self.one_send(2, a0) - self.one_send(2, a1) - - # Have the miner (node3) mine a block. - # Must sync mempools before mining. - sync_mempools(self.nodes) - self.nodes[3].generate(1) - sync_blocks(self.nodes) - - # As above, this mirrors the original bash test. - def start_three(self): - self.start_node(0) - self.start_node(1) - self.start_node(2) - connect_nodes(self.nodes[0], 3) - connect_nodes(self.nodes[1], 3) - connect_nodes(self.nodes[2], 3) - connect_nodes(self.nodes[2], 0) - - def stop_three(self): - self.stop_node(0) - self.stop_node(1) - self.stop_node(2) - - def erase_three(self): - os.remove(self.options.tmpdir + "/node0/regtest/wallets/wallet.dat") - os.remove(self.options.tmpdir + "/node1/regtest/wallets/wallet.dat") - os.remove(self.options.tmpdir + "/node2/regtest/wallets/wallet.dat") - - def run_test(self): - self.log.info("Generating initial blockchain") - self.nodes[0].generate(1) - sync_blocks(self.nodes) - self.nodes[1].generate(1) - sync_blocks(self.nodes) - self.nodes[2].generate(1) - sync_blocks(self.nodes) - self.nodes[3].generate(100) - sync_blocks(self.nodes) - - assert_equal(self.nodes[0].getbalance(), 50) - assert_equal(self.nodes[1].getbalance(), 50) - assert_equal(self.nodes[2].getbalance(), 50) - assert_equal(self.nodes[3].getbalance(), 0) - - self.log.info("Creating transactions") - # Five rounds of sending each other transactions. - for i in range(5): - self.do_one_round() - - self.log.info("Backing up") - tmpdir = self.options.tmpdir - self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak") - self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump") - self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak") - self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump") - self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak") - self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump") - - self.log.info("More transactions") - for i in range(5): - self.do_one_round() - - # Generate 101 more blocks, so any fees paid mature - self.nodes[3].generate(101) - self.sync_all() - - balance0 = self.nodes[0].getbalance() - balance1 = self.nodes[1].getbalance() - balance2 = self.nodes[2].getbalance() - balance3 = self.nodes[3].getbalance() - total = balance0 + balance1 + balance2 + balance3 - - # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.) - # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700. 
- assert_equal(total, 5700) - - ## - # Test restoring spender wallets from backups - ## - self.log.info("Restoring using wallet.dat") - self.stop_three() - self.erase_three() - - # Start node2 with no chain - shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") - shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") - - # Restore wallets from backup - shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallets/wallet.dat") - shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallets/wallet.dat") - shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallets/wallet.dat") - - self.log.info("Re-starting nodes") - self.start_three() - sync_blocks(self.nodes) - - assert_equal(self.nodes[0].getbalance(), balance0) - assert_equal(self.nodes[1].getbalance(), balance1) - assert_equal(self.nodes[2].getbalance(), balance2) - - self.log.info("Restoring using dumped wallet") - self.stop_three() - self.erase_three() - - #start node2 with no chain - shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") - shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") - - self.start_three() - - assert_equal(self.nodes[0].getbalance(), 0) - assert_equal(self.nodes[1].getbalance(), 0) - assert_equal(self.nodes[2].getbalance(), 0) - - self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump") - self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump") - self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump") - - sync_blocks(self.nodes) - - assert_equal(self.nodes[0].getbalance(), balance0) - assert_equal(self.nodes[1].getbalance(), balance1) - assert_equal(self.nodes[2].getbalance(), balance2) - - # Backup to source wallet file must fail - sourcePaths = [ - tmpdir + "/node0/regtest/wallets/wallet.dat", - tmpdir + "/node0/./regtest/wallets/wallet.dat", - tmpdir + "/node0/regtest/wallets/", - tmpdir + "/node0/regtest/wallets"] - - for sourcePath in sourcePaths: - assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath) - - -if __name__ == '__main__': - WalletBackupTest().main() diff --git a/test/functional/zapwallettxes.py b/test/functional/zapwallettxes.py deleted file mode 100755 index 08afb87894..0000000000 --- a/test/functional/zapwallettxes.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the zapwallettxes functionality. - -- start two bitcoind nodes -- create two transactions on node 0 - one is confirmed and one is unconfirmed. -- restart node 0 and verify that both the confirmed and the unconfirmed - transactions are still available. -- restart node 0 with zapwallettxes and persistmempool, and verify that both - the confirmed and the unconfirmed transactions are still available. -- restart node 0 with just zapwallettxes and verify that the confirmed - transactions are still available, but that the unconfirmed transaction has - been zapped. 
-""" -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_raises_rpc_error, - wait_until, -) - -class ZapWalletTXesTest (BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - - def run_test(self): - self.log.info("Mining blocks...") - self.nodes[0].generate(1) - self.sync_all() - self.nodes[1].generate(100) - self.sync_all() - - # This transaction will be confirmed - txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10) - - self.nodes[0].generate(1) - self.sync_all() - - # This transaction will not be confirmed - txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20) - - # Confirmed and unconfirmed transactions are now in the wallet. - assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) - assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) - - # Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet. - self.stop_node(0) - self.start_node(0) - - assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) - assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) - - # Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed - # transaction is zapped from the wallet, but is re-added when the mempool is reloaded. - self.stop_node(0) - self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"]) - - wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3) - - assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) - assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2) - - # Stop node0 and restart with zapwallettxes, but not persistmempool. - # The unconfirmed transaction is zapped and is no longer in the wallet. - self.stop_node(0) - self.start_node(0, ["-zapwallettxes=2"]) - - # tx1 is still be available because it was confirmed - assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1) - - # This will raise an exception because the unconfirmed transaction has been zapped - assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2) - -if __name__ == '__main__': - ZapWalletTXesTest().main() -- cgit v1.2.3 From 61b8f7f273022d3163f998ff5d66d53ca0460c8b Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Thu, 25 Jan 2018 09:44:29 +1000 Subject: [tests] Rename p2p_* functional tests. 
--- test/functional/README.md | 4 +- test/functional/disconnect_ban.py | 107 -- test/functional/invalidblockrequest.py | 117 -- test/functional/invalidtxrequest.py | 73 - test/functional/node_network_limited.py | 57 - test/functional/p2p-acceptblock.py | 323 ----- test/functional/p2p-compactblocks.py | 921 ------------- test/functional/p2p-feefilter.py | 93 -- test/functional/p2p-fingerprint.py | 152 --- test/functional/p2p-leaktests.py | 153 --- test/functional/p2p-mempool.py | 35 - test/functional/p2p-segwit.py | 1952 --------------------------- test/functional/p2p-timeouts.py | 75 - test/functional/p2p_compactblocks.py | 921 +++++++++++++ test/functional/p2p_disconnect_ban.py | 107 ++ test/functional/p2p_feefilter.py | 93 ++ test/functional/p2p_fingerprint.py | 152 +++ test/functional/p2p_invalid_block.py | 117 ++ test/functional/p2p_invalid_tx.py | 73 + test/functional/p2p_leak.py | 153 +++ test/functional/p2p_mempool.py | 35 + test/functional/p2p_node_network_limited.py | 57 + test/functional/p2p_segwit.py | 1952 +++++++++++++++++++++++++++ test/functional/p2p_sendheaders.py | 598 ++++++++ test/functional/p2p_timeouts.py | 75 + test/functional/p2p_unrequested_blocks.py | 323 +++++ test/functional/sendheaders.py | 598 -------- test/functional/test_runner.py | 28 +- 28 files changed, 4672 insertions(+), 4672 deletions(-) delete mode 100755 test/functional/disconnect_ban.py delete mode 100755 test/functional/invalidblockrequest.py delete mode 100755 test/functional/invalidtxrequest.py delete mode 100755 test/functional/node_network_limited.py delete mode 100755 test/functional/p2p-acceptblock.py delete mode 100755 test/functional/p2p-compactblocks.py delete mode 100755 test/functional/p2p-feefilter.py delete mode 100755 test/functional/p2p-fingerprint.py delete mode 100755 test/functional/p2p-leaktests.py delete mode 100755 test/functional/p2p-mempool.py delete mode 100755 test/functional/p2p-segwit.py delete mode 100755 test/functional/p2p-timeouts.py create mode 100755 test/functional/p2p_compactblocks.py create mode 100755 test/functional/p2p_disconnect_ban.py create mode 100755 test/functional/p2p_feefilter.py create mode 100755 test/functional/p2p_fingerprint.py create mode 100755 test/functional/p2p_invalid_block.py create mode 100755 test/functional/p2p_invalid_tx.py create mode 100755 test/functional/p2p_leak.py create mode 100755 test/functional/p2p_mempool.py create mode 100755 test/functional/p2p_node_network_limited.py create mode 100755 test/functional/p2p_segwit.py create mode 100755 test/functional/p2p_sendheaders.py create mode 100755 test/functional/p2p_timeouts.py create mode 100755 test/functional/p2p_unrequested_blocks.py delete mode 100755 test/functional/sendheaders.py (limited to 'test') diff --git a/test/functional/README.md b/test/functional/README.md index b3d34c72fd..662b4b44d5 100644 --- a/test/functional/README.md +++ b/test/functional/README.md @@ -87,7 +87,7 @@ start the networking thread. (Continue with the test logic in your existing thread.) - Can be used to write tests where specific P2P protocol behavior is tested. -Examples tests are `p2p-acceptblock.py`, `p2p-compactblocks.py`. +Examples tests are `p2p_unrequested_blocks.py`, `p2p_compactblocks.py`. #### Comptool @@ -133,7 +133,7 @@ Each `TestInstance` consists of: acceptance is tested against the given outcome. - For examples of tests written in this framework, see - `invalidblockrequest.py` and `feature_block.py`. + `p2p_invalid_block.py` and `feature_block.py`. 
 ### test-framework modules
diff --git a/test/functional/disconnect_ban.py b/test/functional/disconnect_ban.py
deleted file mode 100755
index c6067befb2..0000000000
--- a/test/functional/disconnect_ban.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2014-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test node disconnect and ban behavior"""
-import time
-
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_equal,
-    assert_raises_rpc_error,
-    connect_nodes_bi,
-    wait_until,
-)
-
-class DisconnectBanTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.num_nodes = 2
-
-    def run_test(self):
-        self.log.info("Test setban and listbanned RPCs")
-
-        self.log.info("setban: successfully ban single IP address")
-        assert_equal(len(self.nodes[1].getpeerinfo()), 2)  # node1 should have 2 connections to node0 at this point
-        self.nodes[1].setban("127.0.0.1", "add")
-        wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
-        assert_equal(len(self.nodes[1].getpeerinfo()), 0)  # all nodes must be disconnected at this point
-        assert_equal(len(self.nodes[1].listbanned()), 1)
-
-        self.log.info("clearbanned: successfully clear ban list")
-        self.nodes[1].clearbanned()
-        assert_equal(len(self.nodes[1].listbanned()), 0)
-        self.nodes[1].setban("127.0.0.0/24", "add")
-
-        self.log.info("setban: fail to ban an already banned subnet")
-        assert_equal(len(self.nodes[1].listbanned()), 1)
-        assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
-
-        self.log.info("setban: fail to ban an invalid subnet")
-        assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
-        assert_equal(len(self.nodes[1].listbanned()), 1)  # still only one banned IP, because 127.0.0.1 is within the range of 127.0.0.0/24
-
-        self.log.info("setban remove: fail to unban a non-banned subnet")
-        assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
-        assert_equal(len(self.nodes[1].listbanned()), 1)
-
-        self.log.info("setban remove: successfully unban subnet")
-        self.nodes[1].setban("127.0.0.0/24", "remove")
-        assert_equal(len(self.nodes[1].listbanned()), 0)
-        self.nodes[1].clearbanned()
-        assert_equal(len(self.nodes[1].listbanned()), 0)
-
-        self.log.info("setban: test persistence across node restart")
-        self.nodes[1].setban("127.0.0.0/32", "add")
-        self.nodes[1].setban("127.0.0.0/24", "add")
-        # Set the mocktime so we can control when bans expire
-        old_time = int(time.time())
-        self.nodes[1].setmocktime(old_time)
-        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 second
-        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
-        listBeforeShutdown = self.nodes[1].listbanned()
-        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
-        # Move time forward by 3 seconds so the third ban has expired
-        self.nodes[1].setmocktime(old_time + 3)
-        assert_equal(len(self.nodes[1].listbanned()), 3)
-
-        self.stop_node(1)
-        self.start_node(1)
-
-        listAfterShutdown = self.nodes[1].listbanned()
-        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
-        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
-        assert_equal("/19" in listAfterShutdown[2]['address'], True)
-
-        # Clear ban lists
-        self.nodes[1].clearbanned()
-
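-        # Reconnect the nodes; the ban tests and the restart above dropped all connections.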
connect_nodes_bi(self.nodes, 0, 1) - - self.log.info("Test disconnectnode RPCs") - - self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid") - address1 = self.nodes[0].getpeerinfo()[0]['addr'] - node1 = self.nodes[0].getpeerinfo()[0]['addr'] - assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1) - - self.log.info("disconnectnode: fail to disconnect when calling with junk address") - assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street") - - self.log.info("disconnectnode: successfully disconnect node by address") - address1 = self.nodes[0].getpeerinfo()[0]['addr'] - self.nodes[0].disconnectnode(address=address1) - wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) - assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] - - self.log.info("disconnectnode: successfully reconnect node") - connect_nodes_bi(self.nodes, 0, 1) # reconnect the node - assert_equal(len(self.nodes[0].getpeerinfo()), 2) - assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] - - self.log.info("disconnectnode: successfully disconnect node by node id") - id1 = self.nodes[0].getpeerinfo()[0]['id'] - self.nodes[0].disconnectnode(nodeid=id1) - wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) - assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1] - -if __name__ == '__main__': - DisconnectBanTest().main() diff --git a/test/functional/invalidblockrequest.py b/test/functional/invalidblockrequest.py deleted file mode 100755 index edcade63c1..0000000000 --- a/test/functional/invalidblockrequest.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test node responses to invalid blocks. - -In this test we connect to one node over p2p, and test block requests: -1) Valid blocks should be requested and become chain tip. -2) Invalid block with duplicated transaction should be re-requested. -3) Invalid block with bad coinbase value should be rejected and not -re-requested. -""" - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.comptool import TestManager, TestInstance, RejectResult -from test_framework.blocktools import * -from test_framework.mininode import network_thread_start -import copy -import time - -# Use the ComparisonTestFramework with 1 node: only use --testbinary. -class InvalidBlockRequestTest(ComparisonTestFramework): - - ''' Can either run this test as 1 node with expected answers, or two and compare them. - Change the "outcome" variable from each TestInstance object to only do the comparison. 
-    '''
-    def set_test_params(self):
-        self.num_nodes = 1
-        self.setup_clean_chain = True
-
-    def run_test(self):
-        test = TestManager(self, self.options.tmpdir)
-        test.add_all_connections(self.nodes)
-        self.tip = None
-        self.block_time = None
-        network_thread_start()
-        test.run()
-
-    def get_tests(self):
-        if self.tip is None:
-            self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
-        self.block_time = int(time.time())+1
-
-        '''
-        Create a new block with an anyone-can-spend coinbase
-        '''
-        height = 1
-        block = create_block(self.tip, create_coinbase(height), self.block_time)
-        self.block_time += 1
-        block.solve()
-        # Save the coinbase for later
-        self.block1 = block
-        self.tip = block.sha256
-        height += 1
-        yield TestInstance([[block, True]])
-
-        '''
-        Now we need that block to mature so we can spend the coinbase.
-        '''
-        test = TestInstance(sync_every_block=False)
-        for i in range(100):
-            block = create_block(self.tip, create_coinbase(height), self.block_time)
-            block.solve()
-            self.tip = block.sha256
-            self.block_time += 1
-            test.blocks_and_transactions.append([block, True])
-            height += 1
-        yield test
-
-        '''
-        Now we use merkle-root malleability to generate an invalid block with
-        same blockheader.
-        Manufacture a block with 3 transactions (coinbase, spend of prior
-        coinbase, spend of that spend). Duplicate the 3rd transaction to
-        leave merkle root and blockheader unchanged but invalidate the block.
-        '''
-        block2 = create_block(self.tip, create_coinbase(height), self.block_time)
-        self.block_time += 1
-
-        # b'\x51' is OP_TRUE
-        tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 50 * COIN)
-        tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN)
-
-        block2.vtx.extend([tx1, tx2])
-        block2.hashMerkleRoot = block2.calc_merkle_root()
-        block2.rehash()
-        block2.solve()
-        orig_hash = block2.sha256
-        block2_orig = copy.deepcopy(block2)
-
-        # Mutate block 2
-        block2.vtx.append(tx2)
-        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
-        assert_equal(orig_hash, block2.rehash())
-        assert(block2_orig.vtx != block2.vtx)
-
-        self.tip = block2.sha256
-        yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]])
-        height += 1
-
-        '''
-        Make sure that a totally screwed up block is not valid.
-        '''
-        block3 = create_block(self.tip, create_coinbase(height), self.block_time)
-        self.block_time += 1
-        block3.vtx[0].vout[0].nValue = 100 * COIN  # Too high!
-        block3.vtx[0].sha256 = None
-        block3.vtx[0].calc_sha256()
-        block3.hashMerkleRoot = block3.calc_merkle_root()
-        block3.rehash()
-        block3.solve()
-
-        yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]])
-
-
-if __name__ == '__main__':
-    InvalidBlockRequestTest().main()
diff --git a/test/functional/invalidtxrequest.py b/test/functional/invalidtxrequest.py
deleted file mode 100755
index 9c1100e070..0000000000
--- a/test/functional/invalidtxrequest.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test node responses to invalid transactions.
-
-In this test we connect to one node over p2p, and test tx requests.
-"""
-
-from test_framework.test_framework import ComparisonTestFramework
-from test_framework.comptool import TestManager, TestInstance, RejectResult
-from test_framework.blocktools import *
-from test_framework.mininode import network_thread_start
-import time
-
-
-# Use the ComparisonTestFramework with 1 node: only use --testbinary.
-class InvalidTxRequestTest(ComparisonTestFramework): - - ''' Can either run this test as 1 node with expected answers, or two and compare them. - Change the "outcome" variable from each TestInstance object to only do the comparison. ''' - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - - def run_test(self): - test = TestManager(self, self.options.tmpdir) - test.add_all_connections(self.nodes) - self.tip = None - self.block_time = None - network_thread_start() - test.run() - - def get_tests(self): - if self.tip is None: - self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) - self.block_time = int(time.time())+1 - - ''' - Create a new block with an anyone-can-spend coinbase - ''' - height = 1 - block = create_block(self.tip, create_coinbase(height), self.block_time) - self.block_time += 1 - block.solve() - # Save the coinbase for later - self.block1 = block - self.tip = block.sha256 - height += 1 - yield TestInstance([[block, True]]) - - ''' - Now we need that block to mature so we can spend the coinbase. - ''' - test = TestInstance(sync_every_block=False) - for i in range(100): - block = create_block(self.tip, create_coinbase(height), self.block_time) - block.solve() - self.tip = block.sha256 - self.block_time += 1 - test.blocks_and_transactions.append([block, True]) - height += 1 - yield test - - # b'\x64' is OP_NOTIF - # Transaction will be rejected with code 16 (REJECT_INVALID) - tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000) - yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]]) - - # TODO: test further transactions... - -if __name__ == '__main__': - InvalidTxRequestTest().main() diff --git a/test/functional/node_network_limited.py b/test/functional/node_network_limited.py deleted file mode 100755 index 70415e0168..0000000000 --- a/test/functional/node_network_limited.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Tests NODE_NETWORK_LIMITED. - -Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly -and that it responds to getdata requests for blocks correctly: - - send a block within 288 + 2 of the tip - - disconnect peers who request blocks older than that.""" -from test_framework.messages import CInv, msg_getdata -from test_framework.mininode import NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS, NetworkThread, P2PInterface -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class P2PIgnoreInv(P2PInterface): - def on_inv(self, message): - # The node will send us invs for other blocks. Ignore them. 
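-        # (The base P2PInterface handler would reply to invs with getdata;
-        #  overriding it keeps block downloads under the test's control.)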
-        pass
-
-    def send_getdata_for_block(self, blockhash):
-        getdata_request = msg_getdata()
-        getdata_request.inv.append(CInv(2, int(blockhash, 16)))
-        self.send_message(getdata_request)
-
-class NodeNetworkLimitedTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.setup_clean_chain = True
-        self.num_nodes = 1
-        self.extra_args = [['-prune=550']]
-
-    def run_test(self):
-        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
-        NetworkThread().start()
-        node.wait_for_verack()
-
-        expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
-
-        self.log.info("Check that node has signalled expected services.")
-        assert_equal(node.nServices, expected_services)
-
-        self.log.info("Check that the localservices is as expected.")
-        assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
-
-        self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
-        blocks = self.nodes[0].generate(292)
-
-        self.log.info("Make sure we can retrieve the block at tip-288.")
-        node.send_getdata_for_block(blocks[1])  # last block in valid range
-        node.wait_for_block(int(blocks[1], 16), timeout=3)
-
-        self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
-        node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
-        node.wait_for_disconnect(5)
-
-if __name__ == '__main__':
-    NodeNetworkLimitedTest().main()
diff --git a/test/functional/p2p-acceptblock.py b/test/functional/p2p-acceptblock.py
deleted file mode 100755
index 672626f15b..0000000000
--- a/test/functional/p2p-acceptblock.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test processing of unrequested blocks.
-
-Setup: two nodes, node0+node1, not connected to each other. Node1 will have
-nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
-
-We have one P2PInterface connection to node0 called test_node, and one to node1
-called min_work_node.
-
-The test:
-1. Generate one block on each node, to leave IBD.
-
-2. Mine a new block on each tip, and deliver to each node from its peer.
-   The tip should advance for node0, but node1 should skip processing due to
-   nMinimumChainWork.
-
-Node1 is unused in tests 3-7:
-
-3. Mine a block that forks from the genesis block, and deliver to test_node.
-   Node0 should not process this block (just accept the header), because it
-   is unrequested and doesn't have more or equal work to the tip.
-
-4a,b. Send another two blocks that build on the forking block.
-      Node0 should process the second block but be stuck on the shorter chain,
-      because it's missing an intermediate block.
-
-4c. Send 288 more blocks on the longer chain (the number of blocks ahead
-    we currently store).
-    Node0 should process all but the last block (too far ahead in height).
-
-5. Send a duplicate of the block in #3 to Node0.
-   Node0 should not process the block because it is unrequested, and stay on
-   the shorter chain.
-
-6. Send Node0 an inv for the height 3 block produced in #4 above.
-   Node0 should figure out that it is missing the height 2 block and send a
-   getdata.
-
-7. Send Node0 the missing block again.
-   Node0 should process and the tip should advance.
-
-8. Create a fork which is invalid at a height longer than the current chain
-   (i.e. one the node will try to reorg to) but which has headers built on top
-   of the invalid block. Check that we get disconnected if we send more headers
-   on the chain the node now knows to be invalid.
-
-9. Test Node1 is able to sync when connected to node0 (which should have sufficient
-   work on its chain).
-"""
-
-from test_framework.mininode import *
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
-import time
-from test_framework.blocktools import create_block, create_coinbase, create_transaction

-class AcceptBlockTest(BitcoinTestFramework):
-    def add_options(self, parser):
-        parser.add_option("--testbinary", dest="testbinary",
-                          default=os.getenv("BITCOIND", "bitcoind"),
-                          help="bitcoind binary to test")
-
-    def set_test_params(self):
-        self.setup_clean_chain = True
-        self.num_nodes = 2
-        self.extra_args = [[], ["-minimumchainwork=0x10"]]
-
-    def setup_network(self):
-        # Node0 will be used to test behavior of processing unrequested blocks
-        # from peers which are not whitelisted, while Node1 (started with
-        # -minimumchainwork) will be used to test the interaction with
-        # nMinimumChainWork.
-        self.setup_nodes()
-
-    def run_test(self):
-        # Setup the p2p connections and start up the network thread.
-        # test_node connects to node0 (no -minimumchainwork)
-        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
-        # min_work_node connects to node1 (-minimumchainwork=0x10)
-        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
-
-        network_thread_start()
-
-        # Test logic begins here
-        test_node.wait_for_verack()
-        min_work_node.wait_for_verack()
-
-        # 1. Have nodes mine a block (leave IBD)
-        [ n.generate(1) for n in self.nodes ]
-        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
-
-        # 2. Send one block that builds on each tip.
-        # This should be accepted by node0
-        blocks_h2 = []  # the height 2 blocks on each node's chain
-        block_time = int(time.time()) + 1
-        for i in range(2):
-            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
-            blocks_h2[i].solve()
-            block_time += 1
-        test_node.send_message(msg_block(blocks_h2[0]))
-        min_work_node.send_message(msg_block(blocks_h2[1]))
-
-        for x in [test_node, min_work_node]:
-            x.sync_with_ping()
-        assert_equal(self.nodes[0].getblockcount(), 2)
-        assert_equal(self.nodes[1].getblockcount(), 1)
-        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
-
-        # 3. Send another block that builds on genesis.
-        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
-        block_time += 1
-        block_h1f.solve()
-        test_node.send_message(msg_block(block_h1f))
-
-        test_node.sync_with_ping()
-        tip_entry_found = False
-        for x in self.nodes[0].getchaintips():
-            if x['hash'] == block_h1f.hash:
-                assert_equal(x['status'], "headers-only")
-                tip_entry_found = True
-        assert(tip_entry_found)
-        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
-
-        # 4. Send another two blocks that build on the fork.
-        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
-        block_time += 1
-        block_h2f.solve()
-        test_node.send_message(msg_block(block_h2f))
-
-        test_node.sync_with_ping()
-        # Since the earlier block was not processed by node, the new block
-        # can't be fully validated.
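-        # (getchaintips reports such a block as a "headers-only" tip: header
-        #  accepted, block data not validated.)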
-        tip_entry_found = False
-        for x in self.nodes[0].getchaintips():
-            if x['hash'] == block_h2f.hash:
-                assert_equal(x['status'], "headers-only")
-                tip_entry_found = True
-        assert(tip_entry_found)
-
-        # But this block should be accepted by node since it has equal work.
-        self.nodes[0].getblock(block_h2f.hash)
-        self.log.info("Second height 2 block accepted, but not reorg'ed to")
-
-        # 4b. Now send another block that builds on the forking chain.
-        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
-        block_h3.solve()
-        test_node.send_message(msg_block(block_h3))
-
-        test_node.sync_with_ping()
-        # Since the earlier block was not processed by node, the new block
-        # can't be fully validated.
-        tip_entry_found = False
-        for x in self.nodes[0].getchaintips():
-            if x['hash'] == block_h3.hash:
-                assert_equal(x['status'], "headers-only")
-                tip_entry_found = True
-        assert(tip_entry_found)
-
-        # But this block should be accepted by node since it has more work.
-        self.nodes[0].getblock(block_h3.hash)
-        self.log.info("Unrequested more-work block accepted")
-
-        # 4c. Now mine 288 more blocks and deliver; all should be processed but
-        # the last (height-too-high) on node (as long as it's not missing any headers)
-        tip = block_h3
-        all_blocks = []
-        for i in range(288):
-            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
-            next_block.solve()
-            all_blocks.append(next_block)
-            tip = next_block
-
-        # Now send the block at height 5 and check that it wasn't accepted (missing header)
-        test_node.send_message(msg_block(all_blocks[1]))
-        test_node.sync_with_ping()
-        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
-        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
-
-        # The block at height 5 should be accepted if we provide the missing header, though
-        headers_message = msg_headers()
-        headers_message.headers.append(CBlockHeader(all_blocks[0]))
-        test_node.send_message(headers_message)
-        test_node.send_message(msg_block(all_blocks[1]))
-        test_node.sync_with_ping()
-        self.nodes[0].getblock(all_blocks[1].hash)
-
-        # Now send the blocks in all_blocks
-        for i in range(288):
-            test_node.send_message(msg_block(all_blocks[i]))
-        test_node.sync_with_ping()
-
-        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
-        for x in all_blocks[:-1]:
-            self.nodes[0].getblock(x.hash)
-        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
-
-        # 5. Test handling of an unrequested block on the node that didn't process
-        # it. The block should still not be processed (even though it has a child
-        # that has more work).
-
-        # The node should have requested the blocks at some point, so
-        # disconnect/reconnect first
-
-        self.nodes[0].disconnect_p2ps()
-        self.nodes[1].disconnect_p2ps()
-        network_thread_join()
-
-        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
-        network_thread_start()
-        test_node.wait_for_verack()
-
-        test_node.send_message(msg_block(block_h1f))
-
-        test_node.sync_with_ping()
-        assert_equal(self.nodes[0].getblockcount(), 2)
-        self.log.info("Unrequested block that would complete more-work chain was ignored")
-
-        # 6. Try to get node to request the missing block.
-        # Poke the node with an inv for block at height 3 and see if that
-        # triggers a getdata on block 2 (it should if block 2 is missing).
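-        # last_message is updated by the network thread, so reads and resets
-        # must hold mininode_lock.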
- with mininode_lock: - # Clear state so we can check the getdata request - test_node.last_message.pop("getdata", None) - test_node.send_message(msg_inv([CInv(2, block_h3.sha256)])) - - test_node.sync_with_ping() - with mininode_lock: - getdata = test_node.last_message["getdata"] - - # Check that the getdata includes the right block - assert_equal(getdata.inv[0].hash, block_h1f.sha256) - self.log.info("Inv at tip triggered getdata for unprocessed block") - - # 7. Send the missing block for the third time (now it is requested) - test_node.send_message(msg_block(block_h1f)) - - test_node.sync_with_ping() - assert_equal(self.nodes[0].getblockcount(), 290) - self.nodes[0].getblock(all_blocks[286].hash) - assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) - assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) - self.log.info("Successfully reorged to longer chain from non-whitelisted peer") - - # 8. Create a chain which is invalid at a height longer than the - # current chain, but which has more blocks on top of that - block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1) - block_289f.solve() - block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1) - block_290f.solve() - block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1) - # block_291 spends a coinbase below maturity! - block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1)) - block_291.hashMerkleRoot = block_291.calc_merkle_root() - block_291.solve() - block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1) - block_292.solve() - - # Now send all the headers on the chain and enough blocks to trigger reorg - headers_message = msg_headers() - headers_message.headers.append(CBlockHeader(block_289f)) - headers_message.headers.append(CBlockHeader(block_290f)) - headers_message.headers.append(CBlockHeader(block_291)) - headers_message.headers.append(CBlockHeader(block_292)) - test_node.send_message(headers_message) - - test_node.sync_with_ping() - tip_entry_found = False - for x in self.nodes[0].getchaintips(): - if x['hash'] == block_292.hash: - assert_equal(x['status'], "headers-only") - tip_entry_found = True - assert(tip_entry_found) - assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) - - test_node.send_message(msg_block(block_289f)) - test_node.send_message(msg_block(block_290f)) - - test_node.sync_with_ping() - self.nodes[0].getblock(block_289f.hash) - self.nodes[0].getblock(block_290f.hash) - - test_node.send_message(msg_block(block_291)) - - # At this point we've sent an obviously-bogus block, wait for full processing - # without assuming whether we will be disconnected or not - try: - # Only wait a short while so the test doesn't take forever if we do get - # disconnected - test_node.sync_with_ping(timeout=1) - except AssertionError: - test_node.wait_for_disconnect() - - self.nodes[0].disconnect_p2ps() - test_node = self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() - test_node.wait_for_verack() - - # We should have failed reorg and switched back to 290 (but have block 291) - assert_equal(self.nodes[0].getblockcount(), 290) - assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) - assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1) - - # Now send a new header on the invalid chain, indicating we're forked off, and expect 
to get disconnected - block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1) - block_293.solve() - headers_message = msg_headers() - headers_message.headers.append(CBlockHeader(block_293)) - test_node.send_message(headers_message) - test_node.wait_for_disconnect() - - # 9. Connect node1 to node0 and ensure it is able to sync - connect_nodes(self.nodes[0], 1) - sync_blocks([self.nodes[0], self.nodes[1]]) - self.log.info("Successfully synced nodes 1 and 0") - -if __name__ == '__main__': - AcceptBlockTest().main() diff --git a/test/functional/p2p-compactblocks.py b/test/functional/p2p-compactblocks.py deleted file mode 100755 index d9f461a049..0000000000 --- a/test/functional/p2p-compactblocks.py +++ /dev/null @@ -1,921 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test compact blocks (BIP 152). - -Version 1 compact blocks are pre-segwit (txids) -Version 2 compact blocks are post-segwit (wtxids) -""" - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment -from test_framework.script import CScript, OP_TRUE - -# TestNode: A peer we use to send messages to bitcoind, and store responses. -class TestNode(P2PInterface): - def __init__(self): - super().__init__() - self.last_sendcmpct = [] - self.block_announced = False - # Store the hashes of blocks we've seen announced. - # This is for synchronizing the p2p message traffic, - # so we can eg wait until a particular block is announced. - self.announced_blockhashes = set() - - def on_sendcmpct(self, message): - self.last_sendcmpct.append(message) - - def on_cmpctblock(self, message): - self.block_announced = True - self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() - self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256) - - def on_headers(self, message): - self.block_announced = True - for x in self.last_message["headers"].headers: - x.calc_sha256() - self.announced_blockhashes.add(x.sha256) - - def on_inv(self, message): - for x in self.last_message["inv"].inv: - if x.type == 2: - self.block_announced = True - self.announced_blockhashes.add(x.hash) - - # Requires caller to hold mininode_lock - def received_block_announcement(self): - return self.block_announced - - def clear_block_announcement(self): - with mininode_lock: - self.block_announced = False - self.last_message.pop("inv", None) - self.last_message.pop("headers", None) - self.last_message.pop("cmpctblock", None) - - def get_headers(self, locator, hashstop): - msg = msg_getheaders() - msg.locator.vHave = locator - msg.hashstop = hashstop - self.send_message(msg) - - def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() - headers_message.headers = [CBlockHeader(b) for b in new_blocks] - self.send_message(headers_message) - - def request_headers_and_sync(self, locator, hashstop=0): - self.clear_block_announcement() - self.get_headers(locator, hashstop) - wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock) - self.clear_block_announcement() - - # Block until a block announcement for a particular block hash is - # received. 
-    def wait_for_block_announcement(self, block_hash, timeout=30):
-        def received_hash():
-            return (block_hash in self.announced_blockhashes)
-        wait_until(received_hash, timeout=timeout, lock=mininode_lock)
-
-    def send_await_disconnect(self, message, timeout=30):
-        """Send a message to the node and wait for disconnect.
-
-        This is used when we want to send a message into the node that we expect
-        will get us disconnected, eg an invalid block."""
-        self.send_message(message)
-        wait_until(lambda: self.state != "connected", timeout=timeout, lock=mininode_lock)
-
-class CompactBlocksTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.setup_clean_chain = True
-        # Node0 = pre-segwit, node1 = segwit-aware
-        self.num_nodes = 2
-        # This test was written assuming SegWit is activated using BIP9 at height 432 (3x confirmation window).
-        # TODO: Rewrite this test to support SegWit being always active.
-        self.extra_args = [["-vbparams=segwit:0:0"], ["-vbparams=segwit:0:999999999999", "-txindex", "-deprecatedrpc=addwitnessaddress"]]
-        self.utxos = []
-
-    def build_block_on_tip(self, node, segwit=False):
-        height = node.getblockcount()
-        tip = node.getbestblockhash()
-        mtp = node.getblockheader(tip)['mediantime']
-        block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
-        block.nVersion = 4
-        if segwit:
-            add_witness_commitment(block)
-        block.solve()
-        return block
-
-    # Create 10 more anyone-can-spend utxos for testing.
-    def make_utxos(self):
-        # Doesn't matter which node we use, just use node0.
-        block = self.build_block_on_tip(self.nodes[0])
-        self.test_node.send_and_ping(msg_block(block))
-        assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
-        self.nodes[0].generate(100)
-
-        total_value = block.vtx[0].vout[0].nValue
-        out_value = total_value // 10
-        tx = CTransaction()
-        tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
-        for i in range(10):
-            tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
-        tx.rehash()
-
-        block2 = self.build_block_on_tip(self.nodes[0])
-        block2.vtx.append(tx)
-        block2.hashMerkleRoot = block2.calc_merkle_root()
-        block2.solve()
-        self.test_node.send_and_ping(msg_block(block2))
-        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
-        self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
-        return
-
-    # Test "sendcmpct" (between peers preferring the same version):
-    # - No compact block announcements unless sendcmpct is sent.
-    # - If sendcmpct is sent with version > preferred_version, the message is ignored.
-    # - If sendcmpct is sent with boolean 0, then block announcements are not
-    #   made with compact blocks.
-    # - If sendcmpct is then sent with boolean 1, then new block announcements
-    #   are made with compact blocks.
-    # If old_node is passed in, request compact blocks with version=preferred-1
-    # and verify that it receives block announcements via compact block.
-    def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
-        # Make sure we get a SENDCMPCT message from our peer
-        def received_sendcmpct():
-            return (len(test_node.last_sendcmpct) > 0)
-        wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
-        with mininode_lock:
-            # Check that the first version received is the preferred one
-            assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
-            # And that we receive versions down to 1.
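-            # (The node sends one sendcmpct announcement per version it
-            #  supports, preferred version first.)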
- assert_equal(test_node.last_sendcmpct[-1].version, 1) - test_node.last_sendcmpct = [] - - tip = int(node.getbestblockhash(), 16) - - def check_announcement_of_new_block(node, peer, predicate): - peer.clear_block_announcement() - block_hash = int(node.generate(1)[0], 16) - peer.wait_for_block_announcement(block_hash, timeout=30) - assert(peer.block_announced) - - with mininode_lock: - assert predicate(peer), ( - "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( - block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) - - # We shouldn't get any block announcements via cmpctblock yet. - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) - - # Try one more time, this time after requesting headers. - test_node.request_headers_and_sync(locator=[tip]) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) - - # Test a few ways of using sendcmpct that should NOT - # result in compact block announcements. - # Before each test, sync the headers chain. - test_node.request_headers_and_sync(locator=[tip]) - - # Now try a SENDCMPCT message with too-high version - sendcmpct = msg_sendcmpct() - sendcmpct.version = preferred_version+1 - sendcmpct.announce = True - test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) - - # Headers sync before next test. - test_node.request_headers_and_sync(locator=[tip]) - - # Now try a SENDCMPCT message with valid version, but announce=False - sendcmpct.version = preferred_version - sendcmpct.announce = False - test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) - - # Headers sync before next test. - test_node.request_headers_and_sync(locator=[tip]) - - # Finally, try a SENDCMPCT message with announce=True - sendcmpct.version = preferred_version - sendcmpct.announce = True - test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Try one more time (no headers sync should be needed!) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Try one more time, after turning on sendheaders - test_node.send_and_ping(msg_sendheaders()) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Try one more time, after sending a version-1, announce=false message. - sendcmpct.version = preferred_version-1 - sendcmpct.announce = False - test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Now turn off announcements - sendcmpct.version = preferred_version - sendcmpct.announce = False - test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) - - if old_node is not None: - # Verify that a peer using an older protocol version can receive - # announcements from this node. - sendcmpct.version = preferred_version-1 - sendcmpct.announce = True - old_node.send_and_ping(sendcmpct) - # Header sync - old_node.request_headers_and_sync(locator=[tip]) - check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message) - - # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last. 
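-    # (A prefilled transaction index past the end of the block can't be valid
-    #  for any block, so bitcoind treats the message as malformed and drops
-    #  the connection.)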
-    def test_invalid_cmpctblock_message(self):
-        self.nodes[0].generate(101)
-        block = self.build_block_on_tip(self.nodes[0])
-
-        cmpct_block = P2PHeaderAndShortIDs()
-        cmpct_block.header = CBlockHeader(block)
-        cmpct_block.prefilled_txn_length = 1
-        # This index will be too high
-        prefilled_txn = PrefilledTransaction(1, block.vtx[0])
-        cmpct_block.prefilled_txn = [prefilled_txn]
-        self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
-        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
-
-    # Compare the generated shortids to what we expect based on BIP 152, given
-    # bitcoind's choice of nonce.
-    def test_compactblock_construction(self, node, test_node, version, use_witness_address):
-        # Generate a bunch of transactions.
-        node.generate(101)
-        num_transactions = 25
-        address = node.getnewaddress()
-        if use_witness_address:
-            # Want at least one segwit spend, so move all funds to
-            # a witness address.
-            address = node.addwitnessaddress(address)
-            value_to_send = node.getbalance()
-            node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
-            node.generate(1)
-
-        segwit_tx_generated = False
-        for i in range(num_transactions):
-            txid = node.sendtoaddress(address, 0.1)
-            hex_tx = node.gettransaction(txid)["hex"]
-            tx = FromHex(CTransaction(), hex_tx)
-            if not tx.wit.is_null():
-                segwit_tx_generated = True
-
-        if use_witness_address:
-            assert(segwit_tx_generated)  # check that our test is not broken
-
-        # Wait until we've seen the block announcement for the resulting tip
-        tip = int(node.getbestblockhash(), 16)
-        test_node.wait_for_block_announcement(tip)
-
-        # Make sure we will receive a fast-announce compact block
-        self.request_cb_announcements(test_node, node, version)
-
-        # Now mine a block, and look at the resulting compact block.
-        test_node.clear_block_announcement()
-        block_hash = int(node.generate(1)[0], 16)
-
-        # Store the raw block in our internal format. ("%064x" zero-pads the
-        # hash to the 64 hex digits getblock expects; "%02x" would drop
-        # leading zeroes.)
-        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
-        for tx in block.vtx:
-            tx.calc_sha256()
-        block.rehash()
-
-        # Wait until the block was announced (via compact blocks)
-        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
-
-        # Now fetch and check the compact block
-        header_and_shortids = None
-        with mininode_lock:
-            assert("cmpctblock" in test_node.last_message)
-            # Convert the on-the-wire representation to absolute indexes
-            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
-        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
-
-        # Now fetch the compact block using a normal non-announce getdata
-        with mininode_lock:
-            test_node.clear_block_announcement()
-            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
-            test_node.send_message(msg_getdata([inv]))
-
-        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
-
-        # Now fetch and check the compact block
-        header_and_shortids = None
-        with mininode_lock:
-            assert("cmpctblock" in test_node.last_message)
-            # Convert the on-the-wire representation to absolute indexes
-            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
-        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
-
-    def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
-        # Check that we got the right block!
- header_and_shortids.header.calc_sha256() - assert_equal(header_and_shortids.header.sha256, block_hash) - - # Make sure the prefilled_txn appears to have included the coinbase - assert(len(header_and_shortids.prefilled_txn) >= 1) - assert_equal(header_and_shortids.prefilled_txn[0].index, 0) - - # Check that all prefilled_txn entries match what's in the block. - for entry in header_and_shortids.prefilled_txn: - entry.tx.calc_sha256() - # This checks the non-witness parts of the tx agree - assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) - - # And this checks the witness - wtxid = entry.tx.calc_sha256(True) - if version == 2: - assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True)) - else: - # Shouldn't have received a witness - assert(entry.tx.wit.is_null()) - - # Check that the cmpctblock message announced all the transactions. - assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) - - # And now check that all the shortids are as expected as well. - # Determine the siphash keys to use. - [k0, k1] = header_and_shortids.get_siphash_keys() - - index = 0 - while index < len(block.vtx): - if (len(header_and_shortids.prefilled_txn) > 0 and - header_and_shortids.prefilled_txn[0].index == index): - # Already checked prefilled transactions above - header_and_shortids.prefilled_txn.pop(0) - else: - tx_hash = block.vtx[index].sha256 - if version == 2: - tx_hash = block.vtx[index].calc_sha256(True) - shortid = calculate_shortid(k0, k1, tx_hash) - assert_equal(shortid, header_and_shortids.shortids[0]) - header_and_shortids.shortids.pop(0) - index += 1 - - # Test that bitcoind requests compact blocks when we announce new blocks - # via header or inv, and that responding to getblocktxn causes the block - # to be successfully reconstructed. - # Post-segwit: upgraded nodes would only make this request of cb-version-2, - # NODE_WITNESS peers. Unupgraded nodes would still make this request of - # any cb-version-1-supporting peer. - def test_compactblock_requests(self, node, test_node, version, segwit): - # Try announcing a block with an inv or header, expect a compactblock - # request - for announce in ["inv", "header"]: - block = self.build_block_on_tip(node, segwit=segwit) - with mininode_lock: - test_node.last_message.pop("getdata", None) - - if announce == "inv": - test_node.send_message(msg_inv([CInv(2, block.sha256)])) - wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock) - test_node.send_header_for_blocks([block]) - else: - test_node.send_header_for_blocks([block]) - wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock) - assert_equal(len(test_node.last_message["getdata"].inv), 1) - assert_equal(test_node.last_message["getdata"].inv[0].type, 4) - assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) - - # Send back a compactblock message that omits the coinbase - comp_block = HeaderAndShortIDs() - comp_block.header = CBlockHeader(block) - comp_block.nonce = 0 - [k0, k1] = comp_block.get_siphash_keys() - coinbase_hash = block.vtx[0].sha256 - if version == 2: - coinbase_hash = block.vtx[0].calc_sha256(True) - comp_block.shortids = [ - calculate_shortid(k0, k1, coinbase_hash) ] - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) - # Expect a getblocktxn message. 
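-            # (Reconstruction can't succeed here: the only shortid refers to
-            #  the coinbase, which is never in the mempool.)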
- with mininode_lock: - assert("getblocktxn" in test_node.last_message) - absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() - assert_equal(absolute_indexes, [0]) # should be a coinbase request - - # Send the coinbase, and verify that the tip advances. - if version == 2: - msg = msg_witness_blocktxn() - else: - msg = msg_blocktxn() - msg.block_transactions.blockhash = block.sha256 - msg.block_transactions.transactions = [block.vtx[0]] - test_node.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - # Create a chain of transactions from given utxo, and add to a new block. - def build_block_with_transactions(self, node, utxo, num_transactions): - block = self.build_block_on_tip(node) - - for i in range(num_transactions): - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) - tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) - tx.rehash() - utxo = [tx.sha256, 0, tx.vout[0].nValue] - block.vtx.append(tx) - - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - return block - - # Test that we only receive getblocktxn requests for transactions that the - # node needs, and that responding to them causes the block to be - # reconstructed. - def test_getblocktxn_requests(self, node, test_node, version): - with_witness = (version==2) - - def test_getblocktxn_response(compact_block, peer, expected_result): - msg = msg_cmpctblock(compact_block.to_p2p()) - peer.send_and_ping(msg) - with mininode_lock: - assert("getblocktxn" in peer.last_message) - absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute() - assert_equal(absolute_indexes, expected_result) - - def test_tip_after_message(node, peer, msg, tip): - peer.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), tip) - - # First try announcing compactblocks that won't reconstruct, and verify - # that we receive getblocktxn messages back. - utxo = self.utxos.pop(0) - - block = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block, use_witness=with_witness) - - test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) - - msg_bt = msg_blocktxn() - if with_witness: - msg_bt = msg_witness_blocktxn() # serialize with witnesses - msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) - test_tip_after_message(node, test_node, msg_bt, block.sha256) - - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - - # Now try interspersing the prefilled transactions - comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness) - test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) - msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) - test_tip_after_message(node, test_node, msg_bt, block.sha256) - - # Now try giving one transaction ahead of time. - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - test_node.send_and_ping(msg_tx(block.vtx[1])) - assert(block.vtx[1].hash in node.getrawmempool()) - - # Prefill 4 out of the 6 transactions, and verify that only the one - # that was not in the mempool is requested. 
- comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness) - test_getblocktxn_response(comp_block, test_node, [5]) - - msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]]) - test_tip_after_message(node, test_node, msg_bt, block.sha256) - - # Now provide all transactions to the node before the block is - # announced and verify reconstruction happens immediately. - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 10) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - for tx in block.vtx[1:]: - test_node.send_message(msg_tx(tx)) - test_node.sync_with_ping() - # Make sure all transactions were accepted. - mempool = node.getrawmempool() - for tx in block.vtx[1:]: - assert(tx.hash in mempool) - - # Clear out last request. - with mininode_lock: - test_node.last_message.pop("getblocktxn", None) - - # Send compact block - comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness) - test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) - with mininode_lock: - # Shouldn't have gotten a request for any transaction - assert("getblocktxn" not in test_node.last_message) - - # Incorrectly responding to a getblocktxn shouldn't cause the block to be - # permanently failed. - def test_incorrect_blocktxn_response(self, node, test_node, version): - if (len(self.utxos) == 0): - self.make_utxos() - utxo = self.utxos.pop(0) - - block = self.build_block_with_transactions(node, utxo, 10) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - # Relay the first 5 transactions from the block in advance - for tx in block.vtx[1:6]: - test_node.send_message(msg_tx(tx)) - test_node.sync_with_ping() - # Make sure all transactions were accepted. - mempool = node.getrawmempool() - for tx in block.vtx[1:6]: - assert(tx.hash in mempool) - - # Send compact block - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2)) - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - absolute_indexes = [] - with mininode_lock: - assert("getblocktxn" in test_node.last_message) - absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() - assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) - - # Now give an incorrect response. - # Note that it's possible for bitcoind to be smart enough to know we're - # lying, since it could check to see if the shortid matches what we're - # sending, and eg disconnect us for misbehavior. If that behavior - # change were made, we could just modify this test by having a - # different peer provide the block further down, so that we're still - # verifying that the block isn't marked bad permanently. This is good - # enough for now. 
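-        # The response below repeats vtx[5] and omits vtx[6] entirely, so
-        # reconstruction must fail.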
- msg = msg_blocktxn() - if version==2: - msg = msg_witness_blocktxn() - msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:]) - test_node.send_and_ping(msg) - - # Tip should not have updated - assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) - - # We should receive a getdata request - wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock) - assert_equal(len(test_node.last_message["getdata"].inv), 1) - assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG) - assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) - - # Deliver the block - if version==2: - test_node.send_and_ping(msg_witness_block(block)) - else: - test_node.send_and_ping(msg_block(block)) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - def test_getblocktxn_handler(self, node, test_node, version): - # bitcoind will not send blocktxn responses for blocks whose height is - # more than 10 blocks deep. - MAX_GETBLOCKTXN_DEPTH = 10 - chain_height = node.getblockcount() - current_height = chain_height - while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): - block_hash = node.getblockhash(current_height) - block = FromHex(CBlock(), node.getblock(block_hash, False)) - - msg = msg_getblocktxn() - msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) - num_to_request = random.randint(1, len(block.vtx)) - msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request))) - test_node.send_message(msg) - wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock) - - [tx.calc_sha256() for tx in block.vtx] - with mininode_lock: - assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16)) - all_indices = msg.block_txn_request.to_absolute() - for index in all_indices: - tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0) - tx.calc_sha256() - assert_equal(tx.sha256, block.vtx[index].sha256) - if version == 1: - # Witnesses should have been stripped - assert(tx.wit.is_null()) - else: - # Check that the witness matches - assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) - test_node.last_message.pop("blocktxn", None) - current_height -= 1 - - # Next request should send a full block response, as we're past the - # allowed depth for a blocktxn response. - block_hash = node.getblockhash(current_height) - msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) - with mininode_lock: - test_node.last_message.pop("block", None) - test_node.last_message.pop("blocktxn", None) - test_node.send_and_ping(msg) - with mininode_lock: - test_node.last_message["block"].block.calc_sha256() - assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16)) - assert "blocktxn" not in test_node.last_message - - def test_compactblocks_not_at_tip(self, node, test_node): - # Test that requesting old compactblocks doesn't work. 
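-        # Beyond this depth bitcoind answers a compact block getdata with a
-        # full block instead.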
- MAX_CMPCTBLOCK_DEPTH = 5 - new_blocks = [] - for i in range(MAX_CMPCTBLOCK_DEPTH + 1): - test_node.clear_block_announcement() - new_blocks.append(node.generate(1)[0]) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) - - test_node.clear_block_announcement() - test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) - - test_node.clear_block_announcement() - node.generate(1) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) - test_node.clear_block_announcement() - with mininode_lock: - test_node.last_message.pop("block", None) - test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock) - with mininode_lock: - test_node.last_message["block"].block.calc_sha256() - assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) - - # Generate an old compactblock, and verify that it's not accepted. - cur_height = node.getblockcount() - hashPrevBlock = int(node.getblockhash(cur_height-5), 16) - block = self.build_block_on_tip(node) - block.hashPrevBlock = hashPrevBlock - block.solve() - - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block) - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - - tips = node.getchaintips() - found = False - for x in tips: - if x["hash"] == block.hash: - assert_equal(x["status"], "headers-only") - found = True - break - assert(found) - - # Requesting this block via getblocktxn should silently fail - # (to avoid fingerprinting attacks). - msg = msg_getblocktxn() - msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) - with mininode_lock: - test_node.last_message.pop("blocktxn", None) - test_node.send_and_ping(msg) - with mininode_lock: - assert "blocktxn" not in test_node.last_message - - def activate_segwit(self, node): - node.generate(144*3) - assert_equal(get_bip9_status(node, "segwit")["status"], 'active') - - def test_end_to_end_block_relay(self, node, listeners): - utxo = self.utxos.pop(0) - - block = self.build_block_with_transactions(node, utxo, 10) - - [l.clear_block_announcement() for l in listeners] - - # ToHex() won't serialize with witness, but this block has no witnesses - # anyway. TODO: repeat this test with witness tx's to a segwit node. - node.submitblock(ToHex(block)) - - for l in listeners: - wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) - with mininode_lock: - for l in listeners: - assert "cmpctblock" in l.last_message - l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() - assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) - - # Test that we don't get disconnected if we relay a compact block with valid header, - # but invalid transactions. - def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit): - assert(len(self.utxos)) - utxo = self.utxos[0] - - block = self.build_block_with_transactions(node, utxo, 5) - del block.vtx[3] - block.hashMerkleRoot = block.calc_merkle_root() - if use_segwit: - # If we're testing with segwit, also drop the coinbase witness, - # but include the witness commitment. - add_witness_commitment(block) - block.vtx[0].wit.vtxinwit = [] - block.solve() - - # Now send the compact block with all transactions prefilled, and - # verify that we don't get disconnected. 
- comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit) - msg = msg_cmpctblock(comp_block.to_p2p()) - test_node.send_and_ping(msg) - - # Check that the tip didn't advance - assert(int(node.getbestblockhash(), 16) != block.sha256) - test_node.sync_with_ping() - - # Helper for enabling cb announcements - # Send the sendcmpct request and sync headers - def request_cb_announcements(self, peer, node, version): - tip = node.getbestblockhash() - peer.get_headers(locator=[int(tip, 16)], hashstop=0) - - msg = msg_sendcmpct() - msg.version = version - msg.announce = True - peer.send_and_ping(msg) - - def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): - assert(len(self.utxos)) - - def announce_cmpct_block(node, peer): - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 5) - - cmpct_block = HeaderAndShortIDs() - cmpct_block.initialize_from_block(block) - msg = msg_cmpctblock(cmpct_block.to_p2p()) - peer.send_and_ping(msg) - with mininode_lock: - assert "getblocktxn" in peer.last_message - return block, cmpct_block - - block, cmpct_block = announce_cmpct_block(node, stalling_peer) - - for tx in block.vtx[1:]: - delivery_peer.send_message(msg_tx(tx)) - delivery_peer.sync_with_ping() - mempool = node.getrawmempool() - for tx in block.vtx[1:]: - assert(tx.hash in mempool) - - delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - - # Now test that delivering an invalid compact block won't break relay - - block, cmpct_block = announce_cmpct_block(node, stalling_peer) - for tx in block.vtx[1:]: - delivery_peer.send_message(msg_tx(tx)) - delivery_peer.sync_with_ping() - - cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ] - cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)] - - cmpct_block.use_witness = True - delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) - assert(int(node.getbestblockhash(), 16) != block.sha256) - - msg = msg_blocktxn() - msg.block_transactions.blockhash = block.sha256 - msg.block_transactions.transactions = block.vtx[1:] - stalling_peer.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - def run_test(self): - # Setup the p2p connections and start up the network thread. - self.test_node = self.nodes[0].add_p2p_connection(TestNode()) - self.segwit_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) - self.old_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK) - - network_thread_start() - - self.test_node.wait_for_verack() - - # We will need UTXOs to construct transactions in later tests. - self.make_utxos() - - self.log.info("Running tests, pre-segwit activation:") - - self.log.info("Testing SENDCMPCT p2p message... ") - self.test_sendcmpct(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) - self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node) - sync_blocks(self.nodes) - - self.log.info("Testing compactblock construction...") - self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False) - sync_blocks(self.nodes) - self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False) - sync_blocks(self.nodes) - - self.log.info("Testing compactblock requests... 
") - self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False) - sync_blocks(self.nodes) - self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False) - sync_blocks(self.nodes) - - self.log.info("Testing getblocktxn requests...") - self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) - self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) - sync_blocks(self.nodes) - - self.log.info("Testing getblocktxn handler...") - self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) - self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) - self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) - sync_blocks(self.nodes) - - self.log.info("Testing compactblock requests/announcements not at chain tip...") - self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) - sync_blocks(self.nodes) - self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node) - self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) - sync_blocks(self.nodes) - - self.log.info("Testing handling of incorrect blocktxn responses...") - self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) - sync_blocks(self.nodes) - self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2) - sync_blocks(self.nodes) - - # End-to-end block relay tests - self.log.info("Testing end-to-end block relay...") - self.request_cb_announcements(self.test_node, self.nodes[0], 1) - self.request_cb_announcements(self.old_node, self.nodes[1], 1) - self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) - self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node]) - self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) - - self.log.info("Testing handling of invalid compact blocks...") - self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False) - self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False) - self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False) - - self.log.info("Testing reconstructing compact blocks from all peers...") - self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node) - sync_blocks(self.nodes) - - # Advance to segwit activation - self.log.info("Advancing to segwit activation") - self.activate_segwit(self.nodes[1]) - self.log.info("Running tests, post-segwit activation...") - - self.log.info("Testing compactblock construction...") - self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True) - self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True) - sync_blocks(self.nodes) - - self.log.info("Testing compactblock requests (unupgraded node)... ") - self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True) - - self.log.info("Testing getblocktxn requests (unupgraded node)...") - self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) - - # Need to manually sync node0 and node1, because post-segwit activation, - # node1 will not download blocks from node0. 
- self.log.info("Syncing nodes...") - assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()) - while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()): - block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1) - self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False)) - assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) - - self.log.info("Testing compactblock requests (segwit node)... ") - self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True) - - self.log.info("Testing getblocktxn requests (segwit node)...") - self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) - sync_blocks(self.nodes) - - self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...") - self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) - self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) - - # Test that if we submitblock to node1, we'll get a compact block - # announcement to all peers. - # (Post-segwit activation, blocks won't propagate from node0 to node1 - # automatically, so don't bother testing a block announced to node0.) - self.log.info("Testing end-to-end block relay...") - self.request_cb_announcements(self.test_node, self.nodes[0], 1) - self.request_cb_announcements(self.old_node, self.nodes[1], 1) - self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) - self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) - - self.log.info("Testing handling of invalid compact blocks...") - self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False) - self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True) - self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True) - - self.log.info("Testing invalid index in cmpctblock message...") - self.test_invalid_cmpctblock_message() - - -if __name__ == '__main__': - CompactBlocksTest().main() diff --git a/test/functional/p2p-feefilter.py b/test/functional/p2p-feefilter.py deleted file mode 100755 index 47d9c55160..0000000000 --- a/test/functional/p2p-feefilter.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test processing of feefilter messages.""" - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -import time - - -def hashToHex(hash): - return format(hash, '064x') - -# Wait up to 60 secs to see if the testnode has received all the expected invs -def allInvsMatch(invsExpected, testnode): - for x in range(60): - with mininode_lock: - if (sorted(invsExpected) == sorted(testnode.txinvs)): - return True - time.sleep(1) - return False - -class TestNode(P2PInterface): - def __init__(self): - super().__init__() - self.txinvs = [] - - def on_inv(self, message): - for i in message.inv: - if (i.type == 1): - self.txinvs.append(hashToHex(i.hash)) - - def clear_invs(self): - with mininode_lock: - self.txinvs = [] - -class FeeFilterTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - - def run_test(self): - node1 = self.nodes[1] - node0 = self.nodes[0] - # Get out of IBD - node1.generate(1) - sync_blocks(self.nodes) - - # Setup the p2p connections and start up the network thread. 
- self.nodes[0].add_p2p_connection(TestNode()) - network_thread_start() - self.nodes[0].p2p.wait_for_verack() - - # Test that invs are received for all txs at feerate of 20 sat/byte - node1.settxfee(Decimal("0.00020000")) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, self.nodes[0].p2p)) - self.nodes[0].p2p.clear_invs() - - # Set a filter of 15 sat/byte - self.nodes[0].p2p.send_and_ping(msg_feefilter(15000)) - - # Test that txs are still being received (paying 20 sat/byte) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, self.nodes[0].p2p)) - self.nodes[0].p2p.clear_invs() - - # Change tx fee rate to 10 sat/byte and check that they are no longer received - node1.settxfee(Decimal("0.00010000")) - [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - sync_mempools(self.nodes) # must be sure node 0 has received all txs - - # Send one transaction from node0 that should be received, so that - # we can sync the test on receipt (if node1's txs were relayed, they'd - # be received by the time this node0 tx is received). This is - # unfortunately reliant on the current relay behavior where we batch up - # to 35 entries in an inv, which means that when this next transaction - # is eligible for relay, the prior transactions from node1 are eligible - # as well. - node0.settxfee(Decimal("0.00020000")) - txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] - assert(allInvsMatch(txids, self.nodes[0].p2p)) - self.nodes[0].p2p.clear_invs() - - # Remove fee filter and check that txs are received again - self.nodes[0].p2p.send_and_ping(msg_feefilter(0)) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, self.nodes[0].p2p)) - self.nodes[0].p2p.clear_invs() - -if __name__ == '__main__': - FeeFilterTest().main() diff --git a/test/functional/p2p-fingerprint.py b/test/functional/p2p-fingerprint.py deleted file mode 100755 index 93ef73e25e..0000000000 --- a/test/functional/p2p-fingerprint.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test various fingerprinting protections. - -If a stale block more than a month old or its header is requested by a peer, -the node should pretend that it does not have it to avoid fingerprinting. 
-""" - -import time - -from test_framework.blocktools import (create_block, create_coinbase) -from test_framework.mininode import ( - CInv, - P2PInterface, - msg_headers, - msg_block, - msg_getdata, - msg_getheaders, - network_thread_start, - wait_until, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, -) - -class P2PFingerprintTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - # Build a chain of blocks on top of given one - def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time): - blocks = [] - for _ in range(nblocks): - coinbase = create_coinbase(prev_height + 1) - block_time = prev_median_time + 1 - block = create_block(int(prev_hash, 16), coinbase, block_time) - block.solve() - - blocks.append(block) - prev_hash = block.hash - prev_height += 1 - prev_median_time = block_time - return blocks - - # Send a getdata request for a given block hash - def send_block_request(self, block_hash, node): - msg = msg_getdata() - msg.inv.append(CInv(2, block_hash)) # 2 == "Block" - node.send_message(msg) - - # Send a getheaders request for a given single block hash - def send_header_request(self, block_hash, node): - msg = msg_getheaders() - msg.hashstop = block_hash - node.send_message(msg) - - # Check whether last block received from node has a given hash - def last_block_equals(self, expected_hash, node): - block_msg = node.last_message.get("block") - return block_msg and block_msg.block.rehash() == expected_hash - - # Check whether last block header received from node has a given hash - def last_header_equals(self, expected_hash, node): - headers_msg = node.last_message.get("headers") - return (headers_msg and - headers_msg.headers and - headers_msg.headers[0].rehash() == expected_hash) - - # Checks that stale blocks timestamped more than a month ago are not served - # by the node while recent stale blocks and old active chain blocks are. - # This does not currently test that stale blocks timestamped within the - # last month but that have over a month's worth of work are also withheld. 
- def run_test(self): - node0 = self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() - node0.wait_for_verack() - - # Set node time to 60 days ago - self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60) - - # Generate a chain of 10 blocks - block_hashes = self.nodes[0].generate(nblocks=10) - - # Create longer chain starting 2 blocks before current tip - height = len(block_hashes) - 2 - block_hash = block_hashes[height - 1] - block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1 - new_blocks = self.build_chain(5, block_hash, height, block_time) - - # Force reorg to a longer chain - node0.send_message(msg_headers(new_blocks)) - node0.wait_for_getdata() - for block in new_blocks: - node0.send_and_ping(msg_block(block)) - - # Check that reorg succeeded - assert_equal(self.nodes[0].getblockcount(), 13) - - stale_hash = int(block_hashes[-1], 16) - - # Check that getdata request for stale block succeeds - self.send_block_request(stale_hash, node0) - test_function = lambda: self.last_block_equals(stale_hash, node0) - wait_until(test_function, timeout=3) - - # Check that getheaders request for stale block header succeeds - self.send_header_request(stale_hash, node0) - test_function = lambda: self.last_header_equals(stale_hash, node0) - wait_until(test_function, timeout=3) - - # Longest chain is extended so stale is much older than chain tip - self.nodes[0].setmocktime(0) - tip = self.nodes[0].generate(nblocks=1)[0] - assert_equal(self.nodes[0].getblockcount(), 14) - - # Send getdata & getheaders to refresh the last received block/headers message - block_hash = int(tip, 16) - self.send_block_request(block_hash, node0) - self.send_header_request(block_hash, node0) - node0.sync_with_ping() - - # Request for very old stale block should now fail - self.send_block_request(stale_hash, node0) - time.sleep(3) - assert not self.last_block_equals(stale_hash, node0) - - # Request for very old stale block header should now fail - self.send_header_request(stale_hash, node0) - time.sleep(3) - assert not self.last_header_equals(stale_hash, node0) - - # Verify we can fetch very old blocks and headers on the active chain - block_hash = int(block_hashes[2], 16) - self.send_block_request(block_hash, node0) - self.send_header_request(block_hash, node0) - node0.sync_with_ping() - - self.send_block_request(block_hash, node0) - test_function = lambda: self.last_block_equals(block_hash, node0) - wait_until(test_function, timeout=3) - - self.send_header_request(block_hash, node0) - test_function = lambda: self.last_header_equals(block_hash, node0) - wait_until(test_function, timeout=3) - -if __name__ == '__main__': - P2PFingerprintTest().main() diff --git a/test/functional/p2p-leaktests.py b/test/functional/p2p-leaktests.py deleted file mode 100755 index ce4e6e9144..0000000000 --- a/test/functional/p2p-leaktests.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test message sending before handshake completion. - -A node should never send anything other than VERSION/VERACK/REJECT until it's -received a VERACK. - -This test connects to a node and sends it a few messages, trying to entice it -into sending us something it shouldn't. - -Also test that nodes that send unsupported service bits to bitcoind are disconnected -and don't receive a VERACK. 
Unsupported service bits are currently 1 << 5 and -1 << 7 (until August 1st 2018).""" - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -banscore = 10 - -class CLazyNode(P2PInterface): - def __init__(self): - super().__init__() - self.unexpected_msg = False - self.ever_connected = False - - def bad_message(self, message): - self.unexpected_msg = True - self.log.info("should not have received message: %s" % message.command) - - def on_open(self): - self.ever_connected = True - - def on_version(self, message): self.bad_message(message) - def on_verack(self, message): self.bad_message(message) - def on_reject(self, message): self.bad_message(message) - def on_inv(self, message): self.bad_message(message) - def on_addr(self, message): self.bad_message(message) - def on_getdata(self, message): self.bad_message(message) - def on_getblocks(self, message): self.bad_message(message) - def on_tx(self, message): self.bad_message(message) - def on_block(self, message): self.bad_message(message) - def on_getaddr(self, message): self.bad_message(message) - def on_headers(self, message): self.bad_message(message) - def on_getheaders(self, message): self.bad_message(message) - def on_ping(self, message): self.bad_message(message) - def on_mempool(self, message): self.bad_message(message) - def on_pong(self, message): self.bad_message(message) - def on_feefilter(self, message): self.bad_message(message) - def on_sendheaders(self, message): self.bad_message(message) - def on_sendcmpct(self, message): self.bad_message(message) - def on_cmpctblock(self, message): self.bad_message(message) - def on_getblocktxn(self, message): self.bad_message(message) - def on_blocktxn(self, message): self.bad_message(message) - -# Node that never sends a version. We'll use this to send a bunch of messages -# anyway, and eventually get disconnected. -class CNodeNoVersionBan(CLazyNode): - # send a bunch of veracks without sending a message. This should get us disconnected. - # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes - def on_open(self): - super().on_open() - for i in range(banscore): - self.send_message(msg_verack()) - - def on_reject(self, message): pass - -# Node that never sends a version. This one just sits idle and hopes to receive -# any message (it shouldn't!) -class CNodeNoVersionIdle(CLazyNode): - def __init__(self): - super().__init__() - -# Node that sends a version but not a verack. -class CNodeNoVerackIdle(CLazyNode): - def __init__(self): - self.version_received = False - super().__init__() - - def on_reject(self, message): pass - def on_verack(self, message): pass - # When version is received, don't reply with a verack. Instead, see if the - # node will give us a message that it shouldn't. This is not an exhaustive - # list! 
- def on_version(self, message): - self.version_received = True - self.send_message(msg_ping()) - self.send_message(msg_getaddr()) - -class P2PLeakTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-banscore='+str(banscore)]] - - def run_test(self): - self.nodes[0].setmocktime(1501545600) # August 1st 2017 - - no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False) - no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False) - no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle()) - unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5) - unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7) - - network_thread_start() - - wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock) - wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock) - wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock) - wait_until(lambda: unsupported_service_bit5_node.ever_connected, timeout=10, lock=mininode_lock) - wait_until(lambda: unsupported_service_bit7_node.ever_connected, timeout=10, lock=mininode_lock) - - # Mine a block and make sure that it's not sent to the connected nodes - self.nodes[0].generate(1) - - #Give the node enough time to possibly leak out a message - time.sleep(5) - - #This node should have been banned - assert no_version_bannode.state != "connected" - - # These nodes should have been disconnected - assert unsupported_service_bit5_node.state != "connected" - assert unsupported_service_bit7_node.state != "connected" - - self.nodes[0].disconnect_p2ps() - - # Wait until all connections are closed and the network thread has terminated - wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0) - network_thread_join() - - # Make sure no unexpected messages came in - assert(no_version_bannode.unexpected_msg == False) - assert(no_version_idlenode.unexpected_msg == False) - assert(no_verack_idlenode.unexpected_msg == False) - assert not unsupported_service_bit5_node.unexpected_msg - assert not unsupported_service_bit7_node.unexpected_msg - - self.log.info("Service bits 5 and 7 are allowed after August 1st 2018") - self.nodes[0].setmocktime(1533168000) # August 2nd 2018 - - allowed_service_bit5_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5) - allowed_service_bit7_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7) - - # Network thread stopped when all previous P2PInterfaces disconnected. Restart it - network_thread_start() - - wait_until(lambda: allowed_service_bit5_node.message_count["verack"], lock=mininode_lock) - wait_until(lambda: allowed_service_bit7_node.message_count["verack"], lock=mininode_lock) - -if __name__ == '__main__': - P2PLeakTest().main() diff --git a/test/functional/p2p-mempool.py b/test/functional/p2p-mempool.py deleted file mode 100755 index 485a8af3d0..0000000000 --- a/test/functional/p2p-mempool.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test p2p mempool message. - -Test that nodes are disconnected if they send mempool messages when bloom -filters are not enabled. -""" - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class P2PMempoolTests(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [["-peerbloomfilters=0"]] - - def run_test(self): - # Add a p2p connection - self.nodes[0].add_p2p_connection(P2PInterface()) - network_thread_start() - self.nodes[0].p2p.wait_for_verack() - - #request mempool - self.nodes[0].p2p.send_message(msg_mempool()) - self.nodes[0].p2p.wait_for_disconnect() - - #mininode must be disconnected at this point - assert_equal(len(self.nodes[0].getpeerinfo()), 0) - -if __name__ == '__main__': - P2PMempoolTests().main() diff --git a/test/functional/p2p-segwit.py b/test/functional/p2p-segwit.py deleted file mode 100755 index 20e4805df0..0000000000 --- a/test/functional/p2p-segwit.py +++ /dev/null @@ -1,1952 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test segwit transactions and blocks on P2P network.""" - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.script import * -from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER -from test_framework.key import CECKey, CPubKey -import time -import random -from binascii import hexlify - -# The versionbit bit used to signal activation of SegWit -VB_WITNESS_BIT = 1 -VB_PERIOD = 144 -VB_TOP_BITS = 0x20000000 - -MAX_SIGOP_COST = 80000 - - -# Calculate the virtual size of a witness block: -# (base + witness/4) -def get_virtual_size(witness_block): - base_size = len(witness_block.serialize(with_witness=False)) - total_size = len(witness_block.serialize(with_witness=True)) - # the "+3" is so we round up - vsize = int((3*base_size + total_size + 3)/4) - return vsize - -def test_transaction_acceptance(rpc, p2p, tx, with_witness, accepted, reason=None): - """Send a transaction to the node and check that it's accepted to the mempool - - - Submit the transaction over the p2p interface - - use the getrawmempool rpc to check for acceptance.""" - tx_message = msg_tx(tx) - if with_witness: - tx_message = msg_witness_tx(tx) - p2p.send_message(tx_message) - p2p.sync_with_ping() - assert_equal(tx.hash in rpc.getrawmempool(), accepted) - if (reason != None and not accepted): - # Check the rejection reason as well. 
- with mininode_lock: - assert_equal(p2p.last_message["reject"].reason, reason) - -def test_witness_block(rpc, p2p, block, accepted, with_witness=True): - """Send a block to the node and check that it's accepted - - - Submit the block over the p2p interface - - use the getbestblockhash rpc to check for acceptance.""" - if with_witness: - p2p.send_message(msg_witness_block(block)) - else: - p2p.send_message(msg_block(block)) - p2p.sync_with_ping() - assert_equal(rpc.getbestblockhash() == block.hash, accepted) - -class TestNode(P2PInterface): - def __init__(self): - super().__init__() - self.getdataset = set() - - def on_getdata(self, message): - for inv in message.inv: - self.getdataset.add(inv.hash) - - def announce_tx_and_wait_for_getdata(self, tx, timeout=60): - with mininode_lock: - self.last_message.pop("getdata", None) - self.send_message(msg_inv(inv=[CInv(1, tx.sha256)])) - self.wait_for_getdata(timeout) - - def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): - with mininode_lock: - self.last_message.pop("getdata", None) - self.last_message.pop("getheaders", None) - msg = msg_headers() - msg.headers = [ CBlockHeader(block) ] - if use_header: - self.send_message(msg) - else: - self.send_message(msg_inv(inv=[CInv(2, block.sha256)])) - self.wait_for_getheaders() - self.send_message(msg) - self.wait_for_getdata() - - def request_block(self, blockhash, inv_type, timeout=60): - with mininode_lock: - self.last_message.pop("block", None) - self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) - self.wait_for_block(blockhash, timeout) - return self.last_message["block"].block - -# Used to keep track of anyone-can-spend outputs that we can use in the tests -class UTXO(): - def __init__(self, sha256, n, nValue): - self.sha256 = sha256 - self.n = n - self.nValue = nValue - -# Helper for getting the script associated with a P2PKH -def GetP2PKHScript(pubkeyhash): - return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]) - -# Add signature for a P2PK witness program. -def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key): - tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value) - signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1') - txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script] - txTo.rehash() - - -class SegWitTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. - self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]] - - def setup_network(self): - self.setup_nodes() - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[0], 2) - self.sync_all() - - ''' Helpers ''' - # Build a block on top of node0's tip. - def build_next_block(self, nVersion=4): - tip = self.nodes[0].getbestblockhash() - height = self.nodes[0].getblockcount() + 1 - block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 - block = create_block(int(tip, 16), create_coinbase(height), block_time) - block.nVersion = nVersion - block.rehash() - return block - - # Adds list of transactions to block, adds witness commitment, then solves. 
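- # (Per BIP 141 the commitment is an OP_RETURN output in the coinbase that - # commits to the witness merkle root hashed together with a nonce carried - # in the coinbase's input witness; add_witness_commitment() builds both.)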
- def update_witness_block_with_transactions(self, block, tx_list, nonce=0): - block.vtx.extend(tx_list) - add_witness_commitment(block, nonce) - block.solve() - return - - ''' Individual tests ''' - def test_witness_services(self): - self.log.info("Verifying NODE_WITNESS service bit") - assert((self.test_node.nServices & NODE_WITNESS) != 0) - - - # See if sending a regular transaction works, and create a utxo - # to use in later tests. - def test_non_witness_transaction(self): - # Mine a block with an anyone-can-spend coinbase, - # let it mature, then try to spend it. - self.log.info("Testing non-witness transaction") - block = self.build_next_block(nVersion=1) - block.solve() - self.test_node.send_message(msg_block(block)) - self.test_node.sync_with_ping() # make sure the block was processed - txid = block.vtx[0].sha256 - - self.nodes[0].generate(99) # let the block mature - - # Create a transaction that spends the coinbase - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(txid, 0), b"")) - tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE]))) - tx.calc_sha256() - - # Check that serializing it with or without witness is the same - # This is a sanity check of our testing framework. - assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize()) - - self.test_node.send_message(msg_witness_tx(tx)) - self.test_node.sync_with_ping() # make sure the tx was processed - assert(tx.hash in self.nodes[0].getrawmempool()) - # Save this transaction for later - self.utxo.append(UTXO(tx.sha256, 0, 49*100000000)) - self.nodes[0].generate(1) - - - # Verify that blocks with witnesses are rejected before activation. - def test_unnecessary_witness_before_segwit_activation(self): - self.log.info("Testing behavior of unnecessary witnesses") - # For now, rely on earlier tests to have created at least one utxo for - # us to use - assert(len(self.utxo) > 0) - assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active') - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) - tx.wit.vtxinwit.append(CTxInWitness()) - tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])] - - # Verify the hash with witness differs from the txid - # (otherwise our testing framework must be broken!) - tx.rehash() - assert(tx.sha256 != tx.calc_sha256(with_witness=True)) - - # Construct a segwit-signaling block that includes the transaction. - block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT))) - self.update_witness_block_with_transactions(block, [tx]) - # Sending witness data before activation is not allowed (anti-spam - # rule). - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - # TODO: fix synchronization so we can test reject reason - # Right now, bitcoind delays sending reject messages for blocks - # until the future, making synchronization here difficult. - #assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness") - - # But it should not be permanently marked bad... - # Resend without witness information. - self.test_node.send_message(msg_block(block)) - self.test_node.sync_with_ping() - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - sync_blocks(self.nodes) - - # Create a p2sh output -- this is so we can pass the standardness - # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped - # in P2SH). 
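- # (P2SH recap: the scriptPubKey is OP_HASH160 <hash160(redeem script)> - # OP_EQUAL, and the spender reveals the serialized redeem script in its - # scriptSig, as the later CScript([p2sh_program]) inputs do.)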
- p2sh_program = CScript([OP_TRUE]) - p2sh_pubkey = hash160(p2sh_program) - scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) - - # Now check that unnecessary witnesses can't be used to blind a node - # to a transaction, eg by violating standardness checks. - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey)) - tx2.rehash() - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True) - self.nodes[0].generate(1) - sync_blocks(self.nodes) - - # We'll add an unnecessary witness to this transaction that would cause - # it to be non-standard, to test that violating policy with a witness before - # segwit activation doesn't blind a node to a transaction. Transactions - # rejected for having a witness before segwit activation shouldn't be added - # to the rejection cache. - tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program]))) - tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey)) - tx3.wit.vtxinwit.append(CTxInWitness()) - tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000] - tx3.rehash() - # Note that this should be rejected for the premature witness reason, - # rather than a policy check, since segwit hasn't activated yet. - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'no-witness-yet') - - # If we send without witness, it should be accepted. - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, False, True) - - # Now create a new anyone-can-spend utxo for the next test. - tx4 = CTransaction() - tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program]))) - tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE]))) - tx4.rehash() - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True) - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, False, True) - - self.nodes[0].generate(1) - sync_blocks(self.nodes) - - # Update our utxo list; we spent the first entry. - self.utxo.pop(0) - self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue)) - - - # Mine enough blocks for segwit's vb state to be 'started'. - def advance_to_segwit_started(self): - height = self.nodes[0].getblockcount() - # Will need to rewrite the tests here if we are past the first period - assert(height < VB_PERIOD - 1) - # Genesis block is 'defined'. - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined') - # Advance to end of period, status should now be 'started' - self.nodes[0].generate(VB_PERIOD-height-1) - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') - - # Mine enough blocks to lock in segwit, but don't activate. - # TODO: we could verify that lockin only happens at the right threshold of - # signalling blocks, rather than just at the right period boundary. - def advance_to_segwit_lockin(self): - height = self.nodes[0].getblockcount() - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') - # Advance to end of period, and verify lock-in happens at the end - self.nodes[0].generate(VB_PERIOD-1) - height = self.nodes[0].getblockcount() - assert((height % VB_PERIOD) == VB_PERIOD - 2) - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') - self.nodes[0].generate(1) - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') - - - # Mine enough blocks to activate segwit. 
- # TODO: we could verify that activation only happens at the right threshold - # of signalling blocks, rather than just at the right period boundary. - def advance_to_segwit_active(self): - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') - height = self.nodes[0].getblockcount() - self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2) - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') - self.nodes[0].generate(1) - assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active') - - - # This test can only be run after segwit has activated - def test_witness_commitments(self): - self.log.info("Testing witness commitments") - - # First try a correct witness commitment. - block = self.build_next_block() - add_witness_commitment(block) - block.solve() - - # Test the test -- witness serialization should be different - assert(msg_witness_block(block).serialize() != msg_block(block).serialize()) - - # This empty block should be valid. - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Try to tweak the nonce - block_2 = self.build_next_block() - add_witness_commitment(block_2, nonce=28) - block_2.solve() - - # The commitment should have changed! - assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]) - - # This should also be valid. - test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=True) - - # Now test commitments with actual transactions - assert (len(self.utxo) > 0) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - - # Let's construct a witness program - witness_program = CScript([OP_TRUE]) - witness_hash = sha256(witness_program) - scriptPubKey = CScript([OP_0, witness_hash]) - tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) - tx.rehash() - - # tx2 will spend tx1, and send back to a regular anyone-can-spend address - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] - tx2.rehash() - - block_3 = self.build_next_block() - self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1) - # Add an extra OP_RETURN output that matches the witness commitment template, - # even though it has extra data after the incorrect commitment. - # This block should fail. - block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10]))) - block_3.vtx[0].rehash() - block_3.hashMerkleRoot = block_3.calc_merkle_root() - block_3.rehash() - block_3.solve() - - test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False) - - # Add a different commitment with different nonce, but in the - # right location, and with some funds burned(!). - # This should succeed (nValue shouldn't affect finding the - # witness commitment). - add_witness_commitment(block_3, nonce=0) - block_3.vtx[0].vout[0].nValue -= 1 - block_3.vtx[0].vout[-1].nValue += 1 - block_3.vtx[0].rehash() - block_3.hashMerkleRoot = block_3.calc_merkle_root() - block_3.rehash() - assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns - block_3.solve() - test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=True) - - # Finally test that a block with no witness transactions can - # omit the commitment. 
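- # (BIP 141 only requires the commitment in blocks that carry witness data, - # so block_4 below, having none, must be valid without one.)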
- block_4 = self.build_next_block() - tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) - tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) - tx3.rehash() - block_4.vtx.append(tx3) - block_4.hashMerkleRoot = block_4.calc_merkle_root() - block_4.solve() - test_witness_block(self.nodes[0].rpc, self.test_node, block_4, with_witness=False, accepted=True) - - # Update available utxo's for use in later test. - self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) - - - def test_block_malleability(self): - self.log.info("Testing witness block malleability") - - # Make sure that a block that has too big a virtual size - # because of a too-large coinbase witness is not permanently - # marked bad. - block = self.build_next_block() - add_witness_commitment(block) - block.solve() - - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000) - assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE) - - # We can't send over the p2p network, because this is too big to relay - # TODO: repeat this test with a block that can be relayed - self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) - - assert(self.nodes[0].getbestblockhash() != block.hash) - - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop() - assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE) - self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) - - assert(self.nodes[0].getbestblockhash() == block.hash) - - # Now make sure that malleating the witness nonce doesn't - # result in a block permanently marked bad. - block = self.build_next_block() - add_witness_commitment(block) - block.solve() - - # Change the nonce -- should not cause the block to be permanently - # failed - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ] - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Changing the witness nonce doesn't change the block hash - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ] - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - - def test_witness_block_size(self): - self.log.info("Testing witness block size limit") - # TODO: Test that non-witness carrying blocks can't exceed 1MB - # Skipping this test for now; this is covered in p2p-fullblocktest.py - - # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB. - block = self.build_next_block() - - assert(len(self.utxo) > 0) - - # Create a P2WSH transaction. - # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE. - # This should give us plenty of room to tweak the spending tx's - # virtual size. - NUM_DROPS = 200 # 201 max ops per script! 
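- # (The limit under test: vsize = (3*base_size + total_size + 3) // 4, per - # get_virtual_size() above, so witness bytes are discounted 4x; the loop - # below inflates the witnesses until vsize is exactly MAX_BLOCK_BASE_SIZE+1.)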
- NUM_OUTPUTS = 50 - - witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE]) - witness_hash = uint256_from_str(sha256(witness_program)) - scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)]) - - prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) - value = self.utxo[0].nValue - - parent_tx = CTransaction() - parent_tx.vin.append(CTxIn(prevout, b"")) - child_value = int(value/NUM_OUTPUTS) - for i in range(NUM_OUTPUTS): - parent_tx.vout.append(CTxOut(child_value, scriptPubKey)) - parent_tx.vout[0].nValue -= 50000 - assert(parent_tx.vout[0].nValue > 0) - parent_tx.rehash() - - child_tx = CTransaction() - for i in range(NUM_OUTPUTS): - child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) - child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] - for i in range(NUM_OUTPUTS): - child_tx.wit.vtxinwit.append(CTxInWitness()) - child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program] - child_tx.rehash() - self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) - - vsize = get_virtual_size(block) - additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4 - i = 0 - while additional_bytes > 0: - # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1 - extra_bytes = min(additional_bytes+1, 55) - block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes) - additional_bytes -= extra_bytes - i += 1 - - block.vtx[0].vout.pop() # Remove old commitment - add_witness_commitment(block) - block.solve() - vsize = get_virtual_size(block) - assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1) - # Make sure that our test case would exceed the old max-network-message - # limit - assert(len(block.serialize(True)) > 2*1024*1024) - - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Now resize the second transaction to make the block fit. - cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0]) - block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1) - block.vtx[0].vout.pop() - add_witness_commitment(block) - block.solve() - assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE) - - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Update available utxo's - self.utxo.pop(0) - self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue)) - - - # submitblock will try to add the nonce automatically, so that mining - # software doesn't need to worry about doing so itself. - def test_submit_block(self): - block = self.build_next_block() - - # Try using a custom nonce and then don't supply it. - # This shouldn't possibly work. - add_witness_commitment(block, nonce=1) - block.vtx[0].wit = CTxWitness() # drop the nonce - block.solve() - self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) - assert(self.nodes[0].getbestblockhash() != block.hash) - - # Now redo commitment with the standard nonce, but let bitcoind fill it in. - add_witness_commitment(block, nonce=0) - block.vtx[0].wit = CTxWitness() - block.solve() - self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - # This time, add a tx with non-empty witness, but don't supply - # the commitment. - block_2 = self.build_next_block() - - add_witness_commitment(block_2) - - block_2.solve() - - # Drop commitment and nonce -- submitblock should not fill in. 
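- # (submitblock may fill in the default witness nonce, but it cannot add the - # commitment output itself, since that would change the block hash; with - # both dropped below, nothing can be completed and the block stays invalid.)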
- block_2.vtx[0].vout.pop() - block_2.vtx[0].wit = CTxWitness() - - self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True))) - # Tip should not advance! - assert(self.nodes[0].getbestblockhash() != block_2.hash) - - - # Consensus tests of extra witness data in a transaction. - def test_extra_witness_data(self): - self.log.info("Testing extra witness data in tx") - - assert(len(self.utxo) > 0) - - block = self.build_next_block() - - witness_program = CScript([OP_DROP, OP_TRUE]) - witness_hash = sha256(witness_program) - scriptPubKey = CScript([OP_0, witness_hash]) - - # First try extra witness data on a tx that doesn't require a witness - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey)) - tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output - tx.wit.vtxinwit.append(CTxInWitness()) - tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])] - tx.rehash() - self.update_witness_block_with_transactions(block, [tx]) - - # Extra witness data should not be allowed. - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Try extra signature data. Ok if we're not spending a witness output. - block.vtx[1].wit.vtxinwit = [] - block.vtx[1].vin[0].scriptSig = CScript([OP_0]) - block.vtx[1].rehash() - add_witness_commitment(block) - block.solve() - - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Now try extra witness/signature data on an input that DOES require a - # witness - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness - tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) - tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) - tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ] - tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ] - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx2]) - - # This has extra witness data, so it should fail. - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Now get rid of the extra witness, but add extra scriptSig data - tx2.vin[0].scriptSig = CScript([OP_TRUE]) - tx2.vin[1].scriptSig = CScript([OP_TRUE]) - tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0) - tx2.wit.vtxinwit[1].scriptWitness.stack = [] - tx2.rehash() - add_witness_commitment(block) - block.solve() - - # This has extra signature data for a witness input, so it should fail. 
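- # (Native v0 witness inputs must have an empty scriptSig -- signature data - # may only appear in the witness -- so this spend is consensus-invalid.)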
- test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Now get rid of the extra scriptsig on the witness input, and verify - # success (even with extra scriptsig data in the non-witness input) - tx2.vin[0].scriptSig = b"" - tx2.rehash() - add_witness_commitment(block) - block.solve() - - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Update utxo for later tests - self.utxo.pop(0) - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - - def test_max_witness_push_length(self): - ''' Should only allow up to 520 byte pushes in witness stack ''' - self.log.info("Testing maximum witness push size") - MAX_SCRIPT_ELEMENT_SIZE = 520 - assert(len(self.utxo)) - - block = self.build_next_block() - - witness_program = CScript([OP_DROP, OP_TRUE]) - witness_hash = sha256(witness_program) - scriptPubKey = CScript([OP_0, witness_hash]) - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) - tx.rehash() - - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) - tx2.wit.vtxinwit.append(CTxInWitness()) - # First try a 521-byte stack element - tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ] - tx2.rehash() - - self.update_witness_block_with_transactions(block, [tx, tx2]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Now reduce the length of the stack element - tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE) - - add_witness_commitment(block) - block.solve() - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Update the utxo for later tests - self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - def test_max_witness_program_length(self): - # Can create witness outputs that are long, but can't be greater than - # 10k bytes to successfully spend - self.log.info("Testing maximum witness program length") - assert(len(self.utxo)) - MAX_PROGRAM_LENGTH = 10000 - - # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes. 
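- # (Arithmetic: a 520-byte push serializes as OP_PUSHDATA2 plus 2 length - # bytes plus 520 data bytes = 523 bytes, and 19 * 523 = 9937; 63 OP_DROPs - # and an OP_TRUE add 64 more, giving 10001 = MAX_PROGRAM_LENGTH + 1.)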
- long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE]) - assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1) - long_witness_hash = sha256(long_witness_program) - long_scriptPubKey = CScript([OP_0, long_witness_hash]) - - block = self.build_next_block() - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey)) - tx.rehash() - - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program] - tx2.rehash() - - self.update_witness_block_with_transactions(block, [tx, tx2]) - - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Try again with one less byte in the witness program - witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE]) - assert(len(witness_program) == MAX_PROGRAM_LENGTH) - witness_hash = sha256(witness_program) - scriptPubKey = CScript([OP_0, witness_hash]) - - tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey) - tx.rehash() - tx2.vin[0].prevout.hash = tx.sha256 - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program] - tx2.rehash() - block.vtx = [block.vtx[0]] - self.update_witness_block_with_transactions(block, [tx, tx2]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - - def test_witness_input_length(self): - ''' Ensure that vin length must match vtxinwit length ''' - self.log.info("Testing witness input length") - assert(len(self.utxo)) - - witness_program = CScript([OP_DROP, OP_TRUE]) - witness_hash = sha256(witness_program) - scriptPubKey = CScript([OP_0, witness_hash]) - - # Create a transaction that splits our utxo into many outputs - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - nValue = self.utxo[0].nValue - for i in range(10): - tx.vout.append(CTxOut(int(nValue/10), scriptPubKey)) - tx.vout[0].nValue -= 1000 - assert(tx.vout[0].nValue >= 0) - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Try various ways to spend tx that should all break. - # This "broken" transaction serializer will not normalize - # the length of vtxinwit. 
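- # (The framework's CTransaction.serialize_with_witness pads or trims - # wit.vtxinwit to len(vin) before serializing; the subclass below emits the - # witness vector as-is, so mismatched input/witness counts go on the wire.)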
- class BrokenCTransaction(CTransaction): - def serialize_with_witness(self): - flags = 0 - if not self.wit.is_null(): - flags |= 1 - r = b"" - r += struct.pack("<i", self.nVersion) - if flags: - dummy = [] - r += ser_vector(dummy) - r += struct.pack("<B", flags) - r += ser_vector(self.vin) - r += ser_vector(self.vout) - if flags & 1: - r += self.wit.serialize() - r += struct.pack("<I", self.nLockTime) - return r - - # Finally, verify that version 0 -> version 1 transactions - # are non-standard - scriptPubKey = CScript([CScriptOp(OP_1), witness_hash]) - tx2 = CTransaction() - tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] - tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)] - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] - tx2.rehash() - # Gets accepted to test_node, because standardness of outputs isn't - # checked with fRequireStandard - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True) - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=False) - temp_utxo.pop() # last entry in temp_utxo was the output we just spent - temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - # Spend everything in temp_utxo back to an OP_TRUE output. - tx3 = CTransaction() - total_value = 0 - for i in temp_utxo: - tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) - tx3.wit.vtxinwit.append(CTxInWitness()) - total_value += i.nValue - tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] - tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE]))) - tx3.rehash() - # Spending a higher version witness output is not allowed by policy, - # even with fRequireStandard=false. - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False) - self.test_node.sync_with_ping() - with mininode_lock: - assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason) - - # Building a block with the transaction must be valid, however. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx2, tx3]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - sync_blocks(self.nodes) - - # Add utxo to our list - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) - - - def test_premature_coinbase_witness_spend(self): - self.log.info("Testing premature coinbase witness spend") - block = self.build_next_block() - # Change the output of the block to be a witness output. - witness_program = CScript([OP_TRUE]) - witness_hash = sha256(witness_program) - scriptPubKey = CScript([OP_0, witness_hash]) - block.vtx[0].vout[0].scriptPubKey = scriptPubKey - # This next line will rehash the coinbase and update the merkle - # root, and solve. - self.update_witness_block_with_transactions(block, []) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - spend_tx = CTransaction() - spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")] - spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)] - spend_tx.wit.vtxinwit.append(CTxInWitness()) - spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] - spend_tx.rehash() - - # Now test a premature spend. - self.nodes[0].generate(98) - sync_blocks(self.nodes) - block2 = self.build_next_block() - self.update_witness_block_with_transactions(block2, [spend_tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=False) - - # Advancing one more block should allow the spend. 
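- # (Coinbase maturity is 100 blocks: the witness coinbase above has been - # buried by 98 blocks, so spending it in the next block is one short; one - # more block makes the spend valid.)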
- self.nodes[0].generate(1) - block2 = self.build_next_block() - self.update_witness_block_with_transactions(block2, [spend_tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=True) - sync_blocks(self.nodes) - - - def test_signature_version_1(self): - self.log.info("Testing segwit signature hash version 1") - key = CECKey() - key.set_secretbytes(b"9") - pubkey = CPubKey(key.get_pubkey()) - - witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) - witness_hash = sha256(witness_program) - scriptPubKey = CScript([OP_0, witness_hash]) - - # First create a witness output for use in the tests. - assert(len(self.utxo)) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) - tx.rehash() - - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True) - # Mine this transaction in preparation for following tests. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - sync_blocks(self.nodes) - self.utxo.pop(0) - - # Test each hashtype - prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) - for sigflag in [ 0, SIGHASH_ANYONECANPAY ]: - for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]: - hashtype |= sigflag - block = self.build_next_block() - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) - tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey)) - tx.wit.vtxinwit.append(CTxInWitness()) - # Too-large input value - sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key) - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Too-small input value - sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key) - block.vtx.pop() # remove last tx - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) - - # Now try correct value - sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) - block.vtx.pop() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) - - # Test combinations of signature hashes. - # Split the utxo into a lot of outputs. - # Randomly choose up to 10 to spend, sign with different hashtypes, and - # output to a random number of outputs. Repeat NUM_TESTS times. - # Ensure that we've tested a situation where we use SIGHASH_SINGLE with - # an input index > number of outputs. 
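# A quick sketch of the hashtype byte used in the loop below (constants as in
# test_framework.script): the base type occupies the low bits and
# SIGHASH_ANYONECANPAY (0x80) is OR'd on top.
SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE = 1, 2, 3
SIGHASH_ANYONECANPAY = 0x80
assert SIGHASH_SINGLE | SIGHASH_ANYONECANPAY == 0x83
# The out-of-bounds case the comment above mentions: under BIP 143, when a
# SIGHASH_SINGLE input's index is >= the number of outputs, hashOutputs is a
# zero uint256 (legacy sighash instead returns a digest of 1).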
- NUM_TESTS = 500 - temp_utxos = [] - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) - split_value = prev_utxo.nValue // NUM_TESTS - for i in range(NUM_TESTS): - tx.vout.append(CTxOut(split_value, scriptPubKey)) - tx.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) - for i in range(NUM_TESTS): - temp_utxos.append(UTXO(tx.sha256, i, split_value)) - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - block = self.build_next_block() - used_sighash_single_out_of_bounds = False - for i in range(NUM_TESTS): - # Ping regularly to keep the connection alive - if (not i % 100): - self.test_node.sync_with_ping() - # Choose random number of inputs to use. - num_inputs = random.randint(1, 10) - # Create a slight bias for producing more utxos - num_outputs = random.randint(1, 11) - random.shuffle(temp_utxos) - assert(len(temp_utxos) > num_inputs) - tx = CTransaction() - total_value = 0 - for i in range(num_inputs): - tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b"")) - tx.wit.vtxinwit.append(CTxInWitness()) - total_value += temp_utxos[i].nValue - split_value = total_value // num_outputs - for i in range(num_outputs): - tx.vout.append(CTxOut(split_value, scriptPubKey)) - for i in range(num_inputs): - # Now try to sign each input, using a random hashtype. - anyonecanpay = 0 - if random.randint(0, 1): - anyonecanpay = SIGHASH_ANYONECANPAY - hashtype = random.randint(1, 3) | anyonecanpay - sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key) - if (hashtype == SIGHASH_SINGLE and i >= num_outputs): - used_sighash_single_out_of_bounds = True - tx.rehash() - for i in range(num_outputs): - temp_utxos.append(UTXO(tx.sha256, i, split_value)) - temp_utxos = temp_utxos[num_inputs:] - - block.vtx.append(tx) - - # Test the block periodically, if we're close to maxblocksize - if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000): - self.update_witness_block_with_transactions(block, []) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - block = self.build_next_block() - - if (not used_sighash_single_out_of_bounds): - self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value") - # Test the transactions we've added to the block - if (len(block.vtx) > 1): - self.update_witness_block_with_transactions(block, []) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Now test witness version 0 P2PKH transactions - pubkeyhash = hash160(pubkey) - scriptPKH = CScript([OP_0, pubkeyhash]) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) - tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH)) - tx.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) - - script = GetP2PKHScript(pubkeyhash) - sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) - signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL - - # Check that we can't have a scriptSig - tx2.vin[0].scriptSig = CScript([signature, pubkey]) - block = self.build_next_block() - 
self.update_witness_block_with_transactions(block, [tx, tx2])
-        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
-
-        # Move the signature to the witness.
-        block.vtx.pop()
-        tx2.wit.vtxinwit.append(CTxInWitness())
-        tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
-        tx2.vin[0].scriptSig = b""
-        tx2.rehash()
-
-        self.update_witness_block_with_transactions(block, [tx2])
-        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
-
-        temp_utxos.pop(0)
-
-        # Update self.utxos for later tests by creating two outputs
-        # that consolidate all the coins in temp_utxos.
-        output_value = sum(i.nValue for i in temp_utxos) // 2
-
-        tx = CTransaction()
-        index = 0
-        # Just spend to our usual anyone-can-spend output
-        tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
-        for i in temp_utxos:
-            # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
-            # the signatures as we go.
-            tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
-            tx.wit.vtxinwit.append(CTxInWitness())
-            sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_ALL|SIGHASH_ANYONECANPAY, i.nValue, key)
-            index += 1
-        block = self.build_next_block()
-        self.update_witness_block_with_transactions(block, [tx])
-        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
-
-        for i in range(len(tx.vout)):
-            self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
-
-
-    # Test P2SH wrapped witness programs.
-    def test_p2sh_witness(self, segwit_activated):
-        self.log.info("Testing P2SH witness transactions")
-
-        assert(len(self.utxo))
-
-        # Prepare the p2sh-wrapped witness output
-        witness_program = CScript([OP_DROP, OP_TRUE])
-        witness_hash = sha256(witness_program)
-        p2wsh_pubkey = CScript([OP_0, witness_hash])
-        p2sh_witness_hash = hash160(p2wsh_pubkey)
-        scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
-        scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
-
-        # Fund the P2SH output
-        tx = CTransaction()
-        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
-        tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
-        tx.rehash()
-
-        # Verify mempool acceptance and block validity
-        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
-        block = self.build_next_block()
-        self.update_witness_block_with_transactions(block, [tx])
-        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=segwit_activated)
-        sync_blocks(self.nodes)
-
-        # Now test attempts to spend the output.
-        spend_tx = CTransaction()
-        spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
-        spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
-        spend_tx.rehash()
-
-        # This transaction should not be accepted into the mempool pre- or
-        # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS, which
-        # will require a witness to spend a witness program regardless of
-        # segwit activation. Note that older bitcoind versions that are not
-        # segwit-aware would also reject this for failing CLEANSTACK.
-        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
-
-        # Try to put the witness script in the scriptSig, should also fail.
-        spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
-        spend_tx.rehash()
-        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
-
-        # Now put the witness script in the witness, should succeed after
-        # segwit activates.
-        spend_tx.vin[0].scriptSig = scriptSig
-        spend_tx.rehash()
-        spend_tx.wit.vtxinwit.append(CTxInWitness())
-        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
-
-        # Verify mempool acceptance
-        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=True, accepted=segwit_activated)
-        block = self.build_next_block()
-        self.update_witness_block_with_transactions(block, [spend_tx])
-
-        # If we're before activation, then sending this without witnesses
-        # should be valid. If we're after activation, then sending this with
-        # witnesses should be valid.
-        if segwit_activated:
-            test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
-        else:
-            test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=False)
-
-        # Update self.utxo
-        self.utxo.pop(0)
-        self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
-
-    # Test the behavior of starting up a segwit-aware node after the softfork
-    # has activated. As segwit requires different block data than pre-segwit
-    # nodes would have stored, this requires special handling.
-    # To enable this test, pass --oldbinary=<path-to-old-bitcoind> to
-    # the test.
-    def test_upgrade_after_activation(self, node_id):
-        self.log.info("Testing software upgrade after softfork activation")
-
-        assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
-
-        # Make sure the nodes are all up
-        sync_blocks(self.nodes)
-
-        # Restart with the new binary
-        self.stop_node(node_id)
-        self.start_node(node_id, extra_args=["-vbparams=segwit:0:999999999999"])
-        connect_nodes(self.nodes[0], node_id)
-
-        sync_blocks(self.nodes)
-
-        # Make sure that this peer thinks segwit has activated.
-        assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
-
-        # Make sure this peer's blocks match those of node0.
-        height = self.nodes[node_id].getblockcount()
-        while height >= 0:
-            block_hash = self.nodes[node_id].getblockhash(height)
-            assert_equal(block_hash, self.nodes[0].getblockhash(height))
-            assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
-            height -= 1
-
-
-    def test_witness_sigops(self):
-        '''Ensure sigop counting is correct inside witnesses.'''
-        self.log.info("Testing sigops limit")
-
-        assert(len(self.utxo))
-
-        # Keep this under MAX_OPS_PER_SCRIPT (201)
-        witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
-        witness_hash = sha256(witness_program)
-        scriptPubKey = CScript([OP_0, witness_hash])
-
-        sigops_per_script = 20*5 + 193*1
-        # We'll produce 2 extra outputs, one with a program that would take us
-        # over max sig ops, and one with a program that would exactly reach max
-        # sig ops
-        outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
-        extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
-
-        # We chose the number of checkmultisigs/checksigs to make this work:
-        assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
-
-        # This script, when spent with the first
-        # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
-        # would push us just over the block sigop limit.
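# Working the arithmetic through with concrete numbers (assuming
# MAX_SIGOP_COST == 80000, the consensus sigop-cost limit, with witness
# sigops counted at factor 1):
#   sigops_per_script      = 20*5 + 193 = 293
#   outputs                = 80000 // 293 + 2 = 273 + 2 = 275
#   extra_sigops_available = 80000 % 293 = 11
# Spending the first 273 outputs consumes 273 * 293 = 79989 sigops, so a
# final input whose program has 12 (= 11 + 1) CHECKSIGs overshoots the limit,
# while one with exactly 11 just reaches it.
assert 273 * 293 + 11 == 80000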
- witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF]) - witness_hash_toomany = sha256(witness_program_toomany) - scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany]) - - # If we spend this script instead, we would exactly reach our sigop - # limit (for witness sigops). - witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF]) - witness_hash_justright = sha256(witness_program_justright) - scriptPubKey_justright = CScript([OP_0, witness_hash_justright]) - - # First split our available utxo into a bunch of outputs - split_value = self.utxo[0].nValue // outputs - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - for i in range(outputs): - tx.vout.append(CTxOut(split_value, scriptPubKey)) - tx.vout[-2].scriptPubKey = scriptPubKey_toomany - tx.vout[-1].scriptPubKey = scriptPubKey_justright - tx.rehash() - - block_1 = self.build_next_block() - self.update_witness_block_with_transactions(block_1, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block_1, accepted=True) - - tx2 = CTransaction() - # If we try to spend the first n-1 outputs from tx, that should be - # too many sigops. - total_value = 0 - for i in range(outputs-1): - tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ] - total_value += tx.vout[i].nValue - tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ] - tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE]))) - tx2.rehash() - - block_2 = self.build_next_block() - self.update_witness_block_with_transactions(block_2, [tx2]) - test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=False) - - # Try dropping the last input in tx2, and add an output that has - # too many sigops (contributing to legacy sigop count). - checksig_count = (extra_sigops_available // 4) + 1 - scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count) - tx2.vout.append(CTxOut(0, scriptPubKey_checksigs)) - tx2.vin.pop() - tx2.wit.vtxinwit.pop() - tx2.vout[0].nValue -= tx.vout[-2].nValue - tx2.rehash() - block_3 = self.build_next_block() - self.update_witness_block_with_transactions(block_3, [tx2]) - test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False) - - # If we drop the last checksig in this output, the tx should succeed. 
- block_4 = self.build_next_block() - tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1)) - tx2.rehash() - self.update_witness_block_with_transactions(block_4, [tx2]) - test_witness_block(self.nodes[0].rpc, self.test_node, block_4, accepted=True) - - # Reset the tip back down for the next test - sync_blocks(self.nodes) - for x in self.nodes: - x.invalidateblock(block_4.hash) - - # Try replacing the last input of tx2 to be spending the last - # output of tx - block_5 = self.build_next_block() - tx2.vout.pop() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b"")) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ] - tx2.rehash() - self.update_witness_block_with_transactions(block_5, [tx2]) - test_witness_block(self.nodes[0].rpc, self.test_node, block_5, accepted=True) - - # TODO: test p2sh sigop counting - - def test_getblocktemplate_before_lockin(self): - self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)") - # Node0 is segwit aware, node2 is not. - for node in [self.nodes[0], self.nodes[2]]: - gbt_results = node.getblocktemplate() - block_version = gbt_results['version'] - # If we're not indicating segwit support, we will still be - # signalling for segwit activation. - assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0]) - # If we don't specify the segwit rule, then we won't get a default - # commitment. - assert('default_witness_commitment' not in gbt_results) - - # Workaround: - # Can either change the tip, or change the mempool and wait 5 seconds - # to trigger a recomputation of getblocktemplate. - txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16) - # Using mocktime lets us avoid sleep() - sync_mempools(self.nodes) - self.nodes[0].setmocktime(int(time.time())+10) - self.nodes[2].setmocktime(int(time.time())+10) - - for node in [self.nodes[0], self.nodes[2]]: - gbt_results = node.getblocktemplate({"rules" : ["segwit"]}) - block_version = gbt_results['version'] - if node == self.nodes[2]: - # If this is a non-segwit node, we should still not get a witness - # commitment, nor a version bit signalling segwit. - assert_equal(block_version & (1 << VB_WITNESS_BIT), 0) - assert('default_witness_commitment' not in gbt_results) - else: - # For segwit-aware nodes, check the version bit and the witness - # commitment are correct. - assert(block_version & (1 << VB_WITNESS_BIT) != 0) - assert('default_witness_commitment' in gbt_results) - witness_commitment = gbt_results['default_witness_commitment'] - - # Check that default_witness_commitment is present. - witness_root = CBlock.get_merkle_root([ser_uint256(0), - ser_uint256(txid)]) - script = get_witness_script(witness_root, 0) - assert_equal(witness_commitment, bytes_to_hex_str(script)) - - # undo mocktime - self.nodes[0].setmocktime(0) - self.nodes[2].setmocktime(0) - - # Uncompressed pubkeys are no longer supported in default relay policy, - # but (for now) are still valid in blocks. - def test_uncompressed_pubkey(self): - self.log.info("Testing uncompressed pubkeys") - # Segwit transactions using uncompressed pubkeys are not accepted - # under default policy, but should still pass consensus. 
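# For reference (a sketch, not from the diff): compressed pubkeys are 33
# bytes (an 0x02/0x03 prefix plus the x coordinate), uncompressed ones are 65
# bytes (an 0x04 prefix plus x and y). Relay policy rejects the 65-byte form
# inside witness programs; block consensus does not.
def pubkey_form(pubkey):
    if len(pubkey) == 33 and pubkey[0] in (2, 3):
        return "compressed"
    if len(pubkey) == 65 and pubkey[0] == 4:
        return "uncompressed"
    return "invalid"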
- key = CECKey() - key.set_secretbytes(b"9") - key.set_compressed(False) - pubkey = CPubKey(key.get_pubkey()) - assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey - - assert(len(self.utxo) > 0) - utxo = self.utxo.pop(0) - - # Test 1: P2WPKH - # First create a P2WPKH output that uses an uncompressed pubkey - pubkeyhash = hash160(pubkey) - scriptPKH = CScript([OP_0, pubkeyhash]) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b"")) - tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH)) - tx.rehash() - - # Confirm it in a block. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Now try to spend it. Send it to a P2WSH output, which we'll - # use in the next test. - witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) - witness_hash = sha256(witness_program) - scriptWSH = CScript([OP_0, witness_hash]) - - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH)) - script = GetP2PKHScript(pubkeyhash) - sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) - signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ] - tx2.rehash() - - # Should fail policy test. - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') - # But passes consensus. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx2]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Test 2: P2WSH - # Try to spend the P2WSH output created in last test. - # Send it to a P2SH(P2WSH) output, which we'll use in the next test. - p2sh_witness_hash = hash160(scriptWSH) - scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) - scriptSig = CScript([scriptWSH]) - - tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) - tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH)) - tx3.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) - - # Should fail policy test. - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') - # But passes consensus. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx3]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Test 3: P2SH(P2WSH) - # Try to spend the P2SH output created in the last test. - # Send it to a P2PKH output, which we'll use in the next test. - scriptPubKey = GetP2PKHScript(pubkeyhash) - tx4 = CTransaction() - tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig)) - tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey)) - tx4.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) - - # Should fail policy test. 
- test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx4]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - - # Test 4: Uncompressed pubkeys should still be valid in non-segwit - # transactions. - tx5 = CTransaction() - tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b"")) - tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE]))) - (sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL) - signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL - tx5.vin[0].scriptSig = CScript([signature, pubkey]) - tx5.rehash() - # Should pass policy and consensus. - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx5, True, True) - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx5]) - test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) - self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue)) - - def test_non_standard_witness(self): - self.log.info("Testing detection of non-standard P2WSH witness") - pad = chr(1).encode('latin-1') - - # Create scripts for tests - scripts = [] - scripts.append(CScript([OP_DROP] * 100)) - scripts.append(CScript([OP_DROP] * 99)) - scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60)) - scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61)) - - p2wsh_scripts = [] - - assert(len(self.utxo)) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - - # For each script, generate a pair of P2WSH and P2SH-P2WSH output. - outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2) - for i in scripts: - p2wsh = CScript([OP_0, sha256(i)]) - p2sh = hash160(p2wsh) - p2wsh_scripts.append(p2wsh) - tx.vout.append(CTxOut(outputvalue, p2wsh)) - tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL]))) - tx.rehash() - txid = tx.sha256 - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True) - - self.nodes[0].generate(1) - sync_blocks(self.nodes) - - # Creating transactions for tests - p2wsh_txs = [] - p2sh_txs = [] - for i in range(len(scripts)): - p2wsh_tx = CTransaction() - p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2))) - p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) - p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) - p2wsh_tx.rehash() - p2wsh_txs.append(p2wsh_tx) - p2sh_tx = CTransaction() - p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]]))) - p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) - p2sh_tx.wit.vtxinwit.append(CTxInWitness()) - p2sh_tx.rehash() - p2sh_txs.append(p2sh_tx) - - # Testing native P2WSH - # Witness stack size, excluding witnessScript, over 100 is non-standard - p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard') - # Non-standard nodes should accept - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[0], True, True) - - # Stack element size over 80 bytes is non-standard - p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, 
False, b'bad-witness-nonstandard') - # Non-standard nodes should accept - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[1], True, True) - # Standard nodes should accept if element size is not over 80 bytes - p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, True) - - # witnessScript size at 3600 bytes is standard - p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[2], True, True) - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[2], True, True) - - # witnessScript size at 3601 bytes is non-standard - p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard') - # Non-standard nodes should accept - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[3], True, True) - - # Repeating the same tests with P2SH-P2WSH - p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard') - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[0], True, True) - p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard') - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[1], True, True) - p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, True) - p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[2], True, True) - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[2], True, True) - p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] - test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard') - test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[3], True, True) - - self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node - # Valid but non-standard transactions in a block should be accepted by standard node - sync_blocks(self.nodes) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - assert_equal(len(self.nodes[1].getrawmempool()), 0) - - self.utxo.pop(0) - - - def run_test(self): - # Setup the p2p connections and start up the network thread. 
- # self.test_node sets NODE_WITNESS|NODE_NETWORK - self.test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) - # self.old_node sets only NODE_NETWORK - self.old_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK) - # self.std_node is for testing node1 (fRequireStandard=true) - self.std_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) - - network_thread_start() - - # Keep a place to store utxo's that can be used in later tests - self.utxo = [] - - # Test logic begins here - self.test_node.wait_for_verack() - - self.log.info("Starting tests before segwit lock in:") - - self.test_witness_services() # Verifies NODE_WITNESS - self.test_non_witness_transaction() # non-witness tx's are accepted - self.test_unnecessary_witness_before_segwit_activation() - self.test_block_relay(segwit_activated=False) - - # Advance to segwit being 'started' - self.advance_to_segwit_started() - sync_blocks(self.nodes) - self.test_getblocktemplate_before_lockin() - - sync_blocks(self.nodes) - - # At lockin, nothing should change. - self.log.info("Testing behavior post lockin, pre-activation") - self.advance_to_segwit_lockin() - - # Retest unnecessary witnesses - self.test_unnecessary_witness_before_segwit_activation() - self.test_witness_tx_relay_before_segwit_activation() - self.test_block_relay(segwit_activated=False) - self.test_p2sh_witness(segwit_activated=False) - self.test_standardness_v0(segwit_activated=False) - - sync_blocks(self.nodes) - - # Now activate segwit - self.log.info("Testing behavior after segwit activation") - self.advance_to_segwit_active() - - sync_blocks(self.nodes) - - # Test P2SH witness handling again - self.test_p2sh_witness(segwit_activated=True) - self.test_witness_commitments() - self.test_block_malleability() - self.test_witness_block_size() - self.test_submit_block() - self.test_extra_witness_data() - self.test_max_witness_push_length() - self.test_max_witness_program_length() - self.test_witness_input_length() - self.test_block_relay(segwit_activated=True) - self.test_tx_relay_after_segwit_activation() - self.test_standardness_v0(segwit_activated=True) - self.test_segwit_versions() - self.test_premature_coinbase_witness_spend() - self.test_uncompressed_pubkey() - self.test_signature_version_1() - self.test_non_standard_witness() - sync_blocks(self.nodes) - self.test_upgrade_after_activation(node_id=2) - self.test_witness_sigops() - - -if __name__ == '__main__': - SegWitTest().main() diff --git a/test/functional/p2p-timeouts.py b/test/functional/p2p-timeouts.py deleted file mode 100755 index 6d21095cc6..0000000000 --- a/test/functional/p2p-timeouts.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test various net timeouts. - -- Create three bitcoind nodes: - - no_verack_node - we never send a verack in response to their version - no_version_node - we never send a version (only a ping) - no_send_node - we never send any P2P message. 
- -- Start all three nodes -- Wait 1 second -- Assert that we're connected -- Send a ping to no_verack_node and no_version_node -- Wait 30 seconds -- Assert that we're still connected -- Send a ping to no_verack_node and no_version_node -- Wait 31 seconds -- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds) -""" - -from time import sleep - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class TestNode(P2PInterface): - def on_version(self, message): - # Don't send a verack in response - pass - -class TimeoutsTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def run_test(self): - # Setup the p2p connections and start up the network thread. - no_verack_node = self.nodes[0].add_p2p_connection(TestNode()) - no_version_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False) - no_send_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False) - - network_thread_start() - - sleep(1) - - assert no_verack_node.connected - assert no_version_node.connected - assert no_send_node.connected - - no_verack_node.send_message(msg_ping()) - no_version_node.send_message(msg_ping()) - - sleep(30) - - assert "version" in no_verack_node.last_message - - assert no_verack_node.connected - assert no_version_node.connected - assert no_send_node.connected - - no_verack_node.send_message(msg_ping()) - no_version_node.send_message(msg_ping()) - - sleep(31) - - assert not no_verack_node.connected - assert not no_version_node.connected - assert not no_send_node.connected - -if __name__ == '__main__': - TimeoutsTest().main() diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py new file mode 100755 index 0000000000..d9f461a049 --- /dev/null +++ b/test/functional/p2p_compactblocks.py @@ -0,0 +1,921 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test compact blocks (BIP 152). + +Version 1 compact blocks are pre-segwit (txids) +Version 2 compact blocks are post-segwit (wtxids) +""" + +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment +from test_framework.script import CScript, OP_TRUE + +# TestNode: A peer we use to send messages to bitcoind, and store responses. +class TestNode(P2PInterface): + def __init__(self): + super().__init__() + self.last_sendcmpct = [] + self.block_announced = False + # Store the hashes of blocks we've seen announced. + # This is for synchronizing the p2p message traffic, + # so we can eg wait until a particular block is announced. 
+        self.announced_blockhashes = set()
+
+    def on_sendcmpct(self, message):
+        self.last_sendcmpct.append(message)
+
+    def on_cmpctblock(self, message):
+        self.block_announced = True
+        self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
+        self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
+
+    def on_headers(self, message):
+        self.block_announced = True
+        for x in self.last_message["headers"].headers:
+            x.calc_sha256()
+            self.announced_blockhashes.add(x.sha256)
+
+    def on_inv(self, message):
+        for x in self.last_message["inv"].inv:
+            if x.type == 2:
+                self.block_announced = True
+                self.announced_blockhashes.add(x.hash)
+
+    # Requires caller to hold mininode_lock
+    def received_block_announcement(self):
+        return self.block_announced
+
+    def clear_block_announcement(self):
+        with mininode_lock:
+            self.block_announced = False
+            self.last_message.pop("inv", None)
+            self.last_message.pop("headers", None)
+            self.last_message.pop("cmpctblock", None)
+
+    def get_headers(self, locator, hashstop):
+        msg = msg_getheaders()
+        msg.locator.vHave = locator
+        msg.hashstop = hashstop
+        self.send_message(msg)
+
+    def send_header_for_blocks(self, new_blocks):
+        headers_message = msg_headers()
+        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
+        self.send_message(headers_message)
+
+    def request_headers_and_sync(self, locator, hashstop=0):
+        self.clear_block_announcement()
+        self.get_headers(locator, hashstop)
+        wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
+        self.clear_block_announcement()
+
+    # Block until a block announcement for a particular block hash is
+    # received.
+    def wait_for_block_announcement(self, block_hash, timeout=30):
+        def received_hash():
+            return (block_hash in self.announced_blockhashes)
+        wait_until(received_hash, timeout=timeout, lock=mininode_lock)
+
+    def send_await_disconnect(self, message, timeout=30):
+        """Send a message to the node and wait for disconnect.
+
+        This is used when we want to send a message into the node that we expect
+        will get us disconnected, e.g. an invalid block."""
+        self.send_message(message)
+        wait_until(lambda: self.state != "connected", timeout=timeout, lock=mininode_lock)
+
+class CompactBlocksTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        # Node0 = pre-segwit, node1 = segwit-aware
+        self.num_nodes = 2
+        # This test was written assuming SegWit is activated using BIP9 at height 432 (3x confirmation window).
+        # TODO: Rewrite this test to support SegWit being always active.
+        self.extra_args = [["-vbparams=segwit:0:0"], ["-vbparams=segwit:0:999999999999", "-txindex", "-deprecatedrpc=addwitnessaddress"]]
+        self.utxos = []
+
+    def build_block_on_tip(self, node, segwit=False):
+        height = node.getblockcount()
+        tip = node.getbestblockhash()
+        mtp = node.getblockheader(tip)['mediantime']
+        block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
+        block.nVersion = 4
+        if segwit:
+            add_witness_commitment(block)
+        block.solve()
+        return block
+
+    # Create 10 more anyone-can-spend utxos for testing.
+    def make_utxos(self):
+        # Doesn't matter which node we use, just use node0.
+ block = self.build_block_on_tip(self.nodes[0]) + self.test_node.send_and_ping(msg_block(block)) + assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) + self.nodes[0].generate(100) + + total_value = block.vtx[0].vout[0].nValue + out_value = total_value // 10 + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) + for i in range(10): + tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) + tx.rehash() + + block2 = self.build_block_on_tip(self.nodes[0]) + block2.vtx.append(tx) + block2.hashMerkleRoot = block2.calc_merkle_root() + block2.solve() + self.test_node.send_and_ping(msg_block(block2)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) + self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) + return + + # Test "sendcmpct" (between peers preferring the same version): + # - No compact block announcements unless sendcmpct is sent. + # - If sendcmpct is sent with version > preferred_version, the message is ignored. + # - If sendcmpct is sent with boolean 0, then block announcements are not + # made with compact blocks. + # - If sendcmpct is then sent with boolean 1, then new block announcements + # are made with compact blocks. + # If old_node is passed in, request compact blocks with version=preferred-1 + # and verify that it receives block announcements via compact block. + def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): + # Make sure we get a SENDCMPCT message from our peer + def received_sendcmpct(): + return (len(test_node.last_sendcmpct) > 0) + wait_until(received_sendcmpct, timeout=30, lock=mininode_lock) + with mininode_lock: + # Check that the first version received is the preferred one + assert_equal(test_node.last_sendcmpct[0].version, preferred_version) + # And that we receive versions down to 1. + assert_equal(test_node.last_sendcmpct[-1].version, 1) + test_node.last_sendcmpct = [] + + tip = int(node.getbestblockhash(), 16) + + def check_announcement_of_new_block(node, peer, predicate): + peer.clear_block_announcement() + block_hash = int(node.generate(1)[0], 16) + peer.wait_for_block_announcement(block_hash, timeout=30) + assert(peer.block_announced) + + with mininode_lock: + assert predicate(peer), ( + "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( + block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) + + # We shouldn't get any block announcements via cmpctblock yet. + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) + + # Try one more time, this time after requesting headers. + test_node.request_headers_and_sync(locator=[tip]) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) + + # Test a few ways of using sendcmpct that should NOT + # result in compact block announcements. + # Before each test, sync the headers chain. + test_node.request_headers_and_sync(locator=[tip]) + + # Now try a SENDCMPCT message with too-high version + sendcmpct = msg_sendcmpct() + sendcmpct.version = preferred_version+1 + sendcmpct.announce = True + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) + + # Headers sync before next test. 
+ test_node.request_headers_and_sync(locator=[tip]) + + # Now try a SENDCMPCT message with valid version, but announce=False + sendcmpct.version = preferred_version + sendcmpct.announce = False + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) + + # Headers sync before next test. + test_node.request_headers_and_sync(locator=[tip]) + + # Finally, try a SENDCMPCT message with announce=True + sendcmpct.version = preferred_version + sendcmpct.announce = True + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) + + # Try one more time (no headers sync should be needed!) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) + + # Try one more time, after turning on sendheaders + test_node.send_and_ping(msg_sendheaders()) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) + + # Try one more time, after sending a version-1, announce=false message. + sendcmpct.version = preferred_version-1 + sendcmpct.announce = False + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) + + # Now turn off announcements + sendcmpct.version = preferred_version + sendcmpct.announce = False + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) + + if old_node is not None: + # Verify that a peer using an older protocol version can receive + # announcements from this node. + sendcmpct.version = preferred_version-1 + sendcmpct.announce = True + old_node.send_and_ping(sendcmpct) + # Header sync + old_node.request_headers_and_sync(locator=[tip]) + check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message) + + # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last. + def test_invalid_cmpctblock_message(self): + self.nodes[0].generate(101) + block = self.build_block_on_tip(self.nodes[0]) + + cmpct_block = P2PHeaderAndShortIDs() + cmpct_block.header = CBlockHeader(block) + cmpct_block.prefilled_txn_length = 1 + # This index will be too high + prefilled_txn = PrefilledTransaction(1, block.vtx[0]) + cmpct_block.prefilled_txn = [prefilled_txn] + self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) + + # Compare the generated shortids to what we expect based on BIP 152, given + # bitcoind's choice of nonce. + def test_compactblock_construction(self, node, test_node, version, use_witness_address): + # Generate a bunch of transactions. + node.generate(101) + num_transactions = 25 + address = node.getnewaddress() + if use_witness_address: + # Want at least one segwit spend, so move all funds to + # a witness address. 
+            address = node.addwitnessaddress(address)
+            value_to_send = node.getbalance()
+            node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
+            node.generate(1)
+
+        segwit_tx_generated = False
+        for i in range(num_transactions):
+            txid = node.sendtoaddress(address, 0.1)
+            hex_tx = node.gettransaction(txid)["hex"]
+            tx = FromHex(CTransaction(), hex_tx)
+            if not tx.wit.is_null():
+                segwit_tx_generated = True
+
+        if use_witness_address:
+            assert(segwit_tx_generated) # check that our test is not broken
+
+        # Wait until we've seen the block announcement for the resulting tip
+        tip = int(node.getbestblockhash(), 16)
+        test_node.wait_for_block_announcement(tip)
+
+        # Make sure we will receive a fast-announce compact block
+        self.request_cb_announcements(test_node, node, version)
+
+        # Now mine a block, and look at the resulting compact block.
+        test_node.clear_block_announcement()
+        block_hash = int(node.generate(1)[0], 16)
+
+        # Store the raw block in our internal format. Use "%064x" so the hash
+        # is zero-padded to the full 64 hex characters the RPC expects.
+        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
+        for tx in block.vtx:
+            tx.calc_sha256()
+        block.rehash()
+
+        # Wait until the block was announced (via compact blocks)
+        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+
+        # Now fetch and check the compact block
+        header_and_shortids = None
+        with mininode_lock:
+            assert("cmpctblock" in test_node.last_message)
+            # Convert the on-the-wire representation to absolute indexes
+            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
+        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
+
+        # Now fetch the compact block using a normal non-announce getdata
+        with mininode_lock:
+            test_node.clear_block_announcement()
+            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
+            test_node.send_message(msg_getdata([inv]))
+
+        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+
+        # Now fetch and check the compact block
+        header_and_shortids = None
+        with mininode_lock:
+            assert("cmpctblock" in test_node.last_message)
+            # Convert the on-the-wire representation to absolute indexes
+            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
+        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
+
+    def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
+        # Check that we got the right block!
+        header_and_shortids.header.calc_sha256()
+        assert_equal(header_and_shortids.header.sha256, block_hash)
+
+        # Make sure the prefilled_txn appears to have included the coinbase
+        assert(len(header_and_shortids.prefilled_txn) >= 1)
+        assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
+
+        # Check that all prefilled_txn entries match what's in the block.
+        for entry in header_and_shortids.prefilled_txn:
+            entry.tx.calc_sha256()
+            # This checks the non-witness parts of the tx agree
+            assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
+
+            # And this checks the witness
+            wtxid = entry.tx.calc_sha256(True)
+            if version == 2:
+                assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
+            else:
+                # Shouldn't have received a witness
+                assert(entry.tx.wit.is_null())
+
+        # Check that the cmpctblock message announced all the transactions.
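# Sketch of the shortid check that follows (per BIP 152; siphash256 is
# assumed to come from test_framework.siphash): a shortid is SipHash-2-4 of
# the txid (or wtxid for version 2), truncated to 6 bytes, keyed by the first
# two little-endian uint64s of SHA256(serialized header || nonce), which is
# what get_siphash_keys() computes.
from test_framework.siphash import siphash256

def bip152_shortid(k0, k1, tx_hash):
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff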
+ assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) + + # And now check that all the shortids are as expected as well. + # Determine the siphash keys to use. + [k0, k1] = header_and_shortids.get_siphash_keys() + + index = 0 + while index < len(block.vtx): + if (len(header_and_shortids.prefilled_txn) > 0 and + header_and_shortids.prefilled_txn[0].index == index): + # Already checked prefilled transactions above + header_and_shortids.prefilled_txn.pop(0) + else: + tx_hash = block.vtx[index].sha256 + if version == 2: + tx_hash = block.vtx[index].calc_sha256(True) + shortid = calculate_shortid(k0, k1, tx_hash) + assert_equal(shortid, header_and_shortids.shortids[0]) + header_and_shortids.shortids.pop(0) + index += 1 + + # Test that bitcoind requests compact blocks when we announce new blocks + # via header or inv, and that responding to getblocktxn causes the block + # to be successfully reconstructed. + # Post-segwit: upgraded nodes would only make this request of cb-version-2, + # NODE_WITNESS peers. Unupgraded nodes would still make this request of + # any cb-version-1-supporting peer. + def test_compactblock_requests(self, node, test_node, version, segwit): + # Try announcing a block with an inv or header, expect a compactblock + # request + for announce in ["inv", "header"]: + block = self.build_block_on_tip(node, segwit=segwit) + with mininode_lock: + test_node.last_message.pop("getdata", None) + + if announce == "inv": + test_node.send_message(msg_inv([CInv(2, block.sha256)])) + wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock) + test_node.send_header_for_blocks([block]) + else: + test_node.send_header_for_blocks([block]) + wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock) + assert_equal(len(test_node.last_message["getdata"].inv), 1) + assert_equal(test_node.last_message["getdata"].inv[0].type, 4) + assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) + + # Send back a compactblock message that omits the coinbase + comp_block = HeaderAndShortIDs() + comp_block.header = CBlockHeader(block) + comp_block.nonce = 0 + [k0, k1] = comp_block.get_siphash_keys() + coinbase_hash = block.vtx[0].sha256 + if version == 2: + coinbase_hash = block.vtx[0].calc_sha256(True) + comp_block.shortids = [ + calculate_shortid(k0, k1, coinbase_hash) ] + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) + # Expect a getblocktxn message. + with mininode_lock: + assert("getblocktxn" in test_node.last_message) + absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() + assert_equal(absolute_indexes, [0]) # should be a coinbase request + + # Send the coinbase, and verify that the tip advances. + if version == 2: + msg = msg_witness_blocktxn() + else: + msg = msg_blocktxn() + msg.block_transactions.blockhash = block.sha256 + msg.block_transactions.transactions = [block.vtx[0]] + test_node.send_and_ping(msg) + assert_equal(int(node.getbestblockhash(), 16), block.sha256) + + # Create a chain of transactions from given utxo, and add to a new block. 
+ def build_block_with_transactions(self, node, utxo, num_transactions): + block = self.build_block_on_tip(node) + + for i in range(num_transactions): + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) + tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) + tx.rehash() + utxo = [tx.sha256, 0, tx.vout[0].nValue] + block.vtx.append(tx) + + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + return block + + # Test that we only receive getblocktxn requests for transactions that the + # node needs, and that responding to them causes the block to be + # reconstructed. + def test_getblocktxn_requests(self, node, test_node, version): + with_witness = (version==2) + + def test_getblocktxn_response(compact_block, peer, expected_result): + msg = msg_cmpctblock(compact_block.to_p2p()) + peer.send_and_ping(msg) + with mininode_lock: + assert("getblocktxn" in peer.last_message) + absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute() + assert_equal(absolute_indexes, expected_result) + + def test_tip_after_message(node, peer, msg, tip): + peer.send_and_ping(msg) + assert_equal(int(node.getbestblockhash(), 16), tip) + + # First try announcing compactblocks that won't reconstruct, and verify + # that we receive getblocktxn messages back. + utxo = self.utxos.pop(0) + + block = self.build_block_with_transactions(node, utxo, 5) + self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) + comp_block = HeaderAndShortIDs() + comp_block.initialize_from_block(block, use_witness=with_witness) + + test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) + + msg_bt = msg_blocktxn() + if with_witness: + msg_bt = msg_witness_blocktxn() # serialize with witnesses + msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) + test_tip_after_message(node, test_node, msg_bt, block.sha256) + + utxo = self.utxos.pop(0) + block = self.build_block_with_transactions(node, utxo, 5) + self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) + + # Now try interspersing the prefilled transactions + comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness) + test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) + msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) + test_tip_after_message(node, test_node, msg_bt, block.sha256) + + # Now try giving one transaction ahead of time. + utxo = self.utxos.pop(0) + block = self.build_block_with_transactions(node, utxo, 5) + self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) + test_node.send_and_ping(msg_tx(block.vtx[1])) + assert(block.vtx[1].hash in node.getrawmempool()) + + # Prefill 4 out of the 6 transactions, and verify that only the one + # that was not in the mempool is requested. + comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness) + test_getblocktxn_response(comp_block, test_node, [5]) + + msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]]) + test_tip_after_message(node, test_node, msg_bt, block.sha256) + + # Now provide all transactions to the node before the block is + # announced and verify reconstruction happens immediately. 
+ utxo = self.utxos.pop(0) + block = self.build_block_with_transactions(node, utxo, 10) + self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) + for tx in block.vtx[1:]: + test_node.send_message(msg_tx(tx)) + test_node.sync_with_ping() + # Make sure all transactions were accepted. + mempool = node.getrawmempool() + for tx in block.vtx[1:]: + assert(tx.hash in mempool) + + # Clear out last request. + with mininode_lock: + test_node.last_message.pop("getblocktxn", None) + + # Send compact block + comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness) + test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) + with mininode_lock: + # Shouldn't have gotten a request for any transaction + assert("getblocktxn" not in test_node.last_message) + + # Incorrectly responding to a getblocktxn shouldn't cause the block to be + # permanently failed. + def test_incorrect_blocktxn_response(self, node, test_node, version): + if (len(self.utxos) == 0): + self.make_utxos() + utxo = self.utxos.pop(0) + + block = self.build_block_with_transactions(node, utxo, 10) + self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) + # Relay the first 5 transactions from the block in advance + for tx in block.vtx[1:6]: + test_node.send_message(msg_tx(tx)) + test_node.sync_with_ping() + # Make sure all transactions were accepted. + mempool = node.getrawmempool() + for tx in block.vtx[1:6]: + assert(tx.hash in mempool) + + # Send compact block + comp_block = HeaderAndShortIDs() + comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2)) + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + absolute_indexes = [] + with mininode_lock: + assert("getblocktxn" in test_node.last_message) + absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() + assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) + + # Now give an incorrect response. + # Note that it's possible for bitcoind to be smart enough to know we're + # lying, since it could check to see if the shortid matches what we're + # sending, and eg disconnect us for misbehavior. If that behavior + # change were made, we could just modify this test by having a + # different peer provide the block further down, so that we're still + # verifying that the block isn't marked bad permanently. This is good + # enough for now. + msg = msg_blocktxn() + if version==2: + msg = msg_witness_blocktxn() + msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:]) + test_node.send_and_ping(msg) + + # Tip should not have updated + assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) + + # We should receive a getdata request + wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock) + assert_equal(len(test_node.last_message["getdata"].inv), 1) + assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG) + assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) + + # Deliver the block + if version==2: + test_node.send_and_ping(msg_witness_block(block)) + else: + test_node.send_and_ping(msg_block(block)) + assert_equal(int(node.getbestblockhash(), 16), block.sha256) + + def test_getblocktxn_handler(self, node, test_node, version): + # bitcoind will not send blocktxn responses for blocks whose height is + # more than 10 blocks deep. 
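# The to_absolute()/from_absolute() calls below translate between absolute
# transaction positions and BIP 152's differential index encoding, roughly:
def differential_encode(absolute_indexes):
    # [1, 2, 5] -> [1, 0, 2]: each entry is the gap from the previous index
    last, out = -1, []
    for x in absolute_indexes:
        out.append(x - last - 1)
        last = x
    return out

assert differential_encode([1, 2, 5]) == [1, 0, 2]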
+ MAX_GETBLOCKTXN_DEPTH = 10 + chain_height = node.getblockcount() + current_height = chain_height + while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): + block_hash = node.getblockhash(current_height) + block = FromHex(CBlock(), node.getblock(block_hash, False)) + + msg = msg_getblocktxn() + msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) + num_to_request = random.randint(1, len(block.vtx)) + msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request))) + test_node.send_message(msg) + wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock) + + [tx.calc_sha256() for tx in block.vtx] + with mininode_lock: + assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16)) + all_indices = msg.block_txn_request.to_absolute() + for index in all_indices: + tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0) + tx.calc_sha256() + assert_equal(tx.sha256, block.vtx[index].sha256) + if version == 1: + # Witnesses should have been stripped + assert(tx.wit.is_null()) + else: + # Check that the witness matches + assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) + test_node.last_message.pop("blocktxn", None) + current_height -= 1 + + # Next request should send a full block response, as we're past the + # allowed depth for a blocktxn response. + block_hash = node.getblockhash(current_height) + msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) + with mininode_lock: + test_node.last_message.pop("block", None) + test_node.last_message.pop("blocktxn", None) + test_node.send_and_ping(msg) + with mininode_lock: + test_node.last_message["block"].block.calc_sha256() + assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16)) + assert "blocktxn" not in test_node.last_message + + def test_compactblocks_not_at_tip(self, node, test_node): + # Test that requesting old compactblocks doesn't work. + MAX_CMPCTBLOCK_DEPTH = 5 + new_blocks = [] + for i in range(MAX_CMPCTBLOCK_DEPTH + 1): + test_node.clear_block_announcement() + new_blocks.append(node.generate(1)[0]) + wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + + test_node.clear_block_announcement() + test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) + wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) + + test_node.clear_block_announcement() + node.generate(1) + wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + test_node.clear_block_announcement() + with mininode_lock: + test_node.last_message.pop("block", None) + test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) + wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock) + with mininode_lock: + test_node.last_message["block"].block.calc_sha256() + assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) + + # Generate an old compactblock, and verify that it's not accepted. 
+ cur_height = node.getblockcount() + hashPrevBlock = int(node.getblockhash(cur_height-5), 16) + block = self.build_block_on_tip(node) + block.hashPrevBlock = hashPrevBlock + block.solve() + + comp_block = HeaderAndShortIDs() + comp_block.initialize_from_block(block) + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + + tips = node.getchaintips() + found = False + for x in tips: + if x["hash"] == block.hash: + assert_equal(x["status"], "headers-only") + found = True + break + assert(found) + + # Requesting this block via getblocktxn should silently fail + # (to avoid fingerprinting attacks). + msg = msg_getblocktxn() + msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) + with mininode_lock: + test_node.last_message.pop("blocktxn", None) + test_node.send_and_ping(msg) + with mininode_lock: + assert "blocktxn" not in test_node.last_message + + def activate_segwit(self, node): + node.generate(144*3) + assert_equal(get_bip9_status(node, "segwit")["status"], 'active') + + def test_end_to_end_block_relay(self, node, listeners): + utxo = self.utxos.pop(0) + + block = self.build_block_with_transactions(node, utxo, 10) + + [l.clear_block_announcement() for l in listeners] + + # ToHex() won't serialize with witness, but this block has no witnesses + # anyway. TODO: repeat this test with witness tx's to a segwit node. + node.submitblock(ToHex(block)) + + for l in listeners: + wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) + with mininode_lock: + for l in listeners: + assert "cmpctblock" in l.last_message + l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() + assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) + + # Test that we don't get disconnected if we relay a compact block with valid header, + # but invalid transactions. + def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit): + assert(len(self.utxos)) + utxo = self.utxos[0] + + block = self.build_block_with_transactions(node, utxo, 5) + del block.vtx[3] + block.hashMerkleRoot = block.calc_merkle_root() + if use_segwit: + # If we're testing with segwit, also drop the coinbase witness, + # but include the witness commitment. + add_witness_commitment(block) + block.vtx[0].wit.vtxinwit = [] + block.solve() + + # Now send the compact block with all transactions prefilled, and + # verify that we don't get disconnected. 
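+        # (Per BIP 152, prefilled transactions are index-plus-transaction
+        # pairs carried in full inside the cmpctblock message -- the indexes
+        # are differentially encoded on the wire -- so prefilling every index
+        # leaves the peer nothing to request via getblocktxn.)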
+        comp_block = HeaderAndShortIDs()
+        comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
+        msg = msg_cmpctblock(comp_block.to_p2p())
+        test_node.send_and_ping(msg)
+
+        # Check that the tip didn't advance
+        assert(int(node.getbestblockhash(), 16) != block.sha256)
+        test_node.sync_with_ping()
+
+    # Helper for enabling cb announcements
+    # Send the sendcmpct request and sync headers
+    def request_cb_announcements(self, peer, node, version):
+        tip = node.getbestblockhash()
+        peer.get_headers(locator=[int(tip, 16)], hashstop=0)
+
+        msg = msg_sendcmpct()
+        msg.version = version
+        msg.announce = True
+        peer.send_and_ping(msg)
+
+    def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
+        assert(len(self.utxos))
+
+        def announce_cmpct_block(node, peer):
+            utxo = self.utxos.pop(0)
+            block = self.build_block_with_transactions(node, utxo, 5)
+
+            cmpct_block = HeaderAndShortIDs()
+            cmpct_block.initialize_from_block(block)
+            msg = msg_cmpctblock(cmpct_block.to_p2p())
+            peer.send_and_ping(msg)
+            with mininode_lock:
+                assert "getblocktxn" in peer.last_message
+            return block, cmpct_block
+
+        block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+
+        for tx in block.vtx[1:]:
+            delivery_peer.send_message(msg_tx(tx))
+        delivery_peer.sync_with_ping()
+        mempool = node.getrawmempool()
+        for tx in block.vtx[1:]:
+            assert(tx.hash in mempool)
+
+        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+        # Now test that delivering an invalid compact block won't break relay
+
+        block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+        for tx in block.vtx[1:]:
+            delivery_peer.send_message(msg_tx(tx))
+        delivery_peer.sync_with_ping()
+
+        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
+        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
+
+        cmpct_block.use_witness = True
+        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+        assert(int(node.getbestblockhash(), 16) != block.sha256)
+
+        msg = msg_blocktxn()
+        msg.block_transactions.blockhash = block.sha256
+        msg.block_transactions.transactions = block.vtx[1:]
+        stalling_peer.send_and_ping(msg)
+        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+    def run_test(self):
+        # Setup the p2p connections and start up the network thread.
+        self.test_node = self.nodes[0].add_p2p_connection(TestNode())
+        self.segwit_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
+        self.old_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK)
+
+        network_thread_start()
+
+        self.test_node.wait_for_verack()
+
+        # We will need UTXOs to construct transactions in later tests.
+        self.make_utxos()
+
+        self.log.info("Running tests, pre-segwit activation:")
+
+        self.log.info("Testing SENDCMPCT p2p message... ")
+        self.test_sendcmpct(self.nodes[0], self.test_node, 1)
+        sync_blocks(self.nodes)
+        self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
+        sync_blocks(self.nodes)
+
+        self.log.info("Testing compactblock construction...")
+        self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
+        sync_blocks(self.nodes)
+        self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
+        sync_blocks(self.nodes)
+
+        self.log.info("Testing compactblock requests... 
") + self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False) + sync_blocks(self.nodes) + self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False) + sync_blocks(self.nodes) + + self.log.info("Testing getblocktxn requests...") + self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) + sync_blocks(self.nodes) + self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) + sync_blocks(self.nodes) + + self.log.info("Testing getblocktxn handler...") + self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) + sync_blocks(self.nodes) + self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) + self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) + sync_blocks(self.nodes) + + self.log.info("Testing compactblock requests/announcements not at chain tip...") + self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) + sync_blocks(self.nodes) + self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node) + self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) + sync_blocks(self.nodes) + + self.log.info("Testing handling of incorrect blocktxn responses...") + self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) + sync_blocks(self.nodes) + self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2) + sync_blocks(self.nodes) + + # End-to-end block relay tests + self.log.info("Testing end-to-end block relay...") + self.request_cb_announcements(self.test_node, self.nodes[0], 1) + self.request_cb_announcements(self.old_node, self.nodes[1], 1) + self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) + self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node]) + self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) + + self.log.info("Testing handling of invalid compact blocks...") + self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False) + self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False) + self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False) + + self.log.info("Testing reconstructing compact blocks from all peers...") + self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node) + sync_blocks(self.nodes) + + # Advance to segwit activation + self.log.info("Advancing to segwit activation") + self.activate_segwit(self.nodes[1]) + self.log.info("Running tests, post-segwit activation...") + + self.log.info("Testing compactblock construction...") + self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True) + self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True) + sync_blocks(self.nodes) + + self.log.info("Testing compactblock requests (unupgraded node)... ") + self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True) + + self.log.info("Testing getblocktxn requests (unupgraded node)...") + self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) + + # Need to manually sync node0 and node1, because post-segwit activation, + # node1 will not download blocks from node0. 
+ self.log.info("Syncing nodes...") + assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()) + while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()): + block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1) + self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False)) + assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) + + self.log.info("Testing compactblock requests (segwit node)... ") + self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True) + + self.log.info("Testing getblocktxn requests (segwit node)...") + self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) + sync_blocks(self.nodes) + + self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...") + self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) + self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) + + # Test that if we submitblock to node1, we'll get a compact block + # announcement to all peers. + # (Post-segwit activation, blocks won't propagate from node0 to node1 + # automatically, so don't bother testing a block announced to node0.) + self.log.info("Testing end-to-end block relay...") + self.request_cb_announcements(self.test_node, self.nodes[0], 1) + self.request_cb_announcements(self.old_node, self.nodes[1], 1) + self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) + self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) + + self.log.info("Testing handling of invalid compact blocks...") + self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False) + self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True) + self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True) + + self.log.info("Testing invalid index in cmpctblock message...") + self.test_invalid_cmpctblock_message() + + +if __name__ == '__main__': + CompactBlocksTest().main() diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py new file mode 100755 index 0000000000..c6067befb2 --- /dev/null +++ b/test/functional/p2p_disconnect_ban.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test node disconnect and ban behavior""" +import time + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + connect_nodes_bi, + wait_until, +) + +class DisconnectBanTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + + def run_test(self): + self.log.info("Test setban and listbanned RPCs") + + self.log.info("setban: successfully ban single IP address") + assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point + self.nodes[1].setban("127.0.0.1", "add") + wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10) + assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point + assert_equal(len(self.nodes[1].listbanned()), 1) + + self.log.info("clearbanned: successfully clear ban list") + self.nodes[1].clearbanned() + assert_equal(len(self.nodes[1].listbanned()), 0) + self.nodes[1].setban("127.0.0.0/24", "add") + + self.log.info("setban: fail to ban an already banned subnet") + assert_equal(len(self.nodes[1].listbanned()), 1) + assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add") + + self.log.info("setban: fail to ban an invalid subnet") + assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add") + assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24 + + self.log.info("setban remove: fail to unban a non-banned subnet") + assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove") + assert_equal(len(self.nodes[1].listbanned()), 1) + + self.log.info("setban remove: successfully unban subnet") + self.nodes[1].setban("127.0.0.0/24", "remove") + assert_equal(len(self.nodes[1].listbanned()), 0) + self.nodes[1].clearbanned() + assert_equal(len(self.nodes[1].listbanned()), 0) + + self.log.info("setban: test persistence across node restart") + self.nodes[1].setban("127.0.0.0/32", "add") + self.nodes[1].setban("127.0.0.0/24", "add") + # Set the mocktime so we can control when bans expire + old_time = int(time.time()) + self.nodes[1].setmocktime(old_time) + self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds + self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds + listBeforeShutdown = self.nodes[1].listbanned() + assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) + # Move time forward by 3 seconds so the third ban has expired + self.nodes[1].setmocktime(old_time + 3) + assert_equal(len(self.nodes[1].listbanned()), 3) + + self.stop_node(1) + self.start_node(1) + + listAfterShutdown = self.nodes[1].listbanned() + assert_equal("127.0.0.0/24", listAfterShutdown[0]['address']) + assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) + assert_equal("/19" in listAfterShutdown[2]['address'], True) + + # Clear ban lists + self.nodes[1].clearbanned() + connect_nodes_bi(self.nodes, 0, 1) + + self.log.info("Test disconnectnode RPCs") + + self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid") + address1 = self.nodes[0].getpeerinfo()[0]['addr'] + node1 = self.nodes[0].getpeerinfo()[0]['addr'] + assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1) + + self.log.info("disconnectnode: fail to 
disconnect when calling with junk address") + assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street") + + self.log.info("disconnectnode: successfully disconnect node by address") + address1 = self.nodes[0].getpeerinfo()[0]['addr'] + self.nodes[0].disconnectnode(address=address1) + wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) + assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] + + self.log.info("disconnectnode: successfully reconnect node") + connect_nodes_bi(self.nodes, 0, 1) # reconnect the node + assert_equal(len(self.nodes[0].getpeerinfo()), 2) + assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] + + self.log.info("disconnectnode: successfully disconnect node by node id") + id1 = self.nodes[0].getpeerinfo()[0]['id'] + self.nodes[0].disconnectnode(nodeid=id1) + wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) + assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1] + +if __name__ == '__main__': + DisconnectBanTest().main() diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py new file mode 100755 index 0000000000..47d9c55160 --- /dev/null +++ b/test/functional/p2p_feefilter.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test processing of feefilter messages.""" + +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +import time + + +def hashToHex(hash): + return format(hash, '064x') + +# Wait up to 60 secs to see if the testnode has received all the expected invs +def allInvsMatch(invsExpected, testnode): + for x in range(60): + with mininode_lock: + if (sorted(invsExpected) == sorted(testnode.txinvs)): + return True + time.sleep(1) + return False + +class TestNode(P2PInterface): + def __init__(self): + super().__init__() + self.txinvs = [] + + def on_inv(self, message): + for i in message.inv: + if (i.type == 1): + self.txinvs.append(hashToHex(i.hash)) + + def clear_invs(self): + with mininode_lock: + self.txinvs = [] + +class FeeFilterTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + + def run_test(self): + node1 = self.nodes[1] + node0 = self.nodes[0] + # Get out of IBD + node1.generate(1) + sync_blocks(self.nodes) + + # Setup the p2p connections and start up the network thread. 
+        self.nodes[0].add_p2p_connection(TestNode())
+        network_thread_start()
+        self.nodes[0].p2p.wait_for_verack()
+
+        # Test that invs are received for all txs at feerate of 20 sat/byte
+        node1.settxfee(Decimal("0.00020000"))
+        txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+        assert(allInvsMatch(txids, self.nodes[0].p2p))
+        self.nodes[0].p2p.clear_invs()
+
+        # Set a filter of 15 sat/byte
+        self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
+
+        # Test that txs are still being received (paying 20 sat/byte)
+        txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+        assert(allInvsMatch(txids, self.nodes[0].p2p))
+        self.nodes[0].p2p.clear_invs()
+
+        # Change tx fee rate to 10 sat/byte and test they are no longer received
+        node1.settxfee(Decimal("0.00010000"))
+        [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+        sync_mempools(self.nodes)  # must be sure node 0 has received all txs
+
+        # Send one transaction from node0 that should be received, so that
+        # we can sync the test on receipt (if node1's txs were relayed, they'd
+        # be received by the time this node0 tx is received). This is
+        # unfortunately reliant on the current relay behavior where we batch up
+        # to 35 entries in an inv, which means that when this next transaction
+        # is eligible for relay, the prior transactions from node1 are eligible
+        # as well.
+        node0.settxfee(Decimal("0.00020000"))
+        txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
+        assert(allInvsMatch(txids, self.nodes[0].p2p))
+        self.nodes[0].p2p.clear_invs()
+
+        # Remove fee filter and check that txs are received again
+        self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
+        txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+        assert(allInvsMatch(txids, self.nodes[0].p2p))
+        self.nodes[0].p2p.clear_invs()
+
+if __name__ == '__main__':
+    FeeFilterTest().main()
diff --git a/test/functional/p2p_fingerprint.py b/test/functional/p2p_fingerprint.py
new file mode 100755
index 0000000000..93ef73e25e
--- /dev/null
+++ b/test/functional/p2p_fingerprint.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test various fingerprinting protections.
+
+If a stale block more than a month old or its header is requested by a peer,
+the node should pretend that it does not have it to avoid fingerprinting.
+""" + +import time + +from test_framework.blocktools import (create_block, create_coinbase) +from test_framework.mininode import ( + CInv, + P2PInterface, + msg_headers, + msg_block, + msg_getdata, + msg_getheaders, + network_thread_start, + wait_until, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, +) + +class P2PFingerprintTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + # Build a chain of blocks on top of given one + def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time): + blocks = [] + for _ in range(nblocks): + coinbase = create_coinbase(prev_height + 1) + block_time = prev_median_time + 1 + block = create_block(int(prev_hash, 16), coinbase, block_time) + block.solve() + + blocks.append(block) + prev_hash = block.hash + prev_height += 1 + prev_median_time = block_time + return blocks + + # Send a getdata request for a given block hash + def send_block_request(self, block_hash, node): + msg = msg_getdata() + msg.inv.append(CInv(2, block_hash)) # 2 == "Block" + node.send_message(msg) + + # Send a getheaders request for a given single block hash + def send_header_request(self, block_hash, node): + msg = msg_getheaders() + msg.hashstop = block_hash + node.send_message(msg) + + # Check whether last block received from node has a given hash + def last_block_equals(self, expected_hash, node): + block_msg = node.last_message.get("block") + return block_msg and block_msg.block.rehash() == expected_hash + + # Check whether last block header received from node has a given hash + def last_header_equals(self, expected_hash, node): + headers_msg = node.last_message.get("headers") + return (headers_msg and + headers_msg.headers and + headers_msg.headers[0].rehash() == expected_hash) + + # Checks that stale blocks timestamped more than a month ago are not served + # by the node while recent stale blocks and old active chain blocks are. + # This does not currently test that stale blocks timestamped within the + # last month but that have over a month's worth of work are also withheld. 
+ def run_test(self): + node0 = self.nodes[0].add_p2p_connection(P2PInterface()) + + network_thread_start() + node0.wait_for_verack() + + # Set node time to 60 days ago + self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60) + + # Generating a chain of 10 blocks + block_hashes = self.nodes[0].generate(nblocks=10) + + # Create longer chain starting 2 blocks before current tip + height = len(block_hashes) - 2 + block_hash = block_hashes[height - 1] + block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1 + new_blocks = self.build_chain(5, block_hash, height, block_time) + + # Force reorg to a longer chain + node0.send_message(msg_headers(new_blocks)) + node0.wait_for_getdata() + for block in new_blocks: + node0.send_and_ping(msg_block(block)) + + # Check that reorg succeeded + assert_equal(self.nodes[0].getblockcount(), 13) + + stale_hash = int(block_hashes[-1], 16) + + # Check that getdata request for stale block succeeds + self.send_block_request(stale_hash, node0) + test_function = lambda: self.last_block_equals(stale_hash, node0) + wait_until(test_function, timeout=3) + + # Check that getheader request for stale block header succeeds + self.send_header_request(stale_hash, node0) + test_function = lambda: self.last_header_equals(stale_hash, node0) + wait_until(test_function, timeout=3) + + # Longest chain is extended so stale is much older than chain tip + self.nodes[0].setmocktime(0) + tip = self.nodes[0].generate(nblocks=1)[0] + assert_equal(self.nodes[0].getblockcount(), 14) + + # Send getdata & getheaders to refresh last received getheader message + block_hash = int(tip, 16) + self.send_block_request(block_hash, node0) + self.send_header_request(block_hash, node0) + node0.sync_with_ping() + + # Request for very old stale block should now fail + self.send_block_request(stale_hash, node0) + time.sleep(3) + assert not self.last_block_equals(stale_hash, node0) + + # Request for very old stale block header should now fail + self.send_header_request(stale_hash, node0) + time.sleep(3) + assert not self.last_header_equals(stale_hash, node0) + + # Verify we can fetch very old blocks and headers on the active chain + block_hash = int(block_hashes[2], 16) + self.send_block_request(block_hash, node0) + self.send_header_request(block_hash, node0) + node0.sync_with_ping() + + self.send_block_request(block_hash, node0) + test_function = lambda: self.last_block_equals(block_hash, node0) + wait_until(test_function, timeout=3) + + self.send_header_request(block_hash, node0) + test_function = lambda: self.last_header_equals(block_hash, node0) + wait_until(test_function, timeout=3) + +if __name__ == '__main__': + P2PFingerprintTest().main() diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py new file mode 100755 index 0000000000..edcade63c1 --- /dev/null +++ b/test/functional/p2p_invalid_block.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test node responses to invalid blocks. + +In this test we connect to one node over p2p, and test block requests: +1) Valid blocks should be requested and become chain tip. +2) Invalid block with duplicated transaction should be re-requested. +3) Invalid block with bad coinbase value should be rejected and not +re-requested. 
+""" + +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import * +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.blocktools import * +from test_framework.mininode import network_thread_start +import copy +import time + +# Use the ComparisonTestFramework with 1 node: only use --testbinary. +class InvalidBlockRequestTest(ComparisonTestFramework): + + ''' Can either run this test as 1 node with expected answers, or two and compare them. + Change the "outcome" variable from each TestInstance object to only do the comparison. ''' + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def run_test(self): + test = TestManager(self, self.options.tmpdir) + test.add_all_connections(self.nodes) + self.tip = None + self.block_time = None + network_thread_start() + test.run() + + def get_tests(self): + if self.tip is None: + self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) + self.block_time = int(time.time())+1 + + ''' + Create a new block with an anyone-can-spend coinbase + ''' + height = 1 + block = create_block(self.tip, create_coinbase(height), self.block_time) + self.block_time += 1 + block.solve() + # Save the coinbase for later + self.block1 = block + self.tip = block.sha256 + height += 1 + yield TestInstance([[block, True]]) + + ''' + Now we need that block to mature so we can spend the coinbase. + ''' + test = TestInstance(sync_every_block=False) + for i in range(100): + block = create_block(self.tip, create_coinbase(height), self.block_time) + block.solve() + self.tip = block.sha256 + self.block_time += 1 + test.blocks_and_transactions.append([block, True]) + height += 1 + yield test + + ''' + Now we use merkle-root malleability to generate an invalid block with + same blockheader. + Manufacture a block with 3 transactions (coinbase, spend of prior + coinbase, spend of that spend). Duplicate the 3rd transaction to + leave merkle root and blockheader unchanged but invalidate the block. + ''' + block2 = create_block(self.tip, create_coinbase(height), self.block_time) + self.block_time += 1 + + # b'0x51' is OP_TRUE + tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 50 * COIN) + tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN) + + block2.vtx.extend([tx1, tx2]) + block2.hashMerkleRoot = block2.calc_merkle_root() + block2.rehash() + block2.solve() + orig_hash = block2.sha256 + block2_orig = copy.deepcopy(block2) + + # Mutate block 2 + block2.vtx.append(tx2) + assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root()) + assert_equal(orig_hash, block2.rehash()) + assert(block2_orig.vtx != block2.vtx) + + self.tip = block2.sha256 + yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]]) + height += 1 + + ''' + Make sure that a totally screwed up block is not valid. + ''' + block3 = create_block(self.tip, create_coinbase(height), self.block_time) + self.block_time += 1 + block3.vtx[0].vout[0].nValue = 100 * COIN # Too high! 
+        block3.vtx[0].sha256 = None
+        block3.vtx[0].calc_sha256()
+        block3.hashMerkleRoot = block3.calc_merkle_root()
+        block3.rehash()
+        block3.solve()
+
+        yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]])
+
+
+if __name__ == '__main__':
+    InvalidBlockRequestTest().main()
diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py
new file mode 100755
index 0000000000..9c1100e070
--- /dev/null
+++ b/test/functional/p2p_invalid_tx.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test node responses to invalid transactions.
+
+In this test we connect to one node over p2p, and test tx requests.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.comptool import TestManager, TestInstance, RejectResult
+from test_framework.blocktools import *
+import time
+
+
+
+# Use the ComparisonTestFramework with 1 node: only use --testbinary.
+class InvalidTxRequestTest(ComparisonTestFramework):
+
+    ''' Can either run this test as 1 node with expected answers, or two and compare them.
+        Change the "outcome" variable from each TestInstance object to only do the comparison. '''
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.setup_clean_chain = True
+
+    def run_test(self):
+        test = TestManager(self, self.options.tmpdir)
+        test.add_all_connections(self.nodes)
+        self.tip = None
+        self.block_time = None
+        network_thread_start()
+        test.run()
+
+    def get_tests(self):
+        if self.tip is None:
+            self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+        self.block_time = int(time.time()) + 1
+
+        '''
+        Create a new block with an anyone-can-spend coinbase
+        '''
+        height = 1
+        block = create_block(self.tip, create_coinbase(height), self.block_time)
+        self.block_time += 1
+        block.solve()
+        # Save the coinbase for later
+        self.block1 = block
+        self.tip = block.sha256
+        height += 1
+        yield TestInstance([[block, True]])
+
+        '''
+        Now we need that block to mature so we can spend the coinbase.
+        '''
+        test = TestInstance(sync_every_block=False)
+        for i in range(100):
+            block = create_block(self.tip, create_coinbase(height), self.block_time)
+            block.solve()
+            self.tip = block.sha256
+            self.block_time += 1
+            test.blocks_and_transactions.append([block, True])
+            height += 1
+        yield test
+
+        # b'\x64' is OP_NOTIF
+        # Transaction will be rejected with code 16 (REJECT_INVALID)
+        tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
+        yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])
+
+        # TODO: test further transactions...
+
+if __name__ == '__main__':
+    InvalidTxRequestTest().main()
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
new file mode 100755
index 0000000000..ce4e6e9144
--- /dev/null
+++ b/test/functional/p2p_leak.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test message sending before handshake completion.
+
+A node should never send anything other than VERSION/VERACK/REJECT until it's
+received a VERACK.
+
+This test connects to a node and sends it a few messages, trying to entice it
+into sending us something it shouldn't.
+
+Also test that nodes that send unsupported service bits to bitcoind are disconnected
+and don't receive a VERACK. Unsupported service bits are currently 1 << 5 and
+1 << 7 (until August 1st 2018)."""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+banscore = 10
+
+class CLazyNode(P2PInterface):
+    def __init__(self):
+        super().__init__()
+        self.unexpected_msg = False
+        self.ever_connected = False
+
+    def bad_message(self, message):
+        self.unexpected_msg = True
+        self.log.info("should not have received message: %s" % message.command)
+
+    def on_open(self):
+        self.ever_connected = True
+
+    def on_version(self, message): self.bad_message(message)
+    def on_verack(self, message): self.bad_message(message)
+    def on_reject(self, message): self.bad_message(message)
+    def on_inv(self, message): self.bad_message(message)
+    def on_addr(self, message): self.bad_message(message)
+    def on_getdata(self, message): self.bad_message(message)
+    def on_getblocks(self, message): self.bad_message(message)
+    def on_tx(self, message): self.bad_message(message)
+    def on_block(self, message): self.bad_message(message)
+    def on_getaddr(self, message): self.bad_message(message)
+    def on_headers(self, message): self.bad_message(message)
+    def on_getheaders(self, message): self.bad_message(message)
+    def on_ping(self, message): self.bad_message(message)
+    def on_mempool(self, message): self.bad_message(message)
+    def on_pong(self, message): self.bad_message(message)
+    def on_feefilter(self, message): self.bad_message(message)
+    def on_sendheaders(self, message): self.bad_message(message)
+    def on_sendcmpct(self, message): self.bad_message(message)
+    def on_cmpctblock(self, message): self.bad_message(message)
+    def on_getblocktxn(self, message): self.bad_message(message)
+    def on_blocktxn(self, message): self.bad_message(message)
+
+# Node that never sends a version. We'll use this to send a bunch of messages
+# anyway, and eventually get disconnected.
+class CNodeNoVersionBan(CLazyNode):
+    # Send a bunch of veracks without first sending a version. This should
+    # get us disconnected.
+    # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
+    def on_open(self):
+        super().on_open()
+        for i in range(banscore):
+            self.send_message(msg_verack())
+
+    def on_reject(self, message): pass
+
+# Node that never sends a version. This one just sits idle and hopes to receive
+# any message (it shouldn't!)
+class CNodeNoVersionIdle(CLazyNode):
+    def __init__(self):
+        super().__init__()
+
+# Node that sends a version but not a verack.
+class CNodeNoVerackIdle(CLazyNode):
+    def __init__(self):
+        self.version_received = False
+        super().__init__()
+
+    def on_reject(self, message): pass
+    def on_verack(self, message): pass
+    # When version is received, don't reply with a verack. Instead, see if the
+    # node will give us a message that it shouldn't. This is not an exhaustive
+    # list!
+ def on_version(self, message): + self.version_received = True + self.send_message(msg_ping()) + self.send_message(msg_getaddr()) + +class P2PLeakTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [['-banscore='+str(banscore)]] + + def run_test(self): + self.nodes[0].setmocktime(1501545600) # August 1st 2017 + + no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False) + no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False) + no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle()) + unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5) + unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7) + + network_thread_start() + + wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock) + wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock) + wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock) + wait_until(lambda: unsupported_service_bit5_node.ever_connected, timeout=10, lock=mininode_lock) + wait_until(lambda: unsupported_service_bit7_node.ever_connected, timeout=10, lock=mininode_lock) + + # Mine a block and make sure that it's not sent to the connected nodes + self.nodes[0].generate(1) + + #Give the node enough time to possibly leak out a message + time.sleep(5) + + #This node should have been banned + assert no_version_bannode.state != "connected" + + # These nodes should have been disconnected + assert unsupported_service_bit5_node.state != "connected" + assert unsupported_service_bit7_node.state != "connected" + + self.nodes[0].disconnect_p2ps() + + # Wait until all connections are closed and the network thread has terminated + wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0) + network_thread_join() + + # Make sure no unexpected messages came in + assert(no_version_bannode.unexpected_msg == False) + assert(no_version_idlenode.unexpected_msg == False) + assert(no_verack_idlenode.unexpected_msg == False) + assert not unsupported_service_bit5_node.unexpected_msg + assert not unsupported_service_bit7_node.unexpected_msg + + self.log.info("Service bits 5 and 7 are allowed after August 1st 2018") + self.nodes[0].setmocktime(1533168000) # August 2nd 2018 + + allowed_service_bit5_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5) + allowed_service_bit7_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7) + + # Network thread stopped when all previous P2PInterfaces disconnected. Restart it + network_thread_start() + + wait_until(lambda: allowed_service_bit5_node.message_count["verack"], lock=mininode_lock) + wait_until(lambda: allowed_service_bit7_node.message_count["verack"], lock=mininode_lock) + +if __name__ == '__main__': + P2PLeakTest().main() diff --git a/test/functional/p2p_mempool.py b/test/functional/p2p_mempool.py new file mode 100755 index 0000000000..485a8af3d0 --- /dev/null +++ b/test/functional/p2p_mempool.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test p2p mempool message. 
+ +Test that nodes are disconnected if they send mempool messages when bloom +filters are not enabled. +""" + +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class P2PMempoolTests(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [["-peerbloomfilters=0"]] + + def run_test(self): + # Add a p2p connection + self.nodes[0].add_p2p_connection(P2PInterface()) + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + #request mempool + self.nodes[0].p2p.send_message(msg_mempool()) + self.nodes[0].p2p.wait_for_disconnect() + + #mininode must be disconnected at this point + assert_equal(len(self.nodes[0].getpeerinfo()), 0) + +if __name__ == '__main__': + P2PMempoolTests().main() diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py new file mode 100755 index 0000000000..70415e0168 --- /dev/null +++ b/test/functional/p2p_node_network_limited.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Tests NODE_NETWORK_LIMITED. + +Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly +and that it responds to getdata requests for blocks correctly: + - send a block within 288 + 2 of the tip + - disconnect peers who request blocks older than that.""" +from test_framework.messages import CInv, msg_getdata +from test_framework.mininode import NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS, NetworkThread, P2PInterface +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class P2PIgnoreInv(P2PInterface): + def on_inv(self, message): + # The node will send us invs for other blocks. Ignore them. 
+        pass
+
+    def send_getdata_for_block(self, blockhash):
+        getdata_request = msg_getdata()
+        getdata_request.inv.append(CInv(2, int(blockhash, 16)))
+        self.send_message(getdata_request)
+
+class NodeNetworkLimitedTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+        self.extra_args = [['-prune=550']]
+
+    def run_test(self):
+        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
+        NetworkThread().start()
+        node.wait_for_verack()
+
+        expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
+
+        self.log.info("Check that node has signalled expected services.")
+        assert_equal(node.nServices, expected_services)
+
+        self.log.info("Check that the localservices field is as expected.")
+        assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
+
+        self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
+        blocks = self.nodes[0].generate(292)
+
+        self.log.info("Make sure we can retrieve the block at tip-288 (the limit).")
+        node.send_getdata_for_block(blocks[1])  # last block in valid range
+        node.wait_for_block(int(blocks[1], 16), timeout=3)
+
+        self.log.info("Requesting block at height 2 (tip-289) must fail (peer gets disconnected).")
+        node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
+        node.wait_for_disconnect(5)
+
+if __name__ == '__main__':
+    NodeNetworkLimitedTest().main()
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
new file mode 100755
index 0000000000..20e4805df0
--- /dev/null
+++ b/test/functional/p2p_segwit.py
@@ -0,0 +1,1952 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test segwit transactions and blocks on P2P network."""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import *
+from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
+from test_framework.key import CECKey, CPubKey
+import time
+import random
+from binascii import hexlify
+
+# The versionbits bit used to signal activation of SegWit
+VB_WITNESS_BIT = 1
+VB_PERIOD = 144
+VB_TOP_BITS = 0x20000000
+
+MAX_SIGOP_COST = 80000
+
+
+# Calculate the virtual size of a witness block:
+# (base + witness/4)
+def get_virtual_size(witness_block):
+    base_size = len(witness_block.serialize(with_witness=False))
+    total_size = len(witness_block.serialize(with_witness=True))
+    # the "+3" is so we round up
+    vsize = int((3 * base_size + total_size + 3) / 4)
+    return vsize
+
+def test_transaction_acceptance(rpc, p2p, tx, with_witness, accepted, reason=None):
+    """Send a transaction to the node and check that it's accepted to the mempool
+
+    - Submit the transaction over the p2p interface
+    - use the getrawmempool rpc to check for acceptance."""
+    tx_message = msg_tx(tx)
+    if with_witness:
+        tx_message = msg_witness_tx(tx)
+    p2p.send_message(tx_message)
+    p2p.sync_with_ping()
+    assert_equal(tx.hash in rpc.getrawmempool(), accepted)
+    if (reason is not None and not accepted):
+        # Check the rejection reason as well.
+ with mininode_lock: + assert_equal(p2p.last_message["reject"].reason, reason) + +def test_witness_block(rpc, p2p, block, accepted, with_witness=True): + """Send a block to the node and check that it's accepted + + - Submit the block over the p2p interface + - use the getbestblockhash rpc to check for acceptance.""" + if with_witness: + p2p.send_message(msg_witness_block(block)) + else: + p2p.send_message(msg_block(block)) + p2p.sync_with_ping() + assert_equal(rpc.getbestblockhash() == block.hash, accepted) + +class TestNode(P2PInterface): + def __init__(self): + super().__init__() + self.getdataset = set() + + def on_getdata(self, message): + for inv in message.inv: + self.getdataset.add(inv.hash) + + def announce_tx_and_wait_for_getdata(self, tx, timeout=60): + with mininode_lock: + self.last_message.pop("getdata", None) + self.send_message(msg_inv(inv=[CInv(1, tx.sha256)])) + self.wait_for_getdata(timeout) + + def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): + with mininode_lock: + self.last_message.pop("getdata", None) + self.last_message.pop("getheaders", None) + msg = msg_headers() + msg.headers = [ CBlockHeader(block) ] + if use_header: + self.send_message(msg) + else: + self.send_message(msg_inv(inv=[CInv(2, block.sha256)])) + self.wait_for_getheaders() + self.send_message(msg) + self.wait_for_getdata() + + def request_block(self, blockhash, inv_type, timeout=60): + with mininode_lock: + self.last_message.pop("block", None) + self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) + self.wait_for_block(blockhash, timeout) + return self.last_message["block"].block + +# Used to keep track of anyone-can-spend outputs that we can use in the tests +class UTXO(): + def __init__(self, sha256, n, nValue): + self.sha256 = sha256 + self.n = n + self.nValue = nValue + +# Helper for getting the script associated with a P2PKH +def GetP2PKHScript(pubkeyhash): + return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]) + +# Add signature for a P2PK witness program. +def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key): + tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value) + signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1') + txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script] + txTo.rehash() + + +class SegWitTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. + self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]] + + def setup_network(self): + self.setup_nodes() + connect_nodes(self.nodes[0], 1) + connect_nodes(self.nodes[0], 2) + self.sync_all() + + ''' Helpers ''' + # Build a block on top of node0's tip. + def build_next_block(self, nVersion=4): + tip = self.nodes[0].getbestblockhash() + height = self.nodes[0].getblockcount() + 1 + block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 + block = create_block(int(tip, 16), create_coinbase(height), block_time) + block.nVersion = nVersion + block.rehash() + return block + + # Adds list of transactions to block, adds witness commitment, then solves. 
+ def update_witness_block_with_transactions(self, block, tx_list, nonce=0): + block.vtx.extend(tx_list) + add_witness_commitment(block, nonce) + block.solve() + return + + ''' Individual tests ''' + def test_witness_services(self): + self.log.info("Verifying NODE_WITNESS service bit") + assert((self.test_node.nServices & NODE_WITNESS) != 0) + + + # See if sending a regular transaction works, and create a utxo + # to use in later tests. + def test_non_witness_transaction(self): + # Mine a block with an anyone-can-spend coinbase, + # let it mature, then try to spend it. + self.log.info("Testing non-witness transaction") + block = self.build_next_block(nVersion=1) + block.solve() + self.test_node.send_message(msg_block(block)) + self.test_node.sync_with_ping() # make sure the block was processed + txid = block.vtx[0].sha256 + + self.nodes[0].generate(99) # let the block mature + + # Create a transaction that spends the coinbase + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(txid, 0), b"")) + tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE]))) + tx.calc_sha256() + + # Check that serializing it with or without witness is the same + # This is a sanity check of our testing framework. + assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize()) + + self.test_node.send_message(msg_witness_tx(tx)) + self.test_node.sync_with_ping() # make sure the tx was processed + assert(tx.hash in self.nodes[0].getrawmempool()) + # Save this transaction for later + self.utxo.append(UTXO(tx.sha256, 0, 49*100000000)) + self.nodes[0].generate(1) + + + # Verify that blocks with witnesses are rejected before activation. + def test_unnecessary_witness_before_segwit_activation(self): + self.log.info("Testing behavior of unnecessary witnesses") + # For now, rely on earlier tests to have created at least one utxo for + # us to use + assert(len(self.utxo) > 0) + assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active') + + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) + tx.wit.vtxinwit.append(CTxInWitness()) + tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])] + + # Verify the hash with witness differs from the txid + # (otherwise our testing framework must be broken!) + tx.rehash() + assert(tx.sha256 != tx.calc_sha256(with_witness=True)) + + # Construct a segwit-signaling block that includes the transaction. + block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT))) + self.update_witness_block_with_transactions(block, [tx]) + # Sending witness data before activation is not allowed (anti-spam + # rule). + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + # TODO: fix synchronization so we can test reject reason + # Right now, bitcoind delays sending reject messages for blocks + # until the future, making synchronization here difficult. + #assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness") + + # But it should not be permanently marked bad... + # Resend without witness information. + self.test_node.send_message(msg_block(block)) + self.test_node.sync_with_ping() + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + sync_blocks(self.nodes) + + # Create a p2sh output -- this is so we can pass the standardness + # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped + # in P2SH). 
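+        # (For reference, the standard P2SH form built here is
+        #     scriptPubKey: OP_HASH160 <hash160(redeemScript)> OP_EQUAL
+        # with redeemScript = OP_TRUE.)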
+ p2sh_program = CScript([OP_TRUE]) + p2sh_pubkey = hash160(p2sh_program) + scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) + + # Now check that unnecessary witnesses can't be used to blind a node + # to a transaction, eg by violating standardness checks. + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey)) + tx2.rehash() + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + # We'll add an unnecessary witness to this transaction that would cause + # it to be non-standard, to test that violating policy with a witness before + # segwit activation doesn't blind a node to a transaction. Transactions + # rejected for having a witness before segwit activation shouldn't be added + # to the rejection cache. + tx3 = CTransaction() + tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program]))) + tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey)) + tx3.wit.vtxinwit.append(CTxInWitness()) + tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000] + tx3.rehash() + # Note that this should be rejected for the premature witness reason, + # rather than a policy check, since segwit hasn't activated yet. + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'no-witness-yet') + + # If we send without witness, it should be accepted. + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, False, True) + + # Now create a new anyone-can-spend utxo for the next test. + tx4 = CTransaction() + tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program]))) + tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE]))) + tx4.rehash() + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True) + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, False, True) + + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + # Update our utxo list; we spent the first entry. + self.utxo.pop(0) + self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue)) + + + # Mine enough blocks for segwit's vb state to be 'started'. + def advance_to_segwit_started(self): + height = self.nodes[0].getblockcount() + # Will need to rewrite the tests here if we are past the first period + assert(height < VB_PERIOD - 1) + # Genesis block is 'defined'. + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined') + # Advance to end of period, status should now be 'started' + self.nodes[0].generate(VB_PERIOD-height-1) + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') + + # Mine enough blocks to lock in segwit, but don't activate. + # TODO: we could verify that lockin only happens at the right threshold of + # signalling blocks, rather than just at the right period boundary. + def advance_to_segwit_lockin(self): + height = self.nodes[0].getblockcount() + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') + # Advance to end of period, and verify lock-in happens at the end + self.nodes[0].generate(VB_PERIOD-1) + height = self.nodes[0].getblockcount() + assert((height % VB_PERIOD) == VB_PERIOD - 2) + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') + self.nodes[0].generate(1) + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') + + + # Mine enough blocks to activate segwit. 
+ # TODO: we could verify that activation only happens at the right threshold + # of signalling blocks, rather than just at the right period boundary. + def advance_to_segwit_active(self): + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') + height = self.nodes[0].getblockcount() + self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2) + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') + self.nodes[0].generate(1) + assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active') + + + # This test can only be run after segwit has activated + def test_witness_commitments(self): + self.log.info("Testing witness commitments") + + # First try a correct witness commitment. + block = self.build_next_block() + add_witness_commitment(block) + block.solve() + + # Test the test -- witness serialization should be different + assert(msg_witness_block(block).serialize() != msg_block(block).serialize()) + + # This empty block should be valid. + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Try to tweak the nonce + block_2 = self.build_next_block() + add_witness_commitment(block_2, nonce=28) + block_2.solve() + + # The commitment should have changed! + assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]) + + # This should also be valid. + test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=True) + + # Now test commitments with actual transactions + assert (len(self.utxo) > 0) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + + # Let's construct a witness program + witness_program = CScript([OP_TRUE]) + witness_hash = sha256(witness_program) + scriptPubKey = CScript([OP_0, witness_hash]) + tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) + tx.rehash() + + # tx2 will spend tx1, and send back to a regular anyone-can-spend address + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) + tx2.wit.vtxinwit.append(CTxInWitness()) + tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx2.rehash() + + block_3 = self.build_next_block() + self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1) + # Add an extra OP_RETURN output that matches the witness commitment template, + # even though it has extra data after the incorrect commitment. + # This block should fail. + block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10]))) + block_3.vtx[0].rehash() + block_3.hashMerkleRoot = block_3.calc_merkle_root() + block_3.rehash() + block_3.solve() + + test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False) + + # Add a different commitment with different nonce, but in the + # right location, and with some funds burned(!). + # This should succeed (nValue shouldn't affect finding the + # witness commitment). + add_witness_commitment(block_3, nonce=0) + block_3.vtx[0].vout[0].nValue -= 1 + block_3.vtx[0].vout[-1].nValue += 1 + block_3.vtx[0].rehash() + block_3.hashMerkleRoot = block_3.calc_merkle_root() + block_3.rehash() + assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns + block_3.solve() + test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=True) + + # Finally test that a block with no witness transactions can + # omit the commitment. 
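+        # (Per BIP141 the commitment is only required when a block contains
+        # transactions with witness data, so tx3 below is kept witness-free.)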
+        block_4 = self.build_next_block()
+        tx3 = CTransaction()
+        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
+        tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
+        tx3.rehash()
+        block_4.vtx.append(tx3)
+        block_4.hashMerkleRoot = block_4.calc_merkle_root()
+        block_4.solve()
+        test_witness_block(self.nodes[0].rpc, self.test_node, block_4, with_witness=False, accepted=True)
+
+        # Update available utxos for use in a later test.
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+
+    def test_block_malleability(self):
+        self.log.info("Testing witness block malleability")
+
+        # Make sure that a block that has too big a virtual size
+        # because of a too-large coinbase witness is not permanently
+        # marked bad.
+        block = self.build_next_block()
+        add_witness_commitment(block)
+        block.solve()
+
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
+        assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
+
+        # We can't send over the p2p network, because this is too big to relay
+        # TODO: repeat this test with a block that can be relayed
+        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
+
+        assert(self.nodes[0].getbestblockhash() != block.hash)
+
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
+        assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
+        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
+
+        assert(self.nodes[0].getbestblockhash() == block.hash)
+
+        # Now make sure that malleating the witness nonce doesn't
+        # result in a block permanently marked bad.
+        block = self.build_next_block()
+        add_witness_commitment(block)
+        block.solve()
+
+        # Change the nonce -- should not cause the block to be permanently
+        # failed
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
+        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
+
+        # Changing the witness nonce doesn't change the block hash
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
+        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
+
+
+    def test_witness_block_size(self):
+        self.log.info("Testing witness block size limit")
+        # TODO: Test that non-witness carrying blocks can't exceed 1MB
+        # Skipping this test for now; this is covered in feature_block.py
+
+        # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
+        block = self.build_next_block()
+
+        assert(len(self.utxo) > 0)
+
+        # Create a P2WSH transaction.
+        # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
+        # This should give us plenty of room to tweak the spending tx's
+        # virtual size.
+        NUM_DROPS = 200 # 201 max ops per script!
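+        # (Virtual size is ceil(weight / 4), where weight = 3 * base_size +
+        # total_size, so each witness byte only adds 1/4 to the vsize.)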
+ NUM_OUTPUTS = 50 + + witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE]) + witness_hash = uint256_from_str(sha256(witness_program)) + scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)]) + + prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) + value = self.utxo[0].nValue + + parent_tx = CTransaction() + parent_tx.vin.append(CTxIn(prevout, b"")) + child_value = int(value/NUM_OUTPUTS) + for i in range(NUM_OUTPUTS): + parent_tx.vout.append(CTxOut(child_value, scriptPubKey)) + parent_tx.vout[0].nValue -= 50000 + assert(parent_tx.vout[0].nValue > 0) + parent_tx.rehash() + + child_tx = CTransaction() + for i in range(NUM_OUTPUTS): + child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) + child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] + for i in range(NUM_OUTPUTS): + child_tx.wit.vtxinwit.append(CTxInWitness()) + child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program] + child_tx.rehash() + self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) + + vsize = get_virtual_size(block) + additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4 + i = 0 + while additional_bytes > 0: + # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1 + extra_bytes = min(additional_bytes+1, 55) + block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes) + additional_bytes -= extra_bytes + i += 1 + + block.vtx[0].vout.pop() # Remove old commitment + add_witness_commitment(block) + block.solve() + vsize = get_virtual_size(block) + assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1) + # Make sure that our test case would exceed the old max-network-message + # limit + assert(len(block.serialize(True)) > 2*1024*1024) + + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Now resize the second transaction to make the block fit. + cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0]) + block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1) + block.vtx[0].vout.pop() + add_witness_commitment(block) + block.solve() + assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE) + + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Update available utxo's + self.utxo.pop(0) + self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue)) + + + # submitblock will try to add the nonce automatically, so that mining + # software doesn't need to worry about doing so itself. + def test_submit_block(self): + block = self.build_next_block() + + # Try using a custom nonce and then don't supply it. + # This shouldn't possibly work. + add_witness_commitment(block, nonce=1) + block.vtx[0].wit = CTxWitness() # drop the nonce + block.solve() + self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) + assert(self.nodes[0].getbestblockhash() != block.hash) + + # Now redo commitment with the standard nonce, but let bitcoind fill it in. + add_witness_commitment(block, nonce=0) + block.vtx[0].wit = CTxWitness() + block.solve() + self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + # This time, add a tx with non-empty witness, but don't supply + # the commitment. + block_2 = self.build_next_block() + + add_witness_commitment(block_2) + + block_2.solve() + + # Drop commitment and nonce -- submitblock should not fill in. 
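+        # (submitblock regenerates a default witness nonce, but it won't
+        # reconstruct a commitment output that has been removed.)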
+ block_2.vtx[0].vout.pop() + block_2.vtx[0].wit = CTxWitness() + + self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True))) + # Tip should not advance! + assert(self.nodes[0].getbestblockhash() != block_2.hash) + + + # Consensus tests of extra witness data in a transaction. + def test_extra_witness_data(self): + self.log.info("Testing extra witness data in tx") + + assert(len(self.utxo) > 0) + + block = self.build_next_block() + + witness_program = CScript([OP_DROP, OP_TRUE]) + witness_hash = sha256(witness_program) + scriptPubKey = CScript([OP_0, witness_hash]) + + # First try extra witness data on a tx that doesn't require a witness + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey)) + tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output + tx.wit.vtxinwit.append(CTxInWitness()) + tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])] + tx.rehash() + self.update_witness_block_with_transactions(block, [tx]) + + # Extra witness data should not be allowed. + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Try extra signature data. Ok if we're not spending a witness output. + block.vtx[1].wit.vtxinwit = [] + block.vtx[1].vin[0].scriptSig = CScript([OP_0]) + block.vtx[1].rehash() + add_witness_commitment(block) + block.solve() + + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Now try extra witness/signature data on an input that DOES require a + # witness + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness + tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) + tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) + tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ] + tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ] + + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx2]) + + # This has extra witness data, so it should fail. + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Now get rid of the extra witness, but add extra scriptSig data + tx2.vin[0].scriptSig = CScript([OP_TRUE]) + tx2.vin[1].scriptSig = CScript([OP_TRUE]) + tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0) + tx2.wit.vtxinwit[1].scriptWitness.stack = [] + tx2.rehash() + add_witness_commitment(block) + block.solve() + + # This has extra signature data for a witness input, so it should fail. 
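+        # (BIP141 requires an empty scriptSig on native witness inputs; only
+        # P2SH-wrapped programs carry a scriptSig, and then only the redeem
+        # script push.)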
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Now get rid of the extra scriptsig on the witness input, and verify + # success (even with extra scriptsig data in the non-witness input) + tx2.vin[0].scriptSig = b"" + tx2.rehash() + add_witness_commitment(block) + block.solve() + + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Update utxo for later tests + self.utxo.pop(0) + self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + + + def test_max_witness_push_length(self): + ''' Should only allow up to 520 byte pushes in witness stack ''' + self.log.info("Testing maximum witness push size") + MAX_SCRIPT_ELEMENT_SIZE = 520 + assert(len(self.utxo)) + + block = self.build_next_block() + + witness_program = CScript([OP_DROP, OP_TRUE]) + witness_hash = sha256(witness_program) + scriptPubKey = CScript([OP_0, witness_hash]) + + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) + tx.rehash() + + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) + tx2.wit.vtxinwit.append(CTxInWitness()) + # First try a 521-byte stack element + tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ] + tx2.rehash() + + self.update_witness_block_with_transactions(block, [tx, tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Now reduce the length of the stack element + tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE) + + add_witness_commitment(block) + block.solve() + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Update the utxo for later tests + self.utxo.pop() + self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + + def test_max_witness_program_length(self): + # Can create witness outputs that are long, but can't be greater than + # 10k bytes to successfully spend + self.log.info("Testing maximum witness program length") + assert(len(self.utxo)) + MAX_PROGRAM_LENGTH = 10000 + + # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes. 
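+        # (19 * (520 + 3 bytes of PUSHDATA2 overhead) = 9937; 63 OP_DROPs plus
+        # OP_TRUE bring it to 10001 bytes, one over MAX_PROGRAM_LENGTH.)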
+ long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE]) + assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1) + long_witness_hash = sha256(long_witness_program) + long_scriptPubKey = CScript([OP_0, long_witness_hash]) + + block = self.build_next_block() + + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey)) + tx.rehash() + + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) + tx2.wit.vtxinwit.append(CTxInWitness()) + tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program] + tx2.rehash() + + self.update_witness_block_with_transactions(block, [tx, tx2]) + + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Try again with one less byte in the witness program + witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE]) + assert(len(witness_program) == MAX_PROGRAM_LENGTH) + witness_hash = sha256(witness_program) + scriptPubKey = CScript([OP_0, witness_hash]) + + tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey) + tx.rehash() + tx2.vin[0].prevout.hash = tx.sha256 + tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program] + tx2.rehash() + block.vtx = [block.vtx[0]] + self.update_witness_block_with_transactions(block, [tx, tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + self.utxo.pop() + self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + + + def test_witness_input_length(self): + ''' Ensure that vin length must match vtxinwit length ''' + self.log.info("Testing witness input length") + assert(len(self.utxo)) + + witness_program = CScript([OP_DROP, OP_TRUE]) + witness_hash = sha256(witness_program) + scriptPubKey = CScript([OP_0, witness_hash]) + + # Create a transaction that splits our utxo into many outputs + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + nValue = self.utxo[0].nValue + for i in range(10): + tx.vout.append(CTxOut(int(nValue/10), scriptPubKey)) + tx.vout[0].nValue -= 1000 + assert(tx.vout[0].nValue >= 0) + + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Try various ways to spend tx that should all break. + # This "broken" transaction serializer will not normalize + # the length of vtxinwit. 
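+        # (The stock serializer pads vtxinwit with empty witnesses so its
+        # length always matches vin; skipping that lets us put too few or too
+        # many witness entries on the wire.)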
+        class BrokenCTransaction(CTransaction):
+            def serialize_with_witness(self):
+                flags = 0
+                if not self.wit.is_null():
+                    flags |= 1
+                r = b""
+                r += struct.pack("<i", self.nVersion)
+                if flags:
+                    dummy = []
+                    r += ser_vector(dummy)
+                    r += struct.pack("<B", flags)
+                r += ser_vector(self.vin)
+                r += ser_vector(self.vout)
+                if flags & 1:
+                    r += self.wit.serialize()
+                r += struct.pack("<I", self.nLockTime)
+                return r
+
+        # Finally, verify that version 0 -> version 1 transactions
+        # are non-standard
+        scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
+        tx2 = CTransaction()
+        tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+        tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
+        tx2.rehash()
+        # Gets accepted to test_node, because standardness of outputs isn't
+        # checked with fRequireStandard
+        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True)
+        test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=False)
+        temp_utxo.pop() # last entry in temp_utxo was the output we just spent
+        temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+        # Spend everything in temp_utxo back to an OP_TRUE output.
+        tx3 = CTransaction()
+        total_value = 0
+        for i in temp_utxo:
+            tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
+            tx3.wit.vtxinwit.append(CTxInWitness())
+            total_value += i.nValue
+        tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
+        tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
+        tx3.rehash()
+        # Spending a higher version witness output is not allowed by policy,
+        # even with fRequireStandard=false.
+        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False)
+        self.test_node.sync_with_ping()
+        with mininode_lock:
+            assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
+
+        # Building a block with the transaction must be valid, however.
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx2, tx3])
+        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
+        sync_blocks(self.nodes)
+
+        # Add utxo to our list
+        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+
+    def test_premature_coinbase_witness_spend(self):
+        self.log.info("Testing premature coinbase witness spend")
+        block = self.build_next_block()
+        # Change the output of the block to be a witness output.
+        witness_program = CScript([OP_TRUE])
+        witness_hash = sha256(witness_program)
+        scriptPubKey = CScript([OP_0, witness_hash])
+        block.vtx[0].vout[0].scriptPubKey = scriptPubKey
+        # This next line will rehash the coinbase and update the merkle
+        # root, and solve.
+        self.update_witness_block_with_transactions(block, [])
+        test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
+
+        spend_tx = CTransaction()
+        spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
+        spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
+        spend_tx.wit.vtxinwit.append(CTxInWitness())
+        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
+        spend_tx.rehash()
+
+        # Now test a premature spend.
+        self.nodes[0].generate(98)
+        sync_blocks(self.nodes)
+        block2 = self.build_next_block()
+        self.update_witness_block_with_transactions(block2, [spend_tx])
+        test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=False)
+
+        # Advancing one more block should allow the spend.
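+        # (Coinbase outputs need COINBASE_MATURITY (100) confirmations; the
+        # attempt above was exactly one block short.)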
+ self.nodes[0].generate(1) + block2 = self.build_next_block() + self.update_witness_block_with_transactions(block2, [spend_tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=True) + sync_blocks(self.nodes) + + + def test_signature_version_1(self): + self.log.info("Testing segwit signature hash version 1") + key = CECKey() + key.set_secretbytes(b"9") + pubkey = CPubKey(key.get_pubkey()) + + witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) + witness_hash = sha256(witness_program) + scriptPubKey = CScript([OP_0, witness_hash]) + + # First create a witness output for use in the tests. + assert(len(self.utxo)) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) + tx.rehash() + + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True) + # Mine this transaction in preparation for following tests. + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + sync_blocks(self.nodes) + self.utxo.pop(0) + + # Test each hashtype + prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) + for sigflag in [ 0, SIGHASH_ANYONECANPAY ]: + for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]: + hashtype |= sigflag + block = self.build_next_block() + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) + tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey)) + tx.wit.vtxinwit.append(CTxInWitness()) + # Too-large input value + sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key) + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Too-small input value + sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key) + block.vtx.pop() # remove last tx + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Now try correct value + sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) + block.vtx.pop() + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) + + # Test combinations of signature hashes. + # Split the utxo into a lot of outputs. + # Randomly choose up to 10 to spend, sign with different hashtypes, and + # output to a random number of outputs. Repeat NUM_TESTS times. + # Ensure that we've tested a situation where we use SIGHASH_SINGLE with + # an input index > number of outputs. 
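+        # (The legacy sighash quirkily signs the constant 1 in that case;
+        # BIP143 instead hashes a zeroed outputs field, so the edge case is
+        # worth exercising under the new rules.)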
+ NUM_TESTS = 500 + temp_utxos = [] + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) + split_value = prev_utxo.nValue // NUM_TESTS + for i in range(NUM_TESTS): + tx.vout.append(CTxOut(split_value, scriptPubKey)) + tx.wit.vtxinwit.append(CTxInWitness()) + sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) + for i in range(NUM_TESTS): + temp_utxos.append(UTXO(tx.sha256, i, split_value)) + + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + block = self.build_next_block() + used_sighash_single_out_of_bounds = False + for i in range(NUM_TESTS): + # Ping regularly to keep the connection alive + if (not i % 100): + self.test_node.sync_with_ping() + # Choose random number of inputs to use. + num_inputs = random.randint(1, 10) + # Create a slight bias for producing more utxos + num_outputs = random.randint(1, 11) + random.shuffle(temp_utxos) + assert(len(temp_utxos) > num_inputs) + tx = CTransaction() + total_value = 0 + for i in range(num_inputs): + tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b"")) + tx.wit.vtxinwit.append(CTxInWitness()) + total_value += temp_utxos[i].nValue + split_value = total_value // num_outputs + for i in range(num_outputs): + tx.vout.append(CTxOut(split_value, scriptPubKey)) + for i in range(num_inputs): + # Now try to sign each input, using a random hashtype. + anyonecanpay = 0 + if random.randint(0, 1): + anyonecanpay = SIGHASH_ANYONECANPAY + hashtype = random.randint(1, 3) | anyonecanpay + sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key) + if (hashtype == SIGHASH_SINGLE and i >= num_outputs): + used_sighash_single_out_of_bounds = True + tx.rehash() + for i in range(num_outputs): + temp_utxos.append(UTXO(tx.sha256, i, split_value)) + temp_utxos = temp_utxos[num_inputs:] + + block.vtx.append(tx) + + # Test the block periodically, if we're close to maxblocksize + if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000): + self.update_witness_block_with_transactions(block, []) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + block = self.build_next_block() + + if (not used_sighash_single_out_of_bounds): + self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value") + # Test the transactions we've added to the block + if (len(block.vtx) > 1): + self.update_witness_block_with_transactions(block, []) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Now test witness version 0 P2PKH transactions + pubkeyhash = hash160(pubkey) + scriptPKH = CScript([OP_0, pubkeyhash]) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) + tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH)) + tx.wit.vtxinwit.append(CTxInWitness()) + sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) + + script = GetP2PKHScript(pubkeyhash) + sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) + signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL + + # Check that we can't have a scriptSig + tx2.vin[0].scriptSig = CScript([signature, pubkey]) + block = self.build_next_block() + 
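+        # (For P2WPKH, the signature and pubkey belong in the witness; any
+        # scriptSig content at all invalidates the input.)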
self.update_witness_block_with_transactions(block, [tx, tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False) + + # Move the signature to the witness. + block.vtx.pop() + tx2.wit.vtxinwit.append(CTxInWitness()) + tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey] + tx2.vin[0].scriptSig = b"" + tx2.rehash() + + self.update_witness_block_with_transactions(block, [tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + temp_utxos.pop(0) + + # Update self.utxos for later tests by creating two outputs + # that consolidate all the coins in temp_utxos. + output_value = sum(i.nValue for i in temp_utxos) // 2 + + tx = CTransaction() + index = 0 + # Just spend to our usual anyone-can-spend output + tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2 + for i in temp_utxos: + # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up + # the signatures as we go. + tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) + tx.wit.vtxinwit.append(CTxInWitness()) + sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_ALL|SIGHASH_ANYONECANPAY, i.nValue, key) + index += 1 + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + for i in range(len(tx.vout)): + self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue)) + + + # Test P2SH wrapped witness programs. + def test_p2sh_witness(self, segwit_activated): + self.log.info("Testing P2SH witness transactions") + + assert(len(self.utxo)) + + # Prepare the p2sh-wrapped witness output + witness_program = CScript([OP_DROP, OP_TRUE]) + witness_hash = sha256(witness_program) + p2wsh_pubkey = CScript([OP_0, witness_hash]) + p2sh_witness_hash = hash160(p2wsh_pubkey) + scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) + scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script + + # Fund the P2SH output + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) + tx.rehash() + + # Verify mempool acceptance and block validity + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True) + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=segwit_activated) + sync_blocks(self.nodes) + + # Now test attempts to spend the output. + spend_tx = CTransaction() + spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig)) + spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) + spend_tx.rehash() + + # This transaction should not be accepted into the mempool pre- or + # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which + # will require a witness to spend a witness program regardless of + # segwit activation. Note that older bitcoind's that are not + # segwit-aware would also reject this for failing CLEANSTACK. + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False) + + # Try to put the witness script in the scriptSig, should also fail. 
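+        # (For P2SH-P2WSH the scriptSig must be exactly one push of the
+        # redeem script; the extra b'a' push below breaks that rule.)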
+        spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
+        spend_tx.rehash()
+        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
+
+        # Now put the witness script in the witness, should succeed after
+        # segwit activates.
+        spend_tx.vin[0].scriptSig = scriptSig
+        spend_tx.rehash()
+        spend_tx.wit.vtxinwit.append(CTxInWitness())
+        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
+
+        # Verify mempool acceptance
+        test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=True, accepted=segwit_activated)
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [spend_tx])
+
+        # If we're before activation, then sending this without witnesses
+        # should be valid. If we're after activation, then sending this with
+        # witnesses should be valid.
+        if segwit_activated:
+            test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
+        else:
+            test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=False)
+
+        # Update self.utxo
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
+
+    # Test the behavior of starting up a segwit-aware node after the softfork
+    # has activated. As segwit requires different block data than pre-segwit
+    # nodes would have stored, this requires special handling.
+    # To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
+    # the test.
+    def test_upgrade_after_activation(self, node_id):
+        self.log.info("Testing software upgrade after softfork activation")
+
+        assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
+
+        # Make sure the nodes are all up
+        sync_blocks(self.nodes)
+
+        # Restart with the new binary
+        self.stop_node(node_id)
+        self.start_node(node_id, extra_args=["-vbparams=segwit:0:999999999999"])
+        connect_nodes(self.nodes[0], node_id)
+
+        sync_blocks(self.nodes)
+
+        # Make sure that this peer thinks segwit has activated.
+        assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
+
+        # Make sure this peer's blocks match those of node0.
+        height = self.nodes[node_id].getblockcount()
+        while height >= 0:
+            block_hash = self.nodes[node_id].getblockhash(height)
+            assert_equal(block_hash, self.nodes[0].getblockhash(height))
+            assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
+            height -= 1
+
+
+    def test_witness_sigops(self):
+        '''Ensure sigop counting is correct inside witnesses.'''
+        self.log.info("Testing sigops limit")
+
+        assert(len(self.utxo))
+
+        # Keep this under MAX_OPS_PER_SCRIPT (201)
+        witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
+        witness_hash = sha256(witness_program)
+        scriptPubKey = CScript([OP_0, witness_hash])
+
+        sigops_per_script = 20*5 + 193*1
+        # We'll produce 2 extra outputs, one with a program that would take us
+        # over max sig ops, and one with a program that would exactly reach max
+        # sig ops
+        outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
+        extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
+
+        # We chose the number of checkmultisigs/checksigs to make this work:
+        assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
+
+        # This script, when spent with the first
+        # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
+        # would push us just over the block sigop limit.
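+        # (Witness sigops count at face value against MAX_SIGOP_COST (80000),
+        # while each legacy sigop costs 4 units under BIP141's scaling.)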
+ witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF]) + witness_hash_toomany = sha256(witness_program_toomany) + scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany]) + + # If we spend this script instead, we would exactly reach our sigop + # limit (for witness sigops). + witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF]) + witness_hash_justright = sha256(witness_program_justright) + scriptPubKey_justright = CScript([OP_0, witness_hash_justright]) + + # First split our available utxo into a bunch of outputs + split_value = self.utxo[0].nValue // outputs + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + for i in range(outputs): + tx.vout.append(CTxOut(split_value, scriptPubKey)) + tx.vout[-2].scriptPubKey = scriptPubKey_toomany + tx.vout[-1].scriptPubKey = scriptPubKey_justright + tx.rehash() + + block_1 = self.build_next_block() + self.update_witness_block_with_transactions(block_1, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block_1, accepted=True) + + tx2 = CTransaction() + # If we try to spend the first n-1 outputs from tx, that should be + # too many sigops. + total_value = 0 + for i in range(outputs-1): + tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) + tx2.wit.vtxinwit.append(CTxInWitness()) + tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ] + total_value += tx.vout[i].nValue + tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ] + tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE]))) + tx2.rehash() + + block_2 = self.build_next_block() + self.update_witness_block_with_transactions(block_2, [tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=False) + + # Try dropping the last input in tx2, and add an output that has + # too many sigops (contributing to legacy sigop count). + checksig_count = (extra_sigops_available // 4) + 1 + scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count) + tx2.vout.append(CTxOut(0, scriptPubKey_checksigs)) + tx2.vin.pop() + tx2.wit.vtxinwit.pop() + tx2.vout[0].nValue -= tx.vout[-2].nValue + tx2.rehash() + block_3 = self.build_next_block() + self.update_witness_block_with_transactions(block_3, [tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False) + + # If we drop the last checksig in this output, the tx should succeed. 
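+        # (Each bare OP_CHECKSIG in a scriptPubKey is a legacy sigop at 4 cost
+        # units, hence checksig_count is derived by dividing by 4.)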
+ block_4 = self.build_next_block() + tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1)) + tx2.rehash() + self.update_witness_block_with_transactions(block_4, [tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block_4, accepted=True) + + # Reset the tip back down for the next test + sync_blocks(self.nodes) + for x in self.nodes: + x.invalidateblock(block_4.hash) + + # Try replacing the last input of tx2 to be spending the last + # output of tx + block_5 = self.build_next_block() + tx2.vout.pop() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b"")) + tx2.wit.vtxinwit.append(CTxInWitness()) + tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ] + tx2.rehash() + self.update_witness_block_with_transactions(block_5, [tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block_5, accepted=True) + + # TODO: test p2sh sigop counting + + def test_getblocktemplate_before_lockin(self): + self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)") + # Node0 is segwit aware, node2 is not. + for node in [self.nodes[0], self.nodes[2]]: + gbt_results = node.getblocktemplate() + block_version = gbt_results['version'] + # If we're not indicating segwit support, we will still be + # signalling for segwit activation. + assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0]) + # If we don't specify the segwit rule, then we won't get a default + # commitment. + assert('default_witness_commitment' not in gbt_results) + + # Workaround: + # Can either change the tip, or change the mempool and wait 5 seconds + # to trigger a recomputation of getblocktemplate. + txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16) + # Using mocktime lets us avoid sleep() + sync_mempools(self.nodes) + self.nodes[0].setmocktime(int(time.time())+10) + self.nodes[2].setmocktime(int(time.time())+10) + + for node in [self.nodes[0], self.nodes[2]]: + gbt_results = node.getblocktemplate({"rules" : ["segwit"]}) + block_version = gbt_results['version'] + if node == self.nodes[2]: + # If this is a non-segwit node, we should still not get a witness + # commitment, nor a version bit signalling segwit. + assert_equal(block_version & (1 << VB_WITNESS_BIT), 0) + assert('default_witness_commitment' not in gbt_results) + else: + # For segwit-aware nodes, check the version bit and the witness + # commitment are correct. + assert(block_version & (1 << VB_WITNESS_BIT) != 0) + assert('default_witness_commitment' in gbt_results) + witness_commitment = gbt_results['default_witness_commitment'] + + # Check that default_witness_commitment is present. + witness_root = CBlock.get_merkle_root([ser_uint256(0), + ser_uint256(txid)]) + script = get_witness_script(witness_root, 0) + assert_equal(witness_commitment, bytes_to_hex_str(script)) + + # undo mocktime + self.nodes[0].setmocktime(0) + self.nodes[2].setmocktime(0) + + # Uncompressed pubkeys are no longer supported in default relay policy, + # but (for now) are still valid in blocks. + def test_uncompressed_pubkey(self): + self.log.info("Testing uncompressed pubkeys") + # Segwit transactions using uncompressed pubkeys are not accepted + # under default policy, but should still pass consensus. 
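+        # (The restriction is the policy-only SCRIPT_VERIFY_WITNESS_PUBKEYTYPE
+        # flag, which block validation does not apply.)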
+ key = CECKey() + key.set_secretbytes(b"9") + key.set_compressed(False) + pubkey = CPubKey(key.get_pubkey()) + assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey + + assert(len(self.utxo) > 0) + utxo = self.utxo.pop(0) + + # Test 1: P2WPKH + # First create a P2WPKH output that uses an uncompressed pubkey + pubkeyhash = hash160(pubkey) + scriptPKH = CScript([OP_0, pubkeyhash]) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b"")) + tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH)) + tx.rehash() + + # Confirm it in a block. + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Now try to spend it. Send it to a P2WSH output, which we'll + # use in the next test. + witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) + witness_hash = sha256(witness_program) + scriptWSH = CScript([OP_0, witness_hash]) + + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH)) + script = GetP2PKHScript(pubkeyhash) + sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) + signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL + tx2.wit.vtxinwit.append(CTxInWitness()) + tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ] + tx2.rehash() + + # Should fail policy test. + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + # But passes consensus. + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx2]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Test 2: P2WSH + # Try to spend the P2WSH output created in last test. + # Send it to a P2SH(P2WSH) output, which we'll use in the next test. + p2sh_witness_hash = hash160(scriptWSH) + scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) + scriptSig = CScript([scriptWSH]) + + tx3 = CTransaction() + tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) + tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH)) + tx3.wit.vtxinwit.append(CTxInWitness()) + sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) + + # Should fail policy test. + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + # But passes consensus. + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx3]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Test 3: P2SH(P2WSH) + # Try to spend the P2SH output created in the last test. + # Send it to a P2PKH output, which we'll use in the next test. + scriptPubKey = GetP2PKHScript(pubkeyhash) + tx4 = CTransaction() + tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig)) + tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey)) + tx4.wit.vtxinwit.append(CTxInWitness()) + sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) + + # Should fail policy test. 
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx4]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + + # Test 4: Uncompressed pubkeys should still be valid in non-segwit + # transactions. + tx5 = CTransaction() + tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b"")) + tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE]))) + (sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL) + signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL + tx5.vin[0].scriptSig = CScript([signature, pubkey]) + tx5.rehash() + # Should pass policy and consensus. + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx5, True, True) + block = self.build_next_block() + self.update_witness_block_with_transactions(block, [tx5]) + test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True) + self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue)) + + def test_non_standard_witness(self): + self.log.info("Testing detection of non-standard P2WSH witness") + pad = chr(1).encode('latin-1') + + # Create scripts for tests + scripts = [] + scripts.append(CScript([OP_DROP] * 100)) + scripts.append(CScript([OP_DROP] * 99)) + scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60)) + scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61)) + + p2wsh_scripts = [] + + assert(len(self.utxo)) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + + # For each script, generate a pair of P2WSH and P2SH-P2WSH output. + outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2) + for i in scripts: + p2wsh = CScript([OP_0, sha256(i)]) + p2sh = hash160(p2wsh) + p2wsh_scripts.append(p2wsh) + tx.vout.append(CTxOut(outputvalue, p2wsh)) + tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL]))) + tx.rehash() + txid = tx.sha256 + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True) + + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + # Creating transactions for tests + p2wsh_txs = [] + p2sh_txs = [] + for i in range(len(scripts)): + p2wsh_tx = CTransaction() + p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2))) + p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) + p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) + p2wsh_tx.rehash() + p2wsh_txs.append(p2wsh_tx) + p2sh_tx = CTransaction() + p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]]))) + p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) + p2sh_tx.wit.vtxinwit.append(CTxInWitness()) + p2sh_tx.rehash() + p2sh_txs.append(p2sh_tx) + + # Testing native P2WSH + # Witness stack size, excluding witnessScript, over 100 is non-standard + p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard') + # Non-standard nodes should accept + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[0], True, True) + + # Stack element size over 80 bytes is non-standard + p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, 
False, b'bad-witness-nonstandard') + # Non-standard nodes should accept + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[1], True, True) + # Standard nodes should accept if element size is not over 80 bytes + p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, True) + + # witnessScript size at 3600 bytes is standard + p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[2], True, True) + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[2], True, True) + + # witnessScript size at 3601 bytes is non-standard + p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard') + # Non-standard nodes should accept + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[3], True, True) + + # Repeating the same tests with P2SH-P2WSH + p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard') + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[0], True, True) + p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard') + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[1], True, True) + p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, True) + p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[2], True, True) + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[2], True, True) + p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] + test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard') + test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[3], True, True) + + self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node + # Valid but non-standard transactions in a block should be accepted by standard node + sync_blocks(self.nodes) + assert_equal(len(self.nodes[0].getrawmempool()), 0) + assert_equal(len(self.nodes[1].getrawmempool()), 0) + + self.utxo.pop(0) + + + def run_test(self): + # Setup the p2p connections and start up the network thread. 
+ # self.test_node sets NODE_WITNESS|NODE_NETWORK + self.test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) + # self.old_node sets only NODE_NETWORK + self.old_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK) + # self.std_node is for testing node1 (fRequireStandard=true) + self.std_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS) + + network_thread_start() + + # Keep a place to store utxo's that can be used in later tests + self.utxo = [] + + # Test logic begins here + self.test_node.wait_for_verack() + + self.log.info("Starting tests before segwit lock in:") + + self.test_witness_services() # Verifies NODE_WITNESS + self.test_non_witness_transaction() # non-witness tx's are accepted + self.test_unnecessary_witness_before_segwit_activation() + self.test_block_relay(segwit_activated=False) + + # Advance to segwit being 'started' + self.advance_to_segwit_started() + sync_blocks(self.nodes) + self.test_getblocktemplate_before_lockin() + + sync_blocks(self.nodes) + + # At lockin, nothing should change. + self.log.info("Testing behavior post lockin, pre-activation") + self.advance_to_segwit_lockin() + + # Retest unnecessary witnesses + self.test_unnecessary_witness_before_segwit_activation() + self.test_witness_tx_relay_before_segwit_activation() + self.test_block_relay(segwit_activated=False) + self.test_p2sh_witness(segwit_activated=False) + self.test_standardness_v0(segwit_activated=False) + + sync_blocks(self.nodes) + + # Now activate segwit + self.log.info("Testing behavior after segwit activation") + self.advance_to_segwit_active() + + sync_blocks(self.nodes) + + # Test P2SH witness handling again + self.test_p2sh_witness(segwit_activated=True) + self.test_witness_commitments() + self.test_block_malleability() + self.test_witness_block_size() + self.test_submit_block() + self.test_extra_witness_data() + self.test_max_witness_push_length() + self.test_max_witness_program_length() + self.test_witness_input_length() + self.test_block_relay(segwit_activated=True) + self.test_tx_relay_after_segwit_activation() + self.test_standardness_v0(segwit_activated=True) + self.test_segwit_versions() + self.test_premature_coinbase_witness_spend() + self.test_uncompressed_pubkey() + self.test_signature_version_1() + self.test_non_standard_witness() + sync_blocks(self.nodes) + self.test_upgrade_after_activation(node_id=2) + self.test_witness_sigops() + + +if __name__ == '__main__': + SegWitTest().main() diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py new file mode 100755 index 0000000000..8869aeaaea --- /dev/null +++ b/test/functional/p2p_sendheaders.py @@ -0,0 +1,598 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test behavior of headers messages to announce blocks. + +Setup: + +- Two nodes: + - node0 is the node-under-test. We create two p2p connections to it. The + first p2p connection is a control and should only ever receive inv's. The + second p2p connection tests the headers sending logic. + - node1 is used to create reorgs. + +test_null_locators +================== + +Sends two getheaders requests with null locator values. First request's hashstop +value refers to validated block, while second request's hashstop value refers to +a block which hasn't been validated. 
Verifies only the first request returns +headers. + +test_nonnull_locators +===================== + +Part 1: No headers announcements before "sendheaders" +a. node mines a block [expect: inv] + send getdata for the block [expect: block] +b. node mines another block [expect: inv] + send getheaders and getdata [expect: headers, then block] +c. node mines another block [expect: inv] + peer mines a block, announces with header [expect: getdata] +d. node mines another block [expect: inv] + +Part 2: After "sendheaders", headers announcements should generally work. +a. peer sends sendheaders [expect: no response] + peer sends getheaders with current tip [expect: no response] +b. node mines a block [expect: tip header] +c. for N in 1, ..., 10: + * for announce-type in {inv, header} + - peer mines N blocks, announces with announce-type + [ expect: getheaders/getdata or getdata, deliver block(s) ] + - node mines a block [ expect: 1 header ] + +Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. +- For response-type in {inv, getheaders} + * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] + * node mines an 8-block reorg [ expect: inv at tip ] + * peer responds with getblocks/getdata [expect: inv, blocks ] + * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] + * node mines another block at tip [ expect: inv ] + * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] + * peer requests block [ expect: block ] + * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] + * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] + * node mines 1 block [expect: 1 header, peer responds with getdata] + +Part 4: Test direct fetch behavior +a. Announce 2 old block headers. + Expect: no getdata requests. +b. Announce 3 new blocks via 1 headers message. + Expect: one getdata request for all 3 blocks. + (Send blocks.) +c. Announce 1 header that forks off the last two blocks. + Expect: no response. +d. Announce 1 more header that builds on that fork. + Expect: one getdata request for two blocks. +e. Announce 16 more headers that build on that fork. + Expect: getdata request for 14 more blocks. +f. Announce 1 more header that builds on that fork. + Expect: no response. + +Part 5: Test handling of headers that don't connect. +a. Repeat 10 times: + 1. Announce a header that doesn't connect. + Expect: getheaders message + 2. Send headers chain. + Expect: getdata for the missing blocks, tip update. +b. Then send 9 more headers that don't connect. + Expect: getheaders message each time. +c. Announce a header that does connect. + Expect: no response. +d. Announce 49 headers that don't connect. + Expect: getheaders message each time. +e. Announce one more that doesn't connect. + Expect: disconnect. 
+""" +from test_framework.blocktools import create_block, create_coinbase +from test_framework.mininode import ( + CBlockHeader, + CInv, + NODE_WITNESS, + network_thread_start, + P2PInterface, + mininode_lock, + msg_block, + msg_getblocks, + msg_getdata, + msg_getheaders, + msg_headers, + msg_inv, + msg_sendheaders, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + sync_blocks, + wait_until, +) + +DIRECT_FETCH_RESPONSE_TIME = 0.05 + +class BaseNode(P2PInterface): + def __init__(self): + super().__init__() + + self.block_announced = False + self.last_blockhash_announced = None + + def send_get_data(self, block_hashes): + """Request data for a list of block hashes.""" + msg = msg_getdata() + for x in block_hashes: + msg.inv.append(CInv(2, x)) + self.send_message(msg) + + def send_get_headers(self, locator, hashstop): + msg = msg_getheaders() + msg.locator.vHave = locator + msg.hashstop = hashstop + self.send_message(msg) + + def send_block_inv(self, blockhash): + msg = msg_inv() + msg.inv = [CInv(2, blockhash)] + self.send_message(msg) + + def send_header_for_blocks(self, new_blocks): + headers_message = msg_headers() + headers_message.headers = [CBlockHeader(b) for b in new_blocks] + self.send_message(headers_message) + + def send_getblocks(self, locator): + getblocks_message = msg_getblocks() + getblocks_message.locator.vHave = locator + self.send_message(getblocks_message) + + def wait_for_getdata(self, hash_list, timeout=60): + if hash_list == []: + return + + test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list + wait_until(test_function, timeout=timeout, lock=mininode_lock) + + def wait_for_block_announcement(self, block_hash, timeout=60): + test_function = lambda: self.last_blockhash_announced == block_hash + wait_until(test_function, timeout=timeout, lock=mininode_lock) + + def on_inv(self, message): + self.block_announced = True + self.last_blockhash_announced = message.inv[-1].hash + + def on_headers(self, message): + if len(message.headers): + self.block_announced = True + message.headers[-1].calc_sha256() + self.last_blockhash_announced = message.headers[-1].sha256 + + def clear_last_announcement(self): + with mininode_lock: + self.block_announced = False + self.last_message.pop("inv", None) + self.last_message.pop("headers", None) + + def check_last_announcement(self, headers=None, inv=None): + """Test whether the last announcement received had the right header or the right inv. 
+
+        inv and headers should be lists of block hashes."""
+
+        test_function = lambda: self.block_announced
+        wait_until(test_function, timeout=60, lock=mininode_lock)
+
+        with mininode_lock:
+            self.block_announced = False
+
+            compare_inv = []
+            if "inv" in self.last_message:
+                compare_inv = [x.hash for x in self.last_message["inv"].inv]
+            if inv is not None:
+                assert_equal(compare_inv, inv)
+
+            compare_headers = []
+            if "headers" in self.last_message:
+                compare_headers = [x.sha256 for x in self.last_message["headers"].headers]
+            if headers is not None:
+                assert_equal(compare_headers, headers)
+
+            self.last_message.pop("inv", None)
+            self.last_message.pop("headers", None)
+
+class SendHeadersTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 2
+
+    def mine_blocks(self, count):
+        """Mine count blocks and return the new tip."""
+
+        # Clear out last block announcement from each p2p listener
+        [x.clear_last_announcement() for x in self.nodes[0].p2ps]
+        self.nodes[0].generate(count)
+        return int(self.nodes[0].getbestblockhash(), 16)
+
+    def mine_reorg(self, length):
+        """Mine a reorg that invalidates length blocks (replacing them with length+1 blocks).
+
+        Note: we clear the state of our p2p connections after the
+        to-be-reorged-out blocks are mined, so that we don't break later tests.
+        Return the list of block hashes newly mined."""
+
+        self.nodes[0].generate(length)  # make sure all invalidated blocks are node0's
+        sync_blocks(self.nodes, wait=0.1)
+        for x in self.nodes[0].p2ps:
+            x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
+            x.clear_last_announcement()
+
+        tip_height = self.nodes[1].getblockcount()
+        hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
+        self.nodes[1].invalidateblock(hash_to_invalidate)
+        all_hashes = self.nodes[1].generate(length + 1)  # Must be longer than the orig chain
+        sync_blocks(self.nodes, wait=0.1)
+        return [int(x, 16) for x in all_hashes]
+
+    def run_test(self):
+        # Set up the p2p connections and start up the network thread.
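+        # inv_node acts as a control connection: it never sends sendheaders,
+        # so it should only ever be announced to via inv. test_node is the
+        # connection that exercises the headers-announcement logic.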
+ inv_node = self.nodes[0].add_p2p_connection(BaseNode()) + # Make sure NODE_NETWORK is not set for test_node, so no block download + # will occur outside of direct fetching + test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS) + + network_thread_start() + + # Test logic begins here + inv_node.wait_for_verack() + test_node.wait_for_verack() + + # Ensure verack's have been processed by our peer + inv_node.sync_with_ping() + test_node.sync_with_ping() + + self.test_null_locators(test_node, inv_node) + self.test_nonnull_locators(test_node, inv_node) + + def test_null_locators(self, test_node, inv_node): + tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0]) + tip_hash = int(tip["hash"], 16) + + inv_node.check_last_announcement(inv=[tip_hash], headers=[]) + test_node.check_last_announcement(inv=[tip_hash], headers=[]) + + self.log.info("Verify getheaders with null locator and valid hashstop returns headers.") + test_node.clear_last_announcement() + test_node.send_get_headers(locator=[], hashstop=tip_hash) + test_node.check_last_announcement(headers=[tip_hash]) + + self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.") + block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1) + block.solve() + test_node.send_header_for_blocks([block]) + test_node.clear_last_announcement() + test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16)) + test_node.sync_with_ping() + assert_equal(test_node.block_announced, False) + inv_node.clear_last_announcement() + test_node.send_message(msg_block(block)) + inv_node.check_last_announcement(inv=[int(block.hash, 16)], headers=[]) + + def test_nonnull_locators(self, test_node, inv_node): + tip = int(self.nodes[0].getbestblockhash(), 16) + + # PART 1 + # 1. Mine a block; expect inv announcements each time + self.log.info("Part 1: headers don't start before sendheaders message...") + for i in range(4): + old_tip = tip + tip = self.mine_blocks(1) + inv_node.check_last_announcement(inv=[tip], headers=[]) + test_node.check_last_announcement(inv=[tip], headers=[]) + # Try a few different responses; none should affect next announcement + if i == 0: + # first request the block + test_node.send_get_data([tip]) + test_node.wait_for_block(tip) + elif i == 1: + # next try requesting header and block + test_node.send_get_headers(locator=[old_tip], hashstop=tip) + test_node.send_get_data([tip]) + test_node.wait_for_block(tip) + test_node.clear_last_announcement() # since we requested headers... + elif i == 2: + # this time announce own block via headers + height = self.nodes[0].getblockcount() + last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + block_time = last_time + 1 + new_block = create_block(tip, create_coinbase(height + 1), block_time) + new_block.solve() + test_node.send_header_for_blocks([new_block]) + test_node.wait_for_getdata([new_block.sha256]) + test_node.send_message(msg_block(new_block)) + test_node.sync_with_ping() # make sure this block is processed + inv_node.clear_last_announcement() + test_node.clear_last_announcement() + + self.log.info("Part 1: success!") + self.log.info("Part 2: announce blocks with headers after sendheaders message...") + # PART 2 + # 2. Send a sendheaders message and test that headers announcements + # commence and keep working. 
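+        # Background (BIP 130): once a peer has sent "sendheaders", the node
+        # may announce new blocks with a "headers" message instead of an inv,
+        # but only while it believes the peer already has the parent header
+        # the announcement connects to; otherwise it falls back to inv. The
+        # getheaders below establishes that shared state.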
+ test_node.send_message(msg_sendheaders()) + prev_tip = int(self.nodes[0].getbestblockhash(), 16) + test_node.send_get_headers(locator=[prev_tip], hashstop=0) + test_node.sync_with_ping() + + # Now that we've synced headers, headers announcements should work + tip = self.mine_blocks(1) + inv_node.check_last_announcement(inv=[tip], headers=[]) + test_node.check_last_announcement(headers=[tip]) + + height = self.nodes[0].getblockcount() + 1 + block_time += 10 # Advance far enough ahead + for i in range(10): + # Mine i blocks, and alternate announcing either via + # inv (of tip) or via headers. After each, new blocks + # mined by the node should successfully be announced + # with block header, even though the blocks are never requested + for j in range(2): + blocks = [] + for b in range(i + 1): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + if j == 0: + # Announce via inv + test_node.send_block_inv(tip) + test_node.wait_for_getheaders() + # Should have received a getheaders now + test_node.send_header_for_blocks(blocks) + # Test that duplicate inv's won't result in duplicate + # getdata requests, or duplicate headers announcements + [inv_node.send_block_inv(x.sha256) for x in blocks] + test_node.wait_for_getdata([x.sha256 for x in blocks]) + inv_node.sync_with_ping() + else: + # Announce via headers + test_node.send_header_for_blocks(blocks) + test_node.wait_for_getdata([x.sha256 for x in blocks]) + # Test that duplicate headers won't result in duplicate + # getdata requests (the check is further down) + inv_node.send_header_for_blocks(blocks) + inv_node.sync_with_ping() + [test_node.send_message(msg_block(x)) for x in blocks] + test_node.sync_with_ping() + inv_node.sync_with_ping() + # This block should not be announced to the inv node (since it also + # broadcast it) + assert "inv" not in inv_node.last_message + assert "headers" not in inv_node.last_message + tip = self.mine_blocks(1) + inv_node.check_last_announcement(inv=[tip], headers=[]) + test_node.check_last_announcement(headers=[tip]) + height += 1 + block_time += 1 + + self.log.info("Part 2: success!") + + self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") + + # PART 3. Headers announcements can stop after large reorg, and resume after + # getheaders or inv from peer. 
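+        # The node announces at most 8 headers per announcement
+        # (MAX_BLOCKS_TO_ANNOUNCE in the C++ code); a reorg that adds more
+        # blocks than that is announced with a single inv at the tip instead,
+        # and headers announcements resume only once a getheaders or block inv
+        # from the peer re-establishes which headers the peer has.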
+        for j in range(2):
+            # First try mining a reorg that can propagate with header announcement
+            new_block_hashes = self.mine_reorg(length=7)
+            tip = new_block_hashes[-1]
+            inv_node.check_last_announcement(inv=[tip], headers=[])
+            test_node.check_last_announcement(headers=new_block_hashes)
+
+            block_time += 8
+
+            # Mine a too-large reorg, which should be announced with a single inv
+            new_block_hashes = self.mine_reorg(length=8)
+            tip = new_block_hashes[-1]
+            inv_node.check_last_announcement(inv=[tip], headers=[])
+            test_node.check_last_announcement(inv=[tip], headers=[])
+
+            block_time += 9
+
+            # Block hashes are 256-bit, so zero-pad to 64 hex digits when
+            # converting back to the string form the RPC expects.
+            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
+            fork_point = int(fork_point, 16)
+
+            # Use getblocks/getdata
+            test_node.send_getblocks(locator=[fork_point])
+            test_node.check_last_announcement(inv=new_block_hashes, headers=[])
+            test_node.send_get_data(new_block_hashes)
+            test_node.wait_for_block(new_block_hashes[-1])
+
+            for i in range(3):
+                # Mine another block, still should get only an inv
+                tip = self.mine_blocks(1)
+                inv_node.check_last_announcement(inv=[tip], headers=[])
+                test_node.check_last_announcement(inv=[tip], headers=[])
+                if i == 0:
+                    # Just get the data -- shouldn't cause headers announcements to resume
+                    test_node.send_get_data([tip])
+                    test_node.wait_for_block(tip)
+                elif i == 1:
+                    # Send a getheaders message that shouldn't trigger headers announcements
+                    # to resume (best header sent will be too old)
+                    test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
+                    test_node.send_get_data([tip])
+                    test_node.wait_for_block(tip)
+                elif i == 2:
+                    test_node.send_get_data([tip])
+                    test_node.wait_for_block(tip)
+                    # This time, try sending either a getheaders to trigger resumption
+                    # of headers announcements, or mine a new block and inv it, also
+                    # triggering resumption of headers announcements.
+                    if j == 0:
+                        test_node.send_get_headers(locator=[tip], hashstop=0)
+                        test_node.sync_with_ping()
+                    else:
+                        test_node.send_block_inv(tip)
+                        test_node.sync_with_ping()
+            # New blocks should now be announced with header
+            tip = self.mine_blocks(1)
+            inv_node.check_last_announcement(inv=[tip], headers=[])
+            test_node.check_last_announcement(headers=[tip])
+
+        self.log.info("Part 3: success!")
+
+        self.log.info("Part 4: Testing direct fetch behavior...")
+        tip = self.mine_blocks(1)
+        height = self.nodes[0].getblockcount() + 1
+        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
+        block_time = last_time + 1
+
+        # Create 2 blocks. Send the blocks, then send the headers.
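+        # "Direct fetch" means the node requests the announced blocks with a
+        # getdata driven straight off the headers message, with no extra
+        # getheaders round trip; it is only expected for headers that connect
+        # to the tip and have at least as much work as the current chain.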
+ blocks = [] + for b in range(2): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + inv_node.send_message(msg_block(blocks[-1])) + + inv_node.sync_with_ping() # Make sure blocks are processed + test_node.last_message.pop("getdata", None) + test_node.send_header_for_blocks(blocks) + test_node.sync_with_ping() + # should not have received any getdata messages + with mininode_lock: + assert "getdata" not in test_node.last_message + + # This time, direct fetch should work + blocks = [] + for b in range(3): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + + test_node.send_header_for_blocks(blocks) + test_node.sync_with_ping() + test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME) + + [test_node.send_message(msg_block(x)) for x in blocks] + + test_node.sync_with_ping() + + # Now announce a header that forks the last two blocks + tip = blocks[0].sha256 + height -= 1 + blocks = [] + + # Create extra blocks for later + for b in range(20): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + + # Announcing one block on fork should not trigger direct fetch + # (less work than tip) + test_node.last_message.pop("getdata", None) + test_node.send_header_for_blocks(blocks[0:1]) + test_node.sync_with_ping() + with mininode_lock: + assert "getdata" not in test_node.last_message + + # Announcing one more block on fork should trigger direct fetch for + # both blocks (same work as tip) + test_node.send_header_for_blocks(blocks[1:2]) + test_node.sync_with_ping() + test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME) + + # Announcing 16 more headers should trigger direct fetch for 14 more + # blocks + test_node.send_header_for_blocks(blocks[2:18]) + test_node.sync_with_ping() + test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME) + + # Announcing 1 more header should not trigger any response + test_node.last_message.pop("getdata", None) + test_node.send_header_for_blocks(blocks[18:19]) + test_node.sync_with_ping() + with mininode_lock: + assert "getdata" not in test_node.last_message + + self.log.info("Part 4: success!") + + # Now deliver all those blocks we announced. + [test_node.send_message(msg_block(x)) for x in blocks] + + self.log.info("Part 5: Testing handling of unconnecting headers") + # First we test that receipt of an unconnecting header doesn't prevent + # chain sync. + for i in range(10): + test_node.last_message.pop("getdata", None) + blocks = [] + # Create two more blocks. + for j in range(2): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + # Send the header of the second block -> this won't connect. 
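+            # An unconnecting header (one whose parent the node doesn't know)
+            # should be answered with a getheaders so the gap can be filled,
+            # rather than being ignored outright.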
+            with mininode_lock:
+                test_node.last_message.pop("getheaders", None)
+            test_node.send_header_for_blocks([blocks[1]])
+            test_node.wait_for_getheaders()
+            test_node.send_header_for_blocks(blocks)
+            test_node.wait_for_getdata([x.sha256 for x in blocks])
+            [test_node.send_message(msg_block(x)) for x in blocks]
+            test_node.sync_with_ping()
+            assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
+
+        blocks = []
+        # Now we test that if we repeatedly don't send connecting headers, we
+        # don't go into an infinite loop trying to get them to connect.
+        MAX_UNCONNECTING_HEADERS = 10
+        for j in range(MAX_UNCONNECTING_HEADERS + 1):
+            blocks.append(create_block(tip, create_coinbase(height), block_time))
+            blocks[-1].solve()
+            tip = blocks[-1].sha256
+            block_time += 1
+            height += 1
+
+        for i in range(1, MAX_UNCONNECTING_HEADERS):
+            # Send a header that doesn't connect, check that we get a getheaders.
+            with mininode_lock:
+                test_node.last_message.pop("getheaders", None)
+            test_node.send_header_for_blocks([blocks[i]])
+            test_node.wait_for_getheaders()
+
+        # Next header will connect, should re-set our count:
+        test_node.send_header_for_blocks([blocks[0]])
+
+        # Remove the first two entries (blocks[1] would connect):
+        blocks = blocks[2:]
+
+        # Now try to see how many unconnecting headers we can send
+        # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
+        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
+            # Send a header that doesn't connect, check that we get a getheaders.
+            with mininode_lock:
+                test_node.last_message.pop("getheaders", None)
+            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
+            test_node.wait_for_getheaders()
+
+        # Eventually this stops working.
+        test_node.send_header_for_blocks([blocks[-1]])
+
+        # Should get disconnected
+        test_node.wait_for_disconnect()
+
+        self.log.info("Part 5: success!")
+
+        # Finally, check that the inv node never received a getdata request
+        # throughout the test
+        assert "getdata" not in inv_node.last_message
+
+if __name__ == '__main__':
+    SendHeadersTest().main()
diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py
new file mode 100755
index 0000000000..6d21095cc6
--- /dev/null
+++ b/test/functional/p2p_timeouts.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test various net timeouts.
+
+- Create one bitcoind node and three P2P test connections to it:
+
+    no_verack_node - we never send a verack in response to their version
+    no_version_node - we never send a version (only a ping)
+    no_send_node - we never send any P2P message.
+
+- Start all three connections
+- Wait 1 second
+- Assert that we're connected
+- Send a ping to no_verack_node and no_version_node
+- Wait 30 seconds
+- Assert that we're still connected
+- Send a ping to no_verack_node and no_version_node
+- Wait 31 seconds
+- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
+"""
+
+from time import sleep
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class TestNode(P2PInterface):
+    def on_version(self, message):
+        # Don't send a verack in response
+        pass
+
+class TimeoutsTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+
+    def run_test(self):
+        # Set up the p2p connections and start up the network thread.
+        no_verack_node = self.nodes[0].add_p2p_connection(TestNode())
+        no_version_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False)
+        no_send_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False)
+
+        network_thread_start()
+
+        sleep(1)
+
+        assert no_verack_node.connected
+        assert no_version_node.connected
+        assert no_send_node.connected
+
+        no_verack_node.send_message(msg_ping())
+        no_version_node.send_message(msg_ping())
+
+        sleep(30)
+
+        assert "version" in no_verack_node.last_message
+
+        assert no_verack_node.connected
+        assert no_version_node.connected
+        assert no_send_node.connected
+
+        no_verack_node.send_message(msg_ping())
+        no_version_node.send_message(msg_ping())
+
+        sleep(31)
+
+        assert not no_verack_node.connected
+        assert not no_version_node.connected
+        assert not no_send_node.connected
+
+if __name__ == '__main__':
+    TimeoutsTest().main()
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
new file mode 100755
index 0000000000..672626f15b
--- /dev/null
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test processing of unrequested blocks.
+
+Setup: two nodes, node0+node1, not connected to each other. Node1 will have
+nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
+
+We have one P2PInterface connection to node0 called test_node, and one to node1
+called min_work_node.
+
+The test:
+1. Generate one block on each node, to leave IBD.
+
+2. Mine a new block on each tip, and deliver to each node from its peer.
+   The tip should advance for node0, but node1 should skip processing due to
+   nMinimumChainWork.
+
+Node1 is unused in tests 3-7:
+
+3. Mine a block that forks from the genesis block, and deliver to test_node.
+   Node0 should not process this block (just accept the header), because it
+   is unrequested and doesn't have at least as much work as the tip.
+
+4a,b. Send another two blocks that build on the forking block.
+      Node0 should process the second block but be stuck on the shorter chain,
+      because it's missing an intermediate block.
+
+4c. Send 288 more blocks on the longer chain (the number of blocks ahead
+    we currently store).
+    Node0 should process all but the last block (too far ahead in height).
+
+5. Send a duplicate of the block in #3 to Node0.
+   Node0 should not process the block because it is unrequested, and stay on
+   the shorter chain.
+
+6. Send Node0 an inv for the height 3 block produced in #4 above.
+   Node0 should figure out that it is missing the first block on that fork and
+   send a getdata for it.
+
+7. Send Node0 the missing block again.
+   Node0 should process and the tip should advance.
+
+8. Create a fork which is invalid at a height longer than the current chain
+   (i.e. one to which the node will try to reorg) but which has headers built
+   on top of the invalid block. Check that we get disconnected if we send more
+   headers on the chain the node now knows to be invalid.
+
+9. Test that Node1 is able to sync when connected to node0 (which should have
+   sufficient work on its chain).
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import time
+from test_framework.blocktools import create_block, create_coinbase, create_transaction
+
+class AcceptBlockTest(BitcoinTestFramework):
+    def add_options(self, parser):
+        parser.add_option("--testbinary", dest="testbinary",
+                          default=os.getenv("BITCOIND", "bitcoind"),
+                          help="bitcoind binary to test")
+
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 2
+        self.extra_args = [[], ["-minimumchainwork=0x10"]]
+
+    def setup_network(self):
+        # Node0 is used to test behavior of processing unrequested blocks
+        # from its peers; Node1 (started with -minimumchainwork) is used to
+        # test the interaction with nMinimumChainWork.
+        self.setup_nodes()
+
+    def run_test(self):
+        # Set up the p2p connections and start up the network thread.
+        # test_node connects to node0
+        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
+        # min_work_node connects to node1 (which has -minimumchainwork set)
+        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
+
+        network_thread_start()
+
+        # Test logic begins here
+        test_node.wait_for_verack()
+        min_work_node.wait_for_verack()
+
+        # 1. Have nodes mine a block (leave IBD)
+        [n.generate(1) for n in self.nodes]
+        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
+
+        # 2. Send one block that builds on each tip.
+        # This should be accepted by node0
+        blocks_h2 = []  # the height 2 blocks on each node's chain
+        block_time = int(time.time()) + 1
+        for i in range(2):
+            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
+            blocks_h2[i].solve()
+            block_time += 1
+        test_node.send_message(msg_block(blocks_h2[0]))
+        min_work_node.send_message(msg_block(blocks_h2[1]))
+
+        for x in [test_node, min_work_node]:
+            x.sync_with_ping()
+        assert_equal(self.nodes[0].getblockcount(), 2)
+        assert_equal(self.nodes[1].getblockcount(), 1)
+        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
+
+        # 3. Send another block that builds on genesis.
+        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
+        block_time += 1
+        block_h1f.solve()
+        test_node.send_message(msg_block(block_h1f))
+
+        test_node.sync_with_ping()
+        tip_entry_found = False
+        for x in self.nodes[0].getchaintips():
+            if x['hash'] == block_h1f.hash:
+                assert_equal(x['status'], "headers-only")
+                tip_entry_found = True
+        assert(tip_entry_found)
+        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
+
+        # 4. Send another two blocks that build on the fork.
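+        # 4a. The first is a second block at height 2: equal work to the
+        # node's current tip, so it can be stored but should not cause a reorg.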
+        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
+        block_time += 1
+        block_h2f.solve()
+        test_node.send_message(msg_block(block_h2f))
+
+        test_node.sync_with_ping()
+        # Since the earlier block was not processed by the node, the new block
+        # can't be fully validated.
+        tip_entry_found = False
+        for x in self.nodes[0].getchaintips():
+            if x['hash'] == block_h2f.hash:
+                assert_equal(x['status'], "headers-only")
+                tip_entry_found = True
+        assert(tip_entry_found)
+
+        # But this block should be accepted by the node since it has equal work.
+        self.nodes[0].getblock(block_h2f.hash)
+        self.log.info("Second height 2 block accepted, but not reorg'ed to")
+
+        # 4b. Now send another block that builds on the forking chain.
+        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
+        block_h3.solve()
+        test_node.send_message(msg_block(block_h3))
+
+        test_node.sync_with_ping()
+        # Since the earlier block was not processed by the node, the new block
+        # can't be fully validated.
+        tip_entry_found = False
+        for x in self.nodes[0].getchaintips():
+            if x['hash'] == block_h3.hash:
+                assert_equal(x['status'], "headers-only")
+                tip_entry_found = True
+        assert(tip_entry_found)
+
+        # But this block should be accepted by the node since it has more work.
+        self.nodes[0].getblock(block_h3.hash)
+        self.log.info("Unrequested more-work block accepted")
+
+        # 4c. Now mine 288 more blocks and deliver; all should be processed but
+        # the last (height-too-high) by the node (as long as it's not missing any headers)
+        tip = block_h3
+        all_blocks = []
+        for i in range(288):
+            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
+            next_block.solve()
+            all_blocks.append(next_block)
+            tip = next_block
+
+        # Now send the block at height 5 and check that it wasn't accepted (missing header)
+        test_node.send_message(msg_block(all_blocks[1]))
+        test_node.sync_with_ping()
+        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
+        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
+
+        # The block at height 5 should be accepted if we provide the missing header, though
+        headers_message = msg_headers()
+        headers_message.headers.append(CBlockHeader(all_blocks[0]))
+        test_node.send_message(headers_message)
+        test_node.send_message(msg_block(all_blocks[1]))
+        test_node.sync_with_ping()
+        self.nodes[0].getblock(all_blocks[1].hash)
+
+        # Now send the blocks in all_blocks
+        for i in range(288):
+            test_node.send_message(msg_block(all_blocks[i]))
+        test_node.sync_with_ping()
+
+        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
+        for x in all_blocks[:-1]:
+            self.nodes[0].getblock(x.hash)
+        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
+
+        # 5. Test handling of the unrequested block on the node that didn't process it.
+        # It should still not be processed (even though it has a child with more
+        # work).
+
+        # The node should have requested the blocks at some point, so
+        # disconnect/reconnect first
+
+        self.nodes[0].disconnect_p2ps()
+        self.nodes[1].disconnect_p2ps()
+        network_thread_join()
+
+        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
+        network_thread_start()
+        test_node.wait_for_verack()
+
+        test_node.send_message(msg_block(block_h1f))
+
+        test_node.sync_with_ping()
+        assert_equal(self.nodes[0].getblockcount(), 2)
+        self.log.info("Unrequested block that would complete more-work chain was ignored")
+
+        # 6. Try to get node to request the missing block.
+        # Poke the node with an inv for block at height 3 and see if that
+        # triggers a getdata on block 2 (it should if block 2 is missing).
+        with mininode_lock:
+            # Clear state so we can check the getdata request
+            test_node.last_message.pop("getdata", None)
+            test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
+
+        test_node.sync_with_ping()
+        with mininode_lock:
+            getdata = test_node.last_message["getdata"]
+
+        # Check that the getdata includes the right block
+        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
+        self.log.info("Inv at tip triggered getdata for unprocessed block")
+
+        # 7. Send the missing block for the third time (now it is requested)
+        test_node.send_message(msg_block(block_h1f))
+
+        test_node.sync_with_ping()
+        assert_equal(self.nodes[0].getblockcount(), 290)
+        self.nodes[0].getblock(all_blocks[286].hash)
+        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
+        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
+        self.log.info("Successfully reorged to longer chain")
+
+        # 8. Create a chain which is invalid at a height longer than the
+        # current chain, but which has more blocks on top of that
+        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
+        block_289f.solve()
+        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
+        block_290f.solve()
+        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
+        # block_291 spends a coinbase below maturity!
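+        # (Coinbase outputs can't be spent until they have 100 confirmations,
+        # so this transaction makes block_291 violate consensus rules even
+        # though its header is perfectly valid.)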
+ block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1)) + block_291.hashMerkleRoot = block_291.calc_merkle_root() + block_291.solve() + block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1) + block_292.solve() + + # Now send all the headers on the chain and enough blocks to trigger reorg + headers_message = msg_headers() + headers_message.headers.append(CBlockHeader(block_289f)) + headers_message.headers.append(CBlockHeader(block_290f)) + headers_message.headers.append(CBlockHeader(block_291)) + headers_message.headers.append(CBlockHeader(block_292)) + test_node.send_message(headers_message) + + test_node.sync_with_ping() + tip_entry_found = False + for x in self.nodes[0].getchaintips(): + if x['hash'] == block_292.hash: + assert_equal(x['status'], "headers-only") + tip_entry_found = True + assert(tip_entry_found) + assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) + + test_node.send_message(msg_block(block_289f)) + test_node.send_message(msg_block(block_290f)) + + test_node.sync_with_ping() + self.nodes[0].getblock(block_289f.hash) + self.nodes[0].getblock(block_290f.hash) + + test_node.send_message(msg_block(block_291)) + + # At this point we've sent an obviously-bogus block, wait for full processing + # without assuming whether we will be disconnected or not + try: + # Only wait a short while so the test doesn't take forever if we do get + # disconnected + test_node.sync_with_ping(timeout=1) + except AssertionError: + test_node.wait_for_disconnect() + + self.nodes[0].disconnect_p2ps() + test_node = self.nodes[0].add_p2p_connection(P2PInterface()) + + network_thread_start() + test_node.wait_for_verack() + + # We should have failed reorg and switched back to 290 (but have block 291) + assert_equal(self.nodes[0].getblockcount(), 290) + assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) + assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1) + + # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected + block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1) + block_293.solve() + headers_message = msg_headers() + headers_message.headers.append(CBlockHeader(block_293)) + test_node.send_message(headers_message) + test_node.wait_for_disconnect() + + # 9. Connect node1 to node0 and ensure it is able to sync + connect_nodes(self.nodes[0], 1) + sync_blocks([self.nodes[0], self.nodes[1]]) + self.log.info("Successfully synced nodes 1 and 0") + +if __name__ == '__main__': + AcceptBlockTest().main() diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py deleted file mode 100755 index 8869aeaaea..0000000000 --- a/test/functional/sendheaders.py +++ /dev/null @@ -1,598 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test behavior of headers messages to announce blocks. - -Setup: - -- Two nodes: - - node0 is the node-under-test. We create two p2p connections to it. The - first p2p connection is a control and should only ever receive inv's. The - second p2p connection tests the headers sending logic. - - node1 is used to create reorgs. - -test_null_locators -================== - -Sends two getheaders requests with null locator values. 
First request's hashstop -value refers to validated block, while second request's hashstop value refers to -a block which hasn't been validated. Verifies only the first request returns -headers. - -test_nonnull_locators -===================== - -Part 1: No headers announcements before "sendheaders" -a. node mines a block [expect: inv] - send getdata for the block [expect: block] -b. node mines another block [expect: inv] - send getheaders and getdata [expect: headers, then block] -c. node mines another block [expect: inv] - peer mines a block, announces with header [expect: getdata] -d. node mines another block [expect: inv] - -Part 2: After "sendheaders", headers announcements should generally work. -a. peer sends sendheaders [expect: no response] - peer sends getheaders with current tip [expect: no response] -b. node mines a block [expect: tip header] -c. for N in 1, ..., 10: - * for announce-type in {inv, header} - - peer mines N blocks, announces with announce-type - [ expect: getheaders/getdata or getdata, deliver block(s) ] - - node mines a block [ expect: 1 header ] - -Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. -- For response-type in {inv, getheaders} - * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] - * node mines an 8-block reorg [ expect: inv at tip ] - * peer responds with getblocks/getdata [expect: inv, blocks ] - * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] - * node mines another block at tip [ expect: inv ] - * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] - * peer requests block [ expect: block ] - * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] - * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] - * node mines 1 block [expect: 1 header, peer responds with getdata] - -Part 4: Test direct fetch behavior -a. Announce 2 old block headers. - Expect: no getdata requests. -b. Announce 3 new blocks via 1 headers message. - Expect: one getdata request for all 3 blocks. - (Send blocks.) -c. Announce 1 header that forks off the last two blocks. - Expect: no response. -d. Announce 1 more header that builds on that fork. - Expect: one getdata request for two blocks. -e. Announce 16 more headers that build on that fork. - Expect: getdata request for 14 more blocks. -f. Announce 1 more header that builds on that fork. - Expect: no response. - -Part 5: Test handling of headers that don't connect. -a. Repeat 10 times: - 1. Announce a header that doesn't connect. - Expect: getheaders message - 2. Send headers chain. - Expect: getdata for the missing blocks, tip update. -b. Then send 9 more headers that don't connect. - Expect: getheaders message each time. -c. Announce a header that does connect. - Expect: no response. -d. Announce 49 headers that don't connect. - Expect: getheaders message each time. -e. Announce one more that doesn't connect. - Expect: disconnect. 
-""" -from test_framework.blocktools import create_block, create_coinbase -from test_framework.mininode import ( - CBlockHeader, - CInv, - NODE_WITNESS, - network_thread_start, - P2PInterface, - mininode_lock, - msg_block, - msg_getblocks, - msg_getdata, - msg_getheaders, - msg_headers, - msg_inv, - msg_sendheaders, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - sync_blocks, - wait_until, -) - -DIRECT_FETCH_RESPONSE_TIME = 0.05 - -class BaseNode(P2PInterface): - def __init__(self): - super().__init__() - - self.block_announced = False - self.last_blockhash_announced = None - - def send_get_data(self, block_hashes): - """Request data for a list of block hashes.""" - msg = msg_getdata() - for x in block_hashes: - msg.inv.append(CInv(2, x)) - self.send_message(msg) - - def send_get_headers(self, locator, hashstop): - msg = msg_getheaders() - msg.locator.vHave = locator - msg.hashstop = hashstop - self.send_message(msg) - - def send_block_inv(self, blockhash): - msg = msg_inv() - msg.inv = [CInv(2, blockhash)] - self.send_message(msg) - - def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() - headers_message.headers = [CBlockHeader(b) for b in new_blocks] - self.send_message(headers_message) - - def send_getblocks(self, locator): - getblocks_message = msg_getblocks() - getblocks_message.locator.vHave = locator - self.send_message(getblocks_message) - - def wait_for_getdata(self, hash_list, timeout=60): - if hash_list == []: - return - - test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list - wait_until(test_function, timeout=timeout, lock=mininode_lock) - - def wait_for_block_announcement(self, block_hash, timeout=60): - test_function = lambda: self.last_blockhash_announced == block_hash - wait_until(test_function, timeout=timeout, lock=mininode_lock) - - def on_inv(self, message): - self.block_announced = True - self.last_blockhash_announced = message.inv[-1].hash - - def on_headers(self, message): - if len(message.headers): - self.block_announced = True - message.headers[-1].calc_sha256() - self.last_blockhash_announced = message.headers[-1].sha256 - - def clear_last_announcement(self): - with mininode_lock: - self.block_announced = False - self.last_message.pop("inv", None) - self.last_message.pop("headers", None) - - def check_last_announcement(self, headers=None, inv=None): - """Test whether the last announcement received had the right header or the right inv. 
- - inv and headers should be lists of block hashes.""" - - test_function = lambda: self.block_announced - wait_until(test_function, timeout=60, lock=mininode_lock) - - with mininode_lock: - self.block_announced = False - - compare_inv = [] - if "inv" in self.last_message: - compare_inv = [x.hash for x in self.last_message["inv"].inv] - if inv is not None: - assert_equal(compare_inv, inv) - - compare_headers = [] - if "headers" in self.last_message: - compare_headers = [x.sha256 for x in self.last_message["headers"].headers] - if headers is not None: - assert_equal(compare_headers, headers) - - self.last_message.pop("inv", None) - self.last_message.pop("headers", None) - -class SendHeadersTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - - def mine_blocks(self, count): - """Mine count blocks and return the new tip.""" - - # Clear out last block announcement from each p2p listener - [x.clear_last_announcement() for x in self.nodes[0].p2ps] - self.nodes[0].generate(count) - return int(self.nodes[0].getbestblockhash(), 16) - - def mine_reorg(self, length): - """Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks). - - Note: we clear the state of our p2p connections after the - to-be-reorged-out blocks are mined, so that we don't break later tests. - return the list of block hashes newly mined.""" - - self.nodes[0].generate(length) # make sure all invalidated blocks are node0's - sync_blocks(self.nodes, wait=0.1) - for x in self.nodes[0].p2ps: - x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16)) - x.clear_last_announcement() - - tip_height = self.nodes[1].getblockcount() - hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1)) - self.nodes[1].invalidateblock(hash_to_invalidate) - all_hashes = self.nodes[1].generate(length + 1) # Must be longer than the orig chain - sync_blocks(self.nodes, wait=0.1) - return [int(x, 16) for x in all_hashes] - - def run_test(self): - # Setup the p2p connections and start up the network thread. 
- inv_node = self.nodes[0].add_p2p_connection(BaseNode()) - # Make sure NODE_NETWORK is not set for test_node, so no block download - # will occur outside of direct fetching - test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS) - - network_thread_start() - - # Test logic begins here - inv_node.wait_for_verack() - test_node.wait_for_verack() - - # Ensure verack's have been processed by our peer - inv_node.sync_with_ping() - test_node.sync_with_ping() - - self.test_null_locators(test_node, inv_node) - self.test_nonnull_locators(test_node, inv_node) - - def test_null_locators(self, test_node, inv_node): - tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0]) - tip_hash = int(tip["hash"], 16) - - inv_node.check_last_announcement(inv=[tip_hash], headers=[]) - test_node.check_last_announcement(inv=[tip_hash], headers=[]) - - self.log.info("Verify getheaders with null locator and valid hashstop returns headers.") - test_node.clear_last_announcement() - test_node.send_get_headers(locator=[], hashstop=tip_hash) - test_node.check_last_announcement(headers=[tip_hash]) - - self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.") - block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1) - block.solve() - test_node.send_header_for_blocks([block]) - test_node.clear_last_announcement() - test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16)) - test_node.sync_with_ping() - assert_equal(test_node.block_announced, False) - inv_node.clear_last_announcement() - test_node.send_message(msg_block(block)) - inv_node.check_last_announcement(inv=[int(block.hash, 16)], headers=[]) - - def test_nonnull_locators(self, test_node, inv_node): - tip = int(self.nodes[0].getbestblockhash(), 16) - - # PART 1 - # 1. Mine a block; expect inv announcements each time - self.log.info("Part 1: headers don't start before sendheaders message...") - for i in range(4): - old_tip = tip - tip = self.mine_blocks(1) - inv_node.check_last_announcement(inv=[tip], headers=[]) - test_node.check_last_announcement(inv=[tip], headers=[]) - # Try a few different responses; none should affect next announcement - if i == 0: - # first request the block - test_node.send_get_data([tip]) - test_node.wait_for_block(tip) - elif i == 1: - # next try requesting header and block - test_node.send_get_headers(locator=[old_tip], hashstop=tip) - test_node.send_get_data([tip]) - test_node.wait_for_block(tip) - test_node.clear_last_announcement() # since we requested headers... - elif i == 2: - # this time announce own block via headers - height = self.nodes[0].getblockcount() - last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] - block_time = last_time + 1 - new_block = create_block(tip, create_coinbase(height + 1), block_time) - new_block.solve() - test_node.send_header_for_blocks([new_block]) - test_node.wait_for_getdata([new_block.sha256]) - test_node.send_message(msg_block(new_block)) - test_node.sync_with_ping() # make sure this block is processed - inv_node.clear_last_announcement() - test_node.clear_last_announcement() - - self.log.info("Part 1: success!") - self.log.info("Part 2: announce blocks with headers after sendheaders message...") - # PART 2 - # 2. Send a sendheaders message and test that headers announcements - # commence and keep working. 
- test_node.send_message(msg_sendheaders()) - prev_tip = int(self.nodes[0].getbestblockhash(), 16) - test_node.send_get_headers(locator=[prev_tip], hashstop=0) - test_node.sync_with_ping() - - # Now that we've synced headers, headers announcements should work - tip = self.mine_blocks(1) - inv_node.check_last_announcement(inv=[tip], headers=[]) - test_node.check_last_announcement(headers=[tip]) - - height = self.nodes[0].getblockcount() + 1 - block_time += 10 # Advance far enough ahead - for i in range(10): - # Mine i blocks, and alternate announcing either via - # inv (of tip) or via headers. After each, new blocks - # mined by the node should successfully be announced - # with block header, even though the blocks are never requested - for j in range(2): - blocks = [] - for b in range(i + 1): - blocks.append(create_block(tip, create_coinbase(height), block_time)) - blocks[-1].solve() - tip = blocks[-1].sha256 - block_time += 1 - height += 1 - if j == 0: - # Announce via inv - test_node.send_block_inv(tip) - test_node.wait_for_getheaders() - # Should have received a getheaders now - test_node.send_header_for_blocks(blocks) - # Test that duplicate inv's won't result in duplicate - # getdata requests, or duplicate headers announcements - [inv_node.send_block_inv(x.sha256) for x in blocks] - test_node.wait_for_getdata([x.sha256 for x in blocks]) - inv_node.sync_with_ping() - else: - # Announce via headers - test_node.send_header_for_blocks(blocks) - test_node.wait_for_getdata([x.sha256 for x in blocks]) - # Test that duplicate headers won't result in duplicate - # getdata requests (the check is further down) - inv_node.send_header_for_blocks(blocks) - inv_node.sync_with_ping() - [test_node.send_message(msg_block(x)) for x in blocks] - test_node.sync_with_ping() - inv_node.sync_with_ping() - # This block should not be announced to the inv node (since it also - # broadcast it) - assert "inv" not in inv_node.last_message - assert "headers" not in inv_node.last_message - tip = self.mine_blocks(1) - inv_node.check_last_announcement(inv=[tip], headers=[]) - test_node.check_last_announcement(headers=[tip]) - height += 1 - block_time += 1 - - self.log.info("Part 2: success!") - - self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") - - # PART 3. Headers announcements can stop after large reorg, and resume after - # getheaders or inv from peer. 
- for j in range(2): - # First try mining a reorg that can propagate with header announcement - new_block_hashes = self.mine_reorg(length=7) - tip = new_block_hashes[-1] - inv_node.check_last_announcement(inv=[tip], headers=[]) - test_node.check_last_announcement(headers=new_block_hashes) - - block_time += 8 - - # Mine a too-large reorg, which should be announced with a single inv - new_block_hashes = self.mine_reorg(length=8) - tip = new_block_hashes[-1] - inv_node.check_last_announcement(inv=[tip], headers=[]) - test_node.check_last_announcement(inv=[tip], headers=[]) - - block_time += 9 - - fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"] - fork_point = int(fork_point, 16) - - # Use getblocks/getdata - test_node.send_getblocks(locator=[fork_point]) - test_node.check_last_announcement(inv=new_block_hashes, headers=[]) - test_node.send_get_data(new_block_hashes) - test_node.wait_for_block(new_block_hashes[-1]) - - for i in range(3): - # Mine another block, still should get only an inv - tip = self.mine_blocks(1) - inv_node.check_last_announcement(inv=[tip], headers=[]) - test_node.check_last_announcement(inv=[tip], headers=[]) - if i == 0: - # Just get the data -- shouldn't cause headers announcements to resume - test_node.send_get_data([tip]) - test_node.wait_for_block(tip) - elif i == 1: - # Send a getheaders message that shouldn't trigger headers announcements - # to resume (best header sent will be too old) - test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1]) - test_node.send_get_data([tip]) - test_node.wait_for_block(tip) - elif i == 2: - test_node.send_get_data([tip]) - test_node.wait_for_block(tip) - # This time, try sending either a getheaders to trigger resumption - # of headers announcements, or mine a new block and inv it, also - # triggering resumption of headers announcements. - if j == 0: - test_node.send_get_headers(locator=[tip], hashstop=0) - test_node.sync_with_ping() - else: - test_node.send_block_inv(tip) - test_node.sync_with_ping() - # New blocks should now be announced with header - tip = self.mine_blocks(1) - inv_node.check_last_announcement(inv=[tip], headers=[]) - test_node.check_last_announcement(headers=[tip]) - - self.log.info("Part 3: success!") - - self.log.info("Part 4: Testing direct fetch behavior...") - tip = self.mine_blocks(1) - height = self.nodes[0].getblockcount() + 1 - last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] - block_time = last_time + 1 - - # Create 2 blocks. Send the blocks, then send the headers. 
- blocks = [] - for b in range(2): - blocks.append(create_block(tip, create_coinbase(height), block_time)) - blocks[-1].solve() - tip = blocks[-1].sha256 - block_time += 1 - height += 1 - inv_node.send_message(msg_block(blocks[-1])) - - inv_node.sync_with_ping() # Make sure blocks are processed - test_node.last_message.pop("getdata", None) - test_node.send_header_for_blocks(blocks) - test_node.sync_with_ping() - # should not have received any getdata messages - with mininode_lock: - assert "getdata" not in test_node.last_message - - # This time, direct fetch should work - blocks = [] - for b in range(3): - blocks.append(create_block(tip, create_coinbase(height), block_time)) - blocks[-1].solve() - tip = blocks[-1].sha256 - block_time += 1 - height += 1 - - test_node.send_header_for_blocks(blocks) - test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME) - - [test_node.send_message(msg_block(x)) for x in blocks] - - test_node.sync_with_ping() - - # Now announce a header that forks the last two blocks - tip = blocks[0].sha256 - height -= 1 - blocks = [] - - # Create extra blocks for later - for b in range(20): - blocks.append(create_block(tip, create_coinbase(height), block_time)) - blocks[-1].solve() - tip = blocks[-1].sha256 - block_time += 1 - height += 1 - - # Announcing one block on fork should not trigger direct fetch - # (less work than tip) - test_node.last_message.pop("getdata", None) - test_node.send_header_for_blocks(blocks[0:1]) - test_node.sync_with_ping() - with mininode_lock: - assert "getdata" not in test_node.last_message - - # Announcing one more block on fork should trigger direct fetch for - # both blocks (same work as tip) - test_node.send_header_for_blocks(blocks[1:2]) - test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME) - - # Announcing 16 more headers should trigger direct fetch for 14 more - # blocks - test_node.send_header_for_blocks(blocks[2:18]) - test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME) - - # Announcing 1 more header should not trigger any response - test_node.last_message.pop("getdata", None) - test_node.send_header_for_blocks(blocks[18:19]) - test_node.sync_with_ping() - with mininode_lock: - assert "getdata" not in test_node.last_message - - self.log.info("Part 4: success!") - - # Now deliver all those blocks we announced. - [test_node.send_message(msg_block(x)) for x in blocks] - - self.log.info("Part 5: Testing handling of unconnecting headers") - # First we test that receipt of an unconnecting header doesn't prevent - # chain sync. - for i in range(10): - test_node.last_message.pop("getdata", None) - blocks = [] - # Create two more blocks. - for j in range(2): - blocks.append(create_block(tip, create_coinbase(height), block_time)) - blocks[-1].solve() - tip = blocks[-1].sha256 - block_time += 1 - height += 1 - # Send the header of the second block -> this won't connect. 
- with mininode_lock: - test_node.last_message.pop("getheaders", None) - test_node.send_header_for_blocks([blocks[1]]) - test_node.wait_for_getheaders() - test_node.send_header_for_blocks(blocks) - test_node.wait_for_getdata([x.sha256 for x in blocks]) - [test_node.send_message(msg_block(x)) for x in blocks] - test_node.sync_with_ping() - assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) - - blocks = [] - # Now we test that if we repeatedly don't send connecting headers, we - # don't go into an infinite loop trying to get them to connect. - MAX_UNCONNECTING_HEADERS = 10 - for j in range(MAX_UNCONNECTING_HEADERS + 1): - blocks.append(create_block(tip, create_coinbase(height), block_time)) - blocks[-1].solve() - tip = blocks[-1].sha256 - block_time += 1 - height += 1 - - for i in range(1, MAX_UNCONNECTING_HEADERS): - # Send a header that doesn't connect, check that we get a getheaders. - with mininode_lock: - test_node.last_message.pop("getheaders", None) - test_node.send_header_for_blocks([blocks[i]]) - test_node.wait_for_getheaders() - - # Next header will connect, should re-set our count: - test_node.send_header_for_blocks([blocks[0]]) - - # Remove the first two entries (blocks[1] would connect): - blocks = blocks[2:] - - # Now try to see how many unconnecting headers we can send - # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS - for i in range(5 * MAX_UNCONNECTING_HEADERS - 1): - # Send a header that doesn't connect, check that we get a getheaders. - with mininode_lock: - test_node.last_message.pop("getheaders", None) - test_node.send_header_for_blocks([blocks[i % len(blocks)]]) - test_node.wait_for_getheaders() - - # Eventually this stops working. - test_node.send_header_for_blocks([blocks[-1]]) - - # Should get disconnected - test_node.wait_for_disconnect() - - self.log.info("Part 5: success!") - - # Finally, check that the inv node never received a getdata request, - # throughout the test - assert "getdata" not in inv_node.last_message - -if __name__ == '__main__': - SendHeadersTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index edc1fed8cb..26beef5a52 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -60,16 +60,16 @@ BASE_SCRIPTS= [ # vv Tests less than 5m vv 'feature_block.py', 'fundrawtransaction.py', - 'p2p-compactblocks.py', + 'p2p_compactblocks.py', 'feature_segwit.py', # vv Tests less than 2m vv 'wallet_basic.py', 'wallet_accounts.py', - 'p2p-segwit.py', + 'p2p_segwit.py', 'wallet_dump.py', 'listtransactions.py', # vv Tests less than 60s vv - 'sendheaders.py', + 'p2p_sendheaders.py', 'wallet_zapwallettxes.py', 'wallet_importmulti.py', 'mempool_limit.py', @@ -99,17 +99,17 @@ BASE_SCRIPTS= [ 'multi_rpc.py', 'feature_proxy.py', 'signrawtransactions.py', - 'disconnect_ban.py', + 'p2p_disconnect_ban.py', 'decodescript.py', 'blockchain.py', 'deprecated_rpc.py', 'wallet_disable.py', 'net.py', 'wallet_keypool.py', - 'p2p-mempool.py', + 'p2p_mempool.py', 'prioritise_transaction.py', - 'invalidblockrequest.py', - 'invalidtxrequest.py', + 'p2p_invalid_block.py', + 'p2p_invalid_tx.py', 'feature_versionbits_warning.py', 'preciousblock.py', 'wallet_importprunedfunds.py', @@ -120,18 +120,18 @@ BASE_SCRIPTS= [ 'wallet_bumpfee.py', 'rpcnamedargs.py', 'wallet_listsinceblock.py', - 'p2p-leaktests.py', + 'p2p_leak.py', 'wallet_encryption.py', 'feature_dersig.py', 'feature_cltv.py', 'uptime.py', 'wallet_resendwallettransactions.py', 'feature_minchainwork.py', - 
'p2p-fingerprint.py', + 'p2p_fingerprint.py', 'feature_uacomment.py', - 'p2p-acceptblock.py', + 'p2p_unrequested_blocks.py', 'feature_logging.py', - 'node_network_limited.py', + 'p2p_node_network_limited.py', 'feature_config_args.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time @@ -150,10 +150,10 @@ EXTENDED_SCRIPTS = [ # vv Tests less than 2m vv 'feature_bip68_sequence.py', 'getblocktemplate_longpoll.py', - 'p2p-timeouts.py', + 'p2p_timeouts.py', # vv Tests less than 60s vv 'feature_bip9_softforks.py', - 'p2p-feefilter.py', + 'p2p_feefilter.py', 'rpcbind_test.py', # vv Tests less than 30s vv 'feature_assumevalid.py', @@ -474,7 +474,7 @@ class TestResult(): def check_script_prefixes(): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" - EXPECTED_VIOLATION_COUNT = 37 + EXPECTED_VIOLATION_COUNT = 24 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming -- cgit v1.2.3 From 81b79f2c394c08e2bcf612f89a230afb60074a81 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Thu, 25 Jan 2018 09:44:29 +1000 Subject: [tests] Rename rpc_* functional tests. --- test/functional/blockchain.py | 227 --------- test/functional/decodescript.py | 179 -------- test/functional/deprecated_rpc.py | 27 -- test/functional/fundrawtransaction.py | 732 ------------------------------ test/functional/getchaintips.py | 62 --- test/functional/invalidateblock.py | 64 --- test/functional/listtransactions.py | 202 --------- test/functional/merkle_blocks.py | 83 ---- test/functional/multi_rpc.py | 153 ------- test/functional/net.py | 98 ---- test/functional/preciousblock.py | 114 ----- test/functional/rawtransactions.py | 323 ------------- test/functional/rpc_bind.py | 107 +++++ test/functional/rpc_blockchain.py | 227 +++++++++ test/functional/rpc_decodescript.py | 179 ++++++++ test/functional/rpc_deprecated.py | 27 ++ test/functional/rpc_fundrawtransaction.py | 732 ++++++++++++++++++++++++++++++ test/functional/rpc_getchaintips.py | 62 +++ test/functional/rpc_invalidateblock.py | 64 +++ test/functional/rpc_listtransactions.py | 202 +++++++++ test/functional/rpc_named_arguments.py | 34 ++ test/functional/rpc_net.py | 98 ++++ test/functional/rpc_preciousblock.py | 114 +++++ test/functional/rpc_rawtransaction.py | 323 +++++++++++++ test/functional/rpc_signmessage.py | 39 ++ test/functional/rpc_signrawtransaction.py | 143 ++++++ test/functional/rpc_txoutproof.py | 83 ++++ test/functional/rpc_uptime.py | 30 ++ test/functional/rpc_users.py | 153 +++++++ test/functional/rpcbind_test.py | 107 ----- test/functional/rpcnamedargs.py | 34 -- test/functional/signmessages.py | 39 -- test/functional/signrawtransactions.py | 143 ------ test/functional/test_runner.py | 36 +- test/functional/uptime.py | 30 -- 35 files changed, 2635 insertions(+), 2635 deletions(-) delete mode 100755 test/functional/blockchain.py delete mode 100755 test/functional/decodescript.py delete mode 100755 test/functional/deprecated_rpc.py delete mode 100755 test/functional/fundrawtransaction.py delete mode 100755 test/functional/getchaintips.py delete mode 100755 test/functional/invalidateblock.py delete mode 100755 test/functional/listtransactions.py delete mode 100755 test/functional/merkle_blocks.py delete mode 100755 test/functional/multi_rpc.py delete mode 100755 test/functional/net.py delete mode 100755 
test/functional/preciousblock.py delete mode 100755 test/functional/rawtransactions.py create mode 100755 test/functional/rpc_bind.py create mode 100755 test/functional/rpc_blockchain.py create mode 100755 test/functional/rpc_decodescript.py create mode 100755 test/functional/rpc_deprecated.py create mode 100755 test/functional/rpc_fundrawtransaction.py create mode 100755 test/functional/rpc_getchaintips.py create mode 100755 test/functional/rpc_invalidateblock.py create mode 100755 test/functional/rpc_listtransactions.py create mode 100755 test/functional/rpc_named_arguments.py create mode 100755 test/functional/rpc_net.py create mode 100755 test/functional/rpc_preciousblock.py create mode 100755 test/functional/rpc_rawtransaction.py create mode 100755 test/functional/rpc_signmessage.py create mode 100755 test/functional/rpc_signrawtransaction.py create mode 100755 test/functional/rpc_txoutproof.py create mode 100755 test/functional/rpc_uptime.py create mode 100755 test/functional/rpc_users.py delete mode 100755 test/functional/rpcbind_test.py delete mode 100755 test/functional/rpcnamedargs.py delete mode 100755 test/functional/signmessages.py delete mode 100755 test/functional/signrawtransactions.py delete mode 100755 test/functional/uptime.py (limited to 'test') diff --git a/test/functional/blockchain.py b/test/functional/blockchain.py deleted file mode 100755 index 11acff4be1..0000000000 --- a/test/functional/blockchain.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test RPCs related to blockchainstate. - -Test the following RPCs: - - getblockchaininfo - - gettxoutsetinfo - - getdifficulty - - getbestblockhash - - getblockhash - - getblockheader - - getchaintxstats - - getnetworkhashps - - verifychain - -Tests correspond to code in rpc/blockchain.cpp. 
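(For orientation outside the test framework: the same RPCs can be exercised with a bare JSON-RPC call. A minimal sketch, assuming a regtest node on the default port 18443 and hypothetical credentials user/pass; this is an illustration, not part of the test suite.)

    #!/usr/bin/env python3
    # Minimal JSON-RPC sketch, separate from the test framework. Assumes a
    # regtest node on 127.0.0.1:18443 with hypothetical rpcuser 'user' and
    # rpcpassword 'pass'.
    import base64
    import http.client
    import json

    def rpc_call(method, params=None, host="127.0.0.1", port=18443,
                 user="user", password="pass"):
        payload = json.dumps({"jsonrpc": "1.0", "id": "example",
                              "method": method, "params": params or []})
        auth = base64.b64encode("{}:{}".format(user, password).encode()).decode()
        conn = http.client.HTTPConnection(host, port)
        conn.request("POST", "/", payload,
                     {"Authorization": "Basic " + auth,
                      "Content-Type": "application/json"})
        result = json.loads(conn.getresponse().read())["result"]
        conn.close()
        return result

    info = rpc_call("getblockchaininfo")
    print(info["chain"], info["blocks"], info["pruned"])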
-""" - -from decimal import Decimal -import http.client -import subprocess - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_greater_than, - assert_greater_than_or_equal, - assert_raises, - assert_raises_rpc_error, - assert_is_hex_string, - assert_is_hash_string, -) - -class BlockchainTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-stopatheight=207', '-prune=1']] - - def run_test(self): - self._test_getblockchaininfo() - self._test_getchaintxstats() - self._test_gettxoutsetinfo() - self._test_getblockheader() - self._test_getdifficulty() - self._test_getnetworkhashps() - self._test_stopatheight() - assert self.nodes[0].verifychain(4, 0) - - def _test_getblockchaininfo(self): - self.log.info("Test getblockchaininfo") - - keys = [ - 'bestblockhash', - 'bip9_softforks', - 'blocks', - 'chain', - 'chainwork', - 'difficulty', - 'headers', - 'initialblockdownload', - 'mediantime', - 'pruned', - 'size_on_disk', - 'softforks', - 'verificationprogress', - 'warnings', - ] - res = self.nodes[0].getblockchaininfo() - - # result should have these additional pruning keys if manual pruning is enabled - assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys)) - - # size_on_disk should be > 0 - assert_greater_than(res['size_on_disk'], 0) - - # pruneheight should be greater or equal to 0 - assert_greater_than_or_equal(res['pruneheight'], 0) - - # check other pruning fields given that prune=1 - assert res['pruned'] - assert not res['automatic_pruning'] - - self.restart_node(0, ['-stopatheight=207']) - res = self.nodes[0].getblockchaininfo() - # should have exact keys - assert_equal(sorted(res.keys()), keys) - - self.restart_node(0, ['-stopatheight=207', '-prune=550']) - res = self.nodes[0].getblockchaininfo() - # result should have these additional pruning keys if prune=550 - assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys)) - - # check related fields - assert res['pruned'] - assert_equal(res['pruneheight'], 0) - assert res['automatic_pruning'] - assert_equal(res['prune_target_size'], 576716800) - assert_greater_than(res['size_on_disk'], 0) - - def _test_getchaintxstats(self): - chaintxstats = self.nodes[0].getchaintxstats(1) - # 200 txs plus genesis tx - assert_equal(chaintxstats['txcount'], 201) - # tx rate should be 1 per 10 minutes, or 1/600 - # we have to round because of binary math - assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1)) - - b1 = self.nodes[0].getblock(self.nodes[0].getblockhash(1)) - b200 = self.nodes[0].getblock(self.nodes[0].getblockhash(200)) - time_diff = b200['mediantime'] - b1['mediantime'] - - chaintxstats = self.nodes[0].getchaintxstats() - assert_equal(chaintxstats['time'], b200['time']) - assert_equal(chaintxstats['txcount'], 201) - assert_equal(chaintxstats['window_block_count'], 199) - assert_equal(chaintxstats['window_tx_count'], 199) - assert_equal(chaintxstats['window_interval'], time_diff) - assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199)) - - chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1['hash']) - assert_equal(chaintxstats['time'], b1['time']) - assert_equal(chaintxstats['txcount'], 2) - assert_equal(chaintxstats['window_block_count'], 0) - assert('window_tx_count' not in chaintxstats) - assert('window_interval' not in chaintxstats) - assert('txrate' not in chaintxstats) - - assert_raises_rpc_error(-8, 
"Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, 201) - - def _test_gettxoutsetinfo(self): - node = self.nodes[0] - res = node.gettxoutsetinfo() - - assert_equal(res['total_amount'], Decimal('8725.00000000')) - assert_equal(res['transactions'], 200) - assert_equal(res['height'], 200) - assert_equal(res['txouts'], 200) - assert_equal(res['bogosize'], 17000), - assert_equal(res['bestblock'], node.getblockhash(200)) - size = res['disk_size'] - assert size > 6400 - assert size < 64000 - assert_equal(len(res['bestblock']), 64) - assert_equal(len(res['hash_serialized_2']), 64) - - self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block") - b1hash = node.getblockhash(1) - node.invalidateblock(b1hash) - - res2 = node.gettxoutsetinfo() - assert_equal(res2['transactions'], 0) - assert_equal(res2['total_amount'], Decimal('0')) - assert_equal(res2['height'], 0) - assert_equal(res2['txouts'], 0) - assert_equal(res2['bogosize'], 0), - assert_equal(res2['bestblock'], node.getblockhash(0)) - assert_equal(len(res2['hash_serialized_2']), 64) - - self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block") - node.reconsiderblock(b1hash) - - res3 = node.gettxoutsetinfo() - assert_equal(res['total_amount'], res3['total_amount']) - assert_equal(res['transactions'], res3['transactions']) - assert_equal(res['height'], res3['height']) - assert_equal(res['txouts'], res3['txouts']) - assert_equal(res['bogosize'], res3['bogosize']) - assert_equal(res['bestblock'], res3['bestblock']) - assert_equal(res['hash_serialized_2'], res3['hash_serialized_2']) - - def _test_getblockheader(self): - node = self.nodes[0] - - assert_raises_rpc_error(-5, "Block not found", - node.getblockheader, "nonsense") - - besthash = node.getbestblockhash() - secondbesthash = node.getblockhash(199) - header = node.getblockheader(besthash) - - assert_equal(header['hash'], besthash) - assert_equal(header['height'], 200) - assert_equal(header['confirmations'], 1) - assert_equal(header['previousblockhash'], secondbesthash) - assert_is_hex_string(header['chainwork']) - assert_is_hash_string(header['hash']) - assert_is_hash_string(header['previousblockhash']) - assert_is_hash_string(header['merkleroot']) - assert_is_hash_string(header['bits'], length=None) - assert isinstance(header['time'], int) - assert isinstance(header['mediantime'], int) - assert isinstance(header['nonce'], int) - assert isinstance(header['version'], int) - assert isinstance(int(header['versionHex'], 16), int) - assert isinstance(header['difficulty'], Decimal) - - def _test_getdifficulty(self): - difficulty = self.nodes[0].getdifficulty() - # 1 hash in 2 should be valid, so difficulty should be 1/2**31 - # binary => decimal => binary math is why we do this check - assert abs(difficulty * 2**31 - 1) < 0.0001 - - def _test_getnetworkhashps(self): - hashes_per_second = self.nodes[0].getnetworkhashps() - # This should be 2 hashes every 10 minutes or 1/300 - assert abs(hashes_per_second * 300 - 1) < 0.0001 - - def _test_stopatheight(self): - assert_equal(self.nodes[0].getblockcount(), 200) - self.nodes[0].generate(6) - assert_equal(self.nodes[0].getblockcount(), 206) - self.log.debug('Node should not stop at this height') - assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3)) - try: - self.nodes[0].generate(1) - except (ConnectionError, http.client.BadStatusLine): - pass # The node already shut down before response - 
self.log.debug('Node should stop at this height...') - self.nodes[0].wait_until_stopped() - self.start_node(0) - assert_equal(self.nodes[0].getblockcount(), 207) - - -if __name__ == '__main__': - BlockchainTest().main() diff --git a/test/functional/decodescript.py b/test/functional/decodescript.py deleted file mode 100755 index 1ffc570437..0000000000 --- a/test/functional/decodescript.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test decoding scripts via decodescript RPC command.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import * -from io import BytesIO - -class DecodeScriptTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def decodescript_script_sig(self): - signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' - push_signature = '48' + signature - public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' - push_public_key = '21' + public_key - - # below are test cases for all of the standard transaction types - - # 1) P2PK scriptSig - # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack - rpc_result = self.nodes[0].decodescript(push_signature) - assert_equal(signature, rpc_result['asm']) - - # 2) P2PKH scriptSig - rpc_result = self.nodes[0].decodescript(push_signature + push_public_key) - assert_equal(signature + ' ' + public_key, rpc_result['asm']) - - # 3) multisig scriptSig - # this also tests the leading portion of a P2SH multisig scriptSig - # OP_0 - rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature) - assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm']) - - # 4) P2SH scriptSig - # an empty P2SH redeemScript is valid and makes for a very simple test case. - # thus, such a spending scriptSig would just need to pass the outer redeemScript - # hash test and leave true on the top of the stack. - rpc_result = self.nodes[0].decodescript('5100') - assert_equal('1 0', rpc_result['asm']) - - # 5) null data scriptSig - no such thing because null data scripts can not be spent. - # thus, no test case for that standard transaction type is here. - - def decodescript_script_pub_key(self): - public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' - push_public_key = '21' + public_key - public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777' - push_public_key_hash = '14' + public_key_hash - - # below are test cases for all of the standard transaction types - - # 1) P2PK scriptPubKey - # OP_CHECKSIG - rpc_result = self.nodes[0].decodescript(push_public_key + 'ac') - assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm']) - - # 2) P2PKH scriptPubKey - # OP_DUP OP_HASH160 OP_EQUALVERIFY OP_CHECKSIG - rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac') - assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm']) - - # 3) multisig scriptPubKey - # OP_CHECKMULTISIG - # just imagine that the pub keys used below are different. - # for our purposes here it does not matter that they are the same even though it is unrealistic. 
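(The hex passed to decodescript here is raw script: single-byte opcodes plus length-prefixed pushes. A standalone sketch assembling the same 2-of-3 CHECKMULTISIG script from its parts, using the opcode values 0x52 = OP_2, 0x53 = OP_3, 0xae = OP_CHECKMULTISIG:)

    # Sketch: build the 2-of-3 CHECKMULTISIG scriptPubKey hex fed to
    # decodescript below. Each 33-byte compressed pubkey gets a 0x21
    # push-length prefix.
    def p2ms_hex(pubkeys):
        pushes = "".join("21" + pk for pk in pubkeys)
        return "52" + pushes + "53ae"  # OP_2 <pk> <pk> <pk> OP_3 OP_CHECKMULTISIG

    pk = "03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2"
    assert p2ms_hex([pk] * 3) == "52" + ("21" + pk) * 3 + "53ae"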
- rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae') - assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm']) - - # 4) P2SH scriptPubKey - # OP_HASH160 OP_EQUAL. - # push_public_key_hash here should actually be the hash of a redeem script. - # but this works the same for purposes of this test. - rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87') - assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm']) - - # 5) null data scriptPubKey - # use a signature look-alike here to make sure that we do not decode random data as a signature. - # this matters if/when signature sighash decoding comes along. - # would want to make sure that no such decoding takes place in this case. - signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' - # OP_RETURN - rpc_result = self.nodes[0].decodescript('6a' + signature_imposter) - assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm']) - - # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here. - # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY. - # just imagine that the pub keys used below are different. - # for our purposes here it does not matter that they are the same even though it is unrealistic. - # - # OP_IF - # OP_CHECKSIGVERIFY - # OP_ELSE - # OP_CHECKLOCKTIMEVERIFY OP_DROP - # OP_ENDIF - # OP_CHECKSIG - # - # lock until block 500,000 - rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac') - assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm']) - - def decoderawtransaction_asm_sighashtype(self): - """Test decoding scripts via RPC command "decoderawtransaction". - - This test is in with the "decodescript" tests because they are testing the same "asm" script decodes. - """ - - # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output - tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' - rpc_result = self.nodes[0].decoderawtransaction(tx) - assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) - - # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs. - # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc - # verify that we have not altered scriptPubKey decoding. 
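(One detail worth pinning down from the CLTV case above: the decoded 500000 comes from the three push bytes 20a107, read as a little-endian script number. A one-line standalone check:)

    # The CLTV redeem script above pushes the bytes 20a107 before
    # OP_CHECKLOCKTIMEVERIFY; script integers are little-endian.
    assert int.from_bytes(bytes.fromhex("20a107"), "little") == 500000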
- tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' - rpc_result = self.nodes[0].decoderawtransaction(tx) - assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) - assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) - assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) - assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) - txSave = CTransaction() - txSave.deserialize(BytesIO(hex_str_to_bytes(tx))) - - # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type - tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' - rpc_result = self.nodes[0].decoderawtransaction(tx) - assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) - - # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks - tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' - rpc_result = self.nodes[0].decoderawtransaction(tx) - 
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) - assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) - - # some more full transaction tests of varying specific scriptSigs. used instead of - # tests in decodescript_script_sig because the decodescript RPC is specifically - # for working on scriptPubKeys (argh!). - push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)] - signature = push_signature[2:] - der_signature = signature[:-2] - signature_sighash_decoded = der_signature + '[ALL]' - signature_2 = der_signature + '82' - push_signature_2 = '48' + signature_2 - signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]' - - # 1) P2PK scriptSig - txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) - rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) - assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) - - # make sure that the sighash decodes come out correctly for a more complex / lesser used case. - txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) - rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) - assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) - - # 2) multisig scriptSig - txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2) - rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) - assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) - - # 3) test a scriptSig that contains more than push operations. - # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it. - txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101') - rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) - assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) - - def run_test(self): - self.decodescript_script_sig() - self.decodescript_script_pub_key() - self.decoderawtransaction_asm_sighashtype() - -if __name__ == '__main__': - DecodeScriptTest().main() diff --git a/test/functional/deprecated_rpc.py b/test/functional/deprecated_rpc.py deleted file mode 100755 index d6f25158ef..0000000000 --- a/test/functional/deprecated_rpc.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
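(The sighash labels asserted above, '01' rendered as [ALL] and '82' as [NONE|ANYONECANPAY], follow from the flag byte's layout: a base type of 1, 2 or 3 plus the 0x80 ANYONECANPAY bit. A simplified standalone sketch of the labelling, not the exact implementation:)

    # Simplified sketch of the sighash-byte labelling asserted above.
    SIGHASH_BASE = {1: "ALL", 2: "NONE", 3: "SINGLE"}
    ANYONECANPAY = 0x80

    def sighash_label(flag):
        base = SIGHASH_BASE[flag & 0x1f]
        if flag & ANYONECANPAY:
            return "[{}|ANYONECANPAY]".format(base)
        return "[{}]".format(base)

    assert sighash_label(0x01) == "[ALL]"
    assert sighash_label(0x82) == "[NONE|ANYONECANPAY]"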
-"""Test deprecation of RPC calls.""" -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_raises_rpc_error - -class DeprecatedRpcTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.setup_clean_chain = True - self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]] - - def run_test(self): - self.log.info("estimatefee: Shows deprecated message") - assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1) - - self.log.info("Using -deprecatedrpc=estimatefee bypasses the error") - self.nodes[1].estimatefee(1) - - self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses") - assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()]) - self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()]) - -if __name__ == '__main__': - DeprecatedRpcTest().main() diff --git a/test/functional/fundrawtransaction.py b/test/functional/fundrawtransaction.py deleted file mode 100755 index 5f7a0586e0..0000000000 --- a/test/functional/fundrawtransaction.py +++ /dev/null @@ -1,732 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the fundrawtransaction RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - - -def get_unspent(listunspent, amount): - for utx in listunspent: - if utx['amount'] == amount: - return utx - raise AssertionError('Could not find unspent with amount={}'.format(amount)) - -class RawTransactionsTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = True - - def setup_network(self, split=False): - self.setup_nodes() - - connect_nodes_bi(self.nodes, 0, 1) - connect_nodes_bi(self.nodes, 1, 2) - connect_nodes_bi(self.nodes, 0, 2) - connect_nodes_bi(self.nodes, 0, 3) - - def run_test(self): - min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] - # This test is not meant to test fee estimation and we'd like - # to be sure all txs are sent at a consistent desired feerate - for node in self.nodes: - node.settxfee(min_relay_tx_fee) - - # if the fee's positive delta is higher than this value tests will fail, - # neg. delta always fail the tests. - # The size of the signature of every input may be at most 2 bytes larger - # than a minimum sized signature. 
- - # = 2 bytes * minRelayTxFeePerByte - feeTolerance = 2 * min_relay_tx_fee/1000 - - self.nodes[2].generate(1) - self.sync_all() - self.nodes[0].generate(121) - self.sync_all() - - # ensure that setting changePosition in fundraw with an exact match is handled properly - rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50}) - rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]}) - assert_equal(rawmatch["changepos"], -1) - - watchonly_address = self.nodes[0].getnewaddress() - watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"] - watchonly_amount = Decimal(200) - self.nodes[3].importpubkey(watchonly_pubkey, "", True) - watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount) - self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10) - - self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) - self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) - self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) - - self.nodes[0].generate(1) - self.sync_all() - - ############### - # simple test # - ############### - inputs = [ ] - outputs = { self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - assert(len(dec_tx['vin']) > 0) #test that we have enough inputs - - ############################## - # simple test with two coins # - ############################## - inputs = [ ] - outputs = { self.nodes[0].getnewaddress() : 2.2 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - assert(len(dec_tx['vin']) > 0) #test if we have enough inputs - - ############################## - # simple test with two coins # - ############################## - inputs = [ ] - outputs = { self.nodes[0].getnewaddress() : 2.6 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - assert(len(dec_tx['vin']) > 0) - assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') - - - ################################ - # simple test with two outputs # - ################################ - inputs = [ ] - outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - for out in dec_tx['vout']: - totalOut += out['value'] - - assert(len(dec_tx['vin']) > 0) - assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') - - - ######################################################################### - # test a fundrawtransaction with a VIN greater than the required amount # - ######################################################################### - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : 
utx['txid'], 'vout' : utx['vout']}] - outputs = { self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - for out in dec_tx['vout']: - totalOut += out['value'] - - assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee - - - ##################################################################### - # test a fundrawtransaction with which will not get a change output # - ##################################################################### - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] - outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - for out in dec_tx['vout']: - totalOut += out['value'] - - assert_equal(rawtxfund['changepos'], -1) - assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee - - - #################################################### - # test a fundrawtransaction with an invalid option # - #################################################### - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'}) - - ############################################################ - # test a fundrawtransaction with an invalid change address # - ############################################################ - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - assert_raises_rpc_error(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'}) - - ############################################################ - # test a fundrawtransaction with a provided change address # - ############################################################ - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - change = self.nodes[2].getnewaddress() - assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2}) - rawtxfund = 
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0}) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - out = dec_tx['vout'][0] - assert_equal(change, out['scriptPubKey']['addresses'][0]) - - ######################################################### - # test a fundrawtransaction with a provided change type # - ######################################################### - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None}) - assert_raises_rpc_error(-5, "Unknown change type", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''}) - rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'}) - tx = self.nodes[2].decoderawtransaction(rawtx['hex']) - assert_equal('witness_v0_keyhash', tx['vout'][rawtx['changepos']]['scriptPubKey']['type']) - - ######################################################################### - # test a fundrawtransaction with a VIN smaller than the required amount # - ######################################################################### - utx = get_unspent(self.nodes[2].listunspent(), 1) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] - outputs = { self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - - # 4-byte version + 1-byte vin count + 36-byte prevout then script_len - rawtx = rawtx[:82] + "0100" + rawtx[84:] - - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - matchingOuts = 0 - for i, out in enumerate(dec_tx['vout']): - totalOut += out['value'] - if out['scriptPubKey']['addresses'][0] in outputs: - matchingOuts+=1 - else: - assert_equal(i, rawtxfund['changepos']) - - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) - - assert_equal(matchingOuts, 1) - assert_equal(len(dec_tx['vout']), 2) - - - ########################################### - # test a fundrawtransaction with two VINs # - ########################################### - utx = get_unspent(self.nodes[2].listunspent(), 1) - utx2 = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] - outputs = { self.nodes[0].getnewaddress() : 6.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - matchingOuts = 0 - for out in dec_tx['vout']: - totalOut += out['value'] - if out['scriptPubKey']['addresses'][0] in outputs: - matchingOuts+=1 - - assert_equal(matchingOuts, 1) - assert_equal(len(dec_tx['vout']), 2) - - matchingIns = 0 - for vinOut in dec_tx['vin']: - for vinIn in inputs: - if vinIn['txid'] == vinOut['txid']: - matchingIns+=1 - - assert_equal(matchingIns, 2) 
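(The hex-slicing edit above leans on fixed offsets in a serialized one-input transaction: 4 version bytes, a 1-byte input count, and a 36-byte outpoint place the scriptSig length byte at hex offset 82. Restating that arithmetic as a standalone check:)

    # Offsets behind the rawtx[:82] + "0100" + rawtx[84:] edit above
    # (2 hex characters per byte, single-input transaction):
    version, vin_count, outpoint = 4, 1, 36            # bytes
    assert 2 * (version + vin_count + outpoint) == 82  # script_len offset
    # "0100" then writes length 0x01 plus a single 0x00 byte, which is
    # why the scriptSig decodes to hex "00" in the assertions above.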
#we now must see two vins identical to vins given as params - - ######################################################### - # test a fundrawtransaction with two VINs and two vOUTs # - ######################################################### - utx = get_unspent(self.nodes[2].listunspent(), 1) - utx2 = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] - outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - matchingOuts = 0 - for out in dec_tx['vout']: - totalOut += out['value'] - if out['scriptPubKey']['addresses'][0] in outputs: - matchingOuts+=1 - - assert_equal(matchingOuts, 2) - assert_equal(len(dec_tx['vout']), 3) - - ############################################## - # test a fundrawtransaction with invalid vin # - ############################################## - inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin! - outputs = { self.nodes[0].getnewaddress() : 1.0} - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - - assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx) - - ############################################################ - #compare fee of a standard pubkeyhash transaction - inputs = [] - outputs = {self.nodes[1].getnewaddress():1.1} - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - - #create same transaction over sendtoaddress - txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1) - signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] - - #compare fee - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) - ############################################################ - - ############################################################ - #compare fee of a standard pubkeyhash transaction with multiple outputs - inputs = [] - outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3} - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - #create same transaction over sendtoaddress - txId = self.nodes[0].sendmany("", outputs) - signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] - - #compare fee - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) - ############################################################ - - - ############################################################ - #compare fee of a 2of2 multisig p2sh transaction - - # create 2of2 addr - addr1 = self.nodes[1].getnewaddress() - addr2 = self.nodes[1].getnewaddress() - - addr1Obj = self.nodes[1].validateaddress(addr1) - addr2Obj = self.nodes[1].validateaddress(addr2) - - mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] - - 
inputs = [] - outputs = {mSigObj:1.1} - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - - #create same transaction over sendtoaddress - txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) - signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] - - #compare fee - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) - ############################################################ - - - ############################################################ - #compare fee of a standard pubkeyhash transaction - - # create 4of5 addr - addr1 = self.nodes[1].getnewaddress() - addr2 = self.nodes[1].getnewaddress() - addr3 = self.nodes[1].getnewaddress() - addr4 = self.nodes[1].getnewaddress() - addr5 = self.nodes[1].getnewaddress() - - addr1Obj = self.nodes[1].validateaddress(addr1) - addr2Obj = self.nodes[1].validateaddress(addr2) - addr3Obj = self.nodes[1].validateaddress(addr3) - addr4Obj = self.nodes[1].validateaddress(addr4) - addr5Obj = self.nodes[1].validateaddress(addr5) - - mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address'] - - inputs = [] - outputs = {mSigObj:1.1} - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - - #create same transaction over sendtoaddress - txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) - signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] - - #compare fee - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) - ############################################################ - - - ############################################################ - # spend a 2of2 multisig transaction over fundraw - - # create 2of2 addr - addr1 = self.nodes[2].getnewaddress() - addr2 = self.nodes[2].getnewaddress() - - addr1Obj = self.nodes[2].validateaddress(addr1) - addr2Obj = self.nodes[2].validateaddress(addr2) - - mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] - - - # send 1.2 BTC to msig addr - txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) - self.sync_all() - self.nodes[1].generate(1) - self.sync_all() - - oldBalance = self.nodes[1].getbalance() - inputs = [] - outputs = {self.nodes[1].getnewaddress():1.1} - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[2].fundrawtransaction(rawtx) - - signedTx = self.nodes[2].signrawtransaction(fundedTx['hex']) - txId = self.nodes[2].sendrawtransaction(signedTx['hex']) - self.sync_all() - self.nodes[1].generate(1) - self.sync_all() - - # make sure funds are received at node1 - assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance()) - - ############################################################ - # locked wallet test - self.stop_node(0) - self.nodes[1].node_encrypt_wallet("test") - self.stop_node(2) - self.stop_node(3) - - self.start_nodes() - # This test is not meant to test fee estimation and we'd like - # to be sure all txs are sent at a consistent desired feerate - for node in self.nodes: - node.settxfee(min_relay_tx_fee) - - connect_nodes_bi(self.nodes,0,1) - connect_nodes_bi(self.nodes,1,2) - connect_nodes_bi(self.nodes,0,2) - connect_nodes_bi(self.nodes,0,3) - self.sync_all() - - # drain the keypool - self.nodes[1].getnewaddress() - self.nodes[1].getrawchangeaddress() - inputs = [] - 
outputs = {self.nodes[0].getnewaddress():1.1} - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - # fund a transaction that requires a new key for the change output - # creating the key must be impossible because the wallet is locked - assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx) - - #refill the keypool - self.nodes[1].walletpassphrase("test", 100) - self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address - self.nodes[1].walletlock() - - assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2) - - oldBalance = self.nodes[0].getbalance() - - inputs = [] - outputs = {self.nodes[0].getnewaddress():1.1} - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[1].fundrawtransaction(rawtx) - - #now we need to unlock - self.nodes[1].walletpassphrase("test", 600) - signedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) - txId = self.nodes[1].sendrawtransaction(signedTx['hex']) - self.nodes[1].generate(1) - self.sync_all() - - # make sure funds are received at node1 - assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance()) - - - ############################################### - # multiple (~19) inputs tx test | Compare fee # - ############################################### - - #empty node1, send some small coins from node0 to node1 - self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) - self.sync_all() - self.nodes[0].generate(1) - self.sync_all() - - for i in range(0,20): - self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) - self.nodes[0].generate(1) - self.sync_all() - - #fund a tx with ~20 small inputs - inputs = [] - outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[1].fundrawtransaction(rawtx) - - #create same transaction over sendtoaddress - txId = self.nodes[1].sendmany("", outputs) - signedFee = self.nodes[1].getrawmempool(True)[txId]['fee'] - - #compare fee - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs - - - ############################################# - # multiple (~19) inputs tx test | sign/send # - ############################################# - - #again, empty node1, send some small coins from node0 to node1 - self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) - self.sync_all() - self.nodes[0].generate(1) - self.sync_all() - - for i in range(0,20): - self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) - self.nodes[0].generate(1) - self.sync_all() - - #fund a tx with ~20 small inputs - oldBalance = self.nodes[0].getbalance() - - inputs = [] - outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[1].fundrawtransaction(rawtx) - fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) - txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) - self.sync_all() - self.nodes[0].generate(1) - self.sync_all() - assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward - - ##################################################### - # test fundrawtransaction with OP_RETURN 
and no vin # - ##################################################### - - rawtx = "0100000000010000000000000000066a047465737400000000" - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - - assert_equal(len(dec_tx['vin']), 0) - assert_equal(len(dec_tx['vout']), 1) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - - assert_greater_than(len(dec_tx['vin']), 0) # at least one vin - assert_equal(len(dec_tx['vout']), 2) # one change output added - - - ################################################## - # test a fundrawtransaction using only watchonly # - ################################################## - - inputs = [] - outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True }) - res_dec = self.nodes[0].decoderawtransaction(result["hex"]) - assert_equal(len(res_dec["vin"]), 1) - assert_equal(res_dec["vin"][0]["txid"], watchonly_txid) - - assert("fee" in result.keys()) - assert_greater_than(result["changepos"], -1) - - ############################################################### - # test fundrawtransaction using the entirety of watched funds # - ############################################################### - - inputs = [] - outputs = {self.nodes[2].getnewaddress() : watchonly_amount} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - # Backward compatibility test (2nd param is includeWatching) - result = self.nodes[3].fundrawtransaction(rawtx, True) - res_dec = self.nodes[0].decoderawtransaction(result["hex"]) - assert_equal(len(res_dec["vin"]), 2) - assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid) - - assert_greater_than(result["fee"], 0) - assert_greater_than(result["changepos"], -1) - assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10) - - signedtx = self.nodes[3].signrawtransaction(result["hex"]) - assert(not signedtx["complete"]) - signedtx = self.nodes[0].signrawtransaction(signedtx["hex"]) - assert(signedtx["complete"]) - self.nodes[0].sendrawtransaction(signedtx["hex"]) - self.nodes[0].generate(1) - self.sync_all() - - ####################### - # Test feeRate option # - ####################### - - # Make sure there is exactly one input so coin selection can't skew the result - assert_equal(len(self.nodes[3].listunspent(1)), 1) - - inputs = [] - outputs = {self.nodes[3].getnewaddress() : 1} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee) - result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}) - result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee}) - result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex']) - assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate) - assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate) - - ################################ - # Test no address reuse occurs # - ################################ - - result3 = self.nodes[3].fundrawtransaction(rawtx) - res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) - changeaddress = "" - for out in res_dec['vout']: - if out['value'] > 1.0: - changeaddress += out['scriptPubKey']['addresses'][0] - assert(changeaddress != "") - nextaddr = 
self.nodes[3].getnewaddress() - # Now the change address key should be removed from the keypool - assert(changeaddress != nextaddr) - - ###################################### - # Test subtractFeeFromOutputs option # - ###################################### - - # Make sure there is exactly one input so coin selection can't skew the result - assert_equal(len(self.nodes[3].listunspent(1)), 1) - - inputs = [] - outputs = {self.nodes[2].getnewaddress(): 1} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee) - self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list - self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee) - self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}), - self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})] - - dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result] - output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] - change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] - - assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) - assert_equal(result[3]['fee'], result[4]['fee']) - assert_equal(change[0], change[1]) - assert_equal(output[0], output[1]) - assert_equal(output[0], output[2] + result[2]['fee']) - assert_equal(change[0] + result[0]['fee'], change[2]) - assert_equal(output[3], output[4] + result[4]['fee']) - assert_equal(change[3] + result[3]['fee'], change[4]) - - inputs = [] - outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - result = [self.nodes[3].fundrawtransaction(rawtx), - # split the fee between outputs 0, 2, and 3, but not output 1 - self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})] - - dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']), - self.nodes[3].decoderawtransaction(result[1]['hex'])] - - # Nested list of non-change output amounts for each transaction - output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']] - for d, r in zip(dec_tx, result)] - - # List of differences in output amounts between normal and subtractFee transactions - share = [o0 - o1 for o0, o1 in zip(output[0], output[1])] - - # output 1 is the same in both transactions - assert_equal(share[1], 0) - - # the other 3 outputs are smaller as a result of subtractFeeFromOutputs - assert_greater_than(share[0], 0) - assert_greater_than(share[2], 0) - assert_greater_than(share[3], 0) - - # outputs 2 and 3 take the same share of the fee - assert_equal(share[2], share[3]) - - # output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3 - assert_greater_than_or_equal(share[0], share[2]) - assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0]) - - # the fee is the same in both transactions - assert_equal(result[0]['fee'], result[1]['fee']) - - # the total subtracted from the outputs is equal to the fee - assert_equal(share[0] + share[2] + share[3], result[0]['fee']) - -if __name__ == '__main__': - RawTransactionsTest().main() diff --git a/test/functional/getchaintips.py b/test/functional/getchaintips.py deleted file mode 100755 index 277930bb1a..0000000000 --- a/test/functional/getchaintips.py +++ /dev/null @@ 
-1,62 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the getchaintips RPC. - -- introduce a network split -- work on chains of different lengths -- join the network together again -- verify that getchaintips now returns two chain tips. -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class GetChainTipsTest (BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - - def run_test (self): - tips = self.nodes[0].getchaintips () - assert_equal (len (tips), 1) - assert_equal (tips[0]['branchlen'], 0) - assert_equal (tips[0]['height'], 200) - assert_equal (tips[0]['status'], 'active') - - # Split the network and build two chains of different lengths. - self.split_network () - self.nodes[0].generate(10) - self.nodes[2].generate(20) - self.sync_all([self.nodes[:2], self.nodes[2:]]) - - tips = self.nodes[1].getchaintips () - assert_equal (len (tips), 1) - shortTip = tips[0] - assert_equal (shortTip['branchlen'], 0) - assert_equal (shortTip['height'], 210) - assert_equal (tips[0]['status'], 'active') - - tips = self.nodes[3].getchaintips () - assert_equal (len (tips), 1) - longTip = tips[0] - assert_equal (longTip['branchlen'], 0) - assert_equal (longTip['height'], 220) - assert_equal (tips[0]['status'], 'active') - - # Join the network halves and check that we now have two tips - # (at least at the nodes that previously had the short chain). - self.join_network () - - tips = self.nodes[0].getchaintips () - assert_equal (len (tips), 2) - assert_equal (tips[0], longTip) - - assert_equal (tips[1]['branchlen'], 10) - assert_equal (tips[1]['status'], 'valid-fork') - tips[1]['branchlen'] = 0 - tips[1]['status'] = 'active' - assert_equal (tips[1], shortTip) - -if __name__ == '__main__': - GetChainTipsTest ().main () diff --git a/test/functional/invalidateblock.py b/test/functional/invalidateblock.py deleted file mode 100755 index b037c2431d..0000000000 --- a/test/functional/invalidateblock.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
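(A compact restatement of what getchaintips reports in the test above: exactly one tip is 'active', and a fork tip carries a branchlen counting blocks back to the fork point. A toy sketch with assumed tip dicts:)

    # Toy sketch of reading getchaintips output as in the test above:
    # exactly one tip is 'active'; a fork reports a nonzero branchlen.
    def active_tip(tips):
        active = [t for t in tips if t["status"] == "active"]
        assert len(active) == 1
        return active[0]

    tips = [
        {"height": 220, "branchlen": 0, "status": "active"},
        {"height": 210, "branchlen": 10, "status": "valid-fork"},
    ]
    assert active_tip(tips)["height"] == 220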
-"""Test the invalidateblock RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class InvalidateTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - - def setup_network(self): - self.setup_nodes() - - def run_test(self): - self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:") - self.log.info("Mine 4 blocks on Node 0") - self.nodes[0].generate(4) - assert(self.nodes[0].getblockcount() == 4) - besthash = self.nodes[0].getbestblockhash() - - self.log.info("Mine competing 6 blocks on Node 1") - self.nodes[1].generate(6) - assert(self.nodes[1].getblockcount() == 6) - - self.log.info("Connect nodes to force a reorg") - connect_nodes_bi(self.nodes,0,1) - sync_blocks(self.nodes[0:2]) - assert(self.nodes[0].getblockcount() == 6) - badhash = self.nodes[1].getblockhash(2) - - self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain") - self.nodes[0].invalidateblock(badhash) - newheight = self.nodes[0].getblockcount() - newhash = self.nodes[0].getbestblockhash() - if (newheight != 4 or newhash != besthash): - raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight)) - - self.log.info("Make sure we won't reorg to a lower work chain:") - connect_nodes_bi(self.nodes,1,2) - self.log.info("Sync node 2 to node 1 so both have 6 blocks") - sync_blocks(self.nodes[1:3]) - assert(self.nodes[2].getblockcount() == 6) - self.log.info("Invalidate block 5 on node 1 so its tip is now at 4") - self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5)) - assert(self.nodes[1].getblockcount() == 4) - self.log.info("Invalidate block 3 on node 2, so its tip is now 2") - self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3)) - assert(self.nodes[2].getblockcount() == 2) - self.log.info("..and then mine a block") - self.nodes[2].generate(1) - self.log.info("Verify all nodes are at the right height") - time.sleep(5) - assert_equal(self.nodes[2].getblockcount(), 3) - assert_equal(self.nodes[0].getblockcount(), 4) - node1height = self.nodes[1].getblockcount() - if node1height < 4: - raise AssertionError("Node 1 reorged to a lower height: %d"%node1height) - -if __name__ == '__main__': - InvalidateTest().main() diff --git a/test/functional/listtransactions.py b/test/functional/listtransactions.py deleted file mode 100755 index ba71ac0967..0000000000 --- a/test/functional/listtransactions.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the listtransactions API.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import CTransaction, COIN -from io import BytesIO - -def txFromHex(hexstring): - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(hexstring)) - tx.deserialize(f) - return tx - -class ListTransactionsTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.enable_mocktime() - - def run_test(self): - # Simple send, 0 to 1: - txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) - self.sync_all() - assert_array_result(self.nodes[0].listtransactions(), - {"txid":txid}, - {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0}) - assert_array_result(self.nodes[1].listtransactions(), - {"txid":txid}, - {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0}) - # mine a block, confirmations should change: - self.nodes[0].generate(1) - self.sync_all() - assert_array_result(self.nodes[0].listtransactions(), - {"txid":txid}, - {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1}) - assert_array_result(self.nodes[1].listtransactions(), - {"txid":txid}, - {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1}) - - # send-to-self: - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) - assert_array_result(self.nodes[0].listtransactions(), - {"txid":txid, "category":"send"}, - {"amount":Decimal("-0.2")}) - assert_array_result(self.nodes[0].listtransactions(), - {"txid":txid, "category":"receive"}, - {"amount":Decimal("0.2")}) - - # sendmany from node1: twice to self, twice to node2: - send_to = { self.nodes[0].getnewaddress() : 0.11, - self.nodes[1].getnewaddress() : 0.22, - self.nodes[0].getaccountaddress("from1") : 0.33, - self.nodes[1].getaccountaddress("toself") : 0.44 } - txid = self.nodes[1].sendmany("", send_to) - self.sync_all() - assert_array_result(self.nodes[1].listtransactions(), - {"category":"send","amount":Decimal("-0.11")}, - {"txid":txid} ) - assert_array_result(self.nodes[0].listtransactions(), - {"category":"receive","amount":Decimal("0.11")}, - {"txid":txid} ) - assert_array_result(self.nodes[1].listtransactions(), - {"category":"send","amount":Decimal("-0.22")}, - {"txid":txid} ) - assert_array_result(self.nodes[1].listtransactions(), - {"category":"receive","amount":Decimal("0.22")}, - {"txid":txid} ) - assert_array_result(self.nodes[1].listtransactions(), - {"category":"send","amount":Decimal("-0.33")}, - {"txid":txid} ) - assert_array_result(self.nodes[0].listtransactions(), - {"category":"receive","amount":Decimal("0.33")}, - {"txid":txid, "account" : "from1"} ) - assert_array_result(self.nodes[1].listtransactions(), - {"category":"send","amount":Decimal("-0.44")}, - {"txid":txid, "account" : ""} ) - assert_array_result(self.nodes[1].listtransactions(), - {"category":"receive","amount":Decimal("0.44")}, - {"txid":txid, "account" : "toself"} ) - - pubkey = self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['pubkey'] - multisig = self.nodes[1].createmultisig(1, [pubkey]) - self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True) - txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) - self.nodes[1].generate(1) - self.sync_all() - assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0) - assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True), - 
{"category":"receive","amount":Decimal("0.1")}, - {"txid":txid, "account" : "watchonly"} ) - - self.run_rbf_opt_in_test() - - # Check that the opt-in-rbf flag works properly, for sent and received - # transactions. - def run_rbf_opt_in_test(self): - # Check whether a transaction signals opt-in RBF itself - def is_opt_in(node, txid): - rawtx = node.getrawtransaction(txid, 1) - for x in rawtx["vin"]: - if x["sequence"] < 0xfffffffe: - return True - return False - - # Find an unconfirmed output matching a certain txid - def get_unconfirmed_utxo_entry(node, txid_to_match): - utxo = node.listunspent(0, 0) - for i in utxo: - if i["txid"] == txid_to_match: - return i - return None - - # 1. Chain a few transactions that don't opt-in. - txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) - assert(not is_opt_in(self.nodes[0], txid_1)) - assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) - sync_mempools(self.nodes) - assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) - - # Tx2 will build off txid_1, still not opting in to RBF. - utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1) - assert_equal(utxo_to_use["safe"], True) - utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) - utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) - assert_equal(utxo_to_use["safe"], False) - - # Create tx2 using createrawtransaction - inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}] - outputs = {self.nodes[0].getnewaddress(): 0.999} - tx2 = self.nodes[1].createrawtransaction(inputs, outputs) - tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"] - txid_2 = self.nodes[1].sendrawtransaction(tx2_signed) - - # ...and check the result - assert(not is_opt_in(self.nodes[1], txid_2)) - assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) - sync_mempools(self.nodes) - assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) - - # Tx3 will opt-in to RBF - utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2) - inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}] - outputs = {self.nodes[1].getnewaddress(): 0.998} - tx3 = self.nodes[0].createrawtransaction(inputs, outputs) - tx3_modified = txFromHex(tx3) - tx3_modified.vin[0].nSequence = 0 - tx3 = bytes_to_hex_str(tx3_modified.serialize()) - tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex'] - txid_3 = self.nodes[0].sendrawtransaction(tx3_signed) - - assert(is_opt_in(self.nodes[0], txid_3)) - assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) - sync_mempools(self.nodes) - assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) - - # Tx4 will chain off tx3. Doesn't signal itself, but depends on one - # that does. 
- utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3) - inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}] - outputs = {self.nodes[0].getnewaddress(): 0.997} - tx4 = self.nodes[1].createrawtransaction(inputs, outputs) - tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"] - txid_4 = self.nodes[1].sendrawtransaction(tx4_signed) - - assert(not is_opt_in(self.nodes[1], txid_4)) - assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) - sync_mempools(self.nodes) - assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) - - # Replace tx3, and check that tx4 becomes unknown - tx3_b = tx3_modified - tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee - tx3_b = bytes_to_hex_str(tx3_b.serialize()) - tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex'] - txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True) - assert(is_opt_in(self.nodes[0], txid_3b)) - - assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) - sync_mempools(self.nodes) - assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) - - # Check gettransaction as well: - for n in self.nodes[0:2]: - assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no") - assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no") - assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes") - assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes") - assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown") - - # After mining a transaction, it's no longer BIP125-replaceable - self.nodes[0].generate(1) - assert(txid_3b not in self.nodes[0].getrawmempool()) - assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no") - assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown") - - -if __name__ == '__main__': - ListTransactionsTest().main() - diff --git a/test/functional/merkle_blocks.py b/test/functional/merkle_blocks.py deleted file mode 100755 index 50e0371fdf..0000000000 --- a/test/functional/merkle_blocks.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
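The is_opt_in helper above implements BIP125's direct signaling rule: a transaction signals replaceability if any of its inputs carries an nSequence below 0xfffffffe. A transaction that spends an unconfirmed signaling ancestor is replaceable indirectly, which is why txid_4 reports bip125-replaceable=yes without signaling itself. A standalone restatement of the direct check, reusing the same framework helpers the test already imports (the signals_rbf name is ours):

    from io import BytesIO
    from test_framework.mininode import CTransaction
    from test_framework.util import hex_str_to_bytes

    def signals_rbf(tx_hex):
        # Direct BIP125 signal: any input with nSequence below 0xfffffffe.
        tx = CTransaction()
        tx.deserialize(BytesIO(hex_str_to_bytes(tx_hex)))
        return any(txin.nSequence < 0xfffffffe for txin in tx.vin)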
-"""Test gettxoutproof and verifytxoutproof RPCs.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class MerkleBlockTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = True - # Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing - self.extra_args = [[], [], [], ["-txindex"]] - - def setup_network(self): - self.setup_nodes() - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[0], 2) - connect_nodes(self.nodes[0], 3) - - self.sync_all() - - def run_test(self): - self.log.info("Mining blocks...") - self.nodes[0].generate(105) - self.sync_all() - - chain_height = self.nodes[1].getblockcount() - assert_equal(chain_height, 105) - assert_equal(self.nodes[1].getbalance(), 0) - assert_equal(self.nodes[2].getbalance(), 0) - - node0utxos = self.nodes[0].listunspent(1) - tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) - txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"]) - tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) - txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"]) - # This will raise an exception because the transaction is not yet in a block - assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1]) - - self.nodes[0].generate(1) - blockhash = self.nodes[0].getblockhash(chain_height + 1) - self.sync_all() - - txlist = [] - blocktxn = self.nodes[0].getblock(blockhash, True)["tx"] - txlist.append(blocktxn[1]) - txlist.append(blocktxn[2]) - - assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1]) - assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist) - assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist) - - txin_spent = self.nodes[1].listunspent(1).pop() - tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98}) - txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"]) - self.nodes[0].generate(1) - self.sync_all() - - txid_spent = txin_spent["txid"] - txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2 - - # We can't find the block from a fully-spent tx - assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent]) - # We can get the proof if we specify the block - assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent]) - # We can't get the proof if we specify a non-existent block - assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000") - # We can get the proof if the transaction is unspent - assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent]) - # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter. 
- assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist)) - assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist)) - # We can always get a proof if we have a -txindex - assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent]) - # We can't get a proof if we specify transactions from different blocks - assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3]) - - -if __name__ == '__main__': - MerkleBlockTest().main() diff --git a/test/functional/multi_rpc.py b/test/functional/multi_rpc.py deleted file mode 100755 index 01f68344ae..0000000000 --- a/test/functional/multi_rpc.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test multiple RPC users.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import str_to_b64str, assert_equal - -import os -import http.client -import urllib.parse - -class HTTPBasicsTest (BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - - def setup_chain(self): - super().setup_chain() - #Append rpcauth to bitcoin.conf before initialization - rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144" - rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e" - rpcuser = "rpcuser=rpcuser💻" - rpcpassword = "rpcpassword=rpcpassword🔑" - with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a', encoding='utf8') as f: - f.write(rpcauth+"\n") - f.write(rpcauth2+"\n") - with open(os.path.join(self.options.tmpdir+"/node1", "bitcoin.conf"), 'a', encoding='utf8') as f: - f.write(rpcuser+"\n") - f.write(rpcpassword+"\n") - - def run_test(self): - - ################################################## - # Check correctness of the rpcauth config option # - ################################################## - url = urllib.parse.urlparse(self.nodes[0].url) - - #Old authpair - authpair = url.username + ':' + url.password - - #New authpair generated via share/rpcuser tool - password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM=" - - #Second authpair with different username - password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI=" - authpairnew = "rt:"+password - - headers = {"Authorization": "Basic " + str_to_b64str(authpair)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 200) - conn.close() - - #Use new authpair to confirm both work - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 200) - conn.close() - - #Wrong login name with rt's password - authpairnew = "rtwrong:"+password - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": 
"getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 401) - conn.close() - - #Wrong password for rt - authpairnew = "rt:"+password+"wrong" - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 401) - conn.close() - - #Correct for rt2 - authpairnew = "rt2:"+password2 - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 200) - conn.close() - - #Wrong password for rt2 - authpairnew = "rt2:"+password2+"wrong" - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 401) - conn.close() - - ############################################################### - # Check correctness of the rpcuser/rpcpassword config options # - ############################################################### - url = urllib.parse.urlparse(self.nodes[1].url) - - # rpcuser and rpcpassword authpair - rpcuserauthpair = "rpcuser💻:rpcpassword🔑" - - headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 200) - conn.close() - - #Wrong login name with rpcuser's password - rpcuserauthpair = "rpcuserwrong:rpcpassword" - headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 401) - conn.close() - - #Wrong password for rpcuser - rpcuserauthpair = "rpcuser:rpcpasswordwrong" - headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - resp = conn.getresponse() - assert_equal(resp.status, 401) - conn.close() - - -if __name__ == '__main__': - HTTPBasicsTest ().main () diff --git a/test/functional/net.py b/test/functional/net.py deleted file mode 100755 index 16e4f6adb4..0000000000 --- a/test/functional/net.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test RPC calls related to net. - -Tests correspond to code in rpc/net.cpp. 
-""" - -import time - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_raises_rpc_error, - connect_nodes_bi, - p2p_port, -) - -class NetTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - - def run_test(self): - self._test_connection_count() - self._test_getnettotals() - self._test_getnetworkinginfo() - self._test_getaddednodeinfo() - self._test_getpeerinfo() - - def _test_connection_count(self): - # connect_nodes_bi connects each node to the other - assert_equal(self.nodes[0].getconnectioncount(), 2) - - def _test_getnettotals(self): - # check that getnettotals totalbytesrecv and totalbytessent - # are consistent with getpeerinfo - peer_info = self.nodes[0].getpeerinfo() - assert_equal(len(peer_info), 2) - net_totals = self.nodes[0].getnettotals() - assert_equal(sum([peer['bytesrecv'] for peer in peer_info]), - net_totals['totalbytesrecv']) - assert_equal(sum([peer['bytessent'] for peer in peer_info]), - net_totals['totalbytessent']) - # test getnettotals and getpeerinfo by doing a ping - # the bytes sent/received should change - # note ping and pong are 32 bytes each - self.nodes[0].ping() - time.sleep(0.1) - peer_info_after_ping = self.nodes[0].getpeerinfo() - net_totals_after_ping = self.nodes[0].getnettotals() - for before, after in zip(peer_info, peer_info_after_ping): - assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong']) - assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping']) - assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv']) - assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent']) - - def _test_getnetworkinginfo(self): - assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) - assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2) - - self.nodes[0].setnetworkactive(False) - assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False) - timeout = 3 - while self.nodes[0].getnetworkinfo()['connections'] != 0: - # Wait a bit for all sockets to close - assert timeout > 0, 'not all connections closed in time' - timeout -= 0.1 - time.sleep(0.1) - - self.nodes[0].setnetworkactive(True) - connect_nodes_bi(self.nodes, 0, 1) - assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) - assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2) - - def _test_getaddednodeinfo(self): - assert_equal(self.nodes[0].getaddednodeinfo(), []) - # add a node (node2) to node0 - ip_port = "127.0.0.1:{}".format(p2p_port(2)) - self.nodes[0].addnode(ip_port, 'add') - # check that the node has indeed been added - added_nodes = self.nodes[0].getaddednodeinfo(ip_port) - assert_equal(len(added_nodes), 1) - assert_equal(added_nodes[0]['addednode'], ip_port) - # check that a non-existent node returns an error - assert_raises_rpc_error(-24, "Node has not been added", - self.nodes[0].getaddednodeinfo, '1.1.1.1') - - def _test_getpeerinfo(self): - peer_info = [x.getpeerinfo() for x in self.nodes] - # check both sides of bidirectional connection between nodes - # the address bound to on one side will be the source address for the other node - assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr']) - assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr']) - -if __name__ == '__main__': - NetTest().main() diff --git a/test/functional/preciousblock.py 
b/test/functional/preciousblock.py deleted file mode 100755 index 960cd0ad12..0000000000 --- a/test/functional/preciousblock.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the preciousblock RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - connect_nodes_bi, - sync_chain, - sync_blocks, -) - -def unidirectional_node_sync_via_rpc(node_src, node_dest): - blocks_to_copy = [] - blockhash = node_src.getbestblockhash() - while True: - try: - assert(len(node_dest.getblock(blockhash, False)) > 0) - break - except: - blocks_to_copy.append(blockhash) - blockhash = node_src.getblockheader(blockhash, True)['previousblockhash'] - blocks_to_copy.reverse() - for blockhash in blocks_to_copy: - blockdata = node_src.getblock(blockhash, False) - assert(node_dest.submitblock(blockdata) in (None, 'inconclusive')) - -def node_sync_via_rpc(nodes): - for node_src in nodes: - for node_dest in nodes: - if node_src is node_dest: - continue - unidirectional_node_sync_via_rpc(node_src, node_dest) - -class PreciousTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - - def setup_network(self): - self.setup_nodes() - - def run_test(self): - self.log.info("Ensure submitblock can in principle reorg to a competing chain") - self.nodes[0].generate(1) - assert_equal(self.nodes[0].getblockcount(), 1) - hashZ = self.nodes[1].generate(2)[-1] - assert_equal(self.nodes[1].getblockcount(), 2) - node_sync_via_rpc(self.nodes[0:3]) - assert_equal(self.nodes[0].getbestblockhash(), hashZ) - - self.log.info("Mine blocks A-B-C on Node 0") - hashC = self.nodes[0].generate(3)[-1] - assert_equal(self.nodes[0].getblockcount(), 5) - self.log.info("Mine competing blocks E-F-G on Node 1") - hashG = self.nodes[1].generate(3)[-1] - assert_equal(self.nodes[1].getblockcount(), 5) - assert(hashC != hashG) - self.log.info("Connect nodes and check no reorg occurs") - # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync) - node_sync_via_rpc(self.nodes[0:2]) - connect_nodes_bi(self.nodes,0,1) - assert_equal(self.nodes[0].getbestblockhash(), hashC) - assert_equal(self.nodes[1].getbestblockhash(), hashG) - self.log.info("Make Node0 prefer block G") - self.nodes[0].preciousblock(hashG) - assert_equal(self.nodes[0].getbestblockhash(), hashG) - self.log.info("Make Node0 prefer block C again") - self.nodes[0].preciousblock(hashC) - assert_equal(self.nodes[0].getbestblockhash(), hashC) - self.log.info("Make Node1 prefer block C") - self.nodes[1].preciousblock(hashC) - sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC - assert_equal(self.nodes[1].getbestblockhash(), hashC) - self.log.info("Make Node1 prefer block G again") - self.nodes[1].preciousblock(hashG) - assert_equal(self.nodes[1].getbestblockhash(), hashG) - self.log.info("Make Node0 prefer block G again") - self.nodes[0].preciousblock(hashG) - assert_equal(self.nodes[0].getbestblockhash(), hashG) - self.log.info("Make Node1 prefer block C again") - self.nodes[1].preciousblock(hashC) - assert_equal(self.nodes[1].getbestblockhash(), hashC) - self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1") - self.nodes[0].generate(1) - 
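# (Mining H gives node 0's chain strictly more work than the C branch;
-        # preciousblock only breaks ties between chains of equal work, so once
-        # H exists, preferring C can no longer move node 1's tip.)
-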
assert_equal(self.nodes[0].getblockcount(), 6) - sync_blocks(self.nodes[0:2]) - hashH = self.nodes[0].getbestblockhash() - assert_equal(self.nodes[1].getbestblockhash(), hashH) - self.log.info("Node1 should not be able to prefer block C anymore") - self.nodes[1].preciousblock(hashC) - assert_equal(self.nodes[1].getbestblockhash(), hashH) - self.log.info("Mine competing blocks I-J-K-L on Node 2") - self.nodes[2].generate(4) - assert_equal(self.nodes[2].getblockcount(), 6) - hashL = self.nodes[2].getbestblockhash() - self.log.info("Connect nodes and check no reorg occurs") - node_sync_via_rpc(self.nodes[1:3]) - connect_nodes_bi(self.nodes,1,2) - connect_nodes_bi(self.nodes,0,2) - assert_equal(self.nodes[0].getbestblockhash(), hashH) - assert_equal(self.nodes[1].getbestblockhash(), hashH) - assert_equal(self.nodes[2].getbestblockhash(), hashL) - self.log.info("Make Node1 prefer block L") - self.nodes[1].preciousblock(hashL) - assert_equal(self.nodes[1].getbestblockhash(), hashL) - self.log.info("Make Node2 prefer block H") - self.nodes[2].preciousblock(hashH) - assert_equal(self.nodes[2].getbestblockhash(), hashH) - -if __name__ == '__main__': - PreciousTest().main() diff --git a/test/functional/rawtransactions.py b/test/functional/rawtransactions.py deleted file mode 100755 index d39d86b310..0000000000 --- a/test/functional/rawtransactions.py +++ /dev/null @@ -1,323 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the rawtransaction RPCs. - -Test the following RPCs: - - createrawtransaction - - signrawtransaction - - sendrawtransaction - - decoderawtransaction - - getrawtransaction -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - - -class multidict(dict): - """Dictionary that allows duplicate keys. - - Constructed with a list of (key, value) tuples. 
When dumped by the json module,
-    will output invalid JSON with repeated keys, e.g.:
-    >>> json.dumps(multidict([(1,2),(1,2)]))
-    '{"1": 2, "1": 2}'
-
-    Used to test calls to rpc methods with repeated keys in the json object."""
-
-    def __init__(self, x):
-        dict.__init__(self, x)
-        self.x = x
-
-    def items(self):
-        return self.x
-
-
-# Create one-input, one-output, no-fee transaction:
-class RawTransactionsTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.setup_clean_chain = True
-        self.num_nodes = 3
-        self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
-
-    def setup_network(self, split=False):
-        super().setup_network()
-        connect_nodes_bi(self.nodes,0,2)
-
-    def run_test(self):
-
-        #prepare some coins for multiple *rawtransaction commands
-        self.nodes[2].generate(1)
-        self.sync_all()
-        self.nodes[0].generate(101)
-        self.sync_all()
-        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
-        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
-        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
-        self.sync_all()
-        self.nodes[0].generate(5)
-        self.sync_all()
-
-        # Test `createrawtransaction` required parameters
-        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
-        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
-
-        # Test `createrawtransaction` invalid extra parameters
-        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
-
-        # Test `createrawtransaction` invalid `inputs`
-        txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
-        assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
-        assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
-        assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
-        assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
-        assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
-        assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
-        assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
-        assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
-
-        # Test `createrawtransaction` invalid `outputs`
-        address = self.nodes[0].getnewaddress()
-        assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo')
-        assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
-        assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
-        assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
-        assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
-        assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
-
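-        # (multidict serializes to '{"addr": 1, "addr": 1}'; the duplicate key
-        # survives JSON encoding, which is what lets the server-side
-        # duplicated-address check above fire.)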
-        # Test `createrawtransaction` invalid `locktime`
-        assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
-        assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
-        assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
-
-        # Test `createrawtransaction` invalid `replaceable`
-        assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
-
-        #########################################
-        # sendrawtransaction with missing input #
-        #########################################
-        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # won't exist
-        outputs = { self.nodes[0].getnewaddress() : 4.998 }
-        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
-        rawtx = self.nodes[2].signrawtransaction(rawtx)
-
-        # This will raise an exception since there are missing inputs
-        assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
-
-        #####################################
-        # getrawtransaction with block hash #
-        #####################################
-
-        # make a tx by sending, then generate 2 blocks; block1 has the tx in it
-        tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
-        block1, block2 = self.nodes[2].generate(2)
-        self.sync_all()
-        # We should be able to get the raw transaction by providing the correct block
-        gottx = self.nodes[0].getrawtransaction(tx, True, block1)
-        assert_equal(gottx['txid'], tx)
-        assert_equal(gottx['in_active_chain'], True)
-        # We should not have the 'in_active_chain' flag when we don't provide a block
-        gottx = self.nodes[0].getrawtransaction(tx, True)
-        assert_equal(gottx['txid'], tx)
-        assert 'in_active_chain' not in gottx
-        # We should not get the tx if we provide an unrelated block
-        assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
-        # An invalid block hash should raise the correct errors
-        assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
-        assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
-        assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
-        assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
-        # Undo the blocks and check in_active_chain
-        self.nodes[0].invalidateblock(block1)
-        gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
-        assert_equal(gottx['in_active_chain'], False)
-        self.nodes[0].reconsiderblock(block1)
-        assert_equal(self.nodes[0].getbestblockhash(), block2)
-
-        #########################
-        # RAW TX MULTISIG TESTS #
-        #########################
-        # 2of2 test
-        addr1 = self.nodes[2].getnewaddress()
-        addr2 = self.nodes[2].getnewaddress()
-
-        addr1Obj = self.nodes[2].validateaddress(addr1)
-        addr2Obj = self.nodes[2].validateaddress(addr2)
-
-        # Tests for createmultisig and addmultisigaddress
-        assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
-        self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here. - - mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address'] - - #use balance deltas instead of absolute values - bal = self.nodes[2].getbalance() - - # send 1.2 BTC to msig adr - txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) - self.sync_all() - self.nodes[0].generate(1) - self.sync_all() - assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance - - - # 2of3 test from different nodes - bal = self.nodes[2].getbalance() - addr1 = self.nodes[1].getnewaddress() - addr2 = self.nodes[2].getnewaddress() - addr3 = self.nodes[2].getnewaddress() - - addr1Obj = self.nodes[1].validateaddress(addr1) - addr2Obj = self.nodes[2].validateaddress(addr2) - addr3Obj = self.nodes[2].validateaddress(addr3) - - mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address'] - - txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) - decTx = self.nodes[0].gettransaction(txId) - rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) - self.sync_all() - self.nodes[0].generate(1) - self.sync_all() - - #THIS IS A INCOMPLETE FEATURE - #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION - assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable - - txDetails = self.nodes[0].gettransaction(txId, True) - rawTx = self.nodes[0].decoderawtransaction(txDetails['hex']) - vout = False - for outpoint in rawTx['vout']: - if outpoint['value'] == Decimal('2.20000000'): - vout = outpoint - break - - bal = self.nodes[0].getbalance() - inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}] - outputs = { self.nodes[0].getnewaddress() : 2.19 } - rawTx = self.nodes[2].createrawtransaction(inputs, outputs) - rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs) - assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. 
-
-        rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
-        assert_equal(rawTxSigned['complete'], True) # node2 can sign the tx completely, it owns two of three keys
-        self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
-        rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
-        self.sync_all()
-        self.nodes[0].generate(1)
-        self.sync_all()
-        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) # block reward + tx
-
-        # 2of2 test for combining transactions
-        bal = self.nodes[2].getbalance()
-        addr1 = self.nodes[1].getnewaddress()
-        addr2 = self.nodes[2].getnewaddress()
-
-        addr1Obj = self.nodes[1].validateaddress(addr1)
-        addr2Obj = self.nodes[2].validateaddress(addr2)
-
-        self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
-        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
-        mSigObjValid = self.nodes[2].validateaddress(mSigObj)
-
-        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
-        decTx = self.nodes[0].gettransaction(txId)
-        rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
-        self.sync_all()
-        self.nodes[0].generate(1)
-        self.sync_all()
-
-        assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
-
-        txDetails = self.nodes[0].gettransaction(txId, True)
-        rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
-        vout = False
-        for outpoint in rawTx2['vout']:
-            if outpoint['value'] == Decimal('2.20000000'):
-                vout = outpoint
-                break
-
-        bal = self.nodes[0].getbalance()
-        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
-        outputs = { self.nodes[0].getnewaddress() : 2.19 }
-        rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
-        rawTxPartialSigned1 = self.nodes[1].signrawtransaction(rawTx2, inputs)
-        self.log.info(rawTxPartialSigned1)
-        assert_equal(rawTxPartialSigned1['complete'], False) # node1 only has one key, can't completely sign the tx
-
-        rawTxPartialSigned2 = self.nodes[2].signrawtransaction(rawTx2, inputs)
-        self.log.info(rawTxPartialSigned2)
-        assert_equal(rawTxPartialSigned2['complete'], False) # node2 only has one key, can't comp.
sign the tx - rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']]) - self.log.info(rawTxComb) - self.nodes[2].sendrawtransaction(rawTxComb) - rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb) - self.sync_all() - self.nodes[0].generate(1) - self.sync_all() - assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx - - # decoderawtransaction tests - # witness transaction - encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000" - decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction - assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000')) - assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction - # non-witness transaction - encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000" - decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction - assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000')) - - # getrawtransaction tests - # 1. valid parameters - only supply txid - txHash = rawTx["hash"] - assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex']) - - # 2. valid parameters - supply txid and 0 for non-verbose - assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex']) - - # 3. valid parameters - supply txid and False for non-verbose - assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex']) - - # 4. valid parameters - supply txid and 1 for verbose. - # We only check the "hex" field of the output so we don't need to update this test every time the output format changes. - assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex']) - - # 5. valid parameters - supply txid and True for non-verbose - assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex']) - - # 6. invalid parameters - supply txid and string "Flase" - assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase") - - # 7. invalid parameters - supply txid and empty array - assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, []) - - # 8. invalid parameters - supply txid and empty dict - assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {}) - - inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}] - outputs = { self.nodes[0].getnewaddress() : 1 } - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - decrawtx= self.nodes[0].decoderawtransaction(rawtx) - assert_equal(decrawtx['vin'][0]['sequence'], 1000) - - # 9. invalid parameters - sequence number out of range - inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}] - outputs = { self.nodes[0].getnewaddress() : 1 } - assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) - - # 10. 
invalid parameters - sequence number out of range - inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}] - outputs = { self.nodes[0].getnewaddress() : 1 } - assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) - - inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}] - outputs = { self.nodes[0].getnewaddress() : 1 } - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - decrawtx= self.nodes[0].decoderawtransaction(rawtx) - assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) - -if __name__ == '__main__': - RawTransactionsTest().main() diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py new file mode 100755 index 0000000000..05433c7e24 --- /dev/null +++ b/test/functional/rpc_bind.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test running bitcoind with the -rpcbind and -rpcallowip options.""" + +import socket +import sys + +from test_framework.test_framework import BitcoinTestFramework, SkipTest +from test_framework.util import * +from test_framework.netutil import * + +class RPCBindTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def setup_network(self): + self.add_nodes(self.num_nodes, None) + + def run_bind_test(self, allow_ips, connect_to, addresses, expected): + ''' + Start a node with requested rpcallowip and rpcbind parameters, + then try to connect, and check if the set of bound addresses + matches the expected set. + ''' + self.log.info("Bind test for %s" % str(addresses)) + expected = [(addr_to_hex(addr), port) for (addr, port) in expected] + base_args = ['-disablewallet', '-nolisten'] + if allow_ips: + base_args += ['-rpcallowip=' + x for x in allow_ips] + binds = ['-rpcbind='+addr for addr in addresses] + self.nodes[0].rpchost = connect_to + self.start_node(0, base_args + binds) + pid = self.nodes[0].process.pid + assert_equal(set(get_bind_addrs(pid)), set(expected)) + self.stop_nodes() + + def run_allowip_test(self, allow_ips, rpchost, rpcport): + ''' + Start a node with rpcallow IP, and request getnetworkinfo + at a non-localhost IP. 
+        '''
+        self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
+        base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
+        self.nodes[0].rpchost = None
+        self.start_nodes([base_args])
+        # connect to node through non-loopback interface
+        node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
+        node.getnetworkinfo()
+        self.stop_nodes()
+
+    def run_test(self):
+        # due to OS-specific network stats queries, this test works only on Linux
+        if not sys.platform.startswith('linux'):
+            raise SkipTest("This test can only be run on linux.")
+        # find the first non-loopback interface for testing
+        non_loopback_ip = None
+        for name,ip in all_interfaces():
+            if ip != '127.0.0.1':
+                non_loopback_ip = ip
+                break
+        if non_loopback_ip is None:
+            raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
+        try:
+            s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+            s.connect(("::1",1))
+            s.close()
+        except OSError:
+            raise SkipTest("This test requires IPv6 support.")
+
+        self.log.info("Using interface %s for testing" % non_loopback_ip)
+
+        defaultport = rpc_port(0)
+
+        # check default without rpcallowip (IPv4 and IPv6 localhost)
+        self.run_bind_test(None, '127.0.0.1', [],
+                           [('127.0.0.1', defaultport), ('::1', defaultport)])
+        # check default with rpcallowip (IPv6 any)
+        self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
+                           [('::0', defaultport)])
+        # check only IPv4 localhost (explicit)
+        self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
+                           [('127.0.0.1', defaultport)])
+        # check only IPv4 localhost (explicit) with alternative port
+        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
+                           [('127.0.0.1', 32171)])
+        # check only IPv4 localhost (explicit) with multiple alternative ports on same host
+        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
+                           [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
+        # check only IPv6 localhost (explicit)
+        self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
+                           [('::1', defaultport)])
+        # check both IPv4 and IPv6 localhost (explicit)
+        self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
+                           [('127.0.0.1', defaultport), ('::1', defaultport)])
+        # check only non-loopback interface
+        self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
+                           [(non_loopback_ip, defaultport)])
+
+        # Check that with invalid rpcallowip, we are denied
+        self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
+        assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
+
+if __name__ == '__main__':
+    RPCBindTest().main()
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
new file mode 100755
index 0000000000..11acff4be1
--- /dev/null
+++ b/test/functional/rpc_blockchain.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test RPCs related to blockchainstate.
+
+Test the following RPCs:
+    - getblockchaininfo
+    - gettxoutsetinfo
+    - getdifficulty
+    - getbestblockhash
+    - getblockhash
+    - getblockheader
+    - getchaintxstats
+    - getnetworkhashps
+    - verifychain
+
+Tests correspond to code in rpc/blockchain.cpp.
+""" + +from decimal import Decimal +import http.client +import subprocess + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_greater_than, + assert_greater_than_or_equal, + assert_raises, + assert_raises_rpc_error, + assert_is_hex_string, + assert_is_hash_string, +) + +class BlockchainTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [['-stopatheight=207', '-prune=1']] + + def run_test(self): + self._test_getblockchaininfo() + self._test_getchaintxstats() + self._test_gettxoutsetinfo() + self._test_getblockheader() + self._test_getdifficulty() + self._test_getnetworkhashps() + self._test_stopatheight() + assert self.nodes[0].verifychain(4, 0) + + def _test_getblockchaininfo(self): + self.log.info("Test getblockchaininfo") + + keys = [ + 'bestblockhash', + 'bip9_softforks', + 'blocks', + 'chain', + 'chainwork', + 'difficulty', + 'headers', + 'initialblockdownload', + 'mediantime', + 'pruned', + 'size_on_disk', + 'softforks', + 'verificationprogress', + 'warnings', + ] + res = self.nodes[0].getblockchaininfo() + + # result should have these additional pruning keys if manual pruning is enabled + assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys)) + + # size_on_disk should be > 0 + assert_greater_than(res['size_on_disk'], 0) + + # pruneheight should be greater or equal to 0 + assert_greater_than_or_equal(res['pruneheight'], 0) + + # check other pruning fields given that prune=1 + assert res['pruned'] + assert not res['automatic_pruning'] + + self.restart_node(0, ['-stopatheight=207']) + res = self.nodes[0].getblockchaininfo() + # should have exact keys + assert_equal(sorted(res.keys()), keys) + + self.restart_node(0, ['-stopatheight=207', '-prune=550']) + res = self.nodes[0].getblockchaininfo() + # result should have these additional pruning keys if prune=550 + assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys)) + + # check related fields + assert res['pruned'] + assert_equal(res['pruneheight'], 0) + assert res['automatic_pruning'] + assert_equal(res['prune_target_size'], 576716800) + assert_greater_than(res['size_on_disk'], 0) + + def _test_getchaintxstats(self): + chaintxstats = self.nodes[0].getchaintxstats(1) + # 200 txs plus genesis tx + assert_equal(chaintxstats['txcount'], 201) + # tx rate should be 1 per 10 minutes, or 1/600 + # we have to round because of binary math + assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1)) + + b1 = self.nodes[0].getblock(self.nodes[0].getblockhash(1)) + b200 = self.nodes[0].getblock(self.nodes[0].getblockhash(200)) + time_diff = b200['mediantime'] - b1['mediantime'] + + chaintxstats = self.nodes[0].getchaintxstats() + assert_equal(chaintxstats['time'], b200['time']) + assert_equal(chaintxstats['txcount'], 201) + assert_equal(chaintxstats['window_block_count'], 199) + assert_equal(chaintxstats['window_tx_count'], 199) + assert_equal(chaintxstats['window_interval'], time_diff) + assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199)) + + chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1['hash']) + assert_equal(chaintxstats['time'], b1['time']) + assert_equal(chaintxstats['txcount'], 2) + assert_equal(chaintxstats['window_block_count'], 0) + assert('window_tx_count' not in chaintxstats) + assert('window_interval' not in chaintxstats) + assert('txrate' not in chaintxstats) + + assert_raises_rpc_error(-8, 
"Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, 201) + + def _test_gettxoutsetinfo(self): + node = self.nodes[0] + res = node.gettxoutsetinfo() + + assert_equal(res['total_amount'], Decimal('8725.00000000')) + assert_equal(res['transactions'], 200) + assert_equal(res['height'], 200) + assert_equal(res['txouts'], 200) + assert_equal(res['bogosize'], 17000), + assert_equal(res['bestblock'], node.getblockhash(200)) + size = res['disk_size'] + assert size > 6400 + assert size < 64000 + assert_equal(len(res['bestblock']), 64) + assert_equal(len(res['hash_serialized_2']), 64) + + self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block") + b1hash = node.getblockhash(1) + node.invalidateblock(b1hash) + + res2 = node.gettxoutsetinfo() + assert_equal(res2['transactions'], 0) + assert_equal(res2['total_amount'], Decimal('0')) + assert_equal(res2['height'], 0) + assert_equal(res2['txouts'], 0) + assert_equal(res2['bogosize'], 0), + assert_equal(res2['bestblock'], node.getblockhash(0)) + assert_equal(len(res2['hash_serialized_2']), 64) + + self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block") + node.reconsiderblock(b1hash) + + res3 = node.gettxoutsetinfo() + assert_equal(res['total_amount'], res3['total_amount']) + assert_equal(res['transactions'], res3['transactions']) + assert_equal(res['height'], res3['height']) + assert_equal(res['txouts'], res3['txouts']) + assert_equal(res['bogosize'], res3['bogosize']) + assert_equal(res['bestblock'], res3['bestblock']) + assert_equal(res['hash_serialized_2'], res3['hash_serialized_2']) + + def _test_getblockheader(self): + node = self.nodes[0] + + assert_raises_rpc_error(-5, "Block not found", + node.getblockheader, "nonsense") + + besthash = node.getbestblockhash() + secondbesthash = node.getblockhash(199) + header = node.getblockheader(besthash) + + assert_equal(header['hash'], besthash) + assert_equal(header['height'], 200) + assert_equal(header['confirmations'], 1) + assert_equal(header['previousblockhash'], secondbesthash) + assert_is_hex_string(header['chainwork']) + assert_is_hash_string(header['hash']) + assert_is_hash_string(header['previousblockhash']) + assert_is_hash_string(header['merkleroot']) + assert_is_hash_string(header['bits'], length=None) + assert isinstance(header['time'], int) + assert isinstance(header['mediantime'], int) + assert isinstance(header['nonce'], int) + assert isinstance(header['version'], int) + assert isinstance(int(header['versionHex'], 16), int) + assert isinstance(header['difficulty'], Decimal) + + def _test_getdifficulty(self): + difficulty = self.nodes[0].getdifficulty() + # 1 hash in 2 should be valid, so difficulty should be 1/2**31 + # binary => decimal => binary math is why we do this check + assert abs(difficulty * 2**31 - 1) < 0.0001 + + def _test_getnetworkhashps(self): + hashes_per_second = self.nodes[0].getnetworkhashps() + # This should be 2 hashes every 10 minutes or 1/300 + assert abs(hashes_per_second * 300 - 1) < 0.0001 + + def _test_stopatheight(self): + assert_equal(self.nodes[0].getblockcount(), 200) + self.nodes[0].generate(6) + assert_equal(self.nodes[0].getblockcount(), 206) + self.log.debug('Node should not stop at this height') + assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3)) + try: + self.nodes[0].generate(1) + except (ConnectionError, http.client.BadStatusLine): + pass # The node already shut down before response + 
self.log.debug('Node should stop at this height...') + self.nodes[0].wait_until_stopped() + self.start_node(0) + assert_equal(self.nodes[0].getblockcount(), 207) + + +if __name__ == '__main__': + BlockchainTest().main() diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py new file mode 100755 index 0000000000..1ffc570437 --- /dev/null +++ b/test/functional/rpc_decodescript.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test decoding scripts via decodescript RPC command.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import * +from io import BytesIO + +class DecodeScriptTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def decodescript_script_sig(self): + signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' + push_signature = '48' + signature + public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' + push_public_key = '21' + public_key + + # below are test cases for all of the standard transaction types + + # 1) P2PK scriptSig + # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack + rpc_result = self.nodes[0].decodescript(push_signature) + assert_equal(signature, rpc_result['asm']) + + # 2) P2PKH scriptSig + rpc_result = self.nodes[0].decodescript(push_signature + push_public_key) + assert_equal(signature + ' ' + public_key, rpc_result['asm']) + + # 3) multisig scriptSig + # this also tests the leading portion of a P2SH multisig scriptSig + # OP_0 + rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature) + assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm']) + + # 4) P2SH scriptSig + # an empty P2SH redeemScript is valid and makes for a very simple test case. + # thus, such a spending scriptSig would just need to pass the outer redeemScript + # hash test and leave true on the top of the stack. + rpc_result = self.nodes[0].decodescript('5100') + assert_equal('1 0', rpc_result['asm']) + + # 5) null data scriptSig - no such thing because null data scripts can not be spent. + # thus, no test case for that standard transaction type is here. + + def decodescript_script_pub_key(self): + public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' + push_public_key = '21' + public_key + public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777' + push_public_key_hash = '14' + public_key_hash + + # below are test cases for all of the standard transaction types + + # 1) P2PK scriptPubKey + # OP_CHECKSIG + rpc_result = self.nodes[0].decodescript(push_public_key + 'ac') + assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm']) + + # 2) P2PKH scriptPubKey + # OP_DUP OP_HASH160 OP_EQUALVERIFY OP_CHECKSIG + rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac') + assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm']) + + # 3) multisig scriptPubKey + # OP_CHECKMULTISIG + # just imagine that the pub keys used below are different. + # for our purposes here it does not matter that they are the same even though it is unrealistic. 
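+        # ('52' is OP_2, '53' is OP_3 and 'ae' is OP_CHECKMULTISIG, while each
+        # '21' prefix pushes a 33-byte compressed pubkey, so the hex assembled
+        # below is the standard bare 2-of-3 multisig template.)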
+ rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae') + assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm']) + + # 4) P2SH scriptPubKey + # OP_HASH160 OP_EQUAL. + # push_public_key_hash here should actually be the hash of a redeem script. + # but this works the same for purposes of this test. + rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87') + assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm']) + + # 5) null data scriptPubKey + # use a signature look-alike here to make sure that we do not decode random data as a signature. + # this matters if/when signature sighash decoding comes along. + # would want to make sure that no such decoding takes place in this case. + signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' + # OP_RETURN + rpc_result = self.nodes[0].decodescript('6a' + signature_imposter) + assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm']) + + # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here. + # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY. + # just imagine that the pub keys used below are different. + # for our purposes here it does not matter that they are the same even though it is unrealistic. + # + # OP_IF + # OP_CHECKSIGVERIFY + # OP_ELSE + # OP_CHECKLOCKTIMEVERIFY OP_DROP + # OP_ENDIF + # OP_CHECKSIG + # + # lock until block 500,000 + rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac') + assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm']) + + def decoderawtransaction_asm_sighashtype(self): + """Test decoding scripts via RPC command "decoderawtransaction". + + This test is in with the "decodescript" tests because they are testing the same "asm" script decodes. + """ + + # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output + tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' + rpc_result = self.nodes[0].decoderawtransaction(tx) + assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) + + # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs. + # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc + # verify that we have not altered scriptPubKey decoding. 
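+ # the scriptSig below pushes OP_0, two DER signatures and the serialized 2-of-3 redeemScript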
+ tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' + rpc_result = self.nodes[0].decoderawtransaction(tx) + assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) + assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) + assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) + assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) + txSave = CTransaction() + txSave.deserialize(BytesIO(hex_str_to_bytes(tx))) + + # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type + tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' + rpc_result = self.nodes[0].decoderawtransaction(tx) + assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) + + # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks + tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' + rpc_result = self.nodes[0].decoderawtransaction(tx) + 
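# the fake hash160 payloads below begin with 0x30, the DER sequence tag, so they resemble signatures; they must still decode as plain P2PKH and P2SH scripts +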
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) + assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) + + # some more full transaction tests of varying specific scriptSigs. used instead of + # tests in decodescript_script_sig because the decodescript RPC is specifically + # for working on scriptPubKeys (argh!). + push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)] + signature = push_signature[2:] + der_signature = signature[:-2] + signature_sighash_decoded = der_signature + '[ALL]' + signature_2 = der_signature + '82' + push_signature_2 = '48' + signature_2 + signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]' + + # 1) P2PK scriptSig + txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) + rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) + assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) + + # make sure that the sighash decodes come out correctly for a more complex / lesser used case. + txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) + rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) + assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) + + # 2) multisig scriptSig + txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2) + rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) + assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) + + # 3) test a scriptSig that contains more than push operations. + # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it. + txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101') + rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) + assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) + + def run_test(self): + self.decodescript_script_sig() + self.decodescript_script_pub_key() + self.decoderawtransaction_asm_sighashtype() + +if __name__ == '__main__': + DecodeScriptTest().main() diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py new file mode 100755 index 0000000000..d6f25158ef --- /dev/null +++ b/test/functional/rpc_deprecated.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test deprecation of RPC calls.""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_raises_rpc_error + +class DeprecatedRpcTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]] + + def run_test(self): + self.log.info("estimatefee: Shows deprecated message") + assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1) + + self.log.info("Using -deprecatedrpc=estimatefee bypasses the error") + self.nodes[1].estimatefee(1) + + self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses") + assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()]) + self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()]) + +if __name__ == '__main__': + DeprecatedRpcTest().main() diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py new file mode 100755 index 0000000000..5f7a0586e0 --- /dev/null +++ b/test/functional/rpc_fundrawtransaction.py @@ -0,0 +1,732 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the fundrawtransaction RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + + +def get_unspent(listunspent, amount): + for utx in listunspent: + if utx['amount'] == amount: + return utx + raise AssertionError('Could not find unspent with amount={}'.format(amount)) + +class RawTransactionsTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + self.setup_clean_chain = True + + def setup_network(self, split=False): + self.setup_nodes() + + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 1, 2) + connect_nodes_bi(self.nodes, 0, 2) + connect_nodes_bi(self.nodes, 0, 3) + + def run_test(self): + min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] + # This test is not meant to test fee estimation and we'd like + # to be sure all txs are sent at a consistent desired feerate + for node in self.nodes: + node.settxfee(min_relay_tx_fee) + + # if the fee's positive delta is higher than this value tests will fail, + # neg. delta always fail the tests. + # The size of the signature of every input may be at most 2 bytes larger + # than a minimum sized signature. 
+ + # = 2 bytes * minRelayTxFeePerByte + feeTolerance = 2 * min_relay_tx_fee/1000 + + self.nodes[2].generate(1) + self.sync_all() + self.nodes[0].generate(121) + self.sync_all() + + # ensure that setting changePosition in fundraw with an exact match is handled properly + rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50}) + rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]}) + assert_equal(rawmatch["changepos"], -1) + + watchonly_address = self.nodes[0].getnewaddress() + watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"] + watchonly_amount = Decimal(200) + self.nodes[3].importpubkey(watchonly_pubkey, "", True) + watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount) + self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10) + + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) + + self.nodes[0].generate(1) + self.sync_all() + + ############### + # simple test # + ############### + inputs = [ ] + outputs = { self.nodes[0].getnewaddress() : 1.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + assert(len(dec_tx['vin']) > 0) #test that we have enough inputs + + ############################## + # simple test with two coins # + ############################## + inputs = [ ] + outputs = { self.nodes[0].getnewaddress() : 2.2 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + assert(len(dec_tx['vin']) > 0) #test if we have enough inputs + + ############################## + # simple test with two coins # + ############################## + inputs = [ ] + outputs = { self.nodes[0].getnewaddress() : 2.6 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + assert(len(dec_tx['vin']) > 0) + assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') + + + ################################ + # simple test with two outputs # + ################################ + inputs = [ ] + outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + for out in dec_tx['vout']: + totalOut += out['value'] + + assert(len(dec_tx['vin']) > 0) + assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') + + + ######################################################################### + # test a fundrawtransaction with a VIN greater than the required amount # + ######################################################################### + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : 
utx['txid'], 'vout' : utx['vout']}] + outputs = { self.nodes[0].getnewaddress() : 1.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + for out in dec_tx['vout']: + totalOut += out['value'] + + assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee + + + ##################################################################### + # test a fundrawtransaction which will not get a change output # + ##################################################################### + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] + outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + for out in dec_tx['vout']: + totalOut += out['value'] + + assert_equal(rawtxfund['changepos'], -1) + assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee + + + #################################################### + # test a fundrawtransaction with an invalid option # + #################################################### + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'}) + + ############################################################ + # test a fundrawtransaction with an invalid change address # + ############################################################ + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + assert_raises_rpc_error(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'}) + + ############################################################ + # test a fundrawtransaction with a provided change address # + ############################################################ + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + change = self.nodes[2].getnewaddress() + assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2}) + rawtxfund = 
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0}) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + out = dec_tx['vout'][0] + assert_equal(change, out['scriptPubKey']['addresses'][0]) + + ######################################################### + # test a fundrawtransaction with a provided change type # + ######################################################### + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None}) + assert_raises_rpc_error(-5, "Unknown change type", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''}) + rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'}) + tx = self.nodes[2].decoderawtransaction(rawtx['hex']) + assert_equal('witness_v0_keyhash', tx['vout'][rawtx['changepos']]['scriptPubKey']['type']) + + ######################################################################### + # test a fundrawtransaction with a VIN smaller than the required amount # + ######################################################################### + utx = get_unspent(self.nodes[2].listunspent(), 1) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] + outputs = { self.nodes[0].getnewaddress() : 1.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + + # 4-byte version + 1-byte vin count + 36-byte prevout then script_len + rawtx = rawtx[:82] + "0100" + rawtx[84:] + + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + matchingOuts = 0 + for i, out in enumerate(dec_tx['vout']): + totalOut += out['value'] + if out['scriptPubKey']['addresses'][0] in outputs: + matchingOuts+=1 + else: + assert_equal(i, rawtxfund['changepos']) + + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) + + assert_equal(matchingOuts, 1) + assert_equal(len(dec_tx['vout']), 2) + + + ########################################### + # test a fundrawtransaction with two VINs # + ########################################### + utx = get_unspent(self.nodes[2].listunspent(), 1) + utx2 = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] + outputs = { self.nodes[0].getnewaddress() : 6.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + matchingOuts = 0 + for out in dec_tx['vout']: + totalOut += out['value'] + if out['scriptPubKey']['addresses'][0] in outputs: + matchingOuts+=1 + + assert_equal(matchingOuts, 1) + assert_equal(len(dec_tx['vout']), 2) + + matchingIns = 0 + for vinOut in dec_tx['vin']: + for vinIn in inputs: + if vinIn['txid'] == vinOut['txid']: + matchingIns+=1 + + assert_equal(matchingIns, 2) 
#we now must see two vins identical to vins given as params + + ######################################################### + # test a fundrawtransaction with two VINs and two vOUTs # + ######################################################### + utx = get_unspent(self.nodes[2].listunspent(), 1) + utx2 = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] + outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + matchingOuts = 0 + for out in dec_tx['vout']: + totalOut += out['value'] + if out['scriptPubKey']['addresses'][0] in outputs: + matchingOuts+=1 + + assert_equal(matchingOuts, 2) + assert_equal(len(dec_tx['vout']), 3) + + ############################################## + # test a fundrawtransaction with invalid vin # + ############################################## + inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin! + outputs = { self.nodes[0].getnewaddress() : 1.0} + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + + assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx) + + ############################################################ + #compare fee of a standard pubkeyhash transaction + inputs = [] + outputs = {self.nodes[1].getnewaddress():1.1} + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[0].fundrawtransaction(rawtx) + + #create same transaction over sendtoaddress + txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1) + signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] + + #compare fee + feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) + assert(feeDelta >= 0 and feeDelta <= feeTolerance) + ############################################################ + + ############################################################ + #compare fee of a standard pubkeyhash transaction with multiple outputs + inputs = [] + outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3} + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[0].fundrawtransaction(rawtx) + #create same transaction over sendtoaddress + txId = self.nodes[0].sendmany("", outputs) + signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] + + #compare fee + feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) + assert(feeDelta >= 0 and feeDelta <= feeTolerance) + ############################################################ + + + ############################################################ + #compare fee of a 2of2 multisig p2sh transaction + + # create 2of2 addr + addr1 = self.nodes[1].getnewaddress() + addr2 = self.nodes[1].getnewaddress() + + addr1Obj = self.nodes[1].validateaddress(addr1) + addr2Obj = self.nodes[1].validateaddress(addr2) + + mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] + + 
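# fund a raw transaction paying the 2-of-2 P2SH address, then make the same payment with sendtoaddress and compare the two fees +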
inputs = [] + outputs = {mSigObj:1.1} + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[0].fundrawtransaction(rawtx) + + #create same transaction over sendtoaddress + txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) + signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] + + #compare fee + feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) + assert(feeDelta >= 0 and feeDelta <= feeTolerance) + ############################################################ + + + ############################################################ + #compare fee of a 4of5 multisig p2sh transaction + + # create 4of5 addr + addr1 = self.nodes[1].getnewaddress() + addr2 = self.nodes[1].getnewaddress() + addr3 = self.nodes[1].getnewaddress() + addr4 = self.nodes[1].getnewaddress() + addr5 = self.nodes[1].getnewaddress() + + addr1Obj = self.nodes[1].validateaddress(addr1) + addr2Obj = self.nodes[1].validateaddress(addr2) + addr3Obj = self.nodes[1].validateaddress(addr3) + addr4Obj = self.nodes[1].validateaddress(addr4) + addr5Obj = self.nodes[1].validateaddress(addr5) + + mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address'] + + inputs = [] + outputs = {mSigObj:1.1} + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[0].fundrawtransaction(rawtx) + + #create same transaction over sendtoaddress + txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) + signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] + + #compare fee + feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) + assert(feeDelta >= 0 and feeDelta <= feeTolerance) + ############################################################ + + + ############################################################ + # spend a 2of2 multisig transaction over fundraw + + # create 2of2 addr + addr1 = self.nodes[2].getnewaddress() + addr2 = self.nodes[2].getnewaddress() + + addr1Obj = self.nodes[2].validateaddress(addr1) + addr2Obj = self.nodes[2].validateaddress(addr2) + + mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] + + + # send 1.2 BTC to msig addr + txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) + self.sync_all() + self.nodes[1].generate(1) + self.sync_all() + + oldBalance = self.nodes[1].getbalance() + inputs = [] + outputs = {self.nodes[1].getnewaddress():1.1} + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[2].fundrawtransaction(rawtx) + + signedTx = self.nodes[2].signrawtransaction(fundedTx['hex']) + txId = self.nodes[2].sendrawtransaction(signedTx['hex']) + self.sync_all() + self.nodes[1].generate(1) + self.sync_all() + + # make sure funds are received at node1 + assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance()) + + ############################################################ + # locked wallet test + self.stop_node(0) + self.nodes[1].node_encrypt_wallet("test") + self.stop_node(2) + self.stop_node(3) + + self.start_nodes() + # This test is not meant to test fee estimation and we'd like + # to be sure all txs are sent at a consistent desired feerate + for node in self.nodes: + node.settxfee(min_relay_tx_fee) + + connect_nodes_bi(self.nodes,0,1) + connect_nodes_bi(self.nodes,1,2) + connect_nodes_bi(self.nodes,0,2) + connect_nodes_bi(self.nodes,0,3) + self.sync_all() + + # drain the keypool + self.nodes[1].getnewaddress() + self.nodes[1].getrawchangeaddress() + inputs = [] + 
outputs = {self.nodes[0].getnewaddress():1.1} + rawtx = self.nodes[1].createrawtransaction(inputs, outputs) + # fund a transaction that requires a new key for the change output + # creating the key must be impossible because the wallet is locked + assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx) + + #refill the keypool + self.nodes[1].walletpassphrase("test", 100) + self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address + self.nodes[1].walletlock() + + assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2) + + oldBalance = self.nodes[0].getbalance() + + inputs = [] + outputs = {self.nodes[0].getnewaddress():1.1} + rawtx = self.nodes[1].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[1].fundrawtransaction(rawtx) + + #now we need to unlock + self.nodes[1].walletpassphrase("test", 600) + signedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) + txId = self.nodes[1].sendrawtransaction(signedTx['hex']) + self.nodes[1].generate(1) + self.sync_all() + + # make sure funds are received at node0 (1.1 plus a matured block reward) + assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance()) + + + ############################################### + # multiple (~19) inputs tx test | Compare fee # + ############################################### + + #empty node1, send some small coins from node0 to node1 + self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + + for i in range(0,20): + self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) + self.nodes[0].generate(1) + self.sync_all() + + #fund a tx with ~20 small inputs + inputs = [] + outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} + rawtx = self.nodes[1].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[1].fundrawtransaction(rawtx) + + #create same transaction over sendtoaddress + txId = self.nodes[1].sendmany("", outputs) + signedFee = self.nodes[1].getrawmempool(True)[txId]['fee'] + + #compare fee + feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) + assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs + + + ############################################# + # multiple (~19) inputs tx test | sign/send # + ############################################# + + #again, empty node1, send some small coins from node0 to node1 + self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + + for i in range(0,20): + self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) + self.nodes[0].generate(1) + self.sync_all() + + #fund a tx with ~20 small inputs + oldBalance = self.nodes[0].getbalance() + + inputs = [] + outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} + rawtx = self.nodes[1].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[1].fundrawtransaction(rawtx) + fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) + txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward + + ##################################################### + # test fundrawtransaction with OP_RETURN 
and no vin # + ##################################################### + + rawtx = "0100000000010000000000000000066a047465737400000000" + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + + assert_equal(len(dec_tx['vin']), 0) + assert_equal(len(dec_tx['vout']), 1) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + + assert_greater_than(len(dec_tx['vin']), 0) # at least one vin + assert_equal(len(dec_tx['vout']), 2) # one change output added + + + ################################################## + # test a fundrawtransaction using only watchonly # + ################################################## + + inputs = [] + outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2} + rawtx = self.nodes[3].createrawtransaction(inputs, outputs) + + result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True }) + res_dec = self.nodes[0].decoderawtransaction(result["hex"]) + assert_equal(len(res_dec["vin"]), 1) + assert_equal(res_dec["vin"][0]["txid"], watchonly_txid) + + assert("fee" in result.keys()) + assert_greater_than(result["changepos"], -1) + + ############################################################### + # test fundrawtransaction using the entirety of watched funds # + ############################################################### + + inputs = [] + outputs = {self.nodes[2].getnewaddress() : watchonly_amount} + rawtx = self.nodes[3].createrawtransaction(inputs, outputs) + + # Backward compatibility test (2nd param is includeWatching) + result = self.nodes[3].fundrawtransaction(rawtx, True) + res_dec = self.nodes[0].decoderawtransaction(result["hex"]) + assert_equal(len(res_dec["vin"]), 2) + assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid) + + assert_greater_than(result["fee"], 0) + assert_greater_than(result["changepos"], -1) + assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10) + + signedtx = self.nodes[3].signrawtransaction(result["hex"]) + assert(not signedtx["complete"]) + signedtx = self.nodes[0].signrawtransaction(signedtx["hex"]) + assert(signedtx["complete"]) + self.nodes[0].sendrawtransaction(signedtx["hex"]) + self.nodes[0].generate(1) + self.sync_all() + + ####################### + # Test feeRate option # + ####################### + + # Make sure there is exactly one input so coin selection can't skew the result + assert_equal(len(self.nodes[3].listunspent(1)), 1) + + inputs = [] + outputs = {self.nodes[3].getnewaddress() : 1} + rawtx = self.nodes[3].createrawtransaction(inputs, outputs) + result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee) + result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}) + result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee}) + result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex']) + assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate) + assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate) + + ################################ + # Test no address reuse occurs # + ################################ + + result3 = self.nodes[3].fundrawtransaction(rawtx) + res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) + changeaddress = "" + for out in res_dec['vout']: + if out['value'] > 1.0: + changeaddress += out['scriptPubKey']['addresses'][0] + assert(changeaddress != "") + nextaddr = 
self.nodes[3].getnewaddress() + # Now the change address key should be removed from the keypool + assert(changeaddress != nextaddr) + + ###################################### + # Test subtractFeeFromOutputs option # + ###################################### + + # Make sure there is exactly one input so coin selection can't skew the result + assert_equal(len(self.nodes[3].listunspent(1)), 1) + + inputs = [] + outputs = {self.nodes[2].getnewaddress(): 1} + rawtx = self.nodes[3].createrawtransaction(inputs, outputs) + + result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee) + self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list + self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee) + self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}), + self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})] + + dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result] + output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] + change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] + + assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) + assert_equal(result[3]['fee'], result[4]['fee']) + assert_equal(change[0], change[1]) + assert_equal(output[0], output[1]) + assert_equal(output[0], output[2] + result[2]['fee']) + assert_equal(change[0] + result[0]['fee'], change[2]) + assert_equal(output[3], output[4] + result[4]['fee']) + assert_equal(change[3] + result[3]['fee'], change[4]) + + inputs = [] + outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)} + rawtx = self.nodes[3].createrawtransaction(inputs, outputs) + + result = [self.nodes[3].fundrawtransaction(rawtx), + # split the fee between outputs 0, 2, and 3, but not output 1 + self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})] + + dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']), + self.nodes[3].decoderawtransaction(result[1]['hex'])] + + # Nested list of non-change output amounts for each transaction + output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']] + for d, r in zip(dec_tx, result)] + + # List of differences in output amounts between normal and subtractFee transactions + share = [o0 - o1 for o0, o1 in zip(output[0], output[1])] + + # output 1 is the same in both transactions + assert_equal(share[1], 0) + + # the other 3 outputs are smaller as a result of subtractFeeFromOutputs + assert_greater_than(share[0], 0) + assert_greater_than(share[2], 0) + assert_greater_than(share[3], 0) + + # outputs 2 and 3 take the same share of the fee + assert_equal(share[2], share[3]) + + # output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3 + assert_greater_than_or_equal(share[0], share[2]) + assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0]) + + # the fee is the same in both transactions + assert_equal(result[0]['fee'], result[1]['fee']) + + # the total subtracted from the outputs is equal to the fee + assert_equal(share[0] + share[2] + share[3], result[0]['fee']) + +if __name__ == '__main__': + RawTransactionsTest().main() diff --git a/test/functional/rpc_getchaintips.py b/test/functional/rpc_getchaintips.py new file mode 100755 index 0000000000..277930bb1a --- /dev/null +++ 
b/test/functional/rpc_getchaintips.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the getchaintips RPC. + +- introduce a network split +- work on chains of different lengths +- join the network together again +- verify that getchaintips now returns two chain tips. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class GetChainTipsTest (BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + + def run_test (self): + tips = self.nodes[0].getchaintips () + assert_equal (len (tips), 1) + assert_equal (tips[0]['branchlen'], 0) + assert_equal (tips[0]['height'], 200) + assert_equal (tips[0]['status'], 'active') + + # Split the network and build two chains of different lengths. + self.split_network () + self.nodes[0].generate(10) + self.nodes[2].generate(20) + self.sync_all([self.nodes[:2], self.nodes[2:]]) + + tips = self.nodes[1].getchaintips () + assert_equal (len (tips), 1) + shortTip = tips[0] + assert_equal (shortTip['branchlen'], 0) + assert_equal (shortTip['height'], 210) + assert_equal (tips[0]['status'], 'active') + + tips = self.nodes[3].getchaintips () + assert_equal (len (tips), 1) + longTip = tips[0] + assert_equal (longTip['branchlen'], 0) + assert_equal (longTip['height'], 220) + assert_equal (tips[0]['status'], 'active') + + # Join the network halves and check that we now have two tips + # (at least at the nodes that previously had the short chain). + self.join_network () + + tips = self.nodes[0].getchaintips () + assert_equal (len (tips), 2) + assert_equal (tips[0], longTip) + + assert_equal (tips[1]['branchlen'], 10) + assert_equal (tips[1]['status'], 'valid-fork') + tips[1]['branchlen'] = 0 + tips[1]['status'] = 'active' + assert_equal (tips[1], shortTip) + +if __name__ == '__main__': + GetChainTipsTest ().main () diff --git a/test/functional/rpc_invalidateblock.py b/test/functional/rpc_invalidateblock.py new file mode 100755 index 0000000000..b037c2431d --- /dev/null +++ b/test/functional/rpc_invalidateblock.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the invalidateblock RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class InvalidateTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + + def setup_network(self): + self.setup_nodes() + + def run_test(self): + self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:") + self.log.info("Mine 4 blocks on Node 0") + self.nodes[0].generate(4) + assert(self.nodes[0].getblockcount() == 4) + besthash = self.nodes[0].getbestblockhash() + + self.log.info("Mine competing 6 blocks on Node 1") + self.nodes[1].generate(6) + assert(self.nodes[1].getblockcount() == 6) + + self.log.info("Connect nodes to force a reorg") + connect_nodes_bi(self.nodes,0,1) + sync_blocks(self.nodes[0:2]) + assert(self.nodes[0].getblockcount() == 6) + badhash = self.nodes[1].getblockhash(2) + + self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain") + self.nodes[0].invalidateblock(badhash) + newheight = self.nodes[0].getblockcount() + newhash = self.nodes[0].getbestblockhash() + if (newheight != 4 or newhash != besthash): + raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight)) + + self.log.info("Make sure we won't reorg to a lower work chain:") + connect_nodes_bi(self.nodes,1,2) + self.log.info("Sync node 2 to node 1 so both have 6 blocks") + sync_blocks(self.nodes[1:3]) + assert(self.nodes[2].getblockcount() == 6) + self.log.info("Invalidate block 5 on node 1 so its tip is now at 4") + self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5)) + assert(self.nodes[1].getblockcount() == 4) + self.log.info("Invalidate block 3 on node 2, so its tip is now 2") + self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3)) + assert(self.nodes[2].getblockcount() == 2) + self.log.info("..and then mine a block") + self.nodes[2].generate(1) + self.log.info("Verify all nodes are at the right height") + time.sleep(5) + assert_equal(self.nodes[2].getblockcount(), 3) + assert_equal(self.nodes[0].getblockcount(), 4) + node1height = self.nodes[1].getblockcount() + if node1height < 4: + raise AssertionError("Node 1 reorged to a lower height: %d"%node1height) + +if __name__ == '__main__': + InvalidateTest().main() diff --git a/test/functional/rpc_listtransactions.py b/test/functional/rpc_listtransactions.py new file mode 100755 index 0000000000..ba71ac0967 --- /dev/null +++ b/test/functional/rpc_listtransactions.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the listtransactions API.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import CTransaction, COIN +from io import BytesIO + +def txFromHex(hexstring): + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(hexstring)) + tx.deserialize(f) + return tx + +class ListTransactionsTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.enable_mocktime() + + def run_test(self): + # Simple send, 0 to 1: + txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) + self.sync_all() + assert_array_result(self.nodes[0].listtransactions(), + {"txid":txid}, + {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0}) + assert_array_result(self.nodes[1].listtransactions(), + {"txid":txid}, + {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0}) + # mine a block, confirmations should change: + self.nodes[0].generate(1) + self.sync_all() + assert_array_result(self.nodes[0].listtransactions(), + {"txid":txid}, + {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1}) + assert_array_result(self.nodes[1].listtransactions(), + {"txid":txid}, + {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1}) + + # send-to-self: + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) + assert_array_result(self.nodes[0].listtransactions(), + {"txid":txid, "category":"send"}, + {"amount":Decimal("-0.2")}) + assert_array_result(self.nodes[0].listtransactions(), + {"txid":txid, "category":"receive"}, + {"amount":Decimal("0.2")}) + + # sendmany from node1: twice to self, twice to node2: + send_to = { self.nodes[0].getnewaddress() : 0.11, + self.nodes[1].getnewaddress() : 0.22, + self.nodes[0].getaccountaddress("from1") : 0.33, + self.nodes[1].getaccountaddress("toself") : 0.44 } + txid = self.nodes[1].sendmany("", send_to) + self.sync_all() + assert_array_result(self.nodes[1].listtransactions(), + {"category":"send","amount":Decimal("-0.11")}, + {"txid":txid} ) + assert_array_result(self.nodes[0].listtransactions(), + {"category":"receive","amount":Decimal("0.11")}, + {"txid":txid} ) + assert_array_result(self.nodes[1].listtransactions(), + {"category":"send","amount":Decimal("-0.22")}, + {"txid":txid} ) + assert_array_result(self.nodes[1].listtransactions(), + {"category":"receive","amount":Decimal("0.22")}, + {"txid":txid} ) + assert_array_result(self.nodes[1].listtransactions(), + {"category":"send","amount":Decimal("-0.33")}, + {"txid":txid} ) + assert_array_result(self.nodes[0].listtransactions(), + {"category":"receive","amount":Decimal("0.33")}, + {"txid":txid, "account" : "from1"} ) + assert_array_result(self.nodes[1].listtransactions(), + {"category":"send","amount":Decimal("-0.44")}, + {"txid":txid, "account" : ""} ) + assert_array_result(self.nodes[1].listtransactions(), + {"category":"receive","amount":Decimal("0.44")}, + {"txid":txid, "account" : "toself"} ) + + pubkey = self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['pubkey'] + multisig = self.nodes[1].createmultisig(1, [pubkey]) + self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True) + txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) + self.nodes[1].generate(1) + self.sync_all() + assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0) + assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True), + 
{"category":"receive","amount":Decimal("0.1")}, + {"txid":txid, "account" : "watchonly"} ) + + self.run_rbf_opt_in_test() + + # Check that the opt-in-rbf flag works properly, for sent and received + # transactions. + def run_rbf_opt_in_test(self): + # Check whether a transaction signals opt-in RBF itself + def is_opt_in(node, txid): + rawtx = node.getrawtransaction(txid, 1) + for x in rawtx["vin"]: + if x["sequence"] < 0xfffffffe: + return True + return False + + # Find an unconfirmed output matching a certain txid + def get_unconfirmed_utxo_entry(node, txid_to_match): + utxo = node.listunspent(0, 0) + for i in utxo: + if i["txid"] == txid_to_match: + return i + return None + + # 1. Chain a few transactions that don't opt-in. + txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) + assert(not is_opt_in(self.nodes[0], txid_1)) + assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) + sync_mempools(self.nodes) + assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) + + # Tx2 will build off txid_1, still not opting in to RBF. + utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1) + assert_equal(utxo_to_use["safe"], True) + utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) + utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) + assert_equal(utxo_to_use["safe"], False) + + # Create tx2 using createrawtransaction + inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}] + outputs = {self.nodes[0].getnewaddress(): 0.999} + tx2 = self.nodes[1].createrawtransaction(inputs, outputs) + tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"] + txid_2 = self.nodes[1].sendrawtransaction(tx2_signed) + + # ...and check the result + assert(not is_opt_in(self.nodes[1], txid_2)) + assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) + sync_mempools(self.nodes) + assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) + + # Tx3 will opt-in to RBF + utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2) + inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}] + outputs = {self.nodes[1].getnewaddress(): 0.998} + tx3 = self.nodes[0].createrawtransaction(inputs, outputs) + tx3_modified = txFromHex(tx3) + tx3_modified.vin[0].nSequence = 0 + tx3 = bytes_to_hex_str(tx3_modified.serialize()) + tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex'] + txid_3 = self.nodes[0].sendrawtransaction(tx3_signed) + + assert(is_opt_in(self.nodes[0], txid_3)) + assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) + sync_mempools(self.nodes) + assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) + + # Tx4 will chain off tx3. Doesn't signal itself, but depends on one + # that does. 
+ utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3) + inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}] + outputs = {self.nodes[0].getnewaddress(): 0.997} + tx4 = self.nodes[1].createrawtransaction(inputs, outputs) + tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"] + txid_4 = self.nodes[1].sendrawtransaction(tx4_signed) + + assert(not is_opt_in(self.nodes[1], txid_4)) + assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) + sync_mempools(self.nodes) + assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) + + # Replace tx3, and check that tx4 becomes unknown + tx3_b = tx3_modified + tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee + tx3_b = bytes_to_hex_str(tx3_b.serialize()) + tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex'] + txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True) + assert(is_opt_in(self.nodes[0], txid_3b)) + + assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) + sync_mempools(self.nodes) + assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) + + # Check gettransaction as well: + for n in self.nodes[0:2]: + assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no") + assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no") + assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes") + assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes") + assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown") + + # After mining a transaction, it's no longer BIP125-replaceable + self.nodes[0].generate(1) + assert(txid_3b not in self.nodes[0].getrawmempool()) + assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no") + assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown") + + +if __name__ == '__main__': + ListTransactionsTest().main() + diff --git a/test/functional/rpc_named_arguments.py b/test/functional/rpc_named_arguments.py new file mode 100755 index 0000000000..97bee39614 --- /dev/null +++ b/test/functional/rpc_named_arguments.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test using named arguments for RPCs.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, +) + +class NamedArgumentTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def run_test(self): + node = self.nodes[0] + h = node.help(command='getblockchaininfo') + assert(h.startswith('getblockchaininfo\n')) + + assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo') + + h = node.getblockhash(height=0) + node.getblock(blockhash=h) + + assert_equal(node.echo(), []) + assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9]) + assert_equal(node.echo(arg1=1), [None, 1]) + assert_equal(node.echo(arg9=None), [None]*10) + assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9]) + +if __name__ == '__main__': + NamedArgumentTest().main() diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py new file mode 100755 index 0000000000..16e4f6adb4 --- /dev/null +++ b/test/functional/rpc_net.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test RPC calls related to net. + +Tests correspond to code in rpc/net.cpp. +""" + +import time + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + connect_nodes_bi, + p2p_port, +) + +class NetTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + + def run_test(self): + self._test_connection_count() + self._test_getnettotals() + self._test_getnetworkinginfo() + self._test_getaddednodeinfo() + self._test_getpeerinfo() + + def _test_connection_count(self): + # connect_nodes_bi connects each node to the other + assert_equal(self.nodes[0].getconnectioncount(), 2) + + def _test_getnettotals(self): + # check that getnettotals totalbytesrecv and totalbytessent + # are consistent with getpeerinfo + peer_info = self.nodes[0].getpeerinfo() + assert_equal(len(peer_info), 2) + net_totals = self.nodes[0].getnettotals() + assert_equal(sum([peer['bytesrecv'] for peer in peer_info]), + net_totals['totalbytesrecv']) + assert_equal(sum([peer['bytessent'] for peer in peer_info]), + net_totals['totalbytessent']) + # test getnettotals and getpeerinfo by doing a ping + # the bytes sent/received should change + # note ping and pong are 32 bytes each + self.nodes[0].ping() + time.sleep(0.1) + peer_info_after_ping = self.nodes[0].getpeerinfo() + net_totals_after_ping = self.nodes[0].getnettotals() + for before, after in zip(peer_info, peer_info_after_ping): + assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong']) + assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping']) + assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv']) + assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent']) + + def _test_getnetworkinginfo(self): + assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) + assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2) + + self.nodes[0].setnetworkactive(False) + assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False) + timeout = 3 + while 
self.nodes[0].getnetworkinfo()['connections'] != 0: + # Wait a bit for all sockets to close + assert timeout > 0, 'not all connections closed in time' + timeout -= 0.1 + time.sleep(0.1) + + self.nodes[0].setnetworkactive(True) + connect_nodes_bi(self.nodes, 0, 1) + assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) + assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2) + + def _test_getaddednodeinfo(self): + assert_equal(self.nodes[0].getaddednodeinfo(), []) + # add a node (node2) to node0 + ip_port = "127.0.0.1:{}".format(p2p_port(2)) + self.nodes[0].addnode(ip_port, 'add') + # check that the node has indeed been added + added_nodes = self.nodes[0].getaddednodeinfo(ip_port) + assert_equal(len(added_nodes), 1) + assert_equal(added_nodes[0]['addednode'], ip_port) + # check that a non-existent node returns an error + assert_raises_rpc_error(-24, "Node has not been added", + self.nodes[0].getaddednodeinfo, '1.1.1.1') + + def _test_getpeerinfo(self): + peer_info = [x.getpeerinfo() for x in self.nodes] + # check both sides of bidirectional connection between nodes + # the address bound to on one side will be the source address for the other node + assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr']) + assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr']) + +if __name__ == '__main__': + NetTest().main() diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py new file mode 100755 index 0000000000..960cd0ad12 --- /dev/null +++ b/test/functional/rpc_preciousblock.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
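preciousblock only changes the tie-break between chain tips of equal work: the precious block is treated as if it had been received before any competing block, but a chain with strictly more work still wins, as the test below demonstrates. The core pattern, sketched with the same calls the test uses (node, hash_c and hash_g stand for a running regtest node and two equal-work tips):

    node.preciousblock(hash_g)              # prefer tip G over the equal-work tip C
    assert node.getbestblockhash() == hash_g
    node.preciousblock(hash_c)              # the preference can be flipped back
    assert node.getbestblockhash() == hash_c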
+"""Test the preciousblock RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + connect_nodes_bi, + sync_chain, + sync_blocks, +) + +def unidirectional_node_sync_via_rpc(node_src, node_dest): + blocks_to_copy = [] + blockhash = node_src.getbestblockhash() + while True: + try: + assert(len(node_dest.getblock(blockhash, False)) > 0) + break + except: + blocks_to_copy.append(blockhash) + blockhash = node_src.getblockheader(blockhash, True)['previousblockhash'] + blocks_to_copy.reverse() + for blockhash in blocks_to_copy: + blockdata = node_src.getblock(blockhash, False) + assert(node_dest.submitblock(blockdata) in (None, 'inconclusive')) + +def node_sync_via_rpc(nodes): + for node_src in nodes: + for node_dest in nodes: + if node_src is node_dest: + continue + unidirectional_node_sync_via_rpc(node_src, node_dest) + +class PreciousTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + + def setup_network(self): + self.setup_nodes() + + def run_test(self): + self.log.info("Ensure submitblock can in principle reorg to a competing chain") + self.nodes[0].generate(1) + assert_equal(self.nodes[0].getblockcount(), 1) + hashZ = self.nodes[1].generate(2)[-1] + assert_equal(self.nodes[1].getblockcount(), 2) + node_sync_via_rpc(self.nodes[0:3]) + assert_equal(self.nodes[0].getbestblockhash(), hashZ) + + self.log.info("Mine blocks A-B-C on Node 0") + hashC = self.nodes[0].generate(3)[-1] + assert_equal(self.nodes[0].getblockcount(), 5) + self.log.info("Mine competing blocks E-F-G on Node 1") + hashG = self.nodes[1].generate(3)[-1] + assert_equal(self.nodes[1].getblockcount(), 5) + assert(hashC != hashG) + self.log.info("Connect nodes and check no reorg occurs") + # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync) + node_sync_via_rpc(self.nodes[0:2]) + connect_nodes_bi(self.nodes,0,1) + assert_equal(self.nodes[0].getbestblockhash(), hashC) + assert_equal(self.nodes[1].getbestblockhash(), hashG) + self.log.info("Make Node0 prefer block G") + self.nodes[0].preciousblock(hashG) + assert_equal(self.nodes[0].getbestblockhash(), hashG) + self.log.info("Make Node0 prefer block C again") + self.nodes[0].preciousblock(hashC) + assert_equal(self.nodes[0].getbestblockhash(), hashC) + self.log.info("Make Node1 prefer block C") + self.nodes[1].preciousblock(hashC) + sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC + assert_equal(self.nodes[1].getbestblockhash(), hashC) + self.log.info("Make Node1 prefer block G again") + self.nodes[1].preciousblock(hashG) + assert_equal(self.nodes[1].getbestblockhash(), hashG) + self.log.info("Make Node0 prefer block G again") + self.nodes[0].preciousblock(hashG) + assert_equal(self.nodes[0].getbestblockhash(), hashG) + self.log.info("Make Node1 prefer block C again") + self.nodes[1].preciousblock(hashC) + assert_equal(self.nodes[1].getbestblockhash(), hashC) + self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1") + self.nodes[0].generate(1) + assert_equal(self.nodes[0].getblockcount(), 6) + sync_blocks(self.nodes[0:2]) + hashH = self.nodes[0].getbestblockhash() + assert_equal(self.nodes[1].getbestblockhash(), hashH) + self.log.info("Node1 should not be able to prefer block C anymore") + self.nodes[1].preciousblock(hashC) + assert_equal(self.nodes[1].getbestblockhash(), hashH) + self.log.info("Mine competing blocks I-J-K-L on Node 2") + 
self.nodes[2].generate(4) + assert_equal(self.nodes[2].getblockcount(), 6) + hashL = self.nodes[2].getbestblockhash() + self.log.info("Connect nodes and check no reorg occurs") + node_sync_via_rpc(self.nodes[1:3]) + connect_nodes_bi(self.nodes,1,2) + connect_nodes_bi(self.nodes,0,2) + assert_equal(self.nodes[0].getbestblockhash(), hashH) + assert_equal(self.nodes[1].getbestblockhash(), hashH) + assert_equal(self.nodes[2].getbestblockhash(), hashL) + self.log.info("Make Node1 prefer block L") + self.nodes[1].preciousblock(hashL) + assert_equal(self.nodes[1].getbestblockhash(), hashL) + self.log.info("Make Node2 prefer block H") + self.nodes[2].preciousblock(hashH) + assert_equal(self.nodes[2].getbestblockhash(), hashH) + +if __name__ == '__main__': + PreciousTest().main() diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py new file mode 100755 index 0000000000..d39d86b310 --- /dev/null +++ b/test/functional/rpc_rawtransaction.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the rawtransaction RPCs. + +Test the following RPCs: + - createrawtransaction + - signrawtransaction + - sendrawtransaction + - decoderawtransaction + - getrawtransaction +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + + +class multidict(dict): + """Dictionary that allows duplicate keys. + + Constructed with a list of (key, value) tuples. When dumped by the json module, + will output invalid json with repeated keys, e.g.: + >>> json.dumps(multidict([(1,2),(1,2)])) + '{"1": 2, "1": 2}' + + Used to test calls to rpc methods with repeated keys in the json object.""" + + def __init__(self, x): + dict.__init__(self, x) + self.x = x + + def items(self): + return self.x + + +# Create one-input, one-output, no-fee transaction: +class RawTransactionsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]] + + def setup_network(self, split=False): + super().setup_network() + connect_nodes_bi(self.nodes,0,2) + + def run_test(self): + + #prepare some coins for multiple *rawtransaction commands + self.nodes[2].generate(1) + self.sync_all() + self.nodes[0].generate(101) + self.sync_all() + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5) + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0) + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0) + self.sync_all() + self.nodes[0].generate(5) + self.sync_all() + + # Test `createrawtransaction` required parameters + assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction) + assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, []) + + # Test `createrawtransaction` invalid extra parameters + assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo') + + # Test `createrawtransaction` invalid `inputs` + txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' + assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {}) + assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {}) + 
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {}) + assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {}) + assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {}) + assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {}) + assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {}) + assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {}) + + # Test `createrawtransaction` invalid `outputs` + address = self.nodes[0].getnewaddress() + assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo') + assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'}) + assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0}) + assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'}) + assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1}) + assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)])) + + # Test `createrawtransaction` invalid `locktime` + assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo') + assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1) + assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296) + + # Test `createrawtransaction` invalid `replaceable` + assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo') + + ######################################### + # sendrawtransaction with missing input # + ######################################### + inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists + outputs = { self.nodes[0].getnewaddress() : 4.998 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + rawtx = self.nodes[2].signrawtransaction(rawtx) + + # This will raise an exception since there are missing inputs + assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex']) + + ##################################### + # getrawtransaction with block hash # + ##################################### + + # make a tx by sending then generate 2 blocks; block1 has the tx in it + tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1) + block1, block2 = self.nodes[2].generate(2) + self.sync_all() + # We should be able to get the raw transaction by providing the correct block + gottx = self.nodes[0].getrawtransaction(tx, True, block1) + assert_equal(gottx['txid'], tx) + assert_equal(gottx['in_active_chain'], True) + # We should not have the 'in_active_chain' flag when we don't provide a block + gottx = self.nodes[0].getrawtransaction(tx, True) + assert_equal(gottx['txid'], tx) + assert 'in_active_chain' not in gottx + # We should not get 
the tx if we provide an unrelated block + assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2) + # An invalid block hash should raise the correct errors + assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True) + assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar") + assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234") + assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000") + # Undo the blocks and check in_active_chain + self.nodes[0].invalidateblock(block1) + gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1) + assert_equal(gottx['in_active_chain'], False) + self.nodes[0].reconsiderblock(block1) + assert_equal(self.nodes[0].getbestblockhash(), block2) + + ######################### + # RAW TX MULTISIG TESTS # + ######################### + # 2of2 test + addr1 = self.nodes[2].getnewaddress() + addr2 = self.nodes[2].getnewaddress() + + addr1Obj = self.nodes[2].validateaddress(addr1) + addr2Obj = self.nodes[2].validateaddress(addr2) + + # Tests for createmultisig and addmultisigaddress + assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"]) + self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys + assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here. 
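For reference, createmultisig only derives the P2SH address and redeem script, while addmultisigaddress additionally imports the script into the wallet, which is what the balance checks below rely on. A sketch of the result shape (field values abbreviated, illustrative only):

    res = node.createmultisig(2, [pubkey1_hex, pubkey2_hex])
    # res == {"address": "2N...", "redeemScript": "5221...52ae"}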
+ + mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address'] + + #use balance deltas instead of absolute values + bal = self.nodes[2].getbalance() + + # send 1.2 BTC to msig addr + txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 multisig addr, so the tx should affect its balance + + + # 2of3 test from different nodes + bal = self.nodes[2].getbalance() + addr1 = self.nodes[1].getnewaddress() + addr2 = self.nodes[2].getnewaddress() + addr3 = self.nodes[2].getnewaddress() + + addr1Obj = self.nodes[1].validateaddress(addr1) + addr2Obj = self.nodes[2].validateaddress(addr2) + addr3Obj = self.nodes[2].validateaddress(addr3) + + mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address'] + + txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) + decTx = self.nodes[0].gettransaction(txId) + rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + + #THIS IS AN INCOMPLETE FEATURE + #NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT IN THE BALANCE CALCULATION + assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable + + txDetails = self.nodes[0].gettransaction(txId, True) + rawTx = self.nodes[0].decoderawtransaction(txDetails['hex']) + vout = False + for outpoint in rawTx['vout']: + if outpoint['value'] == Decimal('2.20000000'): + vout = outpoint + break + + bal = self.nodes[0].getbalance() + inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}] + outputs = { self.nodes[0].getnewaddress() : 2.19 } + rawTx = self.nodes[2].createrawtransaction(inputs, outputs) + rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs) + assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't completely 
sign the tx + + rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs) + assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx completely, it owns two of three keys + self.nodes[2].sendrawtransaction(rawTxSigned['hex']) + rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex']) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx + + # 2of2 test for combining transactions + bal = self.nodes[2].getbalance() + addr1 = self.nodes[1].getnewaddress() + addr2 = self.nodes[2].getnewaddress() + + addr1Obj = self.nodes[1].validateaddress(addr1) + addr2Obj = self.nodes[2].validateaddress(addr2) + + self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] + mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] + mSigObjValid = self.nodes[2].validateaddress(mSigObj) + + txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) + decTx = self.nodes[0].gettransaction(txId) + rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex']) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + + assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable + + txDetails = self.nodes[0].gettransaction(txId, True) + rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex']) + vout = False + for outpoint in rawTx2['vout']: + if outpoint['value'] == Decimal('2.20000000'): + vout = outpoint + break + + bal = self.nodes[0].getbalance() + inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}] + outputs = { self.nodes[0].getnewaddress() : 2.19 } + rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs) + rawTxPartialSigned1 = self.nodes[1].signrawtransaction(rawTx2, inputs) + self.log.info(rawTxPartialSigned1) + assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't completely sign the tx + + rawTxPartialSigned2 = self.nodes[2].signrawtransaction(rawTx2, inputs) + self.log.info(rawTxPartialSigned2) + assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't completely 
sign the tx + rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']]) + self.log.info(rawTxComb) + self.nodes[2].sendrawtransaction(rawTxComb) + rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb) + self.sync_all() + self.nodes[0].generate(1) + self.sync_all() + assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx + + # decoderawtransaction tests + # witness transaction + encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000" + decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction + assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000')) + assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction + # non-witness transaction + encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000" + decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction + assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000')) + + # getrawtransaction tests + # 1. valid parameters - only supply txid + txHash = rawTx["hash"] + assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex']) + + # 2. valid parameters - supply txid and 0 for non-verbose + assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex']) + + # 3. valid parameters - supply txid and False for non-verbose + assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex']) + + # 4. valid parameters - supply txid and 1 for verbose. + # We only check the "hex" field of the output so we don't need to update this test every time the output format changes. + assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex']) + + # 5. valid parameters - supply txid and True for verbose + assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex']) + + # 6. invalid parameters - supply txid and string "Flase" + assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase") + + # 7. invalid parameters - supply txid and empty array + assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, []) + + # 8. invalid parameters - supply txid and empty dict + assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {}) + + inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}] + outputs = { self.nodes[0].getnewaddress() : 1 } + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + decrawtx= self.nodes[0].decoderawtransaction(rawtx) + assert_equal(decrawtx['vin'][0]['sequence'], 1000) + + # 9. invalid parameters - sequence number out of range + inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}] + outputs = { self.nodes[0].getnewaddress() : 1 } + assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) + + # 10. 
invalid parameters - sequence number out of range + inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}] + outputs = { self.nodes[0].getnewaddress() : 1 } + assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) + + inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}] + outputs = { self.nodes[0].getnewaddress() : 1 } + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + decrawtx= self.nodes[0].decoderawtransaction(rawtx) + assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) + +if __name__ == '__main__': + RawTransactionsTest().main() diff --git a/test/functional/rpc_signmessage.py b/test/functional/rpc_signmessage.py new file mode 100755 index 0000000000..5b6935ceea --- /dev/null +++ b/test/functional/rpc_signmessage.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test RPC commands for signing and verifying messages.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class SignMessagesTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [["-addresstype=legacy"]] + + def run_test(self): + message = 'This is just a test message' + + self.log.info('test signing with priv_key') + priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N' + address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB' + expected_signature = 'INbVnW4e6PeRmsv2Qgu8NuopvrVjkcxob+sX8OcZG0SALhWybUjzMLPdAsXI46YZGb0KQTRii+wWIQzRpG/U+S0=' + signature = self.nodes[0].signmessagewithprivkey(priv_key, message) + assert_equal(expected_signature, signature) + assert(self.nodes[0].verifymessage(address, signature, message)) + + self.log.info('test signing with an address with wallet') + address = self.nodes[0].getnewaddress() + signature = self.nodes[0].signmessage(address, message) + assert(self.nodes[0].verifymessage(address, signature, message)) + + self.log.info('test verifying with another address should not work') + other_address = self.nodes[0].getnewaddress() + other_signature = self.nodes[0].signmessage(other_address, message) + assert(not self.nodes[0].verifymessage(other_address, signature, message)) + assert(not self.nodes[0].verifymessage(address, other_signature, message)) + +if __name__ == '__main__': + SignMessagesTest().main() diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py new file mode 100755 index 0000000000..dd0fa6c02c --- /dev/null +++ b/test/functional/rpc_signrawtransaction.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
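signrawtransaction can sign with no wallet context at all when the caller supplies the previous outputs and private keys inline, which is how the test below stays deterministic. A minimal usage sketch (txid, script_pub_key_hex and wif_key are assumed known out-of-band):

    prevtxs = [{"txid": txid, "vout": 0, "scriptPubKey": script_pub_key_hex}]
    signed = node.signrawtransaction(raw_tx_hex, prevtxs, [wif_key])
    if signed["complete"]:
        node.sendrawtransaction(signed["hex"])
    else:
        print(signed.get("errors", []))  # per-input failures are reported here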
+"""Test transaction signing using the signrawtransaction RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + + +class SignRawTransactionsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def successful_signing_test(self): + """Create and sign a valid raw transaction with one input. + + Expected results: + + 1) The transaction has a complete set of signatures + 2) No script verification error occurred""" + privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA'] + + inputs = [ + # Valid pay-to-pubkey scripts + {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, + 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, + {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0, + 'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'}, + ] + + outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} + + rawTx = self.nodes[0].createrawtransaction(inputs, outputs) + rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys) + + # 1) The transaction has a complete set of signatures + assert 'complete' in rawTxSigned + assert_equal(rawTxSigned['complete'], True) + + # 2) No script verification error occurred + assert 'errors' not in rawTxSigned + + def script_verification_error_test(self): + """Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script. + + Expected results: + + 3) The transaction has no complete set of signatures + 4) Two script verification errors occurred + 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error") + 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)""" + privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'] + + inputs = [ + # Valid pay-to-pubkey script + {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0}, + # Invalid script + {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7}, + # Missing scriptPubKey + {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1}, + ] + + scripts = [ + # Valid pay-to-pubkey script + {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, + 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, + # Invalid script + {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7, + 'scriptPubKey': 'badbadbadbad'} + ] + + outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} + + rawTx = self.nodes[0].createrawtransaction(inputs, outputs) + + # Make sure decoderawtransaction is at least marginally sane + decodedRawTx = self.nodes[0].decoderawtransaction(rawTx) + for i, inp in enumerate(inputs): + assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"]) + assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"]) + + # Make sure decoderawtransaction throws if there is extra data + assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00") + + rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys) + + # 3) The transaction has no complete set of signatures + assert 'complete' in rawTxSigned + assert_equal(rawTxSigned['complete'], False) + + # 4) Two script verification errors 
occurred + assert 'errors' in rawTxSigned + assert_equal(len(rawTxSigned['errors']), 2) + + # 5) Script verification errors have certain properties + assert 'txid' in rawTxSigned['errors'][0] + assert 'vout' in rawTxSigned['errors'][0] + assert 'witness' in rawTxSigned['errors'][0] + assert 'scriptSig' in rawTxSigned['errors'][0] + assert 'sequence' in rawTxSigned['errors'][0] + assert 'error' in rawTxSigned['errors'][0] + + # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2) + assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid']) + assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout']) + assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid']) + assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout']) + assert not rawTxSigned['errors'][0]['witness'] + + # Now test signing failure for transaction with input witnesses + p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000" + + rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx) + + # 7) The transaction has no complete set of signatures + assert 'complete' in rawTxSigned + assert_equal(rawTxSigned['complete'], False) + + # 8) Two script verification errors occurred + assert 'errors' in rawTxSigned + assert_equal(len(rawTxSigned['errors']), 2) + + # 9) Script verification errors have certain properties + assert 'txid' in rawTxSigned['errors'][0] + assert 'vout' in rawTxSigned['errors'][0] + assert 'witness' in rawTxSigned['errors'][0] + assert 'scriptSig' in rawTxSigned['errors'][0] + assert 'sequence' in rawTxSigned['errors'][0] + assert 'error' in rawTxSigned['errors'][0] + + # Non-empty witness checked here + assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"]) + assert not rawTxSigned['errors'][0]['witness'] + + def run_test(self): + self.successful_signing_test() + self.script_verification_error_test() + + +if __name__ == '__main__': + SignRawTransactionsTest().main() diff --git a/test/functional/rpc_txoutproof.py b/test/functional/rpc_txoutproof.py new file mode 100755 index 0000000000..50e0371fdf --- /dev/null +++ b/test/functional/rpc_txoutproof.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
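gettxoutproof serializes the block header plus a partial merkle tree covering the requested transactions; verifytxoutproof recomputes the merkle root, checks it against a block header the node knows, and returns the committed txids. A round-trip sketch (illustrative; the tx must be locatable, i.e. unspent, pinned to a block hash, or found via -txindex):

    proof = node.gettxoutproof([txid])
    assert node.verifytxoutproof(proof) == [txid]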
+"""Test gettxoutproof and verifytxoutproof RPCs.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class MerkleBlockTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + self.setup_clean_chain = True + # Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing + self.extra_args = [[], [], [], ["-txindex"]] + + def setup_network(self): + self.setup_nodes() + connect_nodes(self.nodes[0], 1) + connect_nodes(self.nodes[0], 2) + connect_nodes(self.nodes[0], 3) + + self.sync_all() + + def run_test(self): + self.log.info("Mining blocks...") + self.nodes[0].generate(105) + self.sync_all() + + chain_height = self.nodes[1].getblockcount() + assert_equal(chain_height, 105) + assert_equal(self.nodes[1].getbalance(), 0) + assert_equal(self.nodes[2].getbalance(), 0) + + node0utxos = self.nodes[0].listunspent(1) + tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) + txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"]) + tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) + txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"]) + # This will raise an exception because the transaction is not yet in a block + assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1]) + + self.nodes[0].generate(1) + blockhash = self.nodes[0].getblockhash(chain_height + 1) + self.sync_all() + + txlist = [] + blocktxn = self.nodes[0].getblock(blockhash, True)["tx"] + txlist.append(blocktxn[1]) + txlist.append(blocktxn[2]) + + assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1]) + assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist) + assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist) + + txin_spent = self.nodes[1].listunspent(1).pop() + tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98}) + txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"]) + self.nodes[0].generate(1) + self.sync_all() + + txid_spent = txin_spent["txid"] + txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2 + + # We can't find the block from a fully-spent tx + assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent]) + # We can get the proof if we specify the block + assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent]) + # We can't get the proof if we specify a non-existent block + assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000") + # We can get the proof if the transaction is unspent + assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent]) + # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter. 
+ assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist)) + assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist)) + # We can always get a proof if we have a -txindex + assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent]) + # We can't get a proof if we specify transactions from different blocks + assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3]) + + +if __name__ == '__main__': + MerkleBlockTest().main() diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py new file mode 100755 index 0000000000..78236b2393 --- /dev/null +++ b/test/functional/rpc_uptime.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the RPC call related to the uptime command. + +Test corresponds to code in rpc/server.cpp. +""" + +import time + +from test_framework.test_framework import BitcoinTestFramework + + +class UptimeTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def run_test(self): + self._test_uptime() + + def _test_uptime(self): + wait_time = 10 + self.nodes[0].setmocktime(int(time.time() + wait_time)) + assert(self.nodes[0].uptime() >= wait_time) + + +if __name__ == '__main__': + UptimeTest().main() diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py new file mode 100755 index 0000000000..01f68344ae --- /dev/null +++ b/test/functional/rpc_users.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
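The rpcauth entries used below have the form user:salt$hmac, where the digest is HMAC-SHA256 keyed with the hex-encoded salt over the password, so bitcoind never stores the password itself. A sketch of the derivation, assumed to mirror the share/rpcuser/rpcuser.py tool:

    import hmac
    from binascii import hexlify
    from hashlib import sha256
    from os import urandom

    def rpcauth_line(user, password):
        salt = hexlify(urandom(16)).decode()  # random per-user salt
        digest = hmac.new(salt.encode(), password.encode(), sha256).hexdigest()
        return "rpcauth={}:{}${}".format(user, salt, digest)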
+"""Test multiple RPC users.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import str_to_b64str, assert_equal + +import os +import http.client +import urllib.parse + +class HTTPBasicsTest (BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + + def setup_chain(self): + super().setup_chain() + #Append rpcauth to bitcoin.conf before initialization + rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144" + rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e" + rpcuser = "rpcuser=rpcuser💻" + rpcpassword = "rpcpassword=rpcpassword🔑" + with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a', encoding='utf8') as f: + f.write(rpcauth+"\n") + f.write(rpcauth2+"\n") + with open(os.path.join(self.options.tmpdir+"/node1", "bitcoin.conf"), 'a', encoding='utf8') as f: + f.write(rpcuser+"\n") + f.write(rpcpassword+"\n") + + def run_test(self): + + ################################################## + # Check correctness of the rpcauth config option # + ################################################## + url = urllib.parse.urlparse(self.nodes[0].url) + + #Old authpair + authpair = url.username + ':' + url.password + + #New authpair generated via share/rpcuser tool + password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM=" + + #Second authpair with different username + password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI=" + authpairnew = "rt:"+password + + headers = {"Authorization": "Basic " + str_to_b64str(authpair)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 200) + conn.close() + + #Use new authpair to confirm both work + headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 200) + conn.close() + + #Wrong login name with rt's password + authpairnew = "rtwrong:"+password + headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 401) + conn.close() + + #Wrong password for rt + authpairnew = "rt:"+password+"wrong" + headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 401) + conn.close() + + #Correct for rt2 + authpairnew = "rt2:"+password2 + headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 200) + conn.close() + + #Wrong password for rt2 + authpairnew = "rt2:"+password2+"wrong" + headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', 
'{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 401) + conn.close() + + ############################################################### + # Check correctness of the rpcuser/rpcpassword config options # + ############################################################### + url = urllib.parse.urlparse(self.nodes[1].url) + + # rpcuser and rpcpassword authpair + rpcuserauthpair = "rpcuser💻:rpcpassword🔑" + + headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 200) + conn.close() + + #Wrong login name with rpcuser's password + rpcuserauthpair = "rpcuserwrong:rpcpassword" + headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 401) + conn.close() + + #Wrong password for rpcuser + rpcuserauthpair = "rpcuser:rpcpasswordwrong" + headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + resp = conn.getresponse() + assert_equal(resp.status, 401) + conn.close() + + +if __name__ == '__main__': + HTTPBasicsTest ().main () diff --git a/test/functional/rpcbind_test.py b/test/functional/rpcbind_test.py deleted file mode 100755 index 05433c7e24..0000000000 --- a/test/functional/rpcbind_test.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test running bitcoind with the -rpcbind and -rpcallowip options.""" - -import socket -import sys - -from test_framework.test_framework import BitcoinTestFramework, SkipTest -from test_framework.util import * -from test_framework.netutil import * - -class RPCBindTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def setup_network(self): - self.add_nodes(self.num_nodes, None) - - def run_bind_test(self, allow_ips, connect_to, addresses, expected): - ''' - Start a node with requested rpcallowip and rpcbind parameters, - then try to connect, and check if the set of bound addresses - matches the expected set. - ''' - self.log.info("Bind test for %s" % str(addresses)) - expected = [(addr_to_hex(addr), port) for (addr, port) in expected] - base_args = ['-disablewallet', '-nolisten'] - if allow_ips: - base_args += ['-rpcallowip=' + x for x in allow_ips] - binds = ['-rpcbind='+addr for addr in addresses] - self.nodes[0].rpchost = connect_to - self.start_node(0, base_args + binds) - pid = self.nodes[0].process.pid - assert_equal(set(get_bind_addrs(pid)), set(expected)) - self.stop_nodes() - - def run_allowip_test(self, allow_ips, rpchost, rpcport): - ''' - Start a node with rpcallow IP, and request getnetworkinfo - at a non-localhost IP. 
- ''' - self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport)) - base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips] - self.nodes[0].rpchost = None - self.start_nodes([base_args]) - # connect to node through non-loopback interface - node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir) - node.getnetworkinfo() - self.stop_nodes() - - def run_test(self): - # due to OS-specific network stats queries, this test works only on Linux - if not sys.platform.startswith('linux'): - raise SkipTest("This test can only be run on linux.") - # find the first non-loopback interface for testing - non_loopback_ip = None - for name,ip in all_interfaces(): - if ip != '127.0.0.1': - non_loopback_ip = ip - break - if non_loopback_ip is None: - raise SkipTest("This test requires at least one non-loopback IPv4 interface.") - try: - s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) - s.connect(("::1",1)) - s.close - except OSError: - raise SkipTest("This test requires IPv6 support.") - - self.log.info("Using interface %s for testing" % non_loopback_ip) - - defaultport = rpc_port(0) - - # check default without rpcallowip (IPv4 and IPv6 localhost) - self.run_bind_test(None, '127.0.0.1', [], - [('127.0.0.1', defaultport), ('::1', defaultport)]) - # check default with rpcallowip (IPv6 any) - self.run_bind_test(['127.0.0.1'], '127.0.0.1', [], - [('::0', defaultport)]) - # check only IPv4 localhost (explicit) - self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'], - [('127.0.0.1', defaultport)]) - # check only IPv4 localhost (explicit) with alternative port - self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'], - [('127.0.0.1', 32171)]) - # check only IPv4 localhost (explicit) with multiple alternative ports on same host - self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'], - [('127.0.0.1', 32171), ('127.0.0.1', 32172)]) - # check only IPv6 localhost (explicit) - self.run_bind_test(['[::1]'], '[::1]', ['[::1]'], - [('::1', defaultport)]) - # check both IPv4 and IPv6 localhost (explicit) - self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'], - [('127.0.0.1', defaultport), ('::1', defaultport)]) - # check only non-loopback interface - self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip], - [(non_loopback_ip, defaultport)]) - - # Check that with invalid rpcallowip, we are denied - self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport) - assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport) - -if __name__ == '__main__': - RPCBindTest().main() diff --git a/test/functional/rpcnamedargs.py b/test/functional/rpcnamedargs.py deleted file mode 100755 index 97bee39614..0000000000 --- a/test/functional/rpcnamedargs.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test using named arguments for RPCs.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_raises_rpc_error, -) - -class NamedArgumentTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - - def run_test(self): - node = self.nodes[0] - h = node.help(command='getblockchaininfo') - assert(h.startswith('getblockchaininfo\n')) - - assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo') - - h = node.getblockhash(height=0) - node.getblock(blockhash=h) - - assert_equal(node.echo(), []) - assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9]) - assert_equal(node.echo(arg1=1), [None, 1]) - assert_equal(node.echo(arg9=None), [None]*10) - assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9]) - -if __name__ == '__main__': - NamedArgumentTest().main() diff --git a/test/functional/signmessages.py b/test/functional/signmessages.py deleted file mode 100755 index 5b6935ceea..0000000000 --- a/test/functional/signmessages.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test RPC commands for signing and verifying messages.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class SignMessagesTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [["-addresstype=legacy"]] - - def run_test(self): - message = 'This is just a test message' - - self.log.info('test signing with priv_key') - priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N' - address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB' - expected_signature = 'INbVnW4e6PeRmsv2Qgu8NuopvrVjkcxob+sX8OcZG0SALhWybUjzMLPdAsXI46YZGb0KQTRii+wWIQzRpG/U+S0=' - signature = self.nodes[0].signmessagewithprivkey(priv_key, message) - assert_equal(expected_signature, signature) - assert(self.nodes[0].verifymessage(address, signature, message)) - - self.log.info('test signing with an address with wallet') - address = self.nodes[0].getnewaddress() - signature = self.nodes[0].signmessage(address, message) - assert(self.nodes[0].verifymessage(address, signature, message)) - - self.log.info('test verifying with another address should not work') - other_address = self.nodes[0].getnewaddress() - other_signature = self.nodes[0].signmessage(other_address, message) - assert(not self.nodes[0].verifymessage(other_address, signature, message)) - assert(not self.nodes[0].verifymessage(address, other_signature, message)) - -if __name__ == '__main__': - SignMessagesTest().main() diff --git a/test/functional/signrawtransactions.py b/test/functional/signrawtransactions.py deleted file mode 100755 index dd0fa6c02c..0000000000 --- a/test/functional/signrawtransactions.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test transaction signing using the signrawtransaction RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - - -class SignRawTransactionsTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def successful_signing_test(self): - """Create and sign a valid raw transaction with one input. - - Expected results: - - 1) The transaction has a complete set of signatures - 2) No script verification error occurred""" - privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA'] - - inputs = [ - # Valid pay-to-pubkey scripts - {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, - 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, - {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0, - 'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'}, - ] - - outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} - - rawTx = self.nodes[0].createrawtransaction(inputs, outputs) - rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys) - - # 1) The transaction has a complete set of signatures - assert 'complete' in rawTxSigned - assert_equal(rawTxSigned['complete'], True) - - # 2) No script verification error occurred - assert 'errors' not in rawTxSigned - - def script_verification_error_test(self): - """Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script. - - Expected results: - - 3) The transaction has no complete set of signatures - 4) Two script verification errors occurred - 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error") - 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)""" - privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'] - - inputs = [ - # Valid pay-to-pubkey script - {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0}, - # Invalid script - {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7}, - # Missing scriptPubKey - {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1}, - ] - - scripts = [ - # Valid pay-to-pubkey script - {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, - 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, - # Invalid script - {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7, - 'scriptPubKey': 'badbadbadbad'} - ] - - outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} - - rawTx = self.nodes[0].createrawtransaction(inputs, outputs) - - # Make sure decoderawtransaction is at least marginally sane - decodedRawTx = self.nodes[0].decoderawtransaction(rawTx) - for i, inp in enumerate(inputs): - assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"]) - assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"]) - - # Make sure decoderawtransaction throws if there is extra data - assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00") - - rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys) - - # 3) The transaction has no complete set of signatures - assert 'complete' in rawTxSigned - assert_equal(rawTxSigned['complete'], False) - - # 4) Two script verification errors 
occurred - assert 'errors' in rawTxSigned - assert_equal(len(rawTxSigned['errors']), 2) - - # 5) Script verification errors have certain properties - assert 'txid' in rawTxSigned['errors'][0] - assert 'vout' in rawTxSigned['errors'][0] - assert 'witness' in rawTxSigned['errors'][0] - assert 'scriptSig' in rawTxSigned['errors'][0] - assert 'sequence' in rawTxSigned['errors'][0] - assert 'error' in rawTxSigned['errors'][0] - - # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2) - assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid']) - assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout']) - assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid']) - assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout']) - assert not rawTxSigned['errors'][0]['witness'] - - # Now test signing failure for transaction with input witnesses - p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000" - - rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx) - - # 7) The transaction has no complete set of signatures - assert 'complete' in rawTxSigned - assert_equal(rawTxSigned['complete'], False) - - # 8) Two script verification errors occurred - assert 'errors' in rawTxSigned - assert_equal(len(rawTxSigned['errors']), 2) - - # 9) Script verification errors have certain properties - assert 'txid' in rawTxSigned['errors'][0] - assert 'vout' in rawTxSigned['errors'][0] - assert 'witness' in rawTxSigned['errors'][0] - assert 'scriptSig' in rawTxSigned['errors'][0] - assert 'sequence' in rawTxSigned['errors'][0] - assert 'error' in rawTxSigned['errors'][0] - - # Non-empty witness checked here - assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"]) - assert not rawTxSigned['errors'][0]['witness'] - - def run_test(self): - self.successful_signing_test() - self.script_verification_error_test() - - -if __name__ == '__main__': - SignRawTransactionsTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 26beef5a52..d93e0e018e 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -59,7 +59,7 @@ BASE_SCRIPTS= [ 'wallet_backup.py', # vv Tests less than 5m vv 'feature_block.py', - 'fundrawtransaction.py', + 'rpc_fundrawtransaction.py', 'p2p_compactblocks.py', 'feature_segwit.py', # vv Tests less than 2m vv @@ -67,17 +67,17 @@ BASE_SCRIPTS= [ 'wallet_accounts.py', 'p2p_segwit.py', 'wallet_dump.py', - 'listtransactions.py', + 'rpc_listtransactions.py', # vv Tests less than 60s vv 'p2p_sendheaders.py', 'wallet_zapwallettxes.py', 'wallet_importmulti.py', 'mempool_limit.py', - 'merkle_blocks.py', + 'rpc_txoutproof.py', 
'wallet_listreceivedby.py', 'wallet_abandonconflict.py', 'feature_csv_activation.py', - 'rawtransactions.py', + 'rpc_rawtransaction.py', 'wallet_address_types.py', 'feature_reindex.py', # vv Tests less than 30s vv @@ -88,7 +88,7 @@ BASE_SCRIPTS= [ 'wallet_txn_doublespend.py --mineblock', 'wallet_txn_clone.py', 'wallet_txn_clone.py --segwit', - 'getchaintips.py', + 'rpc_getchaintips.py', 'rest.py', 'mempool_spendcoinbase.py', 'mempool_reorg.py', @@ -96,35 +96,35 @@ BASE_SCRIPTS= [ 'wallet_multiwallet.py', 'wallet_multiwallet.py --usecli', 'httpbasics.py', - 'multi_rpc.py', + 'rpc_users.py', 'feature_proxy.py', - 'signrawtransactions.py', + 'rpc_signrawtransaction.py', 'p2p_disconnect_ban.py', - 'decodescript.py', - 'blockchain.py', - 'deprecated_rpc.py', + 'rpc_decodescript.py', + 'rpc_blockchain.py', + 'rpc_deprecated.py', 'wallet_disable.py', - 'net.py', + 'rpc_net.py', 'wallet_keypool.py', 'p2p_mempool.py', 'prioritise_transaction.py', 'p2p_invalid_block.py', 'p2p_invalid_tx.py', 'feature_versionbits_warning.py', - 'preciousblock.py', + 'rpc_preciousblock.py', 'wallet_importprunedfunds.py', - 'signmessages.py', + 'rpc_signmessage.py', 'feature_nulldummy.py', 'wallet_import_rescan.py', 'mining.py', 'wallet_bumpfee.py', - 'rpcnamedargs.py', + 'rpc_named_arguments.py', 'wallet_listsinceblock.py', 'p2p_leak.py', 'wallet_encryption.py', 'feature_dersig.py', 'feature_cltv.py', - 'uptime.py', + 'rpc_uptime.py', 'wallet_resendwallettransactions.py', 'feature_minchainwork.py', 'p2p_fingerprint.py', @@ -154,14 +154,14 @@ EXTENDED_SCRIPTS = [ # vv Tests less than 60s vv 'feature_bip9_softforks.py', 'p2p_feefilter.py', - 'rpcbind_test.py', + 'rpc_bind.py', # vv Tests less than 30s vv 'feature_assumevalid.py', 'example_test.py', 'wallet_txn_doublespend.py', 'wallet_txn_clone.py --mineblock', 'feature_notifications.py', - 'invalidateblock.py', + 'rpc_invalidateblock.py', 'feature_rbf.py', ] @@ -474,7 +474,7 @@ class TestResult(): def check_script_prefixes(): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" - EXPECTED_VIOLATION_COUNT = 24 + EXPECTED_VIOLATION_COUNT = 7 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming diff --git a/test/functional/uptime.py b/test/functional/uptime.py deleted file mode 100755 index 78236b2393..0000000000 --- a/test/functional/uptime.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the RPC call related to the uptime command. - -Test corresponds to code in rpc/server.cpp. -""" - -import time - -from test_framework.test_framework import BitcoinTestFramework - - -class UptimeTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - - def run_test(self): - self._test_uptime() - - def _test_uptime(self): - wait_time = 10 - self.nodes[0].setmocktime(int(time.time() + wait_time)) - assert(self.nodes[0].uptime() >= wait_time) - - -if __name__ == '__main__': - UptimeTest().main() -- cgit v1.2.3 From 3150b3fea732df23c6273f43102cc64783aaee9c Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Thu, 25 Jan 2018 09:44:30 +1000 Subject: [tests] Rename misc functional tests. 
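This commit applies the same area_topic naming convention to the remaining miscellaneous tests (interface_*, mempool_*, mining_*). As a rough sanity check only -- the RENAMES table below is transcribed from this commit's diffstat, and the snippet itself is illustrative rather than part of the test suite -- the new names can be verified against the convention that test_runner.py enforces:

    import re

    # Allowed area prefixes; mirrors good_prefixes_re in test_runner.py.
    GOOD_PREFIXES_RE = re.compile(r"(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")

    # Old name -> new name, as applied by this commit.
    RENAMES = {
        "bitcoin_cli.py": "interface_bitcoin_cli.py",
        "httpbasics.py": "interface_http.py",
        "rest.py": "interface_rest.py",
        "zmq_test.py": "interface_zmq.py",
        "mempool_resurrect_test.py": "mempool_resurrect.py",
        "mempool_spendcoinbase.py": "mempool_spend_coinbase.py",
        "getblocktemplate_longpoll.py": "mining_getblocktemplate_longpoll.py",
        "mining.py": "mining_basic.py",
        "prioritise_transaction.py": "mining_prioritisetransaction.py",
    }

    for old, new in RENAMES.items():
        assert GOOD_PREFIXES_RE.match(new), "%s still violates the naming convention" % new

Every new name introduced below passes this check.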
--- test/functional/bitcoin_cli.py | 65 ---- test/functional/getblocktemplate_longpoll.py | 71 ----- test/functional/httpbasics.py | 108 ------- test/functional/interface_bitcoin_cli.py | 65 ++++ test/functional/interface_http.py | 108 +++++++ test/functional/interface_rest.py | 326 +++++++++++++++++++++ test/functional/interface_zmq.py | 126 ++++++++ test/functional/mempool_resurrect.py | 68 +++++ test/functional/mempool_resurrect_test.py | 68 ----- test/functional/mempool_spend_coinbase.py | 53 ++++ test/functional/mempool_spendcoinbase.py | 53 ---- test/functional/mining.py | 135 --------- test/functional/mining_basic.py | 135 +++++++++ .../functional/mining_getblocktemplate_longpoll.py | 71 +++++ test/functional/mining_prioritisetransaction.py | 147 ++++++++++ test/functional/prioritise_transaction.py | 147 ---------- test/functional/rest.py | 326 --------------------- test/functional/test_runner.py | 20 +- test/functional/zmq_test.py | 126 -------- 19 files changed, 1109 insertions(+), 1109 deletions(-) delete mode 100755 test/functional/bitcoin_cli.py delete mode 100755 test/functional/getblocktemplate_longpoll.py delete mode 100755 test/functional/httpbasics.py create mode 100755 test/functional/interface_bitcoin_cli.py create mode 100755 test/functional/interface_http.py create mode 100755 test/functional/interface_rest.py create mode 100755 test/functional/interface_zmq.py create mode 100755 test/functional/mempool_resurrect.py delete mode 100755 test/functional/mempool_resurrect_test.py create mode 100755 test/functional/mempool_spend_coinbase.py delete mode 100755 test/functional/mempool_spendcoinbase.py delete mode 100755 test/functional/mining.py create mode 100755 test/functional/mining_basic.py create mode 100755 test/functional/mining_getblocktemplate_longpoll.py create mode 100755 test/functional/mining_prioritisetransaction.py delete mode 100755 test/functional/prioritise_transaction.py delete mode 100755 test/functional/rest.py delete mode 100755 test/functional/zmq_test.py (limited to 'test') diff --git a/test/functional/bitcoin_cli.py b/test/functional/bitcoin_cli.py deleted file mode 100755 index d8c80ab34f..0000000000 --- a/test/functional/bitcoin_cli.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test bitcoin-cli""" -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie - -class TestBitcoinCli(BitcoinTestFramework): - - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def run_test(self): - """Main test logic""" - - self.log.info("Compare responses from gewalletinfo RPC and `bitcoin-cli getwalletinfo`") - cli_response = self.nodes[0].cli.getwalletinfo() - rpc_response = self.nodes[0].getwalletinfo() - assert_equal(cli_response, rpc_response) - - self.log.info("Compare responses from getblockchaininfo RPC and `bitcoin-cli getblockchaininfo`") - cli_response = self.nodes[0].cli.getblockchaininfo() - rpc_response = self.nodes[0].getblockchaininfo() - assert_equal(cli_response, rpc_response) - - user, password = get_auth_cookie(self.nodes[0].datadir) - - self.log.info("Test -stdinrpcpass option") - assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount()) - assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo) - - self.log.info("Test -stdin and -stdinrpcpass") - assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo()) - assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo) - - self.log.info("Make sure that -getinfo with arguments fails") - assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help) - - self.log.info("Compare responses from `bitcoin-cli -getinfo` and the RPCs data is retrieved from.") - cli_get_info = self.nodes[0].cli('-getinfo').send_cli() - wallet_info = self.nodes[0].getwalletinfo() - network_info = self.nodes[0].getnetworkinfo() - blockchain_info = self.nodes[0].getblockchaininfo() - - assert_equal(cli_get_info['version'], network_info['version']) - assert_equal(cli_get_info['protocolversion'], network_info['protocolversion']) - assert_equal(cli_get_info['walletversion'], wallet_info['walletversion']) - assert_equal(cli_get_info['balance'], wallet_info['balance']) - assert_equal(cli_get_info['blocks'], blockchain_info['blocks']) - assert_equal(cli_get_info['timeoffset'], network_info['timeoffset']) - assert_equal(cli_get_info['connections'], network_info['connections']) - assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy']) - assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty']) - assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test") - assert_equal(cli_get_info['balance'], wallet_info['balance']) - assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest']) - assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize']) - assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee']) - assert_equal(cli_get_info['relayfee'], network_info['relayfee']) - # unlocked_until is not tested because the wallet is not encrypted - -if __name__ == '__main__': - TestBitcoinCli().main() diff --git a/test/functional/getblocktemplate_longpoll.py b/test/functional/getblocktemplate_longpoll.py deleted file mode 100755 index 252ff4dbff..0000000000 --- a/test/functional/getblocktemplate_longpoll.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed 
under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test longpolling with getblocktemplate.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -import threading - -class LongpollThread(threading.Thread): - def __init__(self, node): - threading.Thread.__init__(self) - # query current longpollid - templat = node.getblocktemplate() - self.longpollid = templat['longpollid'] - # create a new connection to the node, we can't use the same - # connection from two threads - self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir) - - def run(self): - self.node.getblocktemplate({'longpollid':self.longpollid}) - -class GetBlockTemplateLPTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - - def run_test(self): - self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.") - self.nodes[0].generate(10) - templat = self.nodes[0].getblocktemplate() - longpollid = templat['longpollid'] - # longpollid should not change between successive invocations if nothing else happens - templat2 = self.nodes[0].getblocktemplate() - assert(templat2['longpollid'] == longpollid) - - # Test 1: test that the longpolling wait if we do nothing - thr = LongpollThread(self.nodes[0]) - thr.start() - # check that thread still lives - thr.join(5) # wait 5 seconds or until thread exits - assert(thr.is_alive()) - - # Test 2: test that longpoll will terminate if another node generates a block - self.nodes[1].generate(1) # generate a block on another node - # check that thread will exit now that new transaction entered mempool - thr.join(5) # wait 5 seconds or until thread exits - assert(not thr.is_alive()) - - # Test 3: test that longpoll will terminate if we generate a block ourselves - thr = LongpollThread(self.nodes[0]) - thr.start() - self.nodes[0].generate(1) # generate a block on another node - thr.join(5) # wait 5 seconds or until thread exits - assert(not thr.is_alive()) - - # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll - thr = LongpollThread(self.nodes[0]) - thr.start() - # generate a random transaction and submit it - min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"] - # min_relay_fee is fee per 1000 bytes, which should be more than enough. - (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20) - # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned - thr.join(60 + 20) - assert(not thr.is_alive()) - -if __name__ == '__main__': - GetBlockTemplateLPTest().main() - diff --git a/test/functional/httpbasics.py b/test/functional/httpbasics.py deleted file mode 100755 index cd6d744545..0000000000 --- a/test/functional/httpbasics.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the RPC HTTP basics.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -import http.client -import urllib.parse - -class HTTPBasicsTest (BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 3 - - def setup_network(self): - self.setup_nodes() - - def run_test(self): - - ################################################# - # lowlevel check for http persistent connection # - ################################################# - url = urllib.parse.urlparse(self.nodes[0].url) - authpair = url.username + ':' + url.password - headers = {"Authorization": "Basic " + str_to_b64str(authpair)} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - out1 = conn.getresponse().read() - assert(b'"error":null' in out1) - assert(conn.sock!=None) #according to http/1.1 connection must still be open! - - #send 2nd request without closing connection - conn.request('POST', '/', '{"method": "getchaintips"}', headers) - out1 = conn.getresponse().read() - assert(b'"error":null' in out1) #must also response with a correct json-rpc message - assert(conn.sock!=None) #according to http/1.1 connection must still be open! - conn.close() - - #same should be if we add keep-alive because this should be the std. behaviour - headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - out1 = conn.getresponse().read() - assert(b'"error":null' in out1) - assert(conn.sock!=None) #according to http/1.1 connection must still be open! - - #send 2nd request without closing connection - conn.request('POST', '/', '{"method": "getchaintips"}', headers) - out1 = conn.getresponse().read() - assert(b'"error":null' in out1) #must also response with a correct json-rpc message - assert(conn.sock!=None) #according to http/1.1 connection must still be open! 
- conn.close() - - #now do the same with "Connection: close" - headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"} - - conn = http.client.HTTPConnection(url.hostname, url.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - out1 = conn.getresponse().read() - assert(b'"error":null' in out1) - assert(conn.sock==None) #now the connection must be closed after the response - - #node1 (2nd node) is running with disabled keep-alive option - urlNode1 = urllib.parse.urlparse(self.nodes[1].url) - authpair = urlNode1.username + ':' + urlNode1.password - headers = {"Authorization": "Basic " + str_to_b64str(authpair)} - - conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - out1 = conn.getresponse().read() - assert(b'"error":null' in out1) - - #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on - urlNode2 = urllib.parse.urlparse(self.nodes[2].url) - authpair = urlNode2.username + ':' + urlNode2.password - headers = {"Authorization": "Basic " + str_to_b64str(authpair)} - - conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) - conn.connect() - conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) - out1 = conn.getresponse().read() - assert(b'"error":null' in out1) - assert(conn.sock!=None) #connection must be closed because bitcoind should use keep-alive by default - - # Check excessive request size - conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) - conn.connect() - conn.request('GET', '/' + ('x'*1000), '', headers) - out1 = conn.getresponse() - assert_equal(out1.status, http.client.NOT_FOUND) - - conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) - conn.connect() - conn.request('GET', '/' + ('x'*10000), '', headers) - out1 = conn.getresponse() - assert_equal(out1.status, http.client.BAD_REQUEST) - - -if __name__ == '__main__': - HTTPBasicsTest ().main () diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py new file mode 100755 index 0000000000..d8c80ab34f --- /dev/null +++ b/test/functional/interface_bitcoin_cli.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test bitcoin-cli""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie + +class TestBitcoinCli(BitcoinTestFramework): + + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + """Main test logic""" + + self.log.info("Compare responses from gewalletinfo RPC and `bitcoin-cli getwalletinfo`") + cli_response = self.nodes[0].cli.getwalletinfo() + rpc_response = self.nodes[0].getwalletinfo() + assert_equal(cli_response, rpc_response) + + self.log.info("Compare responses from getblockchaininfo RPC and `bitcoin-cli getblockchaininfo`") + cli_response = self.nodes[0].cli.getblockchaininfo() + rpc_response = self.nodes[0].getblockchaininfo() + assert_equal(cli_response, rpc_response) + + user, password = get_auth_cookie(self.nodes[0].datadir) + + self.log.info("Test -stdinrpcpass option") + assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount()) + assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo) + + self.log.info("Test -stdin and -stdinrpcpass") + assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo()) + assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo) + + self.log.info("Make sure that -getinfo with arguments fails") + assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help) + + self.log.info("Compare responses from `bitcoin-cli -getinfo` and the RPCs data is retrieved from.") + cli_get_info = self.nodes[0].cli('-getinfo').send_cli() + wallet_info = self.nodes[0].getwalletinfo() + network_info = self.nodes[0].getnetworkinfo() + blockchain_info = self.nodes[0].getblockchaininfo() + + assert_equal(cli_get_info['version'], network_info['version']) + assert_equal(cli_get_info['protocolversion'], network_info['protocolversion']) + assert_equal(cli_get_info['walletversion'], wallet_info['walletversion']) + assert_equal(cli_get_info['balance'], wallet_info['balance']) + assert_equal(cli_get_info['blocks'], blockchain_info['blocks']) + assert_equal(cli_get_info['timeoffset'], network_info['timeoffset']) + assert_equal(cli_get_info['connections'], network_info['connections']) + assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy']) + assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty']) + assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test") + assert_equal(cli_get_info['balance'], wallet_info['balance']) + assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest']) + assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize']) + assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee']) + assert_equal(cli_get_info['relayfee'], network_info['relayfee']) + # unlocked_until is not tested because the wallet is not encrypted + +if __name__ == '__main__': + TestBitcoinCli().main() diff --git a/test/functional/interface_http.py b/test/functional/interface_http.py new file mode 100755 index 0000000000..cd6d744545 --- /dev/null +++ b/test/functional/interface_http.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see 
the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the RPC HTTP basics.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +import http.client +import urllib.parse + +class HTTPBasicsTest (BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 3 + + def setup_network(self): + self.setup_nodes() + + def run_test(self): + + ################################################# + # lowlevel check for http persistent connection # + ################################################# + url = urllib.parse.urlparse(self.nodes[0].url) + authpair = url.username + ':' + url.password + headers = {"Authorization": "Basic " + str_to_b64str(authpair)} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + out1 = conn.getresponse().read() + assert(b'"error":null' in out1) + assert(conn.sock!=None) #according to http/1.1 connection must still be open! + + #send 2nd request without closing connection + conn.request('POST', '/', '{"method": "getchaintips"}', headers) + out1 = conn.getresponse().read() + assert(b'"error":null' in out1) #must also response with a correct json-rpc message + assert(conn.sock!=None) #according to http/1.1 connection must still be open! + conn.close() + + #same should be if we add keep-alive because this should be the std. behaviour + headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + out1 = conn.getresponse().read() + assert(b'"error":null' in out1) + assert(conn.sock!=None) #according to http/1.1 connection must still be open! + + #send 2nd request without closing connection + conn.request('POST', '/', '{"method": "getchaintips"}', headers) + out1 = conn.getresponse().read() + assert(b'"error":null' in out1) #must also response with a correct json-rpc message + assert(conn.sock!=None) #according to http/1.1 connection must still be open! 
+ conn.close() + + #now do the same with "Connection: close" + headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"} + + conn = http.client.HTTPConnection(url.hostname, url.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + out1 = conn.getresponse().read() + assert(b'"error":null' in out1) + assert(conn.sock==None) #now the connection must be closed after the response + + #node1 (2nd node) is running with disabled keep-alive option + urlNode1 = urllib.parse.urlparse(self.nodes[1].url) + authpair = urlNode1.username + ':' + urlNode1.password + headers = {"Authorization": "Basic " + str_to_b64str(authpair)} + + conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + out1 = conn.getresponse().read() + assert(b'"error":null' in out1) + + #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on + urlNode2 = urllib.parse.urlparse(self.nodes[2].url) + authpair = urlNode2.username + ':' + urlNode2.password + headers = {"Authorization": "Basic " + str_to_b64str(authpair)} + + conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) + conn.connect() + conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) + out1 = conn.getresponse().read() + assert(b'"error":null' in out1) + assert(conn.sock!=None) #connection must be closed because bitcoind should use keep-alive by default + + # Check excessive request size + conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) + conn.connect() + conn.request('GET', '/' + ('x'*1000), '', headers) + out1 = conn.getresponse() + assert_equal(out1.status, http.client.NOT_FOUND) + + conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) + conn.connect() + conn.request('GET', '/' + ('x'*10000), '', headers) + out1 = conn.getresponse() + assert_equal(out1.status, http.client.BAD_REQUEST) + + +if __name__ == '__main__': + HTTPBasicsTest ().main () diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py new file mode 100755 index 0000000000..9006e27cbe --- /dev/null +++ b/test/functional/interface_rest.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the REST API.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from struct import * +from io import BytesIO +from codecs import encode + +import http.client +import urllib.parse + +def deser_uint256(f): + r = 0 + for i in range(8): + t = unpack(b" 0) + + # Use invalidateblock to re-org back; all transactions should + # end up unconfirmed and back in the mempool + for node in self.nodes: + node.invalidateblock(blocks[0]) + + # mempool should be empty, all txns confirmed + assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id)) + for txid in spends1_id+spends2_id: + tx = self.nodes[0].gettransaction(txid) + assert(tx["confirmations"] == 0) + + # Generate another block, they should all get mined + self.nodes[0].generate(1) + # mempool should be empty, all txns confirmed + assert_equal(set(self.nodes[0].getrawmempool()), set()) + for txid in spends1_id+spends2_id: + tx = self.nodes[0].gettransaction(txid) + assert(tx["confirmations"] > 0) + + +if __name__ == '__main__': + MempoolCoinbaseTest().main() diff --git a/test/functional/mempool_resurrect_test.py b/test/functional/mempool_resurrect_test.py deleted file mode 100755 index 83e84da4bc..0000000000 --- a/test/functional/mempool_resurrect_test.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test resurrection of mined transactions when the blockchain is re-organized.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -# Create one-input, one-output, no-fee transaction: -class MempoolCoinbaseTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [["-checkmempool"]] - - def run_test(self): - node0_address = self.nodes[0].getnewaddress() - # Spend block 1/2/3's coinbase transactions - # Mine a block. - # Create three more transactions, spending the spends - # Mine another block. - # ... make sure all the transactions are confirmed - # Invalidate both blocks - # ... make sure all the transactions are put back in the mempool - # Mine a new block - # ... make sure all the transactions are confirmed again. 
- - b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ] - coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ] - spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ] - spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ] - - blocks = [] - blocks.extend(self.nodes[0].generate(1)) - - spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id ] - spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ] - - blocks.extend(self.nodes[0].generate(1)) - - # mempool should be empty, all txns confirmed - assert_equal(set(self.nodes[0].getrawmempool()), set()) - for txid in spends1_id+spends2_id: - tx = self.nodes[0].gettransaction(txid) - assert(tx["confirmations"] > 0) - - # Use invalidateblock to re-org back; all transactions should - # end up unconfirmed and back in the mempool - for node in self.nodes: - node.invalidateblock(blocks[0]) - - # mempool should be empty, all txns confirmed - assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id)) - for txid in spends1_id+spends2_id: - tx = self.nodes[0].gettransaction(txid) - assert(tx["confirmations"] == 0) - - # Generate another block, they should all get mined - self.nodes[0].generate(1) - # mempool should be empty, all txns confirmed - assert_equal(set(self.nodes[0].getrawmempool()), set()) - for txid in spends1_id+spends2_id: - tx = self.nodes[0].gettransaction(txid) - assert(tx["confirmations"] > 0) - - -if __name__ == '__main__': - MempoolCoinbaseTest().main() diff --git a/test/functional/mempool_spend_coinbase.py b/test/functional/mempool_spend_coinbase.py new file mode 100755 index 0000000000..db0738c08a --- /dev/null +++ b/test/functional/mempool_spend_coinbase.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test spending coinbase transactions. + +The coinbase transaction in block N can appear in block +N+100... so is valid in the mempool when the best block +height is N+99. +This test makes sure coinbase spends that will be mature +in the next block are accepted into the memory pool, +but less mature coinbase spends are NOT. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +# Create one-input, one-output, no-fee transaction: +class MempoolSpendCoinbaseTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [["-checkmempool"]] + + def run_test(self): + chain_height = self.nodes[0].getblockcount() + assert_equal(chain_height, 200) + node0_address = self.nodes[0].getnewaddress() + + # Coinbase at height chain_height-100+1 ok in mempool, should + # get mined. Coinbase at height chain_height-100+2 is + # is too immature to spend. 
+ b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ] + coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ] + spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ] + + spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0]) + + # coinbase at height 102 should be too immature to spend + assert_raises_rpc_error(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1]) + + # mempool should have just spend_101: + assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ]) + + # mine a block, spend_101 should get confirmed + self.nodes[0].generate(1) + assert_equal(set(self.nodes[0].getrawmempool()), set()) + + # ... and now height 102 can be spent: + spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1]) + assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ]) + +if __name__ == '__main__': + MempoolSpendCoinbaseTest().main() diff --git a/test/functional/mempool_spendcoinbase.py b/test/functional/mempool_spendcoinbase.py deleted file mode 100755 index db0738c08a..0000000000 --- a/test/functional/mempool_spendcoinbase.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test spending coinbase transactions. - -The coinbase transaction in block N can appear in block -N+100... so is valid in the mempool when the best block -height is N+99. -This test makes sure coinbase spends that will be mature -in the next block are accepted into the memory pool, -but less mature coinbase spends are NOT. -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -# Create one-input, one-output, no-fee transaction: -class MempoolSpendCoinbaseTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [["-checkmempool"]] - - def run_test(self): - chain_height = self.nodes[0].getblockcount() - assert_equal(chain_height, 200) - node0_address = self.nodes[0].getnewaddress() - - # Coinbase at height chain_height-100+1 ok in mempool, should - # get mined. Coinbase at height chain_height-100+2 is - # is too immature to spend. - b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ] - coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ] - spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ] - - spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0]) - - # coinbase at height 102 should be too immature to spend - assert_raises_rpc_error(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1]) - - # mempool should have just spend_101: - assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ]) - - # mine a block, spend_101 should get confirmed - self.nodes[0].generate(1) - assert_equal(set(self.nodes[0].getrawmempool()), set()) - - # ... 
and now height 102 can be spent: - spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1]) - assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ]) - -if __name__ == '__main__': - MempoolSpendCoinbaseTest().main() diff --git a/test/functional/mining.py b/test/functional/mining.py deleted file mode 100755 index 569bf71933..0000000000 --- a/test/functional/mining.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test mining RPCs - -- getmininginfo -- getblocktemplate proposal mode -- submitblock""" - -import copy -from binascii import b2a_hex -from decimal import Decimal - -from test_framework.blocktools import create_coinbase -from test_framework.mininode import CBlock -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_raises_rpc_error - -def b2x(b): - return b2a_hex(b).decode('ascii') - -def assert_template(node, block, expect, rehash=True): - if rehash: - block.hashMerkleRoot = block.calc_merkle_root() - rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'}) - assert_equal(rsp, expect) - -class MiningTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.setup_clean_chain = False - - def run_test(self): - node = self.nodes[0] - - self.log.info('getmininginfo') - mining_info = node.getmininginfo() - assert_equal(mining_info['blocks'], 200) - assert_equal(mining_info['chain'], 'regtest') - assert_equal(mining_info['currentblocktx'], 0) - assert_equal(mining_info['currentblockweight'], 0) - assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10')) - assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334')) - assert_equal(mining_info['pooledtx'], 0) - - # Mine a block to leave initial block download - node.generate(1) - tmpl = node.getblocktemplate() - self.log.info("getblocktemplate: Test capability advertised") - assert 'proposal' in tmpl['capabilities'] - assert 'coinbasetxn' not in tmpl - - coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1) - # sequence numbers must not be max for nLockTime to have effect - coinbase_tx.vin[0].nSequence = 2 ** 32 - 2 - coinbase_tx.rehash() - - block = CBlock() - block.nVersion = tmpl["version"] - block.hashPrevBlock = int(tmpl["previousblockhash"], 16) - block.nTime = tmpl["curtime"] - block.nBits = int(tmpl["bits"], 16) - block.nNonce = 0 - block.vtx = [coinbase_tx] - - self.log.info("getblocktemplate: Test valid block") - assert_template(node, block, None) - - self.log.info("submitblock: Test block decode failure") - assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15])) - - self.log.info("getblocktemplate: Test bad input hash for coinbase transaction") - bad_block = copy.deepcopy(block) - bad_block.vtx[0].vin[0].prevout.hash += 1 - bad_block.vtx[0].rehash() - assert_template(node, bad_block, 'bad-cb-missing') - - self.log.info("submitblock: Test invalid coinbase transaction") - assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize())) - - self.log.info("getblocktemplate: Test truncated final transaction") - assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'}) - - 
self.log.info("getblocktemplate: Test duplicate transaction") - bad_block = copy.deepcopy(block) - bad_block.vtx.append(bad_block.vtx[0]) - assert_template(node, bad_block, 'bad-txns-duplicate') - - self.log.info("getblocktemplate: Test invalid transaction") - bad_block = copy.deepcopy(block) - bad_tx = copy.deepcopy(bad_block.vtx[0]) - bad_tx.vin[0].prevout.hash = 255 - bad_tx.rehash() - bad_block.vtx.append(bad_tx) - assert_template(node, bad_block, 'bad-txns-inputs-missingorspent') - - self.log.info("getblocktemplate: Test nonfinal transaction") - bad_block = copy.deepcopy(block) - bad_block.vtx[0].nLockTime = 2 ** 32 - 1 - bad_block.vtx[0].rehash() - assert_template(node, bad_block, 'bad-txns-nonfinal') - - self.log.info("getblocktemplate: Test bad tx count") - # The tx count is immediately after the block header - TX_COUNT_OFFSET = 80 - bad_block_sn = bytearray(block.serialize()) - assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1) - bad_block_sn[TX_COUNT_OFFSET] += 1 - assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'}) - - self.log.info("getblocktemplate: Test bad bits") - bad_block = copy.deepcopy(block) - bad_block.nBits = 469762303 # impossible in the real world - assert_template(node, bad_block, 'bad-diffbits') - - self.log.info("getblocktemplate: Test bad merkle root") - bad_block = copy.deepcopy(block) - bad_block.hashMerkleRoot += 1 - assert_template(node, bad_block, 'bad-txnmrklroot', False) - - self.log.info("getblocktemplate: Test bad timestamps") - bad_block = copy.deepcopy(block) - bad_block.nTime = 2 ** 31 - 1 - assert_template(node, bad_block, 'time-too-new') - bad_block.nTime = 0 - assert_template(node, bad_block, 'time-too-old') - - self.log.info("getblocktemplate: Test not best block") - bad_block = copy.deepcopy(block) - bad_block.hashPrevBlock = 123 - assert_template(node, bad_block, 'inconclusive-not-best-prevblk') - -if __name__ == '__main__': - MiningTest().main() diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py new file mode 100755 index 0000000000..569bf71933 --- /dev/null +++ b/test/functional/mining_basic.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test mining RPCs + +- getmininginfo +- getblocktemplate proposal mode +- submitblock""" + +import copy +from binascii import b2a_hex +from decimal import Decimal + +from test_framework.blocktools import create_coinbase +from test_framework.mininode import CBlock +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_raises_rpc_error + +def b2x(b): + return b2a_hex(b).decode('ascii') + +def assert_template(node, block, expect, rehash=True): + if rehash: + block.hashMerkleRoot = block.calc_merkle_root() + rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'}) + assert_equal(rsp, expect) + +class MiningTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = False + + def run_test(self): + node = self.nodes[0] + + self.log.info('getmininginfo') + mining_info = node.getmininginfo() + assert_equal(mining_info['blocks'], 200) + assert_equal(mining_info['chain'], 'regtest') + assert_equal(mining_info['currentblocktx'], 0) + assert_equal(mining_info['currentblockweight'], 0) + assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10')) + assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334')) + assert_equal(mining_info['pooledtx'], 0) + + # Mine a block to leave initial block download + node.generate(1) + tmpl = node.getblocktemplate() + self.log.info("getblocktemplate: Test capability advertised") + assert 'proposal' in tmpl['capabilities'] + assert 'coinbasetxn' not in tmpl + + coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1) + # sequence numbers must not be max for nLockTime to have effect + coinbase_tx.vin[0].nSequence = 2 ** 32 - 2 + coinbase_tx.rehash() + + block = CBlock() + block.nVersion = tmpl["version"] + block.hashPrevBlock = int(tmpl["previousblockhash"], 16) + block.nTime = tmpl["curtime"] + block.nBits = int(tmpl["bits"], 16) + block.nNonce = 0 + block.vtx = [coinbase_tx] + + self.log.info("getblocktemplate: Test valid block") + assert_template(node, block, None) + + self.log.info("submitblock: Test block decode failure") + assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15])) + + self.log.info("getblocktemplate: Test bad input hash for coinbase transaction") + bad_block = copy.deepcopy(block) + bad_block.vtx[0].vin[0].prevout.hash += 1 + bad_block.vtx[0].rehash() + assert_template(node, bad_block, 'bad-cb-missing') + + self.log.info("submitblock: Test invalid coinbase transaction") + assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize())) + + self.log.info("getblocktemplate: Test truncated final transaction") + assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'}) + + self.log.info("getblocktemplate: Test duplicate transaction") + bad_block = copy.deepcopy(block) + bad_block.vtx.append(bad_block.vtx[0]) + assert_template(node, bad_block, 'bad-txns-duplicate') + + self.log.info("getblocktemplate: Test invalid transaction") + bad_block = copy.deepcopy(block) + bad_tx = copy.deepcopy(bad_block.vtx[0]) + bad_tx.vin[0].prevout.hash = 255 + bad_tx.rehash() + bad_block.vtx.append(bad_tx) + assert_template(node, bad_block, 'bad-txns-inputs-missingorspent') + + self.log.info("getblocktemplate: Test nonfinal transaction") + bad_block = copy.deepcopy(block) + bad_block.vtx[0].nLockTime = 2 ** 32 - 1 + 
bad_block.vtx[0].rehash()
+        assert_template(node, bad_block, 'bad-txns-nonfinal')
+
+        self.log.info("getblocktemplate: Test bad tx count")
+        # The tx count is immediately after the block header
+        TX_COUNT_OFFSET = 80
+        bad_block_sn = bytearray(block.serialize())
+        assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
+        bad_block_sn[TX_COUNT_OFFSET] += 1
+        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
+
+        self.log.info("getblocktemplate: Test bad bits")
+        bad_block = copy.deepcopy(block)
+        bad_block.nBits = 469762303 # impossible in the real world
+        assert_template(node, bad_block, 'bad-diffbits')
+
+        self.log.info("getblocktemplate: Test bad merkle root")
+        bad_block = copy.deepcopy(block)
+        bad_block.hashMerkleRoot += 1
+        assert_template(node, bad_block, 'bad-txnmrklroot', False)
+
+        self.log.info("getblocktemplate: Test bad timestamps")
+        bad_block = copy.deepcopy(block)
+        bad_block.nTime = 2 ** 31 - 1
+        assert_template(node, bad_block, 'time-too-new')
+        bad_block.nTime = 0
+        assert_template(node, bad_block, 'time-too-old')
+
+        self.log.info("getblocktemplate: Test not best block")
+        bad_block = copy.deepcopy(block)
+        bad_block.hashPrevBlock = 123
+        assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
+
+if __name__ == '__main__':
+    MiningTest().main()
diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py
new file mode 100755
index 0000000000..252ff4dbff
--- /dev/null
+++ b/test/functional/mining_getblocktemplate_longpoll.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test longpolling with getblocktemplate."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+import threading
+
+class LongpollThread(threading.Thread):
+    def __init__(self, node):
+        threading.Thread.__init__(self)
+        # query current longpollid
+        templat = node.getblocktemplate()
+        self.longpollid = templat['longpollid']
+        # create a new connection to the node, we can't use the same
+        # connection from two threads
+        self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
+
+    def run(self):
+        self.node.getblocktemplate({'longpollid':self.longpollid})
+
+class GetBlockTemplateLPTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 2
+
+    def run_test(self):
+        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
+        self.nodes[0].generate(10)
+        templat = self.nodes[0].getblocktemplate()
+        longpollid = templat['longpollid']
+        # longpollid should not change between successive invocations if nothing else happens
+        templat2 = self.nodes[0].getblocktemplate()
+        assert(templat2['longpollid'] == longpollid)
+
+        # Test 1: test that the longpoll wait blocks if we do nothing
+        thr = LongpollThread(self.nodes[0])
+        thr.start()
+        # check that thread still lives
+        thr.join(5) # wait 5 seconds or until thread exits
+        assert(thr.is_alive())
+
+        # Test 2: test that longpoll will terminate if another node generates a block
+        self.nodes[1].generate(1) # generate a block on another node
+        # check that the thread will exit now that a new block was mined
+        thr.join(5) # wait 5 seconds or until thread exits
+        assert(not thr.is_alive())
+
+        # Test 3: test that longpoll will terminate if we generate a block ourselves
+        thr = LongpollThread(self.nodes[0])
+        thr.start()
+        self.nodes[0].generate(1) # generate a block on our own node
+        thr.join(5) # wait 5 seconds or until thread exits
+        assert(not thr.is_alive())
+
+        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
+        thr = LongpollThread(self.nodes[0])
+        thr.start()
+        # generate a random transaction and submit it
+        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
+        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
+        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
+        # the mempool is probed every 10 seconds after the first minute, so within 80 seconds the longpoll should have returned
+        thr.join(60 + 20)
+        assert(not thr.is_alive())
+
+if __name__ == '__main__':
+    GetBlockTemplateLPTest().main()
+
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py
new file mode 100755
index 0000000000..57954ce321
--- /dev/null
+++ b/test/functional/mining_prioritisetransaction.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the prioritisetransaction mining RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE + +class PrioritiseTransactionTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + self.extra_args = [["-printpriority=1"], ["-printpriority=1"]] + + def run_test(self): + # Test `prioritisetransaction` required parameters + assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction) + assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '') + assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0) + + # Test `prioritisetransaction` invalid extra parameters + assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0) + + # Test `prioritisetransaction` invalid `txid` + assert_raises_rpc_error(-1, "txid must be hexadecimal string", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0) + + # Test `prioritisetransaction` invalid `dummy` + txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' + assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0) + assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0) + + # Test `prioritisetransaction` invalid `fee_delta` + assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo') + + self.txouts = gen_return_txouts() + self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] + + utxo_count = 90 + utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count) + base_fee = self.relayfee*100 # our transactions are smaller than 100kb + txids = [] + + # Create 3 batches of transactions at 3 different fee rate levels + range_size = utxo_count // 3 + for i in range(3): + txids.append([]) + start_range = i * range_size + end_range = start_range + range_size + txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee) + + # Make sure that the size of each group of transactions exceeds + # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create + # more transactions. + mempool = self.nodes[0].getrawmempool(True) + sizes = [0, 0, 0] + for i in range(3): + for j in txids[i]: + assert(j in mempool) + sizes[i] += mempool[j]['size'] + assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count + + # add a fee delta to something in the cheapest bucket and make sure it gets mined + # also check that a different entry in the cheapest bucket is NOT mined + self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN)) + + self.nodes[0].generate(1) + + mempool = self.nodes[0].getrawmempool() + self.log.info("Assert that prioritised transaction was mined") + assert(txids[0][0] not in mempool) + assert(txids[0][1] in mempool) + + high_fee_tx = None + for x in txids[2]: + if x not in mempool: + high_fee_tx = x + + # Something high-fee should have been mined! + assert(high_fee_tx != None) + + # Add a prioritisation before a tx is in the mempool (de-prioritising a + # high-fee transaction so that it's now low fee). 
+ self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN)) + + # Add everything back to mempool + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + # Check to make sure our high fee rate tx is back in the mempool + mempool = self.nodes[0].getrawmempool() + assert(high_fee_tx in mempool) + + # Now verify the modified-high feerate transaction isn't mined before + # the other high fee transactions. Keep mining until our mempool has + # decreased by all the high fee size that we calculated above. + while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): + self.nodes[0].generate(1) + + # High fee transaction should not have been mined, but other high fee rate + # transactions should have been. + mempool = self.nodes[0].getrawmempool() + self.log.info("Assert that de-prioritised transaction is still in mempool") + assert(high_fee_tx in mempool) + for x in txids[2]: + if (x != high_fee_tx): + assert(x not in mempool) + + # Create a free transaction. Should be rejected. + utxo_list = self.nodes[0].listunspent() + assert(len(utxo_list) > 0) + utxo = utxo_list[0] + + inputs = [] + outputs = {} + inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]}) + outputs[self.nodes[0].getnewaddress()] = utxo["amount"] + raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) + tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"] + tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"] + + # This will raise an exception due to min relay fee not being met + assert_raises_rpc_error(-26, "66: min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex) + assert(tx_id not in self.nodes[0].getrawmempool()) + + # This is a less than 1000-byte transaction, so just set the fee + # to be the minimum for a 1000 byte transaction and check that it is + # accepted. + self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN)) + + self.log.info("Assert that prioritised free transaction is accepted to mempool") + assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id) + assert(tx_id in self.nodes[0].getrawmempool()) + + # Test that calling prioritisetransaction is sufficient to trigger + # getblocktemplate to (eventually) return a new block. + mock_time = int(time.time()) + self.nodes[0].setmocktime(mock_time) + template = self.nodes[0].getblocktemplate() + self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN)) + self.nodes[0].setmocktime(mock_time+10) + new_template = self.nodes[0].getblocktemplate() + + assert(template != new_template) + +if __name__ == '__main__': + PrioritiseTransactionTest().main() diff --git a/test/functional/prioritise_transaction.py b/test/functional/prioritise_transaction.py deleted file mode 100755 index 57954ce321..0000000000 --- a/test/functional/prioritise_transaction.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the prioritisetransaction mining RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE - -class PrioritiseTransactionTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - self.extra_args = [["-printpriority=1"], ["-printpriority=1"]] - - def run_test(self): - # Test `prioritisetransaction` required parameters - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction) - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '') - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0) - - # Test `prioritisetransaction` invalid extra parameters - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0) - - # Test `prioritisetransaction` invalid `txid` - assert_raises_rpc_error(-1, "txid must be hexadecimal string", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0) - - # Test `prioritisetransaction` invalid `dummy` - txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' - assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0) - assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0) - - # Test `prioritisetransaction` invalid `fee_delta` - assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo') - - self.txouts = gen_return_txouts() - self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] - - utxo_count = 90 - utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count) - base_fee = self.relayfee*100 # our transactions are smaller than 100kb - txids = [] - - # Create 3 batches of transactions at 3 different fee rate levels - range_size = utxo_count // 3 - for i in range(3): - txids.append([]) - start_range = i * range_size - end_range = start_range + range_size - txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee) - - # Make sure that the size of each group of transactions exceeds - # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create - # more transactions. - mempool = self.nodes[0].getrawmempool(True) - sizes = [0, 0, 0] - for i in range(3): - for j in txids[i]: - assert(j in mempool) - sizes[i] += mempool[j]['size'] - assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count - - # add a fee delta to something in the cheapest bucket and make sure it gets mined - # also check that a different entry in the cheapest bucket is NOT mined - self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN)) - - self.nodes[0].generate(1) - - mempool = self.nodes[0].getrawmempool() - self.log.info("Assert that prioritised transaction was mined") - assert(txids[0][0] not in mempool) - assert(txids[0][1] in mempool) - - high_fee_tx = None - for x in txids[2]: - if x not in mempool: - high_fee_tx = x - - # Something high-fee should have been mined! - assert(high_fee_tx != None) - - # Add a prioritisation before a tx is in the mempool (de-prioritising a - # high-fee transaction so that it's now low fee). 
diff --git a/test/functional/rest.py b/test/functional/rest.py
deleted file mode 100755
index 9006e27cbe..0000000000
--- a/test/functional/rest.py
+++ /dev/null
@@ -1,326 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2014-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the REST API."""
-
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
-from struct import *
-from io import BytesIO
-from codecs import encode
-
-import http.client
-import urllib.parse
-
-def deser_uint256(f):
-    r = 0
-    for i in range(8):
-        t = unpack(b"<I", f.read(4))[0]
-        r += t << (i * 32)
-    return r
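[Editor's note: the remainder of the deleted rest.py was lost in extraction and is not reproduced here. The deser_uint256 helper above reassembles a 256-bit integer from eight little-endian 32-bit words; a self-contained check of that behaviour, standard library only, with an arbitrary example byte string:

from io import BytesIO
from struct import unpack

def deser_uint256(f):  # same logic as the deleted helper above
    r = 0
    for i in range(8):
        r += unpack(b"<I", f.read(4))[0] << (i * 32)
    return r

# Word i lands at bit offset 32*i, so the result equals a plain
# little-endian interpretation of the full 32 bytes:
raw = bytes(range(32))
assert deser_uint256(BytesIO(raw)) == int.from_bytes(raw, "little")]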
-"""Test the REST API.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from struct import * -from io import BytesIO -from codecs import encode - -import http.client -import urllib.parse - -def deser_uint256(f): - r = 0 - for i in range(8): - t = unpack(b" Date: Thu, 25 Jan 2018 09:44:30 +1000 Subject: [tests] Remove EXPECTED_VIOLATION_COUNT --- test/functional/test_runner.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'test') diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 1915406966..98944685e1 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -472,9 +472,8 @@ class TestResult(): def check_script_prefixes(): - """Check that no more than `EXPECTED_VIOLATION_COUNT` of the + """Check that at most a handful of the test scripts don't start with one of the allowed name prefixes.""" - EXPECTED_VIOLATION_COUNT = 0 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming @@ -484,13 +483,10 @@ def check_script_prefixes(): good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_") bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None] - if len(bad_script_names) < EXPECTED_VIOLATION_COUNT: - print("{}HURRAY!{} Number of functional tests violating naming convention reduced!".format(BOLD[1], BOLD[0])) - print("Consider reducing EXPECTED_VIOLATION_COUNT from %d to %d" % (EXPECTED_VIOLATION_COUNT, len(bad_script_names))) - elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT: - print("INFO: %d tests not meeting naming conventions (expected %d):" % (len(bad_script_names), EXPECTED_VIOLATION_COUNT)) + if len(bad_script_names) > 0: + print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names))) print(" %s" % ("\n ".join(sorted(bad_script_names)))) - assert len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + LEEWAY, "Too many tests not following naming convention! (%d found, expected: <= %d)" % (len(bad_script_names), EXPECTED_VIOLATION_COUNT) + assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY) def check_script_list(src_dir): -- cgit v1.2.3