From ca6523d0c8a44e0b9193367d1250a7d428d61be3 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Thu, 25 Jan 2018 09:44:29 +1000 Subject: [tests] Rename feature_* functional tests. --- test/functional/README.md | 2 +- test/functional/assumevalid.py | 204 ---- test/functional/bip65-cltv-p2p.py | 160 --- test/functional/bip68-112-113-p2p.py | 534 ---------- test/functional/bip68-sequence.py | 395 -------- test/functional/bip9-softforks.py | 283 ------ test/functional/bipdersig-p2p.py | 153 --- test/functional/conf_args.py | 49 - test/functional/dbcrash.py | 282 ------ test/functional/feature_assumevalid.py | 204 ++++ test/functional/feature_bip68_sequence.py | 395 ++++++++ test/functional/feature_bip9_softforks.py | 283 ++++++ test/functional/feature_block.py | 1293 ++++++++++++++++++++++++ test/functional/feature_cltv.py | 160 +++ test/functional/feature_config_args.py | 49 + test/functional/feature_csv_activation.py | 534 ++++++++++ test/functional/feature_dbcrash.py | 282 ++++++ test/functional/feature_dersig.py | 153 +++ test/functional/feature_fee_estimation.py | 262 +++++ test/functional/feature_maxuploadtarget.py | 169 ++++ test/functional/feature_minchainwork.py | 89 ++ test/functional/feature_notifications.py | 86 ++ test/functional/feature_nulldummy.py | 133 +++ test/functional/feature_proxy.py | 201 ++++ test/functional/feature_pruning.py | 454 +++++++++ test/functional/feature_rbf.py | 565 +++++++++++ test/functional/feature_reindex.py | 40 + test/functional/feature_segwit.py | 638 ++++++++++++ test/functional/feature_uacomment.py | 35 + test/functional/feature_versionbits_warning.py | 121 +++ test/functional/maxuploadtarget.py | 169 ---- test/functional/minchainwork.py | 89 -- test/functional/notifications.py | 86 -- test/functional/nulldummy.py | 133 --- test/functional/p2p-fullblocktest.py | 1293 ------------------------ test/functional/p2p-versionbits-warning.py | 121 --- test/functional/proxy_test.py | 201 ---- test/functional/pruning.py | 454 --------- test/functional/reindex.py | 40 - test/functional/replace-by-fee.py | 565 ----------- test/functional/segwit.py | 638 ------------ test/functional/smartfees.py | 262 ----- test/functional/test_runner.py | 44 +- test/functional/uacomment.py | 35 - 44 files changed, 6169 insertions(+), 6169 deletions(-) delete mode 100755 test/functional/assumevalid.py delete mode 100755 test/functional/bip65-cltv-p2p.py delete mode 100755 test/functional/bip68-112-113-p2p.py delete mode 100755 test/functional/bip68-sequence.py delete mode 100755 test/functional/bip9-softforks.py delete mode 100755 test/functional/bipdersig-p2p.py delete mode 100755 test/functional/conf_args.py delete mode 100755 test/functional/dbcrash.py create mode 100755 test/functional/feature_assumevalid.py create mode 100755 test/functional/feature_bip68_sequence.py create mode 100755 test/functional/feature_bip9_softforks.py create mode 100755 test/functional/feature_block.py create mode 100755 test/functional/feature_cltv.py create mode 100755 test/functional/feature_config_args.py create mode 100755 test/functional/feature_csv_activation.py create mode 100755 test/functional/feature_dbcrash.py create mode 100755 test/functional/feature_dersig.py create mode 100755 test/functional/feature_fee_estimation.py create mode 100755 test/functional/feature_maxuploadtarget.py create mode 100755 test/functional/feature_minchainwork.py create mode 100755 test/functional/feature_notifications.py create mode 100755 test/functional/feature_nulldummy.py create mode 100755 
test/functional/feature_proxy.py create mode 100755 test/functional/feature_pruning.py create mode 100755 test/functional/feature_rbf.py create mode 100755 test/functional/feature_reindex.py create mode 100755 test/functional/feature_segwit.py create mode 100755 test/functional/feature_uacomment.py create mode 100755 test/functional/feature_versionbits_warning.py delete mode 100755 test/functional/maxuploadtarget.py delete mode 100755 test/functional/minchainwork.py delete mode 100755 test/functional/notifications.py delete mode 100755 test/functional/nulldummy.py delete mode 100755 test/functional/p2p-fullblocktest.py delete mode 100755 test/functional/p2p-versionbits-warning.py delete mode 100755 test/functional/proxy_test.py delete mode 100755 test/functional/pruning.py delete mode 100755 test/functional/reindex.py delete mode 100755 test/functional/replace-by-fee.py delete mode 100755 test/functional/segwit.py delete mode 100755 test/functional/smartfees.py delete mode 100755 test/functional/uacomment.py diff --git a/test/functional/README.md b/test/functional/README.md index d6ce490ab3..b3d34c72fd 100644 --- a/test/functional/README.md +++ b/test/functional/README.md @@ -133,7 +133,7 @@ Each `TestInstance` consists of: acceptance is tested against the given outcome. - For examples of tests written in this framework, see - `invalidblockrequest.py` and `p2p-fullblocktest.py`. + `invalidblockrequest.py` and `feature_block.py`. ### test-framework modules diff --git a/test/functional/assumevalid.py b/test/functional/assumevalid.py deleted file mode 100755 index 5a09142412..0000000000 --- a/test/functional/assumevalid.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test logic for skipping signature validation on old blocks. - -Test logic for skipping signature validation on blocks which we've assumed -valid (https://github.com/bitcoin/bitcoin/pull/9484) - -We build a chain that includes an invalid signature for one of the -transactions: - - 0: genesis block - 1: block 1 with coinbase transaction output. - 2-101: bury that block with 100 blocks so the coinbase transaction - output can be spent - 102: a block containing a transaction spending the coinbase - transaction output. The transaction has an invalid signature. - 103-2202: bury the bad block with just over two weeks' worth of blocks - (2100 blocks) - -Start three nodes: - - - node0 has no -assumevalid parameter. Try to sync to block 2202. It will - reject block 102 and only sync as far as block 101 - - node1 has -assumevalid set to the hash of block 102. Try to sync to - block 2202. node1 will sync all the way to block 2202. - - node2 has -assumevalid set to the hash of block 102. Try to sync to - block 200. node2 will reject block 102: although it's assumed valid, it - isn't buried by at least two weeks' work, so its signatures are still checked.
-""" -import time - -from test_framework.blocktools import (create_block, create_coinbase) -from test_framework.key import CECKey -from test_framework.mininode import (CBlockHeader, - COutPoint, - CTransaction, - CTxIn, - CTxOut, - network_thread_join, - network_thread_start, - P2PInterface, - msg_block, - msg_headers) -from test_framework.script import (CScript, OP_TRUE) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class BaseNode(P2PInterface): - def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() - headers_message.headers = [CBlockHeader(b) for b in new_blocks] - self.send_message(headers_message) - -class AssumeValidTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - - def setup_network(self): - self.add_nodes(3) - # Start node0. We don't start the other nodes yet since - # we need to pre-mine a block with an invalid transaction - # signature so we can pass in the block hash as assumevalid. - self.start_node(0) - - def send_blocks_until_disconnected(self, p2p_conn): - """Keep sending blocks to the node until we're disconnected.""" - for i in range(len(self.blocks)): - if p2p_conn.state != "connected": - break - try: - p2p_conn.send_message(msg_block(self.blocks[i])) - except IOError as e: - assert str(e) == 'Not connected, no pushbuf' - break - - def assert_blockchain_height(self, node, height): - """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" - last_height = node.getblock(node.getbestblockhash())['height'] - timeout = 10 - while True: - time.sleep(0.25) - current_height = node.getblock(node.getbestblockhash())['height'] - if current_height != last_height: - last_height = current_height - if timeout < 0: - assert False, "blockchain too short after timeout: %d" % current_height - timeout - 0.25 - continue - elif current_height > height: - assert False, "blockchain too long: %d" % current_height - elif current_height == height: - break - - def run_test(self): - - # Connect to node0 - p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) - - network_thread_start() - self.nodes[0].p2p.wait_for_verack() - - # Build the blockchain - self.tip = int(self.nodes[0].getbestblockhash(), 16) - self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 - - self.blocks = [] - - # Get a pubkey for the coinbase TXO - coinbase_key = CECKey() - coinbase_key.set_secretbytes(b"horsebattery") - coinbase_pubkey = coinbase_key.get_pubkey() - - # Create the first block with a coinbase output to our key - height = 1 - block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time) - self.blocks.append(block) - self.block_time += 1 - block.solve() - # Save the coinbase for later - self.block1 = block - self.tip = block.sha256 - height += 1 - - # Bury the block 100 deep so the coinbase output is spendable - for i in range(100): - block = create_block(self.tip, create_coinbase(height), self.block_time) - block.solve() - self.blocks.append(block) - self.tip = block.sha256 - self.block_time += 1 - height += 1 - - # Create a transaction spending the coinbase output with an invalid (null) signature - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) - tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) - tx.calc_sha256() - - block102 = create_block(self.tip, create_coinbase(height), self.block_time) - 
self.block_time += 1 - block102.vtx.extend([tx]) - block102.hashMerkleRoot = block102.calc_merkle_root() - block102.rehash() - block102.solve() - self.blocks.append(block102) - self.tip = block102.sha256 - self.block_time += 1 - height += 1 - - # Bury the assumed valid block 2100 deep - for i in range(2100): - block = create_block(self.tip, create_coinbase(height), self.block_time) - block.nVersion = 4 - block.solve() - self.blocks.append(block) - self.tip = block.sha256 - self.block_time += 1 - height += 1 - - # We're adding new connections so terminate the network thread - self.nodes[0].disconnect_p2ps() - network_thread_join() - - # Start node1 and node2 with assumevalid so they accept a block with a bad signature. - self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)]) - self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)]) - - p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) - p2p1 = self.nodes[1].add_p2p_connection(BaseNode()) - p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) - - network_thread_start() - - p2p0.wait_for_verack() - p2p1.wait_for_verack() - p2p2.wait_for_verack() - - # send header lists to all three nodes - p2p0.send_header_for_blocks(self.blocks[0:2000]) - p2p0.send_header_for_blocks(self.blocks[2000:]) - p2p1.send_header_for_blocks(self.blocks[0:2000]) - p2p1.send_header_for_blocks(self.blocks[2000:]) - p2p2.send_header_for_blocks(self.blocks[0:200]) - - # Send blocks to node0. Block 102 will be rejected. - self.send_blocks_until_disconnected(p2p0) - self.assert_blockchain_height(self.nodes[0], 101) - - # Send all blocks to node1. All blocks will be accepted. - for i in range(2202): - p2p1.send_message(msg_block(self.blocks[i])) - # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. - p2p1.sync_with_ping(120) - assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) - - # Send blocks to node2. Block 102 will be rejected. - self.send_blocks_until_disconnected(p2p2) - self.assert_blockchain_height(self.nodes[2], 101) - -if __name__ == '__main__': - AssumeValidTest().main() diff --git a/test/functional/bip65-cltv-p2p.py b/test/functional/bip65-cltv-p2p.py deleted file mode 100755 index f62ae31654..0000000000 --- a/test/functional/bip65-cltv-p2p.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP65 (CHECKLOCKTIMEVERIFY). - -Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height -1351. -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import * -from test_framework.blocktools import create_coinbase, create_block -from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum -from io import BytesIO - -CLTV_HEIGHT = 1351 - -# Reject codes that we might receive in this test -REJECT_INVALID = 16 -REJECT_OBSOLETE = 17 -REJECT_NONSTANDARD = 64 - -def cltv_invalidate(tx): - '''Modify the signature in vin 0 of the tx to fail CLTV - - Prepends -1 CLTV DROP in the scriptSig itself. - - TODO: test more ways that transactions using CLTV could be invalid (eg - locktime requirements fail, sequence time requirements fail, etc). 
- ''' - tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + - list(CScript(tx.vin[0].scriptSig))) - -def cltv_validate(node, tx, height): - '''Modify the signature in vin 0 of the tx to pass CLTV - Prepends <height> CLTV DROP in the scriptSig, and sets - the locktime to height''' - tx.vin[0].nSequence = 0 - tx.nLockTime = height - - # Need to re-sign, since nSequence and nLockTime changed - signed_result = node.signrawtransaction(ToHex(tx)) - new_tx = CTransaction() - new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex']))) - - new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] + - list(CScript(new_tx.vin[0].scriptSig))) - return new_tx - -def create_transaction(node, coinbase, to_address, amount): - from_txid = node.getblock(coinbase)['tx'][0] - inputs = [{ "txid" : from_txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - signresult = node.signrawtransaction(rawtx) - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) - return tx - -class BIP65Test(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] - self.setup_clean_chain = True - - def run_test(self): - self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() - - # wait_for_verack ensures that the P2P connection is fully up. - self.nodes[0].p2p.wait_for_verack() - - self.log.info("Mining %d blocks", CLTV_HEIGHT - 2) - self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) - self.nodeaddress = self.nodes[0].getnewaddress() - - self.log.info("Test that an invalid-according-to-CLTV transaction can still appear in a block") - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], - self.nodeaddress, 1.0) - cltv_invalidate(spendtx) - spendtx.rehash() - - tip = self.nodes[0].getbestblockhash() - block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 - block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time) - block.nVersion = 3 - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - self.log.info("Test that blocks must now be at least version 4") - tip = block.sha256 - block_time += 1 - block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) - block.nVersion = 3 - block.solve() - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)') - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - del self.nodes[0].p2p.last_message["reject"] - - self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block") - block.nVersion = 4 - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], - self.nodeaddress, 1.0) - cltv_invalidate(spendtx) - spendtx.rehash() - - # First we show that this tx is valid except for CLTV by getting it - # accepted to the mempool (which we can achieve with - # -promiscuousmempoolflags).
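 - # Mempool acceptance of this CLTV-violating transaction relies on the - # -promiscuousmempoolflags=1 setting in extra_args, which overrides the - # script verification flags used for mempool admission; under default - # policy it would be rejected at relay.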
- self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) - assert spendtx.hash in self.nodes[0].getrawmempool() - - # Now we verify that a block with this transaction is invalid. - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: - # Generic rejection when a block is invalid - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') - else: - assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason - - self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") - spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1) - spendtx.rehash() - - block.vtx.pop(1) - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) - - -if __name__ == '__main__': - BIP65Test().main() diff --git a/test/functional/bip68-112-113-p2p.py b/test/functional/bip68-112-113-p2p.py deleted file mode 100755 index 82aa0ff891..0000000000 --- a/test/functional/bip68-112-113-p2p.py +++ /dev/null @@ -1,534 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test activation of the first version bits soft fork. - -This soft fork will activate the following BIPS: -BIP 68 - nSequence relative lock times -BIP 112 - CHECKSEQUENCEVERIFY -BIP 113 - MedianTimePast semantics for nLockTime - -regtest lock-in with 108/144 block signalling -activation after a further 144 blocks - -mine 82 blocks whose coinbases will be used to generate inputs for our tests -mine 61 blocks to transition from DEFINED to STARTED -mine 144 blocks, only 100 of which are signaling readiness, in order to fail to change state this period -mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN -mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572 -mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered -mine 1 block and test that enforcement has triggered (which triggers ACTIVE) -Test BIP 113 is enforced -Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height -Mine 1 block so next height is 581 and test BIP 68 now passes time but not height -Mine 1 block so next height is 582 and test BIP 68 now passes time and height -Test that BIP 112 is enforced - -Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates, -and that after the soft fork activates transactions pass and fail as they should according to the rules. -For each BIP, transactions of versions 1 and 2 will be tested.
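-(Both transaction versions matter: BIP 68 applies only to transactions with nVersion >= 2, and OP_CSV succeeds for version 1 transactions only when the disable flag is set in its stack operand.)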
----------------- -BIP 113: -bip113tx - modify the nLocktime variable - -BIP 68: -bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below - -BIP 112: -bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP -bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP -bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP -bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP -bip112tx_special - test negative argument to OP_CSV -""" - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.mininode import ToHex, CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block -from test_framework.comptool import TestInstance, TestManager -from test_framework.script import * -from io import BytesIO -import time - -base_relative_locktime = 10 -seq_disable_flag = 1<<31 -seq_random_high_bit = 1<<25 -seq_type_flag = 1<<22 -seq_random_low_bit = 1<<18 - -# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field -# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1 -relative_locktimes = [] -for b31 in range(2): - b25times = [] - for b25 in range(2): - b22times = [] - for b22 in range(2): - b18times = [] - for b18 in range(2): - rlt = base_relative_locktime - if (b31): - rlt = rlt | seq_disable_flag - if (b25): - rlt = rlt | seq_random_high_bit - if (b22): - rlt = rlt | seq_type_flag - if (b18): - rlt = rlt | seq_random_low_bit - b18times.append(rlt) - b22times.append(b18times) - b25times.append(b22times) - relative_locktimes.append(b25times) - -def all_rlt_txs(txarray): - txs = [] - for b31 in range(2): - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - txs.append(txarray[b31][b25][b22][b18]) - return txs - -class BIP68_112_113Test(ComparisonTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']] - - def run_test(self): - test = TestManager(self, self.options.tmpdir) - test.add_all_connections(self.nodes) - network_thread_start() - test.run() - - def send_generic_input_tx(self, node, coinbases): - amount = Decimal("49.99") - return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) - - def create_transaction(self, node, txid, to_address, amount): - inputs = [{ "txid" : txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(rawtx)) - tx.deserialize(f) - return tx - - def sign_transaction(self, node, unsignedtx): - rawtx = ToHex(unsignedtx) - signresult = node.signrawtransaction(rawtx) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(signresult['hex'])) - tx.deserialize(f) - return tx - - def generate_blocks(self, number, version, test_blocks = []): - for i in range(number): - block = self.create_test_block([], version) - test_blocks.append([block, True]) - self.last_block_time += 600 - self.tip = block.sha256 - self.tipheight += 1 - return 
test_blocks - - def create_test_block(self, txs, version = 536870912): - block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600) - block.nVersion = version - block.vtx.extend(txs) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - return block - - def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): - txs = [] - assert(len(bip68inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - tx.nVersion = txversion - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta - b18txs.append(self.sign_transaction(self.nodes[0], tx)) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def create_bip112special(self, input, txversion): - tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98")) - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - return signtx - - def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0): - txs = [] - assert(len(bip112inputs) >= 16) - i = 0 - for b31 in range(2): - b25txs = [] - for b25 in range(2): - b22txs = [] - for b22 in range(2): - b18txs = [] - for b18 in range(2): - tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) - i += 1 - if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed - tx.vin[0].nSequence = base_relative_locktime + locktime_delta - else: # vary nSequence instead, OP_CSV is fixed - tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta - tx.nVersion = txversion - signtx = self.sign_transaction(self.nodes[0], tx) - if (varyOP_CSV): - signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - else: - signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) - b18txs.append(signtx) - b22txs.append(b18txs) - b25txs.append(b22txs) - txs.append(b25txs) - return txs - - def get_tests(self): - long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future - self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time - self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs - self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time - self.tipheight = 82 # height of the next block to build - self.last_block_time = long_past_time - self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) - self.nodeaddress = self.nodes[0].getnewaddress() - - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined') - test_blocks = self.generate_blocks(61, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 1 - # Advanced from DEFINED to STARTED, height = 143 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') - - # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0 - # using a variety of 
bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 2 - # Failed to advance past STARTED, height = 287 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') - - # 108 out of 144 signal bit 0 to achieve lock-in - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) # 3 - # Advanced from STARTED to LOCKED_IN, height = 431 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') - - # 140 more version 4 blocks - test_blocks = self.generate_blocks(140, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 4 - - ### Inputs at height = 572 - # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) - # Note we reuse inputs for v1 and v2 txs so must test these separately - # 16 normal inputs - bip68inputs = [] - for i in range(16): - bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) - # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112basicinputs = [] - for j in range(2): - inputs = [] - for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) - bip112basicinputs.append(inputs) - # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112diverseinputs = [] - for j in range(2): - inputs = [] - for i in range(16): - inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) - bip112diverseinputs.append(inputs) - # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) - bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) - # 1 normal input - bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) - - self.nodes[0].setmocktime(self.last_block_time + 600) - inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 - self.nodes[0].setmocktime(0) - self.tip = int("0x" + inputblockhash, 0) - self.tipheight += 1 - self.last_block_time += 600 - assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1) - - # 2 more version 4 blocks - test_blocks = self.generate_blocks(2, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 5 - # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575) - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') - - # Test both version 1 and version 2 transactions for all tests - # BIP113 test transaction will be modified before each use to put in appropriate block time - bip113tx_v1 = self.create_transaction(self.nodes[0], 
bip113input, self.nodeaddress, Decimal("49.98")) - bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE - bip113tx_v1.nVersion = 1 - bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) - bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE - bip113tx_v2.nVersion = 2 - - # For BIP68 test all 16 relative sequence locktimes - bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) - bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) - - # For BIP112 test: - # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1) - bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2) - # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs - bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1) - bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1) - # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1) - bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2) - # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs - bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1) - bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1) - # -1 OP_CSV OP_DROP input - bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) - bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) - - - ### TESTING ### - ################################## - ### Before Soft Forks Activate ### - ################################## - # All txs should pass - ### Version 1 txs ### - success_txs = [] - # add BIP113 tx and -1 CSV tx - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - success_txs.append(bip113signed1) - success_txs.append(bip112tx_special_v1) - # add BIP 68 txs - success_txs.extend(all_rlt_txs(bip68txs_v1)) - # add BIP 112 with seq=10 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) - # try BIP 112 with seq=9 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 6 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ### Version 2 txs ### - success_txs = [] - # add BIP113 tx and -1 CSV tx - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) - success_txs.append(bip113signed2) - success_txs.append(bip112tx_special_v2) - # add BIP 68 txs - success_txs.extend(all_rlt_txs(bip68txs_v2)) - # add BIP 112 with seq=10 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) - # try BIP 112 with seq=9 txs - success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) - success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 7 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - - # 1 
more version 4 block to get us to height 575 so the fork should now be active for the next block - test_blocks = self.generate_blocks(1, 4) - yield TestInstance(test_blocks, sync_every_block=False) # 8 - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active') - - - ################################# - ### After Soft Forks Activate ### - ################################# - ### BIP 113 ### - # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) - for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10 - # BIP 113 tests should now pass if the locktime is < MTP - bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) - bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block - bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) - for bip113tx in [bip113signed1, bip113signed2]: - yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - # Next block height = 580 after 4 blocks of random version - test_blocks = self.generate_blocks(4, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 13 - - ### BIP 68 ### - ### Version 1 txs ### - # All still pass - success_txs = [] - success_txs.extend(all_rlt_txs(bip68txs_v1)) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 14 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ### Version 2 txs ### - bip68success_txs = [] - # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512 - bip68timetxs = [] - for b25 in range(2): - for b18 in range(2): - bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) - for tx in bip68timetxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19 - bip68heighttxs = [] - for b25 in range(2): - for b18 in range(2): - bip68heighttxs.append(bip68txs_v2[0][b25][0][b18]) - for tx in bip68heighttxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23 - - # Advance one block to 581 - test_blocks = self.generate_blocks(1, 1234) - yield TestInstance(test_blocks, sync_every_block=False) # 24 - - # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512 - bip68success_txs.extend(bip68timetxs) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - for tx in bip68heighttxs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29 - - # Advance one block to 582 - test_blocks = self.generate_blocks(1, 1234) - yield TestInstance(test_blocks, sync_every_block=False) 
# 30 - - # All BIP 68 txs should pass - bip68success_txs.extend(bip68heighttxs) - yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - - ### BIP 112 ### - ### Version 1 txs ### - # -1 OP_CSV tx should fail - yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32 - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass - success_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18]) - success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18]) - yield TestInstance([[self.create_test_block(success_txs), True]]) # 33 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail - fail_txs = [] - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18]) - fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18]) - - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81 - - ### Version 2 txs ### - # -1 OP_CSV tx should fail - yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82 - - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met) - success_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV - success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 - - yield TestInstance([[self.create_test_block(success_txs), True]]) # 83 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## - # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check - fail_txs = [] - fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9 - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9 - - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107 - - # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail - fail_txs = [] - for b25 in range(2): - for b22 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115 - - # If sequencelock types mismatch, tx should fail - fail_txs = [] - for b25 in range(2): - for b18 in range(2): - fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence - fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV - for tx in fail_txs: - yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123 - - # Remaining txs should pass, just test masking works properly - success_txs = [] - for b25 in range(2): - for b18 in range(2): - success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence - 
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV - yield TestInstance([[self.create_test_block(success_txs), True]]) # 124 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - # Additional test, of checking that comparison of two time types works properly - time_txs = [] - for b25 in range(2): - for b18 in range(2): - tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] - tx.vin[0].nSequence = base_relative_locktime | seq_type_flag - signtx = self.sign_transaction(self.nodes[0], tx) - time_txs.append(signtx) - yield TestInstance([[self.create_test_block(time_txs), True]]) # 125 - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - ### Missing aspects of test - ## Testing empty stack fails - - -if __name__ == '__main__': - BIP68_112_113Test().main() diff --git a/test/functional/bip68-sequence.py b/test/functional/bip68-sequence.py deleted file mode 100755 index 94b13653b9..0000000000 --- a/test/functional/bip68-sequence.py +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP68 implementation.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.blocktools import * - -SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31) -SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height) -SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift -SEQUENCE_LOCKTIME_MASK = 0x0000ffff - -# RPC error for non-BIP68 final transactions -NOT_FINAL_ERROR = "64: non-BIP68-final" - -class BIP68Test(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.extra_args = [[], ["-acceptnonstdtxn=0"]] - - def run_test(self): - self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] - - # Generate some coins - self.nodes[0].generate(110) - - self.log.info("Running test disable flag") - self.test_disable_flag() - - self.log.info("Running test sequence-lock-confirmed-inputs") - self.test_sequence_lock_confirmed_inputs() - - self.log.info("Running test sequence-lock-unconfirmed-inputs") - self.test_sequence_lock_unconfirmed_inputs() - - self.log.info("Running test BIP68 not consensus before versionbits activation") - self.test_bip68_not_consensus() - - self.log.info("Activating BIP68 (and 112/113)") - self.activateCSV() - - self.log.info("Verifying nVersion=2 transactions are standard.") - self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).") - self.test_version2_relay() - - self.log.info("Passed") - - # Test that BIP68 is not in effect if tx version is 1, or if - # the first sequence bit is set. - def test_disable_flag(self): - # Create some unconfirmed inputs - new_addr = self.nodes[0].getnewaddress() - self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC - - utxos = self.nodes[0].listunspent(0, 0) - assert(len(utxos) > 0) - - utxo = utxos[0] - - tx1 = CTransaction() - value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN) - - # Check that the disable flag disables relative locktime. - # If sequence locks were used, this would require 1 block for the - # input to mature. 
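 - # Per BIP68, nSequence is interpreted as a relative locktime only when - # bit 31 (SEQUENCE_LOCKTIME_DISABLE_FLAG) is unset; with the flag set, - # the low bits are ignored by consensus.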
- sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 - tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] - tx1.vout = [CTxOut(value, CScript([b'a']))] - - tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"] - tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) - tx1_id = int(tx1_id, 16) - - # This transaction will enable sequence-locks, so this transaction should - # fail - tx2 = CTransaction() - tx2.nVersion = 2 - sequence_value = sequence_value & 0x7fffffff - tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] - tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))] - tx2.rehash() - - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) - - # Setting the version back down to 1 should disable the sequence lock, - # so this should be accepted. - tx2.nVersion = 1 - - self.nodes[0].sendrawtransaction(ToHex(tx2)) - - # Calculate the median time past of a prior block ("confirmations" before - # the current tip). - def get_median_time_past(self, confirmations): - block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations) - return self.nodes[0].getblockheader(block_hash)["mediantime"] - - # Test that sequence locks are respected for transactions spending confirmed inputs. - def test_sequence_lock_confirmed_inputs(self): - # Create lots of confirmed utxos, and use them to generate lots of random - # transactions. - max_outputs = 50 - addresses = [] - while len(addresses) < max_outputs: - addresses.append(self.nodes[0].getnewaddress()) - while len(self.nodes[0].listunspent()) < 200: - import random - random.shuffle(addresses) - num_outputs = random.randint(1, max_outputs) - outputs = {} - for i in range(num_outputs): - outputs[addresses[i]] = random.randint(1, 20)*0.01 - self.nodes[0].sendmany("", outputs) - self.nodes[0].generate(1) - - utxos = self.nodes[0].listunspent() - - # Try creating a lot of random transactions. - # Each time, choose a random number of inputs, and randomly set - # some of those inputs to be sequence locked (and randomly choose - # between height/time locking). Small random chance of making the locks - # all pass. - for i in range(400): - # Randomly choose up to 10 inputs - num_inputs = random.randint(1, 10) - random.shuffle(utxos) - - # Track whether any sequence locks used should fail - should_pass = True - - # Track whether this transaction was built with sequence locks - using_sequence_locks = False - - tx = CTransaction() - tx.nVersion = 2 - value = 0 - for j in range(num_inputs): - sequence_value = 0xfffffffe # this disables sequence locks - - # 50% chance we enable sequence locks - if random.randint(0,1): - using_sequence_locks = True - - # 10% of the time, make the input sequence value pass - input_will_pass = (random.randint(1,10) == 1) - sequence_value = utxos[j]["confirmations"] - if not input_will_pass: - sequence_value += 1 - should_pass = False - - # Figure out what the median-time-past was for the confirmed input - # Note that if an input has N confirmations, we're going back N blocks - # from the tip so that we're looking up MTP of the block - # PRIOR to the one the input appears in, as per the BIP68 spec. 
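 - # For time-based locks the 16-bit value is shifted left by - # SEQUENCE_LOCKTIME_GRANULARITY (9) bits, so each unit represents 512 - # seconds of median-time-past age.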
- orig_time = self.get_median_time_past(utxos[j]["confirmations"]) - cur_time = self.get_median_time_past(0) # MTP of the tip - - # can only timelock this input if it's not too old -- otherwise use height - can_time_lock = True - if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: - can_time_lock = False - - # if time-lockable, then 50% chance we make this a time lock - if random.randint(0,1) and can_time_lock: - # Find first time-lock value that fails, or latest one that succeeds - time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY - if input_will_pass and time_delta > cur_time - orig_time: - sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) - elif (not input_will_pass and time_delta <= cur_time - orig_time): - sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1 - sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG - tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) - value += utxos[j]["amount"]*COIN - # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output - tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50 - tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a']))) - rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"] - - if (using_sequence_locks and not should_pass): - # This transaction should be rejected - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) - else: - # This raw transaction should be accepted - self.nodes[0].sendrawtransaction(rawtx) - utxos = self.nodes[0].listunspent() - - # Test that sequence locks on unconfirmed inputs must have nSequence - # height or time of 0 to be accepted. - # Then test that BIP68-invalid transactions are removed from the mempool - # after a reorg. - def test_sequence_lock_unconfirmed_inputs(self): - # Store height so we can easily reset the chain at the end of the test - cur_height = self.nodes[0].getblockcount() - - # Create a mempool tx. - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) - tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) - tx1.rehash() - - # Anyone-can-spend mempool tx. - # Sequence lock of 0 should pass. - tx2 = CTransaction() - tx2.nVersion = 2 - tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] - tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] - tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] - tx2 = FromHex(tx2, tx2_raw) - tx2.rehash() - - self.nodes[0].sendrawtransaction(tx2_raw) - - # Create a spend of the 0th output of orig_tx with a sequence lock - # of 1, and test what happens when submitting. 
- # orig_tx.vout[0] must be an anyone-can-spend output - def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): - sequence_value = 1 - if not use_height_lock: - sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG - - tx = CTransaction() - tx.nVersion = 2 - tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] - tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))] - tx.rehash() - - if (orig_tx.hash in node.getrawmempool()): - # sendrawtransaction should fail if the tx is in the mempool - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) - else: - # sendrawtransaction should succeed if the tx is not in the mempool - node.sendrawtransaction(ToHex(tx)) - - return tx - - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) - - # Now mine some blocks, but make sure tx2 doesn't get mined. - # Use prioritisetransaction to lower the effective feerate to 0 - self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN)) - cur_time = int(time.time()) - for i in range(10): - self.nodes[0].setmocktime(cur_time + 600) - self.nodes[0].generate(1) - cur_time += 600 - - assert(tx2.hash in self.nodes[0].getrawmempool()) - - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) - test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) - - # Mine tx2, and then try again - self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN)) - - # Advance the time on the node so that we can test timelocks - self.nodes[0].setmocktime(cur_time+600) - self.nodes[0].generate(1) - assert(tx2.hash not in self.nodes[0].getrawmempool()) - - # Now that tx2 is not in the mempool, a sequence locked spend should - # succeed - tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) - assert(tx3.hash in self.nodes[0].getrawmempool()) - - self.nodes[0].generate(1) - assert(tx3.hash not in self.nodes[0].getrawmempool()) - - # One more test, this time using height locks - tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True) - assert(tx4.hash in self.nodes[0].getrawmempool()) - - # Now try combining confirmed and unconfirmed inputs - tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True) - assert(tx5.hash not in self.nodes[0].getrawmempool()) - - utxos = self.nodes[0].listunspent() - tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) - tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN) - raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"] - - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) - - # Test mempool-BIP68 consistency after reorg - # - # State of the transactions in the last blocks: - # ... -> [ tx2 ] -> [ tx3 ] - # tip-1 tip - # And currently tx4 is in the mempool. - # - # If we invalidate the tip, tx3 should get added to the mempool, causing - # tx4 to be removed (fails sequence-lock). - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - assert(tx4.hash not in self.nodes[0].getrawmempool()) - assert(tx3.hash in self.nodes[0].getrawmempool()) - - # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in - # diagram above). - # This would cause tx2 to be added back to the mempool, which in turn causes - # tx3 to be removed. 
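 - # The two replacement blocks are built by hand with create_block on top - # of tip-1 (generate() would only extend the current tip) and injected - # with submitblock.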
- tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16) - height = self.nodes[0].getblockcount() - for i in range(2): - block = create_block(tip, create_coinbase(height), cur_time) - block.nVersion = 3 - block.rehash() - block.solve() - tip = block.sha256 - height += 1 - self.nodes[0].submitblock(ToHex(block)) - cur_time += 1 - - mempool = self.nodes[0].getrawmempool() - assert(tx3.hash not in mempool) - assert(tx2.hash in mempool) - - # Reset the chain and get rid of the mocktimed-blocks - self.nodes[0].setmocktime(0) - self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1)) - self.nodes[0].generate(10) - - # Make sure that BIP68 isn't being used to validate blocks, prior to - # versionbits activation. If more blocks are mined prior to this test - # being run, then it's possible the test has activated the soft fork, and - # this test should be moved to run earlier, or deleted. - def test_bip68_not_consensus(self): - assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active') - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) - - tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) - tx1.rehash() - - # Make an anyone-can-spend transaction - tx2 = CTransaction() - tx2.nVersion = 1 - tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] - tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] - - # sign tx2 - tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] - tx2 = FromHex(tx2, tx2_raw) - tx2.rehash() - - self.nodes[0].sendrawtransaction(ToHex(tx2)) - - # Now make an invalid spend of tx2 according to BIP68 - sequence_value = 100 # 100 block relative locktime - - tx3 = CTransaction() - tx3.nVersion = 2 - tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] - tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] - tx3.rehash() - - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) - - # make a block that violates bip68; ensure that the tip updates - tip = int(self.nodes[0].getbestblockhash(), 16) - block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1)) - block.nVersion = 3 - block.vtx.extend([tx1, tx2, tx3]) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - add_witness_commitment(block) - block.solve() - - self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - def activateCSV(self): - # activation should happen at block height 432 (3 periods) - # getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block. - min_activation_height = 432 - height = self.nodes[0].getblockcount() - assert_greater_than(min_activation_height - height, 2) - self.nodes[0].generate(min_activation_height - height - 2) - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in") - self.nodes[0].generate(1) - assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active") - sync_blocks(self.nodes) - - # Use self.nodes[1] to test that version 2 transactions are standard. 
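 - # node1 is started with -acceptnonstdtxn=0 (see set_test_params), so - # acceptance into its mempool demonstrates that the transaction is standard.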
- def test_version2_relay(self): - inputs = [ ] - outputs = { self.nodes[1].getnewaddress() : 1.0 } - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] - tx = FromHex(CTransaction(), rawtxfund) - tx.nVersion = 2 - tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"] - self.nodes[1].sendrawtransaction(tx_signed) - -if __name__ == '__main__': - BIP68Test().main() diff --git a/test/functional/bip9-softforks.py b/test/functional/bip9-softforks.py deleted file mode 100755 index ae92e9f07c..0000000000 --- a/test/functional/bip9-softforks.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP 9 soft forks. - -Connect to a single node. -regtest lock-in with 108/144 block signalling -activation after a further 144 blocks -mine 2 block and save coinbases for later use -mine 141 blocks to transition from DEFINED to STARTED -mine 100 blocks signalling readiness and 44 not in order to fail to change state this period -mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN) -mine a further 143 blocks (LOCKED_IN) -test that enforcement has not triggered (which triggers ACTIVE) -test that enforcement has triggered -""" -from io import BytesIO -import shutil -import time -import itertools - -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.mininode import CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block -from test_framework.comptool import TestInstance, TestManager -from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP - -class BIP9SoftForksTest(ComparisonTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-whitelist=127.0.0.1']] - self.setup_clean_chain = True - - def run_test(self): - self.test = TestManager(self, self.options.tmpdir) - self.test.add_all_connections(self.nodes) - network_thread_start() - self.test.run() - - def create_transaction(self, node, coinbase, to_address, amount): - from_txid = node.getblock(coinbase)['tx'][0] - inputs = [{ "txid" : from_txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(rawtx)) - tx.deserialize(f) - tx.nVersion = 2 - return tx - - def sign_transaction(self, node, tx): - signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize())) - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(signresult['hex'])) - tx.deserialize(f) - return tx - - def generate_blocks(self, number, version, test_blocks = []): - for i in range(number): - block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) - block.nVersion = version - block.rehash() - block.solve() - test_blocks.append([block, True]) - self.last_block_time += 1 - self.tip = block.sha256 - self.height += 1 - return test_blocks - - def get_bip9_status(self, key): - info = self.nodes[0].getblockchaininfo() - return info['bip9_softforks'][key] - - def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno): - assert_equal(self.get_bip9_status(bipName)['status'], 'defined') - assert_equal(self.get_bip9_status(bipName)['since'], 
0) - - # generate some coins for later - self.coinbase_blocks = self.nodes[0].generate(2) - self.height = 3 # height of the next block to build - self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) - self.nodeaddress = self.nodes[0].getnewaddress() - self.last_block_time = int(time.time()) - - assert_equal(self.get_bip9_status(bipName)['status'], 'defined') - assert_equal(self.get_bip9_status(bipName)['since'], 0) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert(bipName not in tmpl['vbavailable']) - assert_equal(tmpl['vbrequired'], 0) - assert_equal(tmpl['version'], 0x20000000) - - # Test 1 - # Advance from DEFINED to STARTED - test_blocks = self.generate_blocks(141, 4) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - assert_equal(self.get_bip9_status(bipName)['since'], 144) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert_equal(tmpl['vbavailable'][bipName], bitno) - assert_equal(tmpl['vbrequired'], 0) - assert(tmpl['version'] & activated_version) - - # Test 1-A - # check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period - test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - - # Test 1-B - # check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period - test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False) - - # Test 1-C - # finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN - test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - - # Test 2 - # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1 - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - 
assert_equal(self.get_bip9_status(bipName)['since'], 144) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert_equal(tmpl['vbavailable'][bipName], bitno) - assert_equal(tmpl['vbrequired'], 0) - assert(tmpl['version'] & activated_version) - - # Test 3 - # 108 out of 144 signal bit 1 to achieve LOCKED_IN - # using a variety of bits to simulate multiple parallel softforks - test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready) - test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) - test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready) - test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not) - yield TestInstance(test_blocks, sync_every_block=False) - - # check counting stats and "possible" flag before last block of this period achieves LOCKED_IN... - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - - # ...continue with Test 3 - test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') - assert_equal(self.get_bip9_status(bipName)['since'], 576) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - - # Test 4 - # 143 more version 536870913 blocks (waiting period-1) - test_blocks = self.generate_blocks(143, 4) - yield TestInstance(test_blocks, sync_every_block=False) - - assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') - assert_equal(self.get_bip9_status(bipName)['since'], 576) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - - # Test 5 - # Check that the new rule is enforced - spendtx = self.create_transaction(self.nodes[0], - self.coinbase_blocks[0], self.nodeaddress, 1.0) - invalidate(spendtx) - spendtx = self.sign_transaction(self.nodes[0], spendtx) - spendtx.rehash() - invalidatePostSignature(spendtx) - spendtx.rehash() - block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) - block.nVersion = activated_version - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.last_block_time += 1 - self.tip = block.sha256 - self.height += 1 - yield TestInstance([[block, True]]) - - assert_equal(self.get_bip9_status(bipName)['status'], 'active') - assert_equal(self.get_bip9_status(bipName)['since'], 720) - tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName in tmpl['rules']) - assert(bipName not in tmpl['vbavailable']) - assert_equal(tmpl['vbrequired'], 0) - assert(not (tmpl['version'] & (1 << bitno))) - - # Test 6 - # Check that the new sequence lock rules are enforced - spendtx = self.create_transaction(self.nodes[0], - self.coinbase_blocks[1], self.nodeaddress, 1.0) - invalidate(spendtx) - spendtx = self.sign_transaction(self.nodes[0], spendtx) - spendtx.rehash() - invalidatePostSignature(spendtx) - spendtx.rehash() - - block = create_block(self.tip, create_coinbase(self.height), 
self.last_block_time + 1) - block.nVersion = 5 - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - self.last_block_time += 1 - yield TestInstance([[block, False]]) - - # Restart all - self.test.clear_all_connections() - self.stop_nodes() - self.nodes = [] - shutil.rmtree(self.options.tmpdir + "/node0") - self.setup_chain() - self.setup_network() - self.test.add_all_connections(self.nodes) - network_thread_start() - self.test.p2p_connections[0].wait_for_verack() - - def get_tests(self): - for test in itertools.chain( - self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0), - self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0), - self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0) - ): - yield test - - def donothing(self, tx): - return - - def csv_invalidate(self, tx): - """Modify the signature in vin 0 of the tx to fail CSV - Prepends -1 CSV DROP in the scriptSig itself. - """ - tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] + - list(CScript(tx.vin[0].scriptSig))) - - def sequence_lock_invalidate(self, tx): - """Modify the nSequence to make it fails once sequence lock rule is - activated (high timespan). - """ - tx.vin[0].nSequence = 0x00FFFFFF - tx.nLockTime = 0 - - def mtp_invalidate(self, tx): - """Modify the nLockTime to make it fails once MTP rule is activated.""" - # Disable Sequence lock, Activate nLockTime - tx.vin[0].nSequence = 0x90FFFFFF - tx.nLockTime = self.last_block_time - -if __name__ == '__main__': - BIP9SoftForksTest().main() diff --git a/test/functional/bipdersig-p2p.py b/test/functional/bipdersig-p2p.py deleted file mode 100755 index 3414571678..0000000000 --- a/test/functional/bipdersig-p2p.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP66 (DER SIG). - -Test that the DERSIG soft-fork activates at (regtest) height 1251. -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import * -from test_framework.blocktools import create_coinbase, create_block -from test_framework.script import CScript -from io import BytesIO - -DERSIG_HEIGHT = 1251 - -# Reject codes that we might receive in this test -REJECT_INVALID = 16 -REJECT_OBSOLETE = 17 -REJECT_NONSTANDARD = 64 - -# A canonical signature consists of: -# <30> <02> <02> -def unDERify(tx): - """ - Make the signature in vin 0 of a tx non-DER-compliant, - by adding padding after the S-value. 
- """ - scriptSig = CScript(tx.vin[0].scriptSig) - newscript = [] - for i in scriptSig: - if (len(newscript) == 0): - newscript.append(i[0:-1] + b'\0' + i[-1:]) - else: - newscript.append(i) - tx.vin[0].scriptSig = CScript(newscript) - -def create_transaction(node, coinbase, to_address, amount): - from_txid = node.getblock(coinbase)['tx'][0] - inputs = [{ "txid" : from_txid, "vout" : 0}] - outputs = { to_address : amount } - rawtx = node.createrawtransaction(inputs, outputs) - signresult = node.signrawtransaction(rawtx) - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) - return tx - -class BIP66Test(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] - self.setup_clean_chain = True - - def run_test(self): - self.nodes[0].add_p2p_connection(P2PInterface()) - - network_thread_start() - - # wait_for_verack ensures that the P2P connection is fully up. - self.nodes[0].p2p.wait_for_verack() - - self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2) - self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2) - self.nodeaddress = self.nodes[0].getnewaddress() - - self.log.info("Test that a transaction with non-DER signature can still appear in a block") - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], - self.nodeaddress, 1.0) - unDERify(spendtx) - spendtx.rehash() - - tip = self.nodes[0].getbestblockhash() - block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 - block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time) - block.nVersion = 2 - block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - self.log.info("Test that blocks must now be at least version 3") - tip = block.sha256 - block_time += 1 - block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time) - block.nVersion = 2 - block.rehash() - block.solve() - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)') - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - del self.nodes[0].p2p.last_message["reject"] - - self.log.info("Test that transactions with non-DER signatures cannot appear in a block") - block.nVersion = 3 - - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], - self.nodeaddress, 1.0) - unDERify(spendtx) - spendtx.rehash() - - # First we show that this tx is valid except for DERSIG by getting it - # accepted to the mempool (which we can achieve with - # -promiscuousmempoolflags). - self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) - assert spendtx.hash in self.nodes[0].getrawmempool() - - # Now we verify that a block with this transaction is invalid. 
- block.vtx.append(spendtx) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - - wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) - with mininode_lock: - # We can receive different reject messages depending on whether - # bitcoind is running with multiple script check threads. If script - # check threads are not in use, then transaction script validation - # happens sequentially, and bitcoind produces more specific reject - # reasons. - assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] - assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) - if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: - # Generic rejection when a block is invalid - assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') - else: - assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason - - self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted") - block.vtx[1] = create_transaction(self.nodes[0], - self.coinbase_blocks[1], self.nodeaddress, 1.0) - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - block.solve() - - self.nodes[0].p2p.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) - -if __name__ == '__main__': - BIP66Test().main() diff --git a/test/functional/conf_args.py b/test/functional/conf_args.py deleted file mode 100755 index 61abba8082..0000000000 --- a/test/functional/conf_args.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test various command line arguments and configuration file parameters.""" - -import os - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import get_datadir_path - -class ConfArgsTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def run_test(self): - self.stop_node(0) - # Remove the -datadir argument so it doesn't override the config file - self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")] - - default_data_dir = get_datadir_path(self.options.tmpdir, 0) - new_data_dir = os.path.join(default_data_dir, 'newdatadir') - new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2') - - # Check that using -datadir argument on non-existent directory fails - self.nodes[0].datadir = new_data_dir - self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.') - - # Check that using non-existent datadir in conf file fails - conf_file = os.path.join(default_data_dir, "bitcoin.conf") - with open(conf_file, 'a', encoding='utf8') as f: - f.write("datadir=" + new_data_dir + "\n") - self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.') - - # Create the directory and ensure the config file now works - os.mkdir(new_data_dir) - self.start_node(0, ['-conf='+conf_file, '-wallet=w1']) - self.stop_node(0) - assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1')) - - # Ensure command line argument overrides datadir in conf - os.mkdir(new_data_dir_2) - self.nodes[0].datadir = new_data_dir_2 - self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2']) - assert os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2')) - -if __name__ == '__main__': - ConfArgsTest().main() diff --git a/test/functional/dbcrash.py b/test/functional/dbcrash.py deleted file mode 100755 index 24b9765b4e..0000000000 --- a/test/functional/dbcrash.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test recovery from a crash during chainstate writing. - -- 4 nodes - * node0, node1, and node2 will have different dbcrash ratios, and different - dbcache sizes - * node3 will be a regular node, with no crashing. - * The nodes will not connect to each other. - -- use default test framework starting chain. initialize starting_tip_height to - tip height. - -- Main loop: - * generate lots of transactions on node3, enough to fill up a block. - * uniformly randomly pick a tip height from starting_tip_height to - tip_height; with probability 1/(height_difference+4), invalidate this block. - * mine enough blocks to overtake tip_height at start of loop. 
- * for each node in [node0,node1,node2]: - - for each mined block: - * submit block to node - * if node crashed on/after submitting: - - restart until recovery succeeds - - check that utxo matches node3 using gettxoutsetinfo""" - -import errno -import http.client -import random -import sys -import time - -from test_framework.mininode import * -from test_framework.script import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest] -try: - HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected) -except AttributeError: - pass - -class ChainstateWriteCrashTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = False - - # Set -maxmempool=0 to turn off mempool memory sharing with dbcache - # Set -rpcservertimeout=900 to reduce socket disconnects in this - # long-running test - self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"] - - # Set different crash ratios and cache sizes. Note that not all of - # -dbcache goes to pcoinsTip. - self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args - self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args - self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args - - # Node3 is a normal node with default args, except will mine full blocks - self.node3_args = ["-blockmaxweight=4000000"] - self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args] - - def setup_network(self): - # Need a bit of extra time for the nodes to start up for this test - self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90) - self.start_nodes() - # Leave them unconnected, we'll use submitblock directly in this test - - def restart_node(self, node_index, expected_tip): - """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash. - - Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up - after 60 seconds. Returns the utxo hash of the given node.""" - - time_start = time.time() - while time.time() - time_start < 120: - try: - # Any of these RPC calls could throw due to node crash - self.start_node(node_index) - self.nodes[node_index].waitforblock(expected_tip) - utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2'] - return utxo_hash - except: - # An exception here should mean the node is about to crash. - # If bitcoind exits, then try again. wait_for_node_exit() - # should raise an exception if bitcoind doesn't exit. - self.wait_for_node_exit(node_index, timeout=10) - self.crashed_on_restart += 1 - time.sleep(1) - - # If we got here, bitcoind isn't coming back up on restart. Could be a - # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio -- - # perhaps we generated a test case that blew up our cache? - # TODO: If this happens a lot, we should try to restart without -dbcrashratio - # and make sure that recovery happens. - raise AssertionError("Unable to successfully restart node %d in allotted time", node_index) - - def submit_block_catch_error(self, node_index, block): - """Try submitting a block to the given node. - - Catch any exceptions that indicate the node has crashed. 
- Returns true if the block was submitted successfully; false otherwise.""" - - try: - self.nodes[node_index].submitblock(block) - return True - except http.client.BadStatusLine as e: - # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error. - if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''": - self.log.debug("node %d submitblock raised exception: %s", node_index, e) - return False - else: - raise - except tuple(HTTP_DISCONNECT_ERRORS) as e: - self.log.debug("node %d submitblock raised exception: %s", node_index, e) - return False - except OSError as e: - self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno) - if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]: - # The node has likely crashed - return False - else: - # Unexpected exception, raise - raise - - def sync_node3blocks(self, block_hashes): - """Use submitblock to sync node3's chain with the other nodes - - If submitblock fails, restart the node and get the new utxo hash. - If any nodes crash while updating, we'll compare utxo hashes to - ensure recovery was successful.""" - - node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] - - # Retrieve all the blocks from node3 - blocks = [] - for block_hash in block_hashes: - blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)]) - - # Deliver each block to each other node - for i in range(3): - nodei_utxo_hash = None - self.log.debug("Syncing blocks to node %d", i) - for (block_hash, block) in blocks: - # Get the block from node3, and submit to node_i - self.log.debug("submitting block %s", block_hash) - if not self.submit_block_catch_error(i, block): - # TODO: more carefully check that the crash is due to -dbcrashratio - # (change the exit code perhaps, and check that here?) - self.wait_for_node_exit(i, timeout=30) - self.log.debug("Restarting node %d after block hash %s", i, block_hash) - nodei_utxo_hash = self.restart_node(i, block_hash) - assert nodei_utxo_hash is not None - self.restart_counts[i] += 1 - else: - # Clear it out after successful submitblock calls -- the cached - # utxo hash will no longer be correct - nodei_utxo_hash = None - - # Check that the utxo hash matches node3's utxo set - # NOTE: we only check the utxo set if we had to restart the node - # after the last block submitted: - # - checking the utxo hash causes a cache flush, which we don't - # want to do every time; so - # - we only update the utxo cache after a node restart, since flushing - # the cache is a no-op at that point - if nodei_utxo_hash is not None: - self.log.debug("Checking txoutsetinfo matches for node %d", i) - assert_equal(nodei_utxo_hash, node3_utxo_hash) - - def verify_utxo_hash(self): - """Verify that the utxo hash of each node matches node3. 
- - Restart any nodes that crash while querying.""" - node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] - self.log.info("Verifying utxo hash matches for all nodes") - - for i in range(3): - try: - nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2'] - except OSError: - # probably a crash on db flushing - nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash()) - assert_equal(nodei_utxo_hash, node3_utxo_hash) - - def generate_small_transactions(self, node, count, utxo_list): - FEE = 1000 # TODO: replace this with node relay fee based calculation - num_transactions = 0 - random.shuffle(utxo_list) - while len(utxo_list) >= 2 and num_transactions < count: - tx = CTransaction() - input_amount = 0 - for i in range(2): - utxo = utxo_list.pop() - tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) - input_amount += int(utxo['amount'] * COIN) - output_amount = (input_amount - FEE) // 3 - - if output_amount <= 0: - # Sanity check -- if we chose inputs that are too small, skip - continue - - for i in range(3): - tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) - - # Sign and send the transaction to get into the mempool - tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex'] - node.sendrawtransaction(tx_signed_hex) - num_transactions += 1 - - def run_test(self): - # Track test coverage statistics - self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2 - self.crashed_on_restart = 0 # Track count of crashes during recovery - - # Start by creating a lot of utxos on node3 - initial_height = self.nodes[3].getblockcount() - utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000) - self.log.info("Prepped %d utxo entries", len(utxo_list)) - - # Sync these blocks with the other nodes - block_hashes_to_sync = [] - for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): - block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) - - self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync)) - # Syncing the blocks could cause nodes to crash, so the test begins here. - self.sync_node3blocks(block_hashes_to_sync) - - starting_tip_height = self.nodes[3].getblockcount() - - # Main test loop: - # each time through the loop, generate a bunch of transactions, - # and then either mine a single new block on the tip, or some-sized reorg. - for i in range(40): - self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts) - # Generate a bunch of small-ish transactions - self.generate_small_transactions(self.nodes[3], 2500, utxo_list) - # Pick a random block between current tip, and starting tip - current_height = self.nodes[3].getblockcount() - random_height = random.randint(starting_tip_height, current_height) - self.log.debug("At height %d, considering height %d", current_height, random_height) - if random_height > starting_tip_height: - # Randomly reorg from this point with some probability (1/4 for - # tip, 1/5 for tip-1, ...) 
-                if random.random() < 1.0 / (current_height + 4 - random_height):
-                    self.log.debug("Invalidating block at height %d", random_height)
-                    self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
-
-            # Now generate new blocks until we pass the old tip height
-            self.log.debug("Mining longer tip")
-            block_hashes = []
-            while current_height + 1 > self.nodes[3].getblockcount():
-                block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
-            self.log.debug("Syncing %d new blocks...", len(block_hashes))
-            self.sync_node3blocks(block_hashes)
-            utxo_list = self.nodes[3].listunspent()
-            self.log.debug("Node3 utxo count: %d", len(utxo_list))
-
-        # Check that the utxo hashes agree with node3
-        # Useful side effect: each utxo cache gets flushed here, so that we
-        # won't get crashes on shutdown at the end of the test.
-        self.verify_utxo_hash()
-
-        # Check the test coverage
-        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
-
-        # If no nodes were restarted, we didn't test anything.
-        assert self.restart_counts != [0, 0, 0]
-
-        # Make sure we tested the case of crash-during-recovery.
-        assert self.crashed_on_restart > 0
-
-        # Warn if any of the nodes escaped restart.
-        for i in range(3):
-            if self.restart_counts[i] == 0:
-                self.log.warn("Node %d never crashed during utxo flush!", i)
-
-if __name__ == "__main__":
-    ChainstateWriteCrashTest().main()
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
new file mode 100755
index 0000000000..5a09142412
--- /dev/null
+++ b/test/functional/feature_assumevalid.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for skipping signature validation on old blocks.
+
+Test logic for skipping signature validation on blocks which we've assumed
+valid (https://github.com/bitcoin/bitcoin/pull/9484)
+
+We build a chain that includes an invalid signature for one of the
+transactions:
+
+    0:        genesis block
+    1:        block 1 with coinbase transaction output.
+    2-101:    bury that block with 100 blocks so the coinbase transaction
+              output can be spent
+    102:      a block containing a transaction spending the coinbase
+              transaction output. The transaction has an invalid signature.
+    103-2202: bury the bad block with just over two weeks' worth of blocks
+              (2100 blocks)
+
+Start three nodes:
+
+    - node0 has no -assumevalid parameter. Try to sync to block 2202. It will
+      reject block 102 and only sync as far as block 101
+    - node1 has -assumevalid set to the hash of block 102. Try to sync to
+      block 2202. node1 will sync all the way to block 2202.
+    - node2 has -assumevalid set to the hash of block 102. Try to sync to
+      block 200. node2 will reject block 102 even though it's assumed valid,
+      because it isn't buried by at least two weeks' work.
+""" +import time + +from test_framework.blocktools import (create_block, create_coinbase) +from test_framework.key import CECKey +from test_framework.mininode import (CBlockHeader, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + network_thread_join, + network_thread_start, + P2PInterface, + msg_block, + msg_headers) +from test_framework.script import (CScript, OP_TRUE) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class BaseNode(P2PInterface): + def send_header_for_blocks(self, new_blocks): + headers_message = msg_headers() + headers_message.headers = [CBlockHeader(b) for b in new_blocks] + self.send_message(headers_message) + +class AssumeValidTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + + def setup_network(self): + self.add_nodes(3) + # Start node0. We don't start the other nodes yet since + # we need to pre-mine a block with an invalid transaction + # signature so we can pass in the block hash as assumevalid. + self.start_node(0) + + def send_blocks_until_disconnected(self, p2p_conn): + """Keep sending blocks to the node until we're disconnected.""" + for i in range(len(self.blocks)): + if p2p_conn.state != "connected": + break + try: + p2p_conn.send_message(msg_block(self.blocks[i])) + except IOError as e: + assert str(e) == 'Not connected, no pushbuf' + break + + def assert_blockchain_height(self, node, height): + """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" + last_height = node.getblock(node.getbestblockhash())['height'] + timeout = 10 + while True: + time.sleep(0.25) + current_height = node.getblock(node.getbestblockhash())['height'] + if current_height != last_height: + last_height = current_height + if timeout < 0: + assert False, "blockchain too short after timeout: %d" % current_height + timeout - 0.25 + continue + elif current_height > height: + assert False, "blockchain too long: %d" % current_height + elif current_height == height: + break + + def run_test(self): + + # Connect to node0 + p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) + + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + # Build the blockchain + self.tip = int(self.nodes[0].getbestblockhash(), 16) + self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 + + self.blocks = [] + + # Get a pubkey for the coinbase TXO + coinbase_key = CECKey() + coinbase_key.set_secretbytes(b"horsebattery") + coinbase_pubkey = coinbase_key.get_pubkey() + + # Create the first block with a coinbase output to our key + height = 1 + block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time) + self.blocks.append(block) + self.block_time += 1 + block.solve() + # Save the coinbase for later + self.block1 = block + self.tip = block.sha256 + height += 1 + + # Bury the block 100 deep so the coinbase output is spendable + for i in range(100): + block = create_block(self.tip, create_coinbase(height), self.block_time) + block.solve() + self.blocks.append(block) + self.tip = block.sha256 + self.block_time += 1 + height += 1 + + # Create a transaction spending the coinbase output with an invalid (null) signature + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) + tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) + tx.calc_sha256() + + block102 = create_block(self.tip, create_coinbase(height), self.block_time) + 
self.block_time += 1 + block102.vtx.extend([tx]) + block102.hashMerkleRoot = block102.calc_merkle_root() + block102.rehash() + block102.solve() + self.blocks.append(block102) + self.tip = block102.sha256 + self.block_time += 1 + height += 1 + + # Bury the assumed valid block 2100 deep + for i in range(2100): + block = create_block(self.tip, create_coinbase(height), self.block_time) + block.nVersion = 4 + block.solve() + self.blocks.append(block) + self.tip = block.sha256 + self.block_time += 1 + height += 1 + + # We're adding new connections so terminate the network thread + self.nodes[0].disconnect_p2ps() + network_thread_join() + + # Start node1 and node2 with assumevalid so they accept a block with a bad signature. + self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)]) + self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)]) + + p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) + p2p1 = self.nodes[1].add_p2p_connection(BaseNode()) + p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) + + network_thread_start() + + p2p0.wait_for_verack() + p2p1.wait_for_verack() + p2p2.wait_for_verack() + + # send header lists to all three nodes + p2p0.send_header_for_blocks(self.blocks[0:2000]) + p2p0.send_header_for_blocks(self.blocks[2000:]) + p2p1.send_header_for_blocks(self.blocks[0:2000]) + p2p1.send_header_for_blocks(self.blocks[2000:]) + p2p2.send_header_for_blocks(self.blocks[0:200]) + + # Send blocks to node0. Block 102 will be rejected. + self.send_blocks_until_disconnected(p2p0) + self.assert_blockchain_height(self.nodes[0], 101) + + # Send all blocks to node1. All blocks will be accepted. + for i in range(2202): + p2p1.send_message(msg_block(self.blocks[i])) + # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. + p2p1.sync_with_ping(120) + assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) + + # Send blocks to node2. Block 102 will be rejected. + self.send_blocks_until_disconnected(p2p2) + self.assert_blockchain_height(self.nodes[2], 101) + +if __name__ == '__main__': + AssumeValidTest().main() diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py new file mode 100755 index 0000000000..94b13653b9 --- /dev/null +++ b/test/functional/feature_bip68_sequence.py @@ -0,0 +1,395 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test BIP68 implementation.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.blocktools import * + +SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31) +SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height) +SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift +SEQUENCE_LOCKTIME_MASK = 0x0000ffff + +# RPC error for non-BIP68 final transactions +NOT_FINAL_ERROR = "64: non-BIP68-final" + +class BIP68Test(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.extra_args = [[], ["-acceptnonstdtxn=0"]] + + def run_test(self): + self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] + + # Generate some coins + self.nodes[0].generate(110) + + self.log.info("Running test disable flag") + self.test_disable_flag() + + self.log.info("Running test sequence-lock-confirmed-inputs") + self.test_sequence_lock_confirmed_inputs() + + self.log.info("Running test sequence-lock-unconfirmed-inputs") + self.test_sequence_lock_unconfirmed_inputs() + + self.log.info("Running test BIP68 not consensus before versionbits activation") + self.test_bip68_not_consensus() + + self.log.info("Activating BIP68 (and 112/113)") + self.activateCSV() + + self.log.info("Verifying nVersion=2 transactions are standard.") + self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).") + self.test_version2_relay() + + self.log.info("Passed") + + # Test that BIP68 is not in effect if tx version is 1, or if + # the first sequence bit is set. + def test_disable_flag(self): + # Create some unconfirmed inputs + new_addr = self.nodes[0].getnewaddress() + self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC + + utxos = self.nodes[0].listunspent(0, 0) + assert(len(utxos) > 0) + + utxo = utxos[0] + + tx1 = CTransaction() + value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN) + + # Check that the disable flag disables relative locktime. + # If sequence locks were used, this would require 1 block for the + # input to mature. + sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 + tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] + tx1.vout = [CTxOut(value, CScript([b'a']))] + + tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"] + tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) + tx1_id = int(tx1_id, 16) + + # This transaction will enable sequence-locks, so this transaction should + # fail + tx2 = CTransaction() + tx2.nVersion = 2 + sequence_value = sequence_value & 0x7fffffff + tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] + tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))] + tx2.rehash() + + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) + + # Setting the version back down to 1 should disable the sequence lock, + # so this should be accepted. + tx2.nVersion = 1 + + self.nodes[0].sendrawtransaction(ToHex(tx2)) + + # Calculate the median time past of a prior block ("confirmations" before + # the current tip). + def get_median_time_past(self, confirmations): + block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations) + return self.nodes[0].getblockheader(block_hash)["mediantime"] + + # Test that sequence locks are respected for transactions spending confirmed inputs. 
+    def test_sequence_lock_confirmed_inputs(self):
+        # Create lots of confirmed utxos, and use them to generate lots of random
+        # transactions.
+        import random
+        max_outputs = 50
+        addresses = []
+        while len(addresses) < max_outputs:
+            addresses.append(self.nodes[0].getnewaddress())
+        while len(self.nodes[0].listunspent()) < 200:
+            random.shuffle(addresses)
+            num_outputs = random.randint(1, max_outputs)
+            outputs = {}
+            for i in range(num_outputs):
+                outputs[addresses[i]] = random.randint(1, 20)*0.01
+            self.nodes[0].sendmany("", outputs)
+            self.nodes[0].generate(1)
+
+        utxos = self.nodes[0].listunspent()
+
+        # Try creating a lot of random transactions.
+        # Each time, choose a random number of inputs, and randomly set
+        # some of those inputs to be sequence locked (and randomly choose
+        # between height/time locking). Small random chance of making the locks
+        # all pass.
+        for i in range(400):
+            # Randomly choose up to 10 inputs
+            num_inputs = random.randint(1, 10)
+            random.shuffle(utxos)
+
+            # Track whether any sequence locks used should fail
+            should_pass = True
+
+            # Track whether this transaction was built with sequence locks
+            using_sequence_locks = False
+
+            tx = CTransaction()
+            tx.nVersion = 2
+            value = 0
+            for j in range(num_inputs):
+                sequence_value = 0xfffffffe # this disables sequence locks
+
+                # 50% chance we enable sequence locks
+                if random.randint(0,1):
+                    using_sequence_locks = True
+
+                    # 10% of the time, make the input sequence value pass
+                    input_will_pass = (random.randint(1,10) == 1)
+                    sequence_value = utxos[j]["confirmations"]
+                    if not input_will_pass:
+                        sequence_value += 1
+                        should_pass = False
+
+                    # Figure out what the median-time-past was for the confirmed input
+                    # Note that if an input has N confirmations, we're going back N blocks
+                    # from the tip so that we're looking up MTP of the block
+                    # PRIOR to the one the input appears in, as per the BIP68 spec.
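+                    # For example, an input with 3 confirmations sits in block
+                    # (tip - 2), so the governing MTP is that of block
+                    # (tip - 3), i.e. "confirmations" blocks back from the tip.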
+ orig_time = self.get_median_time_past(utxos[j]["confirmations"]) + cur_time = self.get_median_time_past(0) # MTP of the tip + + # can only timelock this input if it's not too old -- otherwise use height + can_time_lock = True + if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: + can_time_lock = False + + # if time-lockable, then 50% chance we make this a time lock + if random.randint(0,1) and can_time_lock: + # Find first time-lock value that fails, or latest one that succeeds + time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY + if input_will_pass and time_delta > cur_time - orig_time: + sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + elif (not input_will_pass and time_delta <= cur_time - orig_time): + sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1 + sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG + tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) + value += utxos[j]["amount"]*COIN + # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output + tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50 + tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a']))) + rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"] + + if (using_sequence_locks and not should_pass): + # This transaction should be rejected + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) + else: + # This raw transaction should be accepted + self.nodes[0].sendrawtransaction(rawtx) + utxos = self.nodes[0].listunspent() + + # Test that sequence locks on unconfirmed inputs must have nSequence + # height or time of 0 to be accepted. + # Then test that BIP68-invalid transactions are removed from the mempool + # after a reorg. + def test_sequence_lock_unconfirmed_inputs(self): + # Store height so we can easily reset the chain at the end of the test + cur_height = self.nodes[0].getblockcount() + + # Create a mempool tx. + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) + tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) + tx1.rehash() + + # Anyone-can-spend mempool tx. + # Sequence lock of 0 should pass. + tx2 = CTransaction() + tx2.nVersion = 2 + tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] + tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] + tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] + tx2 = FromHex(tx2, tx2_raw) + tx2.rehash() + + self.nodes[0].sendrawtransaction(tx2_raw) + + # Create a spend of the 0th output of orig_tx with a sequence lock + # of 1, and test what happens when submitting. 
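+        # While orig_tx is unconfirmed it has 0 confirmations, so a relative
+        # lock of even 1 (height-based or one 512s time unit) cannot yet be
+        # satisfied and mempool acceptance must fail.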
+ # orig_tx.vout[0] must be an anyone-can-spend output + def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): + sequence_value = 1 + if not use_height_lock: + sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG + + tx = CTransaction() + tx.nVersion = 2 + tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] + tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))] + tx.rehash() + + if (orig_tx.hash in node.getrawmempool()): + # sendrawtransaction should fail if the tx is in the mempool + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) + else: + # sendrawtransaction should succeed if the tx is not in the mempool + node.sendrawtransaction(ToHex(tx)) + + return tx + + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) + + # Now mine some blocks, but make sure tx2 doesn't get mined. + # Use prioritisetransaction to lower the effective feerate to 0 + self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN)) + cur_time = int(time.time()) + for i in range(10): + self.nodes[0].setmocktime(cur_time + 600) + self.nodes[0].generate(1) + cur_time += 600 + + assert(tx2.hash in self.nodes[0].getrawmempool()) + + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) + test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) + + # Mine tx2, and then try again + self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN)) + + # Advance the time on the node so that we can test timelocks + self.nodes[0].setmocktime(cur_time+600) + self.nodes[0].generate(1) + assert(tx2.hash not in self.nodes[0].getrawmempool()) + + # Now that tx2 is not in the mempool, a sequence locked spend should + # succeed + tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) + assert(tx3.hash in self.nodes[0].getrawmempool()) + + self.nodes[0].generate(1) + assert(tx3.hash not in self.nodes[0].getrawmempool()) + + # One more test, this time using height locks + tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True) + assert(tx4.hash in self.nodes[0].getrawmempool()) + + # Now try combining confirmed and unconfirmed inputs + tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True) + assert(tx5.hash not in self.nodes[0].getrawmempool()) + + utxos = self.nodes[0].listunspent() + tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) + tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN) + raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"] + + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) + + # Test mempool-BIP68 consistency after reorg + # + # State of the transactions in the last blocks: + # ... -> [ tx2 ] -> [ tx3 ] + # tip-1 tip + # And currently tx4 is in the mempool. + # + # If we invalidate the tip, tx3 should get added to the mempool, causing + # tx4 to be removed (fails sequence-lock). + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + assert(tx4.hash not in self.nodes[0].getrawmempool()) + assert(tx3.hash in self.nodes[0].getrawmempool()) + + # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in + # diagram above). + # This would cause tx2 to be added back to the mempool, which in turn causes + # tx3 to be removed. 
+ tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16) + height = self.nodes[0].getblockcount() + for i in range(2): + block = create_block(tip, create_coinbase(height), cur_time) + block.nVersion = 3 + block.rehash() + block.solve() + tip = block.sha256 + height += 1 + self.nodes[0].submitblock(ToHex(block)) + cur_time += 1 + + mempool = self.nodes[0].getrawmempool() + assert(tx3.hash not in mempool) + assert(tx2.hash in mempool) + + # Reset the chain and get rid of the mocktimed-blocks + self.nodes[0].setmocktime(0) + self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1)) + self.nodes[0].generate(10) + + # Make sure that BIP68 isn't being used to validate blocks, prior to + # versionbits activation. If more blocks are mined prior to this test + # being run, then it's possible the test has activated the soft fork, and + # this test should be moved to run earlier, or deleted. + def test_bip68_not_consensus(self): + assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active') + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) + + tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) + tx1.rehash() + + # Make an anyone-can-spend transaction + tx2 = CTransaction() + tx2.nVersion = 1 + tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] + tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] + + # sign tx2 + tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] + tx2 = FromHex(tx2, tx2_raw) + tx2.rehash() + + self.nodes[0].sendrawtransaction(ToHex(tx2)) + + # Now make an invalid spend of tx2 according to BIP68 + sequence_value = 100 # 100 block relative locktime + + tx3 = CTransaction() + tx3.nVersion = 2 + tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] + tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] + tx3.rehash() + + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) + + # make a block that violates bip68; ensure that the tip updates + tip = int(self.nodes[0].getbestblockhash(), 16) + block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1)) + block.nVersion = 3 + block.vtx.extend([tx1, tx2, tx3]) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + add_witness_commitment(block) + block.solve() + + self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + def activateCSV(self): + # activation should happen at block height 432 (3 periods) + # getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block. + min_activation_height = 432 + height = self.nodes[0].getblockcount() + assert_greater_than(min_activation_height - height, 2) + self.nodes[0].generate(min_activation_height - height - 2) + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in") + self.nodes[0].generate(1) + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active") + sync_blocks(self.nodes) + + # Use self.nodes[1] to test that version 2 transactions are standard. 
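+    # node1 runs with -acceptnonstdtxn=0 (see extra_args in set_test_params),
+    # so a successful sendrawtransaction on node1 demonstrates standardness.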
+    def test_version2_relay(self):
+        inputs = [ ]
+        outputs = { self.nodes[1].getnewaddress() : 1.0 }
+        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
+        rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
+        tx = FromHex(CTransaction(), rawtxfund)
+        tx.nVersion = 2
+        tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
+        self.nodes[1].sendrawtransaction(tx_signed)
+
+if __name__ == '__main__':
+    BIP68Test().main()
diff --git a/test/functional/feature_bip9_softforks.py b/test/functional/feature_bip9_softforks.py
new file mode 100755
index 0000000000..ae92e9f07c
--- /dev/null
+++ b/test/functional/feature_bip9_softforks.py
@@ -0,0 +1,283 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test BIP 9 soft forks.
+
+Connect to a single node.
+regtest lock-in with 108/144 block signalling
+activation after a further 144 blocks
+mine 2 blocks and save coinbases for later use
+mine 141 blocks to transition from DEFINED to STARTED
+mine 100 blocks signalling readiness and 44 not, in order to fail to change state this period
+mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
+mine a further 143 blocks (LOCKED_IN)
+test that enforcement has not triggered (which triggers ACTIVE)
+test that enforcement has triggered
+"""
+from io import BytesIO
+import shutil
+import time
+import itertools
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, network_thread_start
+from test_framework.blocktools import create_coinbase, create_block
+from test_framework.comptool import TestInstance, TestManager
+from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP

+class BIP9SoftForksTest(ComparisonTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.extra_args = [['-whitelist=127.0.0.1']]
+        self.setup_clean_chain = True
+
+    def run_test(self):
+        self.test = TestManager(self, self.options.tmpdir)
+        self.test.add_all_connections(self.nodes)
+        network_thread_start()
+        self.test.run()
+
+    def create_transaction(self, node, coinbase, to_address, amount):
+        from_txid = node.getblock(coinbase)['tx'][0]
+        inputs = [{ "txid" : from_txid, "vout" : 0}]
+        outputs = { to_address : amount }
+        rawtx = node.createrawtransaction(inputs, outputs)
+        tx = CTransaction()
+        f = BytesIO(hex_str_to_bytes(rawtx))
+        tx.deserialize(f)
+        tx.nVersion = 2
+        return tx
+
+    def sign_transaction(self, node, tx):
+        signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
+        tx = CTransaction()
+        f = BytesIO(hex_str_to_bytes(signresult['hex']))
+        tx.deserialize(f)
+        return tx
+
+    def generate_blocks(self, number, version, test_blocks = []):
+        for i in range(number):
+            block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+            block.nVersion = version
+            block.rehash()
+            block.solve()
+            test_blocks.append([block, True])
+            self.last_block_time += 1
+            self.tip = block.sha256
+            self.height += 1
+        return test_blocks
+
+    def get_bip9_status(self, key):
+        info = self.nodes[0].getblockchaininfo()
+        return info['bip9_softforks'][key]
+
+    def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
+        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0) + + # generate some coins for later + self.coinbase_blocks = self.nodes[0].generate(2) + self.height = 3 # height of the next block to build + self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) + self.nodeaddress = self.nodes[0].getnewaddress() + self.last_block_time = int(time.time()) + + assert_equal(self.get_bip9_status(bipName)['status'], 'defined') + assert_equal(self.get_bip9_status(bipName)['since'], 0) + tmpl = self.nodes[0].getblocktemplate({}) + assert(bipName not in tmpl['rules']) + assert(bipName not in tmpl['vbavailable']) + assert_equal(tmpl['vbrequired'], 0) + assert_equal(tmpl['version'], 0x20000000) + + # Test 1 + # Advance from DEFINED to STARTED + test_blocks = self.generate_blocks(141, 4) + yield TestInstance(test_blocks, sync_every_block=False) + + assert_equal(self.get_bip9_status(bipName)['status'], 'started') + assert_equal(self.get_bip9_status(bipName)['since'], 144) + assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) + assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) + tmpl = self.nodes[0].getblocktemplate({}) + assert(bipName not in tmpl['rules']) + assert_equal(tmpl['vbavailable'][bipName], bitno) + assert_equal(tmpl['vbrequired'], 0) + assert(tmpl['version'] & activated_version) + + # Test 1-A + # check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period + test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready) + yield TestInstance(test_blocks, sync_every_block=False) + + assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46) + assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) + assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) + + # Test 1-B + # check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period + test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not) + yield TestInstance(test_blocks, sync_every_block=False) + + assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47) + assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) + assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False) + + # Test 1-C + # finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN + test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready) + yield TestInstance(test_blocks, sync_every_block=False) + + assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) + assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) + assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) + assert_equal(self.get_bip9_status(bipName)['status'], 'started') + + # Test 2 + # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1 + # using a variety of bits to simulate multiple parallel softforks + test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not) + yield TestInstance(test_blocks, sync_every_block=False) + + 
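For reference, the window arithmetic behind Tests 1-A through 3 can be checked in isolation: on regtest a BIP9 deployment is tallied over 144-block periods and needs 108 signalling blocks in one period to lock in. A minimal standalone sketch (constants restated; illustrative, not part of the patch):

    # Illustrative only: regtest BIP9 period arithmetic.
    WINDOW = 144       # blocks per period on regtest
    THRESHOLD = 108    # signalling blocks required for STARTED -> LOCKED_IN

    def signals(version, bit):
        # A block signals when the top three version bits are 001 (BIP9)
        # and the deployment's bit is set.
        return (version & 0xE0000000) == 0x20000000 and bool(version & (1 << bit))

    def locks_in(window_versions, bit):
        assert len(window_versions) == WINDOW
        return sum(signals(v, bit) for v in window_versions) >= THRESHOLD

    # Test 2 above: 100 signalling plus 44 non-signalling blocks fall short.
    assert not locks_in([0x20000001] * 100 + [0x00000004] * 44, 0)
    # Test 3 below reaches exactly 108 and locks in.
    assert locks_in([0x20000001] * 108 + [0x00000004] * 36, 0)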
assert_equal(self.get_bip9_status(bipName)['status'], 'started') + assert_equal(self.get_bip9_status(bipName)['since'], 144) + assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) + assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) + tmpl = self.nodes[0].getblocktemplate({}) + assert(bipName not in tmpl['rules']) + assert_equal(tmpl['vbavailable'][bipName], bitno) + assert_equal(tmpl['vbrequired'], 0) + assert(tmpl['version'] & activated_version) + + # Test 3 + # 108 out of 144 signal bit 1 to achieve LOCKED_IN + # using a variety of bits to simulate multiple parallel softforks + test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not) + yield TestInstance(test_blocks, sync_every_block=False) + + # check counting stats and "possible" flag before last block of this period achieves LOCKED_IN... + assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143) + assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107) + assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) + assert_equal(self.get_bip9_status(bipName)['status'], 'started') + + # ...continue with Test 3 + test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready) + yield TestInstance(test_blocks, sync_every_block=False) + + assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') + assert_equal(self.get_bip9_status(bipName)['since'], 576) + tmpl = self.nodes[0].getblocktemplate({}) + assert(bipName not in tmpl['rules']) + + # Test 4 + # 143 more version 536870913 blocks (waiting period-1) + test_blocks = self.generate_blocks(143, 4) + yield TestInstance(test_blocks, sync_every_block=False) + + assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') + assert_equal(self.get_bip9_status(bipName)['since'], 576) + tmpl = self.nodes[0].getblocktemplate({}) + assert(bipName not in tmpl['rules']) + + # Test 5 + # Check that the new rule is enforced + spendtx = self.create_transaction(self.nodes[0], + self.coinbase_blocks[0], self.nodeaddress, 1.0) + invalidate(spendtx) + spendtx = self.sign_transaction(self.nodes[0], spendtx) + spendtx.rehash() + invalidatePostSignature(spendtx) + spendtx.rehash() + block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) + block.nVersion = activated_version + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + block.solve() + + self.last_block_time += 1 + self.tip = block.sha256 + self.height += 1 + yield TestInstance([[block, True]]) + + assert_equal(self.get_bip9_status(bipName)['status'], 'active') + assert_equal(self.get_bip9_status(bipName)['since'], 720) + tmpl = self.nodes[0].getblocktemplate({}) + assert(bipName in tmpl['rules']) + assert(bipName not in tmpl['vbavailable']) + assert_equal(tmpl['vbrequired'], 0) + assert(not (tmpl['version'] & (1 << bitno))) + + # Test 6 + # Check that the new sequence lock rules are enforced + spendtx = self.create_transaction(self.nodes[0], + self.coinbase_blocks[1], self.nodeaddress, 1.0) + invalidate(spendtx) + spendtx = self.sign_transaction(self.nodes[0], spendtx) + spendtx.rehash() + invalidatePostSignature(spendtx) + spendtx.rehash() + + 
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+        block.nVersion = 5
+        block.vtx.append(spendtx)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.rehash()
+        block.solve()
+        self.last_block_time += 1
+        yield TestInstance([[block, False]])
+
+        # Restart all
+        self.test.clear_all_connections()
+        self.stop_nodes()
+        self.nodes = []
+        shutil.rmtree(self.options.tmpdir + "/node0")
+        self.setup_chain()
+        self.setup_network()
+        self.test.add_all_connections(self.nodes)
+        network_thread_start()
+        self.test.p2p_connections[0].wait_for_verack()
+
+    def get_tests(self):
+        for test in itertools.chain(
+                self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
+                self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
+                self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
+        ):
+            yield test
+
+    def donothing(self, tx):
+        return
+
+    def csv_invalidate(self, tx):
+        """Modify the signature in vin 0 of the tx to fail CSV.
+        Prepends -1 CSV DROP in the scriptSig itself.
+        """
+        tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
+                                      list(CScript(tx.vin[0].scriptSig)))
+
+    def sequence_lock_invalidate(self, tx):
+        """Modify the nSequence to make it fail once the sequence lock rule is
+        activated (high timespan).
+        """
+        tx.vin[0].nSequence = 0x00FFFFFF
+        tx.nLockTime = 0
+
+    def mtp_invalidate(self, tx):
+        """Modify the nLockTime to make it fail once the MTP rule is activated."""
+        # Disable Sequence lock, Activate nLockTime
+        tx.vin[0].nSequence = 0x90FFFFFF
+        tx.nLockTime = self.last_block_time
+
+if __name__ == '__main__':
+    BIP9SoftForksTest().main()
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
new file mode 100755
index 0000000000..fe9bbda14b
--- /dev/null
+++ b/test/functional/feature_block.py
@@ -0,0 +1,1293 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test block processing.
+
+This reimplements tests from the bitcoinj/FullBlockTestGenerator used
+by the pull-tester.
+
+We use the testing framework in which we expect a particular answer from
+each test.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.comptool import TestManager, TestInstance, RejectResult
+from test_framework.blocktools import *
+import time
+from test_framework.key import CECKey
+from test_framework.script import *
+from test_framework.mininode import network_thread_start
+import struct
+
+class PreviousSpendableOutput():
+    def __init__(self, tx = CTransaction(), n = -1):
+        self.tx = tx
+        self.n = n  # the output we're spending
+
+# Use this class for tests that require behavior other than normal "mininode" behavior.
+# For now, it is used to serialize a bloated varint (b64).
+class CBrokenBlock(CBlock):
+    def __init__(self, header=None):
+        super(CBrokenBlock, self).__init__(header)
+
+    def initialize(self, base_block):
+        self.vtx = copy.deepcopy(base_block.vtx)
+        self.hashMerkleRoot = self.calc_merkle_root()
+
+    def serialize(self, with_witness=False):
+        r = b""
+        r += super(CBlock, self).serialize()
+        r += struct.pack("<BQ", 255, len(self.vtx))
+        for tx in self.vtx:
+            r += tx.serialize()
+        return r
+
+    def normal_serialize(self):
+        r = b""
+        r += super(CBrokenBlock, self).serialize()
+        return r
+
+        #     genesis -> b1 (0) -> b2 (1)
+        block(1, spend=out[0])
+        save_spendable_output()
+        yield accepted()
+
+        block(2, spend=out[1])
+        yield accepted()
+        save_spendable_output()
+
+        # so fork like this:
+        #
+        #     genesis -> b1 (0) -> b2 (1)
+        #                      \-> b3 (1)
+        #
+        # Nothing should happen at this point. We saw b2 first so it takes priority.
+        tip(1)
+        b3 = block(3, spend=out[1])
+        txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
+        yield rejected()
+
+        # Now we add another block to make the alternative chain longer.
+        #
+        #     genesis -> b1 (0) -> b2 (1)
+        #                      \-> b3 (1) -> b4 (2)
+        block(4, spend=out[2])
+        yield accepted()
+
+        # ... and back to the first chain.
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(2)
+        block(5, spend=out[2])
+        save_spendable_output()
+        yield rejected()
+
+        block(6, spend=out[3])
+        yield accepted()
+
+        # Try to create a fork that double-spends
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b7 (2) -> b8 (4)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(5)
+        block(7, spend=out[2])
+        yield rejected()
+
+        block(8, spend=out[4])
+        yield rejected()
+
+        # Try to create a block that has too much fee
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                                  \-> b9 (4)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(6)
+        block(9, spend=out[4], additional_coinbase_value=1)
+        yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+        # Create a fork that ends in a block with too much fee (the one that causes the reorg)
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b10 (3) -> b11 (4)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(5)
+        block(10, spend=out[3])
+        yield rejected()
+
+        block(11, spend=out[4], additional_coinbase_value=1)
+        yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+        # Try again, but with a valid fork first
+        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+        #                                          \-> b12 (3) -> b13 (4) -> b14 (5)
+        #                                              (b12 added last)
+        #                      \-> b3 (1) -> b4 (2)
+        tip(5)
+        b12 = block(12, spend=out[3])
+        save_spendable_output()
+        b13 = block(13, spend=out[4])
+        # Deliver the block header for b12, and the block b13.
+        # b13 should be accepted but the tip won't advance until b12 is delivered.
+        yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
+
+        save_spendable_output()
+        # b14 is invalid, but the node won't know that until it tries to connect
+        # Tip still can't advance because b12 is missing
+        block(14, spend=out[5], additional_coinbase_value=1)
+        yield rejected()
+
+        yield TestInstance([[b12, True, b13.sha256]])  # New tip should be b13.
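A note on the comptool idiom this file leans on throughout: each TestInstance entry is a pair, optionally a triple, whose meaning is summarized below. This reading is inferred from the calls in this patch and is illustrative rather than framework documentation:

    # Illustrative reading of the TestInstance entries used above:
    #   [block_or_header, True]          expect the node's tip to advance to this block
    #   [block_or_header, False]         expect rejection; the tip must not change
    #   [block_or_header, None]          deliver it, but assert nothing about the tip
    #   [block, True, expected_sha256]   expect the tip to end up at expected_sha256,
    #                                    e.g. delivering b12 reorgs the tip to b13 above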
+ + # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop + # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) + # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) + # \-> b3 (1) -> b4 (2) + + # Test that a block with a lot of checksigs is okay + lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1)) + tip(13) + block(15, spend=out[5], script=lots_of_checksigs) + yield accepted() + save_spendable_output() + + + # Test that a block with too many checksigs is rejected + too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) + block(16, spend=out[6], script=too_many_checksigs) + yield rejected(RejectResult(16, b'bad-blk-sigops')) + + + # Attempt to spend a transaction created on a different fork + # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) + # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) + # \-> b3 (1) -> b4 (2) + tip(15) + block(17, spend=txout_b3) + yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + + # Attempt to spend a transaction created on a different fork (on a fork this time) + # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) + # \-> b12 (3) -> b13 (4) -> b15 (5) + # \-> b18 (b3.vtx[1]) -> b19 (6) + # \-> b3 (1) -> b4 (2) + tip(13) + block(18, spend=txout_b3) + yield rejected() + + block(19, spend=out[6]) + yield rejected() + + # Attempt to spend a coinbase at depth too low + # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) + # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) + # \-> b3 (1) -> b4 (2) + tip(15) + block(20, spend=out[7]) + yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) + + # Attempt to spend a coinbase at depth too low (on a fork this time) + # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) + # \-> b12 (3) -> b13 (4) -> b15 (5) + # \-> b21 (6) -> b22 (5) + # \-> b3 (1) -> b4 (2) + tip(13) + block(21, spend=out[6]) + yield rejected() + + block(22, spend=out[5]) + yield rejected() + + # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected + # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) + # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) + # \-> b24 (6) -> b25 (7) + # \-> b3 (1) -> b4 (2) + tip(15) + b23 = block(23, spend=out[6]) + tx = CTransaction() + script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69 + script_output = CScript([b'\x00' * script_length]) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) + b23 = update_block(23, [tx]) + # Make sure the math above worked out to produce a max-sized block + assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE) + yield accepted() + save_spendable_output() + + # Make the next block one byte bigger and check that it fails + tip(15) + b24 = block(24, spend=out[6]) + script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69 + script_output = CScript([b'\x00' * (script_length+1)]) + tx.vout = [CTxOut(0, script_output)] + b24 = update_block(24, [tx]) + assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1) + yield rejected(RejectResult(16, b'bad-blk-length')) + + block(25, spend=out[7]) + yield rejected() + + # Create blocks with a coinbase input script size out of range + # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) + # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) + # \-> ... (6) -> ... 
(7) + # \-> b3 (1) -> b4 (2) + tip(15) + b26 = block(26, spend=out[6]) + b26.vtx[0].vin[0].scriptSig = b'\x00' + b26.vtx[0].rehash() + # update_block causes the merkle root to get updated, even with no new + # transactions, and updates the required state. + b26 = update_block(26, []) + yield rejected(RejectResult(16, b'bad-cb-length')) + + # Extend the b26 chain to make sure bitcoind isn't accepting b26 + block(27, spend=out[7]) + yield rejected(False) + + # Now try a too-large-coinbase script + tip(15) + b28 = block(28, spend=out[6]) + b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 + b28.vtx[0].rehash() + b28 = update_block(28, []) + yield rejected(RejectResult(16, b'bad-cb-length')) + + # Extend the b28 chain to make sure bitcoind isn't accepting b28 + block(29, spend=out[7]) + yield rejected(False) + + # b30 has a max-sized coinbase scriptSig. + tip(23) + b30 = block(30) + b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 + b30.vtx[0].rehash() + b30 = update_block(30, []) + yield accepted() + save_spendable_output() + + # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY + # + # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) + # \-> b36 (11) + # \-> b34 (10) + # \-> b32 (9) + # + + # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end. + lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) + b31 = block(31, spend=out[8], script=lots_of_multisigs) + assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS) + yield accepted() + save_spendable_output() + + # this goes over the limit because the coinbase has one sigop + too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20)) + b32 = block(32, spend=out[9], script=too_many_multisigs) + assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1) + yield rejected(RejectResult(16, b'bad-blk-sigops')) + + + # CHECKMULTISIGVERIFY + tip(31) + lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) + block(33, spend=out[9], script=lots_of_multisigs) + yield accepted() + save_spendable_output() + + too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20)) + block(34, spend=out[10], script=too_many_multisigs) + yield rejected(RejectResult(16, b'bad-blk-sigops')) + + + # CHECKSIGVERIFY + tip(33) + lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1)) + b35 = block(35, spend=out[10], script=lots_of_checksigs) + yield accepted() + save_spendable_output() + + too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS)) + block(36, spend=out[11], script=too_many_checksigs) + yield rejected(RejectResult(16, b'bad-blk-sigops')) + + + # Check spending of a transaction in a block which failed to connect + # + # b6 (3) + # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) + # \-> b37 (11) + # \-> b38 (11/37) + # + + # save 37's spendable output, but then double-spend out11 to invalidate the block + tip(35) + b37 = block(37, spend=out[11]) + txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) + tx = create_and_sign_tx(out[11].tx, out[11].n, 0) + b37 = update_block(37, [tx]) + yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + + # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid + tip(35) + block(38, spend=txout_b37) + yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + + 
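The b31-b36 boundary cases rest on the legacy counting rule: a bare OP_CHECKSIG(VERIFY) costs one sigop, a bare OP_CHECKMULTISIG(VERIFY) always costs twenty, and (as the b32 comment notes) the coinbase contributes one more. A small self-contained check of that arithmetic (constant restated; illustrative, not part of the patch):

    # Illustrative only: legacy sigop arithmetic behind b31/b32.
    MAX_BLOCK_SIGOPS = 20000
    COINBASE_SIGOPS = 1   # the coinbase output script used here adds one sigop

    def legacy_sigops(checksigs, checkmultisigs):
        return checksigs + 20 * checkmultisigs

    # b31: 999 CHECKMULTISIGs + 19 CHECKSIGs + coinbase lands exactly on the cap
    assert legacy_sigops(19, (MAX_BLOCK_SIGOPS - 1) // 20) + COINBASE_SIGOPS == MAX_BLOCK_SIGOPS
    # b32: 1000 CHECKMULTISIGs + coinbase is one over the cap
    assert legacy_sigops(0, MAX_BLOCK_SIGOPS // 20) + COINBASE_SIGOPS == MAX_BLOCK_SIGOPS + 1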
# Check P2SH SigOp counting + # + # + # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12) + # \-> b40 (12) + # + # b39 - create some P2SH outputs that will require 6 sigops to spend: + # + # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG + # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL + # + tip(35) + b39 = block(39) + b39_outputs = 0 + b39_sigops_per_output = 6 + + # Build the redeem script, hash it, use hash to create the p2sh script + redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG]) + redeem_script_hash = hash160(redeem_script) + p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) + + # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE + # This must be signed because it is spending a coinbase + spend = out[11] + tx = create_tx(spend.tx, spend.n, 1, p2sh_script) + tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) + self.sign_tx(tx, spend.tx, spend.n) + tx.rehash() + b39 = update_block(39, [tx]) + b39_outputs += 1 + + # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE + tx_new = None + tx_last = tx + total_size=len(b39.serialize()) + while(total_size < MAX_BLOCK_BASE_SIZE): + tx_new = create_tx(tx_last, 1, 1, p2sh_script) + tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) + tx_new.rehash() + total_size += len(tx_new.serialize()) + if total_size >= MAX_BLOCK_BASE_SIZE: + break + b39.vtx.append(tx_new) # add tx to block + tx_last = tx_new + b39_outputs += 1 + + b39 = update_block(39, []) + yield accepted() + save_spendable_output() + + + # Test sigops in P2SH redeem scripts + # + # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops. + # The first tx has one sigop and then at the end we add 2 more to put us just over the max. + # + # b41 does the same, less one, so it has the maximum sigops permitted. 
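The b39-b41 figures quoted above (3333 transactions, 19998 sigops) follow from each P2SH spend carrying the six sigops of its redeem script, five CHECKSIGVERIFYs plus the final CHECKSIG. A sketch of that arithmetic, where the block's two pre-existing sigops are an assumption for illustration:

    # Illustrative only: mirrors the b40 arithmetic described above.
    MAX_BLOCK_SIGOPS = 20000
    B39_SIGOPS_PER_OUTPUT = 6   # (OP_2DUP + OP_CHECKSIGVERIFY) * 5, then OP_CHECKSIG

    def p2sh_spends_that_fit(base_sigops):
        # base_sigops: sigops already in the block (coinbase etc.); assumed to be 2 here
        return (MAX_BLOCK_SIGOPS - base_sigops) // B39_SIGOPS_PER_OUTPUT

    # 3333 six-sigop spends fit, for 19998 sigops; b40 then adds enough CHECKSIGs
    # to go one over the cap, while b41 stops one short and is accepted.
    assert p2sh_spends_that_fit(2) == 3333
    assert 3333 * B39_SIGOPS_PER_OUTPUT == 19998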
+ # + tip(39) + b40 = block(40, spend=out[12]) + sigops = get_legacy_sigopcount_block(b40) + numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output + assert_equal(numTxes <= b39_outputs, True) + + lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) + new_txs = [] + for i in range(1, numTxes+1): + tx = CTransaction() + tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) + tx.vin.append(CTxIn(lastOutpoint, b'')) + # second input is corresponding P2SH output from b39 + tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) + # Note: must pass the redeem_script (not p2sh_script) to the signature hash function + (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL) + sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL])) + scriptSig = CScript([sig, redeem_script]) + + tx.vin[1].scriptSig = scriptSig + tx.rehash() + new_txs.append(tx) + lastOutpoint = COutPoint(tx.sha256, 0) + + b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1 + tx = CTransaction() + tx.vin.append(CTxIn(lastOutpoint, b'')) + tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) + tx.rehash() + new_txs.append(tx) + update_block(40, new_txs) + yield rejected(RejectResult(16, b'bad-blk-sigops')) + + # same as b40, but one less sigop + tip(39) + block(41, spend=None) + update_block(41, b40.vtx[1:-1]) + b41_sigops_to_fill = b40_sigops_to_fill - 1 + tx = CTransaction() + tx.vin.append(CTxIn(lastOutpoint, b'')) + tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) + tx.rehash() + update_block(41, [tx]) + yield accepted() + + # Fork off of b39 to create a constant base again + # + # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) + # \-> b41 (12) + # + tip(39) + block(42, spend=out[12]) + yield rejected() + save_spendable_output() + + block(43, spend=out[13]) + yield accepted() + save_spendable_output() + + + # Test a number of really invalid scenarios + # + # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) + # \-> ??? (15) + + # The next few blocks are going to be created "by hand" since they'll do funky things, such as having + # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works. 
+ height = self.block_heights[self.tip.sha256] + 1 + coinbase = create_coinbase(height, self.coinbase_pubkey) + b44 = CBlock() + b44.nTime = self.tip.nTime + 1 + b44.hashPrevBlock = self.tip.sha256 + b44.nBits = 0x207fffff + b44.vtx.append(coinbase) + b44.hashMerkleRoot = b44.calc_merkle_root() + b44.solve() + self.tip = b44 + self.block_heights[b44.sha256] = height + self.blocks[44] = b44 + yield accepted() + + # A block with a non-coinbase as the first tx + non_coinbase = create_tx(out[15].tx, out[15].n, 1) + b45 = CBlock() + b45.nTime = self.tip.nTime + 1 + b45.hashPrevBlock = self.tip.sha256 + b45.nBits = 0x207fffff + b45.vtx.append(non_coinbase) + b45.hashMerkleRoot = b45.calc_merkle_root() + b45.calc_sha256() + b45.solve() + self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1 + self.tip = b45 + self.blocks[45] = b45 + yield rejected(RejectResult(16, b'bad-cb-missing')) + + # A block with no txns + tip(44) + b46 = CBlock() + b46.nTime = b44.nTime+1 + b46.hashPrevBlock = b44.sha256 + b46.nBits = 0x207fffff + b46.vtx = [] + b46.hashMerkleRoot = 0 + b46.solve() + self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1 + self.tip = b46 + assert 46 not in self.blocks + self.blocks[46] = b46 + s = ser_uint256(b46.hashMerkleRoot) + yield rejected(RejectResult(16, b'bad-blk-length')) + + # A block with invalid work + tip(44) + b47 = block(47, solve=False) + target = uint256_from_compact(b47.nBits) + while b47.sha256 < target: #changed > to < + b47.nNonce += 1 + b47.rehash() + yield rejected(RejectResult(16, b'high-hash')) + + # A block with timestamp > 2 hrs in the future + tip(44) + b48 = block(48, solve=False) + b48.nTime = int(time.time()) + 60 * 60 * 3 + b48.solve() + yield rejected(RejectResult(16, b'time-too-new')) + + # A block with an invalid merkle hash + tip(44) + b49 = block(49) + b49.hashMerkleRoot += 1 + b49.solve() + yield rejected(RejectResult(16, b'bad-txnmrklroot')) + + # A block with an incorrect POW limit + tip(44) + b50 = block(50) + b50.nBits = b50.nBits - 1 + b50.solve() + yield rejected(RejectResult(16, b'bad-diffbits')) + + # A block with two coinbase txns + tip(44) + b51 = block(51) + cb2 = create_coinbase(51, self.coinbase_pubkey) + b51 = update_block(51, [cb2]) + yield rejected(RejectResult(16, b'bad-cb-multiple')) + + # A block w/ duplicate txns + # Note: txns have to be in the right position in the merkle tree to trigger this error + tip(44) + b52 = block(52, spend=out[15]) + tx = create_tx(b52.vtx[1], 0, 1) + b52 = update_block(52, [tx, tx]) + yield rejected(RejectResult(16, b'bad-txns-duplicate')) + + # Test block timestamps + # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) + # \-> b54 (15) + # + tip(43) + block(53, spend=out[14]) + yield rejected() # rejected since b44 is at same height + save_spendable_output() + + # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast) + b54 = block(54, spend=out[15]) + b54.nTime = b35.nTime - 1 + b54.solve() + yield rejected(RejectResult(16, b'time-too-old')) + + # valid timestamp + tip(53) + b55 = block(55, spend=out[15]) + b55.nTime = b35.nTime + update_block(55, []) + yield accepted() + save_spendable_output() + + + # Test CVE-2012-2459 + # + # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) + # \-> b57 (16) + # \-> b56p2 (16) + # \-> b56 (16) + # + # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without + # affecting the merkle root of a block, while still invalidating 
it. + # See: src/consensus/merkle.h + # + # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. + # Result: OK + # + # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle + # root but duplicate transactions. + # Result: Fails + # + # b57p2 has six transactions in its merkle tree: + # - coinbase, tx, tx1, tx2, tx3, tx4 + # Merkle root calculation will duplicate as necessary. + # Result: OK. + # + # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches + # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates + # that the error was caught early, avoiding a DOS vulnerability.) + + # b57 - a good block with 2 txs, don't submit until end + tip(55) + b57 = block(57) + tx = create_and_sign_tx(out[16].tx, out[16].n, 1) + tx1 = create_tx(tx, 0, 1) + b57 = update_block(57, [tx, tx1]) + + # b56 - copy b57, add a duplicate tx + tip(55) + b56 = copy.deepcopy(b57) + self.blocks[56] = b56 + assert_equal(len(b56.vtx),3) + b56 = update_block(56, [tx1]) + assert_equal(b56.hash, b57.hash) + yield rejected(RejectResult(16, b'bad-txns-duplicate')) + + # b57p2 - a good block with 6 tx'es, don't submit until end + tip(55) + b57p2 = block("57p2") + tx = create_and_sign_tx(out[16].tx, out[16].n, 1) + tx1 = create_tx(tx, 0, 1) + tx2 = create_tx(tx1, 0, 1) + tx3 = create_tx(tx2, 0, 1) + tx4 = create_tx(tx3, 0, 1) + b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) + + # b56p2 - copy b57p2, duplicate two non-consecutive tx's + tip(55) + b56p2 = copy.deepcopy(b57p2) + self.blocks["b56p2"] = b56p2 + assert_equal(b56p2.hash, b57p2.hash) + assert_equal(len(b56p2.vtx),6) + b56p2 = update_block("b56p2", [tx3, tx4]) + yield rejected(RejectResult(16, b'bad-txns-duplicate')) + + tip("57p2") + yield accepted() + + tip(57) + yield rejected() #rejected because 57p2 seen first + save_spendable_output() + + # Test a few invalid tx types + # + # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) + # \-> ??? (17) + # + + # tx with prevout.n out of range + tip(57) + b58 = block(58, spend=out[17]) + tx = CTransaction() + assert(len(out[17].tx.vout) < 42) + tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) + tx.vout.append(CTxOut(0, b"")) + tx.calc_sha256() + b58 = update_block(58, [tx]) + yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) + + # tx with output value > input value out of range + tip(57) + b59 = block(59) + tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN) + b59 = update_block(59, [tx]) + yield rejected(RejectResult(16, b'bad-txns-in-belowout')) + + # reset to good chain + tip(57) + b60 = block(60, spend=out[17]) + yield accepted() + save_spendable_output() + + # Test BIP30 + # + # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) + # \-> b61 (18) + # + # Blocks are not allowed to contain a transaction whose id matches that of an earlier, + # not-fully-spent transaction in the same chain. To test, make identical coinbases; + # the second one should be rejected. 
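The collision BIP30 guards against is visible at the txid level: a txid is just the double SHA-256 of the serialized transaction, so byte-identical coinbases necessarily share an id. A standalone sketch (the byte strings are stand-ins, not real transactions):

    # Illustrative only: identical serializations imply identical txids.
    import hashlib

    def txid(raw_tx):
        # txid = double SHA-256 of the serialized tx (displayed byte-reversed)
        return hashlib.sha256(hashlib.sha256(raw_tx).digest()).digest()[::-1].hex()

    # Stand-in serializations: two coinbases differing only in scriptSig bytes...
    cb60 = b"prefix" + b"\x51" + b"suffix"
    cb61 = b"prefix" + b"\x52" + b"suffix"
    assert txid(cb60) != txid(cb61)
    # ...collide as soon as the scriptSig is equalized, which is exactly what b61 does.
    cb61 = b"prefix" + b"\x51" + b"suffix"
    assert txid(cb60) == txid(cb61)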
+ # + tip(60) + b61 = block(61, spend=out[18]) + b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases + b61.vtx[0].rehash() + b61 = update_block(61, []) + assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) + yield rejected(RejectResult(16, b'bad-txns-BIP30')) + + + # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) + # + # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) + # \-> b62 (18) + # + tip(60) + b62 = block(62) + tx = CTransaction() + tx.nLockTime = 0xffffffff #this locktime is non-final + assert(out[18].n < len(out[18].tx.vout)) + tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence + tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) + assert(tx.vin[0].nSequence < 0xffffffff) + tx.calc_sha256() + b62 = update_block(62, [tx]) + yield rejected(RejectResult(16, b'bad-txns-nonfinal')) + + + # Test a non-final coinbase is also rejected + # + # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) + # \-> b63 (-) + # + tip(60) + b63 = block(63) + b63.vtx[0].nLockTime = 0xffffffff + b63.vtx[0].vin[0].nSequence = 0xDEADBEEF + b63.vtx[0].rehash() + b63 = update_block(63, []) + yield rejected(RejectResult(16, b'bad-txns-nonfinal')) + + + # This checks that a block with a bloated VARINT between the block_header and the array of tx such that + # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint, + # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not + # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. + # + # What matters is that the receiving node should not reject the bloated block, and then reject the canonical + # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
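Concretely, the non-canonical encoding that CBrokenBlock produces spends nine bytes on a transaction count that canonically fits in one; the sketch below restates that arithmetic and nothing else (illustrative, not part of the patch), after which the test's chain diagram and code continue:

    # Illustrative only: canonical vs. bloated CompactSize for a small count.
    import struct

    def compact_size(n):
        assert n < 253               # small counts canonically serialize as one byte
        return struct.pack("<B", n)

    def bloated_size(n):
        return struct.pack("<BQ", 255, n)   # 0xFF prefix + 8-byte count, same value

    # The 8-byte difference is exactly the "+ 8" asserted against b64a's size below.
    assert len(bloated_size(5)) - len(compact_size(5)) == 8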
+        #
+        # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
+        #                                                                                    \
+        #                                                                                      b64a (18)
+        # b64a is a bloated block (non-canonical varint)
+        # b64 is a good block (same as b64a but w/ canonical varint)
+        #
+        tip(60)
+        regular_block = block("64a", spend=out[18])
+
+        # make it a "broken_block," with non-canonical serialization
+        b64a = CBrokenBlock(regular_block)
+        b64a.initialize(regular_block)
+        self.blocks["64a"] = b64a
+        self.tip = b64a
+        tx = CTransaction()
+
+        # use canonical serialization to calculate size
+        script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
+        script_output = CScript([b'\x00' * script_length])
+        tx.vout.append(CTxOut(0, script_output))
+        tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
+        b64a = update_block("64a", [tx])
+        assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
+        yield TestInstance([[self.tip, None]])
+
+        # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
+        self.test.block_store.erase(b64a.sha256)
+
+        tip(60)
+        b64 = CBlock(b64a)
+        b64.vtx = copy.deepcopy(b64a.vtx)
+        assert_equal(b64.hash, b64a.hash)
+        assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
+        self.blocks[64] = b64
+        update_block(64, [])
+        yield accepted()
+        save_spendable_output()
+
+        # Spend an output created in the block itself
+        #
+        # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+        #
+        tip(64)
+        block(65)
+        tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
+        tx2 = create_and_sign_tx(tx1, 0, 0)
+        update_block(65, [tx1, tx2])
+        yield accepted()
+        save_spendable_output()
+
+        # Attempt to spend an output created later in the same block
+        #
+        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+        #                                                                                  \-> b66 (20)
+        tip(65)
+        block(66)
+        tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+        tx2 = create_and_sign_tx(tx1, 0, 1)
+        update_block(66, [tx2, tx1])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # Attempt to double-spend a transaction created in a block
+        #
+        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+        #                                                                                  \-> b67 (20)
+        #
+        #
+        tip(65)
+        block(67)
+        tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+        tx2 = create_and_sign_tx(tx1, 0, 1)
+        tx3 = create_and_sign_tx(tx1, 0, 2)
+        update_block(67, [tx1, tx2, tx3])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+        # More tests of block subsidy
+        #
+        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+        #                                                                                    \-> b68 (20)
+        #
+        # b68 - coinbase with an extra 10 satoshis,
+        #       creates a tx that has 9 satoshis from out[20] go to fees
+        #       this fails because the coinbase is trying to claim 1 satoshi too much in fees
+        #
+        # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
+        #       this succeeds
+        #
+        tip(65)
+        block(68, additional_coinbase_value=10)
+        tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
+        update_block(68, [tx])
+        yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+        tip(65)
+        b69 = block(69, additional_coinbase_value=10)
+        tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
+        update_block(69, [tx])
+        yield accepted()
+        save_spendable_output()
+
+        # Test spending the outpoint of a non-existent transaction
+        #
+        # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+        #                                                                                  \-> b70 (21)
+        #
+        tip(69)
+        block(70, spend=out[21])
+        bogus_tx = CTransaction()
+        bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
+        tx.vout.append(CTxOut(1, b""))
+        update_block(70, [tx])
+        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+
+        # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
+        #
+        # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+        #                                                                                    \-> b71 (21)
+        #
+        # b72 is a good block.
+        # b71 is a copy of 72, but re-adds one of its transactions.  However, it has the same hash as b72.
+        #
+        tip(69)
+        b72 = block(72)
+        tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
+        tx2 = create_and_sign_tx(tx1, 0, 1)
+        b72 = update_block(72, [tx1, tx2])  # now tip is 72
+        b71 = copy.deepcopy(b72)
+        b71.vtx.append(tx2)  # add duplicate tx2
+        self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1  # b71 builds off b69
+        self.blocks[71] = b71
+
+        assert_equal(len(b71.vtx), 4)
+        assert_equal(len(b72.vtx), 3)
+        assert_equal(b72.sha256, b71.sha256)
+
+        tip(71)
+        yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+        tip(72)
+        yield accepted()
+        save_spendable_output()
+
+
+        # Test some invalid scripts and MAX_BLOCK_SIGOPS
+        #
+        # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+        #                                                                                  \-> b** (22)
+        #
+
+        # b73 - tx with excessive sigops that are placed after an excessively large script element.
+        #       The purpose of the test is to make sure those sigops are counted.
+        #
+        #       script is a bytearray of size 20,526
+        #
+        #       bytearray[0-19,998]     : OP_CHECKSIG
+        #       bytearray[19,999]       : OP_PUSHDATA4
+        #       bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
+        #       bytearray[20,004-20,525]: unread data (script_element)
+        #       bytearray[20,526]       : OP_CHECKSIG (this puts us over the limit)
+        #
+        tip(72)
+        b73 = block(73)
+        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
+        a = bytearray([OP_CHECKSIG] * size)
+        a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16)  # OP_PUSHDATA4
+
+        element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
+        a[MAX_BLOCK_SIGOPS] = element_size % 256
+        a[MAX_BLOCK_SIGOPS+1] = element_size // 256
+        a[MAX_BLOCK_SIGOPS+2] = 0
+        a[MAX_BLOCK_SIGOPS+3] = 0
+
+        tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+        b73 = update_block(73, [tx])
+        assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
+        yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+        # b74/75 - if we push an invalid script element, all previous sigops are counted,
+        #          but sigops after the element are not counted.
+        #
+        #          The invalid script element is that the push_data indicates that
+        #          there will be a large amount of data (0xffffff bytes), but we only
+        #          provide a much smaller number.  These bytes are CHECKSIGS so they would
+        #          cause b75 to fail for excessive sigops, if those bytes were counted.
+ # + # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element + # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element + # + # + tip(72) + b74 = block(74) + size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 + a = bytearray([OP_CHECKSIG] * size) + a[MAX_BLOCK_SIGOPS] = 0x4e + a[MAX_BLOCK_SIGOPS+1] = 0xfe + a[MAX_BLOCK_SIGOPS+2] = 0xff + a[MAX_BLOCK_SIGOPS+3] = 0xff + a[MAX_BLOCK_SIGOPS+4] = 0xff + tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) + b74 = update_block(74, [tx]) + yield rejected(RejectResult(16, b'bad-blk-sigops')) + + tip(72) + b75 = block(75) + size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 + a = bytearray([OP_CHECKSIG] * size) + a[MAX_BLOCK_SIGOPS-1] = 0x4e + a[MAX_BLOCK_SIGOPS] = 0xff + a[MAX_BLOCK_SIGOPS+1] = 0xff + a[MAX_BLOCK_SIGOPS+2] = 0xff + a[MAX_BLOCK_SIGOPS+3] = 0xff + tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) + b75 = update_block(75, [tx]) + yield accepted() + save_spendable_output() + + # Check that if we push an element filled with CHECKSIGs, they are not counted + tip(75) + b76 = block(76) + size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + a = bytearray([OP_CHECKSIG] * size) + a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs + tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) + b76 = update_block(76, [tx]) + yield accepted() + save_spendable_output() + + # Test transaction resurrection + # + # -> b77 (24) -> b78 (25) -> b79 (26) + # \-> b80 (25) -> b81 (26) -> b82 (27) + # + # b78 creates a tx, which is spent in b79. After b82, both should be in mempool + # + # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the + # rather obscure reason that the Python signature code does not distinguish between + # Low-S and High-S values (whereas the bitcoin code has custom code which does so); + # as a result of which, the odds are 50% that the python code will use the right + # value and the transaction will be accepted into the mempool. Until we modify the + # test framework to support low-S signing, we are out of luck. + # + # To get around this issue, we construct transactions which are not signed and which + # spend to OP_TRUE. If the standard-ness rules change, this test would need to be + # updated. (Perhaps to spend to a P2SH OP_TRUE script) + # + tip(76) + block(77) + tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN) + update_block(77, [tx77]) + yield accepted() + save_spendable_output() + + block(78) + tx78 = create_tx(tx77, 0, 9*COIN) + update_block(78, [tx78]) + yield accepted() + + block(79) + tx79 = create_tx(tx78, 0, 8*COIN) + update_block(79, [tx79]) + yield accepted() + + # mempool should be empty + assert_equal(len(self.nodes[0].getrawmempool()), 0) + + tip(77) + block(80, spend=out[25]) + yield rejected() + save_spendable_output() + + block(81, spend=out[26]) + yield rejected() # other chain is same length + save_spendable_output() + + block(82, spend=out[27]) + yield accepted() # now this chain is longer, triggers re-org + save_spendable_output() + + # now check that tx78 and tx79 have been put back into the peer's mempool + mempool = self.nodes[0].getrawmempool() + assert_equal(len(mempool), 2) + assert(tx78.hash in mempool) + assert(tx79.hash in mempool) + + + # Test invalid opcodes in dead execution paths. 
+ # + # -> b81 (26) -> b82 (27) -> b83 (28) + # + block(83) + op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] + script = CScript(op_codes) + tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) + + tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) + tx2.vin[0].scriptSig = CScript([OP_FALSE]) + tx2.rehash() + + update_block(83, [tx1, tx2]) + yield accepted() + save_spendable_output() + + + # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) + # + # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) + # \-> b85 (29) -> b86 (30) \-> b89a (32) + # + # + block(84) + tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx1.calc_sha256() + self.sign_tx(tx1, out[29].tx, out[29].n) + tx1.rehash() + tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) + tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) + tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) + tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) + tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) + tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) + tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) + + update_block(84, [tx1,tx2,tx3,tx4,tx5]) + yield accepted() + save_spendable_output() + + tip(83) + block(85, spend=out[29]) + yield rejected() + + block(86, spend=out[30]) + yield accepted() + + tip(84) + block(87, spend=out[30]) + yield rejected() + save_spendable_output() + + block(88, spend=out[31]) + yield accepted() + save_spendable_output() + + # trying to spend the OP_RETURN output is rejected + block("89a", spend=out[32]) + tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) + update_block("89a", [tx]) + yield rejected() + + + # Test re-org of a week's worth of blocks (1088 blocks) + # This test takes a minute or two and can be accomplished in memory + # + if self.options.runbarelyexpensive: + tip(88) + LARGE_REORG_SIZE = 1088 + test1 = TestInstance(sync_every_block=False) + spend=out[32] + for i in range(89, LARGE_REORG_SIZE + 89): + b = block(i, spend) + tx = CTransaction() + script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 + script_output = CScript([b'\x00' * script_length]) + tx.vout.append(CTxOut(0, script_output)) + tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) + b = update_block(i, [tx]) + assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) + test1.blocks_and_transactions.append([self.tip, True]) + save_spendable_output() + spend = get_spendable_output() + + yield test1 + chain1_tip = i + + # now create alt chain of same length + tip(88) + test2 = TestInstance(sync_every_block=False) + for i in range(89, LARGE_REORG_SIZE + 89): + block("alt"+str(i)) + test2.blocks_and_transactions.append([self.tip, False]) + yield test2 + + # extend alt chain to trigger re-org + block("alt" + str(chain1_tip + 1)) + yield accepted() + + # ... 
and re-org back to the first chain + tip(chain1_tip) + block(chain1_tip + 1) + yield rejected() + block(chain1_tip + 2) + yield accepted() + + chain1_tip += 2 + + + +if __name__ == '__main__': + FullBlockTest().main() diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py new file mode 100755 index 0000000000..f62ae31654 --- /dev/null +++ b/test/functional/feature_cltv.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test BIP65 (CHECKLOCKTIMEVERIFY). + +Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height +1351. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import * +from test_framework.blocktools import create_coinbase, create_block +from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum +from io import BytesIO + +CLTV_HEIGHT = 1351 + +# Reject codes that we might receive in this test +REJECT_INVALID = 16 +REJECT_OBSOLETE = 17 +REJECT_NONSTANDARD = 64 + +def cltv_invalidate(tx): + '''Modify the signature in vin 0 of the tx to fail CLTV + + Prepends -1 CLTV DROP in the scriptSig itself. + + TODO: test more ways that transactions using CLTV could be invalid (eg + locktime requirements fail, sequence time requirements fail, etc). + ''' + tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + + list(CScript(tx.vin[0].scriptSig))) + +def cltv_validate(node, tx, height): + '''Modify the signature in vin 0 of the tx to pass CLTV + Prepends CLTV DROP in the scriptSig, and sets + the locktime to height''' + tx.vin[0].nSequence = 0 + tx.nLockTime = height + + # Need to re-sign, since nSequence and nLockTime changed + signed_result = node.signrawtransaction(ToHex(tx)) + new_tx = CTransaction() + new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex']))) + + new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] + + list(CScript(new_tx.vin[0].scriptSig))) + return new_tx + +def create_transaction(node, coinbase, to_address, amount): + from_txid = node.getblock(coinbase)['tx'][0] + inputs = [{ "txid" : from_txid, "vout" : 0}] + outputs = { to_address : amount } + rawtx = node.createrawtransaction(inputs, outputs) + signresult = node.signrawtransaction(rawtx) + tx = CTransaction() + tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) + return tx + +class BIP65Test(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] + self.setup_clean_chain = True + + def run_test(self): + self.nodes[0].add_p2p_connection(P2PInterface()) + + network_thread_start() + + # wait_for_verack ensures that the P2P connection is fully up. 
+ self.nodes[0].p2p.wait_for_verack() + + self.log.info("Mining %d blocks", CLTV_HEIGHT - 2) + self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) + self.nodeaddress = self.nodes[0].getnewaddress() + + self.log.info("Test that an invalid-according-to-CLTV transaction can still appear in a block") + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], + self.nodeaddress, 1.0) + cltv_invalidate(spendtx) + spendtx.rehash() + + tip = self.nodes[0].getbestblockhash() + block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 + block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time) + block.nVersion = 3 + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + self.log.info("Test that blocks must now be at least version 4") + tip = block.sha256 + block_time += 1 + block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) + block.nVersion = 3 + block.solve() + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) + + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) + with mininode_lock: + assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)') + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + del self.nodes[0].p2p.last_message["reject"] + + self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block") + block.nVersion = 4 + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], + self.nodeaddress, 1.0) + cltv_invalidate(spendtx) + spendtx.rehash() + + # First we show that this tx is valid except for CLTV by getting it + # accepted to the mempool (which we can achieve with + # -promiscuousmempoolflags). + self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) + assert spendtx.hash in self.nodes[0].getrawmempool() + + # Now we verify that a block with this transaction is invalid. 
+ block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) + + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) + with mininode_lock: + assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: + # Generic rejection when a block is invalid + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed') + else: + assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason + + self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") + spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1) + spendtx.rehash() + + block.vtx.pop(1) + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) + + +if __name__ == '__main__': + BIP65Test().main() diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py new file mode 100755 index 0000000000..61abba8082 --- /dev/null +++ b/test/functional/feature_config_args.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test various command line arguments and configuration file parameters.""" + +import os + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import get_datadir_path + +class ConfArgsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + self.stop_node(0) + # Remove the -datadir argument so it doesn't override the config file + self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")] + + default_data_dir = get_datadir_path(self.options.tmpdir, 0) + new_data_dir = os.path.join(default_data_dir, 'newdatadir') + new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2') + + # Check that using -datadir argument on non-existent directory fails + self.nodes[0].datadir = new_data_dir + self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.') + + # Check that using non-existent datadir in conf file fails + conf_file = os.path.join(default_data_dir, "bitcoin.conf") + with open(conf_file, 'a', encoding='utf8') as f: + f.write("datadir=" + new_data_dir + "\n") + self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.') + + # Create the directory and ensure the config file now works + os.mkdir(new_data_dir) + self.start_node(0, ['-conf='+conf_file, '-wallet=w1']) + self.stop_node(0) + assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1')) + + # Ensure command line argument overrides datadir in conf + os.mkdir(new_data_dir_2) + self.nodes[0].datadir = new_data_dir_2 + self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2']) + assert 
os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
+
+if __name__ == '__main__':
+    ConfArgsTest().main()
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
new file mode 100755
index 0000000000..82aa0ff891
--- /dev/null
+++ b/test/functional/feature_csv_activation.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test activation of the first version bits soft fork.
+
+This soft fork will activate the following BIPs:
+BIP 68  - nSequence relative lock times
+BIP 112 - CHECKSEQUENCEVERIFY
+BIP 113 - MedianTimePast semantics for nLockTime
+
+regtest lock-in with 108/144 block signalling
+activation after a further 144 blocks
+
+mine 82 blocks whose coinbases will be used to generate inputs for our tests
+mine 61 blocks to transition from DEFINED to STARTED
+mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
+mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
+mine 140 blocks and seed block chain with the 82 inputs we will use for our tests at height 572
+mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
+mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
+Test BIP 113 is enforced
+Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
+Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
+Mine 1 block so next height is 582 and test BIP 68 now passes time and height
+Test that BIP 112 is enforced
+
+Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates,
+and that after the soft fork activates transactions pass and fail as they should according to the rules.
+For each BIP, transactions of versions 1 and 2 will be tested.
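Before the per-BIP breakdown that follows, a standalone reading of the nSequence fields this test varies (constants restated from BIP 68; illustrative, not part of the patch):

    # Illustrative only: decoding the BIP 68 nSequence bits exercised below.
    SEQ_DISABLE = 1 << 31     # bit 31: relative lock-time not enforced
    SEQ_TYPE    = 1 << 22     # bit 22: set = time-based (512s units), clear = height-based
    SEQ_MASK    = 0x0000FFFF  # low 16 bits: the lock-time value itself

    def decode(nsequence):
        if nsequence & SEQ_DISABLE:
            return None                           # no relative lock-time
        unit = "x512s" if nsequence & SEQ_TYPE else "blocks"
        return (nsequence & SEQ_MASK, unit)

    assert decode(10) == (10, "blocks")           # the base_relative_locktime used here
    assert decode((1 << 22) | 10) == (10, "x512s")
    assert decode((1 << 31) | 10) is None         # the disable flag wins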
+---------------- +BIP 113: +bip113tx - modify the nLocktime variable + +BIP 68: +bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below + +BIP 112: +bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP +bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP +bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP +bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP +bip112tx_special - test negative argument to OP_CSV +""" + +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import * +from test_framework.mininode import ToHex, CTransaction, network_thread_start +from test_framework.blocktools import create_coinbase, create_block +from test_framework.comptool import TestInstance, TestManager +from test_framework.script import * +from io import BytesIO +import time + +base_relative_locktime = 10 +seq_disable_flag = 1<<31 +seq_random_high_bit = 1<<25 +seq_type_flag = 1<<22 +seq_random_low_bit = 1<<18 + +# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field +# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1 +relative_locktimes = [] +for b31 in range(2): + b25times = [] + for b25 in range(2): + b22times = [] + for b22 in range(2): + b18times = [] + for b18 in range(2): + rlt = base_relative_locktime + if (b31): + rlt = rlt | seq_disable_flag + if (b25): + rlt = rlt | seq_random_high_bit + if (b22): + rlt = rlt | seq_type_flag + if (b18): + rlt = rlt | seq_random_low_bit + b18times.append(rlt) + b22times.append(b18times) + b25times.append(b22times) + relative_locktimes.append(b25times) + +def all_rlt_txs(txarray): + txs = [] + for b31 in range(2): + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + txs.append(txarray[b31][b25][b22][b18]) + return txs + +class BIP68_112_113Test(ComparisonTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']] + + def run_test(self): + test = TestManager(self, self.options.tmpdir) + test.add_all_connections(self.nodes) + network_thread_start() + test.run() + + def send_generic_input_tx(self, node, coinbases): + amount = Decimal("49.99") + return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) + + def create_transaction(self, node, txid, to_address, amount): + inputs = [{ "txid" : txid, "vout" : 0}] + outputs = { to_address : amount } + rawtx = node.createrawtransaction(inputs, outputs) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(rawtx)) + tx.deserialize(f) + return tx + + def sign_transaction(self, node, unsignedtx): + rawtx = ToHex(unsignedtx) + signresult = node.signrawtransaction(rawtx) + tx = CTransaction() + f = BytesIO(hex_str_to_bytes(signresult['hex'])) + tx.deserialize(f) + return tx + + def generate_blocks(self, number, version, test_blocks = []): + for i in range(number): + block = self.create_test_block([], version) + test_blocks.append([block, True]) + self.last_block_time += 600 + self.tip = block.sha256 + self.tipheight += 1 + return 
test_blocks + + def create_test_block(self, txs, version = 536870912): + block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600) + block.nVersion = version + block.vtx.extend(txs) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + block.solve() + return block + + def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): + txs = [] + assert(len(bip68inputs) >= 16) + i = 0 + for b31 in range(2): + b25txs = [] + for b25 in range(2): + b22txs = [] + for b22 in range(2): + b18txs = [] + for b18 in range(2): + tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) + i += 1 + tx.nVersion = txversion + tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta + b18txs.append(self.sign_transaction(self.nodes[0], tx)) + b22txs.append(b18txs) + b25txs.append(b22txs) + txs.append(b25txs) + return txs + + def create_bip112special(self, input, txversion): + tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98")) + tx.nVersion = txversion + signtx = self.sign_transaction(self.nodes[0], tx) + signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + return signtx + + def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0): + txs = [] + assert(len(bip112inputs) >= 16) + i = 0 + for b31 in range(2): + b25txs = [] + for b25 in range(2): + b22txs = [] + for b22 in range(2): + b18txs = [] + for b18 in range(2): + tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) + i += 1 + if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed + tx.vin[0].nSequence = base_relative_locktime + locktime_delta + else: # vary nSequence instead, OP_CSV is fixed + tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta + tx.nVersion = txversion + signtx = self.sign_transaction(self.nodes[0], tx) + if (varyOP_CSV): + signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + else: + signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) + b18txs.append(signtx) + b22txs.append(b18txs) + b25txs.append(b22txs) + txs.append(b25txs) + return txs + + def get_tests(self): + long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future + self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time + self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs + self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time + self.tipheight = 82 # height of the next block to build + self.last_block_time = long_past_time + self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) + self.nodeaddress = self.nodes[0].getnewaddress() + + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined') + test_blocks = self.generate_blocks(61, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 1 + # Advanced from DEFINED to STARTED, height = 143 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') + + # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0 + # using a variety of 
bits to simulate multiple parallel softforks + test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not) + yield TestInstance(test_blocks, sync_every_block=False) # 2 + # Failed to advance past STARTED, height = 287 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started') + + # 108 out of 144 signal bit 0 to achieve lock-in + # using a variety of bits to simulate multiple parallel softforks + test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready) + test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not) + test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready) + test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not) + yield TestInstance(test_blocks, sync_every_block=False) # 3 + # Advanced from STARTED to LOCKED_IN, height = 431 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') + + # 140 more version 4 blocks + test_blocks = self.generate_blocks(140, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 4 + + ### Inputs at height = 572 + # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) + # Note we reuse inputs for v1 and v2 txs so must test these separately + # 16 normal inputs + bip68inputs = [] + for i in range(16): + bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) + bip112basicinputs = [] + for j in range(2): + inputs = [] + for i in range(16): + inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + bip112basicinputs.append(inputs) + # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) + bip112diverseinputs = [] + for j in range(2): + inputs = [] + for i in range(16): + inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) + bip112diverseinputs.append(inputs) + # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) + bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + # 1 normal input + bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks) + + self.nodes[0].setmocktime(self.last_block_time + 600) + inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572 + self.nodes[0].setmocktime(0) + self.tip = int("0x" + inputblockhash, 0) + self.tipheight += 1 + self.last_block_time += 600 + assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1) + + # 2 more version 4 blocks + test_blocks = self.generate_blocks(2, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 5 + # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575) + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') + + # Test both version 1 and version 2 transactions for all tests + # BIP113 test transaction will be modified before each use to put in appropriate block time + bip113tx_v1 = self.create_transaction(self.nodes[0], 
bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE + bip113tx_v1.nVersion = 1 + bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) + bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE + bip113tx_v2.nVersion = 2 + + # For BIP68 test all 16 relative sequence locktimes + bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) + bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) + + # For BIP112 test: + # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs + bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1) + bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2) + # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs + bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1) + bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1) + # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs + bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1) + bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2) + # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs + bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1) + bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1) + # -1 OP_CSV OP_DROP input + bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) + bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) + + + ### TESTING ### + ################################## + ### Before Soft Forks Activate ### + ################################## + # All txs should pass + ### Version 1 txs ### + success_txs = [] + # add BIP113 tx and -1 CSV tx + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + success_txs.append(bip113signed1) + success_txs.append(bip112tx_special_v1) + # add BIP 68 txs + success_txs.extend(all_rlt_txs(bip68txs_v1)) + # add BIP 112 with seq=10 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) + # try BIP 112 with seq=9 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 6 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ### Version 2 txs ### + success_txs = [] + # add BIP113 tx and -1 CSV tx + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + success_txs.append(bip113signed2) + success_txs.append(bip112tx_special_v2) + # add BIP 68 txs + success_txs.extend(all_rlt_txs(bip68txs_v2)) + # add BIP 112 with seq=10 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) + # try BIP 112 with seq=9 txs + success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) + success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 7 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + + # 1 
more version 4 block to get us to height 575 so the fork should now be active for the next block + test_blocks = self.generate_blocks(1, 4) + yield TestInstance(test_blocks, sync_every_block=False) # 8 + assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active') + + + ################################# + ### After Soft Forks Activate ### + ################################# + ### BIP 113 ### + # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block + bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + for bip113tx in [bip113signed1, bip113signed2]: + yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10 + # BIP 113 tests should now pass if the locktime is < MTP + bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) + bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block + bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) + for bip113tx in [bip113signed1, bip113signed2]: + yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + # Next block height = 580 after 4 blocks of random version + test_blocks = self.generate_blocks(4, 1234) + yield TestInstance(test_blocks, sync_every_block=False) # 13 + + ### BIP 68 ### + ### Version 1 txs ### + # All still pass + success_txs = [] + success_txs.extend(all_rlt_txs(bip68txs_v1)) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 14 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ### Version 2 txs ### + bip68success_txs = [] + # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) + yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512 + bip68timetxs = [] + for b25 in range(2): + for b18 in range(2): + bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) + for tx in bip68timetxs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19 + bip68heighttxs = [] + for b25 in range(2): + for b18 in range(2): + bip68heighttxs.append(bip68txs_v2[0][b25][0][b18]) + for tx in bip68heighttxs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23 + + # Advance one block to 581 + test_blocks = self.generate_blocks(1, 1234) + yield TestInstance(test_blocks, sync_every_block=False) # 24 + + # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512 + bip68success_txs.extend(bip68timetxs) + yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + for tx in bip68heighttxs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29 + + # Advance one block to 582 + test_blocks = self.generate_blocks(1, 1234) + yield TestInstance(test_blocks, sync_every_block=False) 
# 30 + + # All BIP 68 txs should pass + bip68success_txs.extend(bip68heighttxs) + yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + + ### BIP 112 ### + ### Version 1 txs ### + # -1 OP_CSV tx should fail + yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32 + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass + success_txs = [] + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18]) + success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18]) + yield TestInstance([[self.create_test_block(success_txs), True]]) # 33 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail + fail_txs = [] + fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) + fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18]) + fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18]) + + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81 + + ### Version 2 txs ### + # -1 OP_CSV tx should fail + yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82 + + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met) + success_txs = [] + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV + success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 + + yield TestInstance([[self.create_test_block(success_txs), True]]) # 83 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## + # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check + fail_txs = [] + fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9 + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9 + + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107 + + # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail + fail_txs = [] + for b25 in range(2): + for b22 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115 + + # If sequencelock types mismatch, tx should fail + fail_txs = [] + for b25 in range(2): + for b18 in range(2): + fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence + fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV + for tx in fail_txs: + yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123 + + # Remaining txs should pass, just test masking works properly + success_txs = [] + for b25 in range(2): + for b18 in range(2): + success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence + 
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV + yield TestInstance([[self.create_test_block(success_txs), True]]) # 124 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + # Additional test, of checking that comparison of two time types works properly + time_txs = [] + for b25 in range(2): + for b18 in range(2): + tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] + tx.vin[0].nSequence = base_relative_locktime | seq_type_flag + signtx = self.sign_transaction(self.nodes[0], tx) + time_txs.append(signtx) + yield TestInstance([[self.create_test_block(time_txs), True]]) # 125 + self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + + ### Missing aspects of test + ## Testing empty stack fails + + +if __name__ == '__main__': + BIP68_112_113Test().main() diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py new file mode 100755 index 0000000000..24b9765b4e --- /dev/null +++ b/test/functional/feature_dbcrash.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test recovery from a crash during chainstate writing. + +- 4 nodes + * node0, node1, and node2 will have different dbcrash ratios, and different + dbcache sizes + * node3 will be a regular node, with no crashing. + * The nodes will not connect to each other. + +- use default test framework starting chain. initialize starting_tip_height to + tip height. + +- Main loop: + * generate lots of transactions on node3, enough to fill up a block. + * uniformly randomly pick a tip height from starting_tip_height to + tip_height; with probability 1/(height_difference+4), invalidate this block. + * mine enough blocks to overtake tip_height at start of loop. + * for each node in [node0,node1,node2]: + - for each mined block: + * submit block to node + * if node crashed on/after submitting: + - restart until recovery succeeds + - check that utxo matches node3 using gettxoutsetinfo""" + +import errno +import http.client +import random +import sys +import time + +from test_framework.mininode import * +from test_framework.script import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest] +try: + HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected) +except AttributeError: + pass + +class ChainstateWriteCrashTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 4 + self.setup_clean_chain = False + + # Set -maxmempool=0 to turn off mempool memory sharing with dbcache + # Set -rpcservertimeout=900 to reduce socket disconnects in this + # long-running test + self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"] + + # Set different crash ratios and cache sizes. Note that not all of + # -dbcache goes to pcoinsTip. 
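+        # (A rough gloss, not part of the original test: -dbcrashratio makes
+        # the node randomly simulate a crash while flushing the chainstate,
+        # and a smaller -dbcache means more frequent flushes, so each node
+        # below gets a different crash/flush profile.)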
+ self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args + self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args + self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args + + # Node3 is a normal node with default args, except will mine full blocks + self.node3_args = ["-blockmaxweight=4000000"] + self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args] + + def setup_network(self): + # Need a bit of extra time for the nodes to start up for this test + self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90) + self.start_nodes() + # Leave them unconnected, we'll use submitblock directly in this test + + def restart_node(self, node_index, expected_tip): + """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash. + + Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up + after 60 seconds. Returns the utxo hash of the given node.""" + + time_start = time.time() + while time.time() - time_start < 120: + try: + # Any of these RPC calls could throw due to node crash + self.start_node(node_index) + self.nodes[node_index].waitforblock(expected_tip) + utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2'] + return utxo_hash + except: + # An exception here should mean the node is about to crash. + # If bitcoind exits, then try again. wait_for_node_exit() + # should raise an exception if bitcoind doesn't exit. + self.wait_for_node_exit(node_index, timeout=10) + self.crashed_on_restart += 1 + time.sleep(1) + + # If we got here, bitcoind isn't coming back up on restart. Could be a + # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio -- + # perhaps we generated a test case that blew up our cache? + # TODO: If this happens a lot, we should try to restart without -dbcrashratio + # and make sure that recovery happens. + raise AssertionError("Unable to successfully restart node %d in allotted time", node_index) + + def submit_block_catch_error(self, node_index, block): + """Try submitting a block to the given node. + + Catch any exceptions that indicate the node has crashed. + Returns true if the block was submitted successfully; false otherwise.""" + + try: + self.nodes[node_index].submitblock(block) + return True + except http.client.BadStatusLine as e: + # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error. + if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''": + self.log.debug("node %d submitblock raised exception: %s", node_index, e) + return False + else: + raise + except tuple(HTTP_DISCONNECT_ERRORS) as e: + self.log.debug("node %d submitblock raised exception: %s", node_index, e) + return False + except OSError as e: + self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno) + if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]: + # The node has likely crashed + return False + else: + # Unexpected exception, raise + raise + + def sync_node3blocks(self, block_hashes): + """Use submitblock to sync node3's chain with the other nodes + + If submitblock fails, restart the node and get the new utxo hash. 
+ If any nodes crash while updating, we'll compare utxo hashes to + ensure recovery was successful.""" + + node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] + + # Retrieve all the blocks from node3 + blocks = [] + for block_hash in block_hashes: + blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)]) + + # Deliver each block to each other node + for i in range(3): + nodei_utxo_hash = None + self.log.debug("Syncing blocks to node %d", i) + for (block_hash, block) in blocks: + # Get the block from node3, and submit to node_i + self.log.debug("submitting block %s", block_hash) + if not self.submit_block_catch_error(i, block): + # TODO: more carefully check that the crash is due to -dbcrashratio + # (change the exit code perhaps, and check that here?) + self.wait_for_node_exit(i, timeout=30) + self.log.debug("Restarting node %d after block hash %s", i, block_hash) + nodei_utxo_hash = self.restart_node(i, block_hash) + assert nodei_utxo_hash is not None + self.restart_counts[i] += 1 + else: + # Clear it out after successful submitblock calls -- the cached + # utxo hash will no longer be correct + nodei_utxo_hash = None + + # Check that the utxo hash matches node3's utxo set + # NOTE: we only check the utxo set if we had to restart the node + # after the last block submitted: + # - checking the utxo hash causes a cache flush, which we don't + # want to do every time; so + # - we only update the utxo cache after a node restart, since flushing + # the cache is a no-op at that point + if nodei_utxo_hash is not None: + self.log.debug("Checking txoutsetinfo matches for node %d", i) + assert_equal(nodei_utxo_hash, node3_utxo_hash) + + def verify_utxo_hash(self): + """Verify that the utxo hash of each node matches node3. + + Restart any nodes that crash while querying.""" + node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2'] + self.log.info("Verifying utxo hash matches for all nodes") + + for i in range(3): + try: + nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2'] + except OSError: + # probably a crash on db flushing + nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash()) + assert_equal(nodei_utxo_hash, node3_utxo_hash) + + def generate_small_transactions(self, node, count, utxo_list): + FEE = 1000 # TODO: replace this with node relay fee based calculation + num_transactions = 0 + random.shuffle(utxo_list) + while len(utxo_list) >= 2 and num_transactions < count: + tx = CTransaction() + input_amount = 0 + for i in range(2): + utxo = utxo_list.pop() + tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) + input_amount += int(utxo['amount'] * COIN) + output_amount = (input_amount - FEE) // 3 + + if output_amount <= 0: + # Sanity check -- if we chose inputs that are too small, skip + continue + + for i in range(3): + tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) + + # Sign and send the transaction to get into the mempool + tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex'] + node.sendrawtransaction(tx_signed_hex) + num_transactions += 1 + + def run_test(self): + # Track test coverage statistics + self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2 + self.crashed_on_restart = 0 # Track count of crashes during recovery + + # Start by creating a lot of utxos on node3 + initial_height = self.nodes[3].getblockcount() + utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000) + self.log.info("Prepped %d utxo 
entries", len(utxo_list)) + + # Sync these blocks with the other nodes + block_hashes_to_sync = [] + for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): + block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) + + self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync)) + # Syncing the blocks could cause nodes to crash, so the test begins here. + self.sync_node3blocks(block_hashes_to_sync) + + starting_tip_height = self.nodes[3].getblockcount() + + # Main test loop: + # each time through the loop, generate a bunch of transactions, + # and then either mine a single new block on the tip, or some-sized reorg. + for i in range(40): + self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts) + # Generate a bunch of small-ish transactions + self.generate_small_transactions(self.nodes[3], 2500, utxo_list) + # Pick a random block between current tip, and starting tip + current_height = self.nodes[3].getblockcount() + random_height = random.randint(starting_tip_height, current_height) + self.log.debug("At height %d, considering height %d", current_height, random_height) + if random_height > starting_tip_height: + # Randomly reorg from this point with some probability (1/4 for + # tip, 1/5 for tip-1, ...) + if random.random() < 1.0 / (current_height + 4 - random_height): + self.log.debug("Invalidating block at height %d", random_height) + self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height)) + + # Now generate new blocks until we pass the old tip height + self.log.debug("Mining longer tip") + block_hashes = [] + while current_height + 1 > self.nodes[3].getblockcount(): + block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount()))) + self.log.debug("Syncing %d new blocks...", len(block_hashes)) + self.sync_node3blocks(block_hashes) + utxo_list = self.nodes[3].listunspent() + self.log.debug("Node3 utxo count: %d", len(utxo_list)) + + # Check that the utxo hashes agree with node3 + # Useful side effect: each utxo cache gets flushed here, so that we + # won't get crashes on shutdown at the end of the test. + self.verify_utxo_hash() + + # Check the test coverage + self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart) + + # If no nodes were restarted, we didn't test anything. + assert self.restart_counts != [0, 0, 0] + + # Make sure we tested the case of crash-during-recovery. + assert self.crashed_on_restart > 0 + + # Warn if any of the nodes escaped restart. + for i in range(3): + if self.restart_counts[i] == 0: + self.log.warn("Node %d never crashed during utxo flush!", i) + +if __name__ == "__main__": + ChainstateWriteCrashTest().main() diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py new file mode 100755 index 0000000000..3414571678 --- /dev/null +++ b/test/functional/feature_dersig.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test BIP66 (DER SIG). + +Test that the DERSIG soft-fork activates at (regtest) height 1251. 
+""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import * +from test_framework.blocktools import create_coinbase, create_block +from test_framework.script import CScript +from io import BytesIO + +DERSIG_HEIGHT = 1251 + +# Reject codes that we might receive in this test +REJECT_INVALID = 16 +REJECT_OBSOLETE = 17 +REJECT_NONSTANDARD = 64 + +# A canonical signature consists of: +# <30> <02> <02> +def unDERify(tx): + """ + Make the signature in vin 0 of a tx non-DER-compliant, + by adding padding after the S-value. + """ + scriptSig = CScript(tx.vin[0].scriptSig) + newscript = [] + for i in scriptSig: + if (len(newscript) == 0): + newscript.append(i[0:-1] + b'\0' + i[-1:]) + else: + newscript.append(i) + tx.vin[0].scriptSig = CScript(newscript) + +def create_transaction(node, coinbase, to_address, amount): + from_txid = node.getblock(coinbase)['tx'][0] + inputs = [{ "txid" : from_txid, "vout" : 0}] + outputs = { to_address : amount } + rawtx = node.createrawtransaction(inputs, outputs) + signresult = node.signrawtransaction(rawtx) + tx = CTransaction() + tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex']))) + return tx + +class BIP66Test(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] + self.setup_clean_chain = True + + def run_test(self): + self.nodes[0].add_p2p_connection(P2PInterface()) + + network_thread_start() + + # wait_for_verack ensures that the P2P connection is fully up. + self.nodes[0].p2p.wait_for_verack() + + self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2) + self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2) + self.nodeaddress = self.nodes[0].getnewaddress() + + self.log.info("Test that a transaction with non-DER signature can still appear in a block") + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], + self.nodeaddress, 1.0) + unDERify(spendtx) + spendtx.rehash() + + tip = self.nodes[0].getbestblockhash() + block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 + block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time) + block.nVersion = 2 + block.vtx.append(spendtx) + block.hashMerkleRoot = block.calc_merkle_root() + block.rehash() + block.solve() + + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(self.nodes[0].getbestblockhash(), block.hash) + + self.log.info("Test that blocks must now be at least version 3") + tip = block.sha256 + block_time += 1 + block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time) + block.nVersion = 2 + block.rehash() + block.solve() + self.nodes[0].p2p.send_and_ping(msg_block(block)) + assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) + + wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) + with mininode_lock: + assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) + assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)') + assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256) + del self.nodes[0].p2p.last_message["reject"] + + self.log.info("Test that transactions with non-DER signatures cannot appear in a block") + block.nVersion = 3 + + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], + self.nodeaddress, 1.0) + unDERify(spendtx) + spendtx.rehash() + + # First we show that this tx is valid except for 
DERSIG by getting it
+        # accepted to the mempool (which we can achieve with
+        # -promiscuousmempoolflags).
+        self.nodes[0].p2p.send_and_ping(msg_tx(spendtx))
+        assert spendtx.hash in self.nodes[0].getrawmempool()
+
+        # Now we verify that a block with this transaction is invalid.
+        block.vtx.append(spendtx)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.rehash()
+        block.solve()
+
+        self.nodes[0].p2p.send_and_ping(msg_block(block))
+        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
+
+        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
+        with mininode_lock:
+            # We can receive different reject messages depending on whether
+            # bitcoind is running with multiple script check threads. If script
+            # check threads are not in use, then transaction script validation
+            # happens sequentially, and bitcoind produces more specific reject
+            # reasons.
+            assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
+            assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
+            if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
+                # Generic rejection when a block is invalid
+                assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
+            else:
+                assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
+
+        self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
+        block.vtx[1] = create_transaction(self.nodes[0],
+                self.coinbase_blocks[1], self.nodeaddress, 1.0)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.rehash()
+        block.solve()
+
+        self.nodes[0].p2p.send_and_ping(msg_block(block))
+        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+if __name__ == '__main__':
+    BIP66Test().main()
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
new file mode 100755
index 0000000000..68453e50f4
--- /dev/null
+++ b/test/functional/feature_fee_estimation.py
@@ -0,0 +1,262 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test fee estimation code."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
+from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
+
+# Construct two trivial P2SH scripts and the scriptSigs that spend them,
+# so we can create many transactions without needing to spend time signing.
+redeem_script_1 = CScript([OP_1, OP_DROP])
+redeem_script_2 = CScript([OP_2, OP_DROP])
+P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
+P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
+
+# The associated scriptSigs that satisfy P2SH_1 and P2SH_2
+SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
+
+global log
+
+def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
+    """
+    Create and send a transaction with a random fee.
+    The transaction pays to a trivial P2SH script, and assumes that its inputs
+    are of the same form.
+    The function takes a list of confirmed outputs and unconfirmed outputs
+    and attempts to use the confirmed list first for its inputs.
+ It adds the newly created outputs to the unconfirmed list. + Returns (raw transaction, fee) + """ + # It's best to exponentially distribute our random fees + # because the buckets are exponentially spaced. + # Exponentially distributed from 1-128 * fee_increment + rand_fee = float(fee_increment)*(1.1892**random.randint(0,28)) + # Total fee ranges from min_fee to min_fee + 127*fee_increment + fee = min_fee - fee_increment + satoshi_round(rand_fee) + tx = CTransaction() + total_in = Decimal("0.00000000") + while total_in <= (amount + fee) and len(conflist) > 0: + t = conflist.pop(0) + total_in += t["amount"] + tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) + if total_in <= amount + fee: + while total_in <= (amount + fee) and len(unconflist) > 0: + t = unconflist.pop(0) + total_in += t["amount"] + tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b"")) + if total_in <= amount + fee: + raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in)) + tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1)) + tx.vout.append(CTxOut(int(amount*COIN), P2SH_2)) + # These transactions don't need to be signed, but we still have to insert + # the ScriptSig that will satisfy the ScriptPubKey. + for inp in tx.vin: + inp.scriptSig = SCRIPT_SIG[inp.prevout.n] + txid = from_node.sendrawtransaction(ToHex(tx), True) + unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee}) + unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount}) + + return (ToHex(tx), fee) + +def split_inputs(from_node, txins, txouts, initial_split = False): + """ + We need to generate a lot of inputs so we can generate a ton of transactions. + This function takes an input from txins, and creates and sends a transaction + which splits the value into 2 outputs which are appended to txouts. + Previously this was designed to be small inputs so they wouldn't have + a high coin age when the notion of priority still existed. + """ + prevtxout = txins.pop() + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b"")) + + half_change = satoshi_round(prevtxout["amount"]/2) + rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000") + tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1)) + tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2)) + + # If this is the initial split we actually need to sign the transaction + # Otherwise we just need to insert the proper ScriptSig + if (initial_split) : + completetx = from_node.signrawtransaction(ToHex(tx))["hex"] + else : + tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]] + completetx = ToHex(tx) + txid = from_node.sendrawtransaction(completetx, True) + txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change}) + txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change}) + +def check_estimates(node, fees_seen, max_invalid, print_estimates = True): + """ + This function calls estimatefee and verifies that the estimates + meet certain invariants. 
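+
+    A worked example of the invariants (illustrative numbers, not from the
+    test): if the fees seen ranged over [0.0001, 0.0005] BTC/kB, then every
+    estimate returned must lie within that range (up to a small rounding
+    delta), and estimates must not increase with the confirmation target,
+    e.g. estimatefee(2) >= estimatefee(3), since confirming faster never
+    costs less.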
+ """ + all_estimates = [ node.estimatefee(i) for i in range(1,26) ] + if print_estimates: + log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]]) + delta = 1.0e-6 # account for rounding error + last_e = max(fees_seen) + for e in [x for x in all_estimates if x >= 0]: + # Estimates should be within the bounds of what transactions fees actually were: + if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen): + raise AssertionError("Estimated fee (%f) out of range (%f,%f)" + %(float(e), min(fees_seen), max(fees_seen))) + # Estimates should be monotonically decreasing + if float(e)-delta > last_e: + raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms" + %(float(e),float(last_e))) + last_e = e + valid_estimate = False + invalid_estimates = 0 + for i,e in enumerate(all_estimates): # estimate is for i+1 + if e >= 0: + valid_estimate = True + if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n) + assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta) + + else: + invalid_estimates += 1 + + # estimatesmartfee should still be valid + approx_estimate = node.estimatesmartfee(i+1)["feerate"] + answer_found = node.estimatesmartfee(i+1)["blocks"] + assert(approx_estimate > 0) + assert(answer_found > i+1) + + # Once we're at a high enough confirmation count that we can give an estimate + # We should have estimates for all higher confirmation counts + if valid_estimate: + raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate") + + # Check on the expected number of different confirmation counts + # that we might not have valid estimates for + if invalid_estimates > max_invalid: + raise AssertionError("More than (%d) invalid estimates"%(max_invalid)) + return all_estimates + + +class EstimateFeeTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 3 + + def setup_network(self): + """ + We'll setup the network to have 3 nodes that all mine with different parameters. + But first we need to use one node to create a lot of outputs + which we will use to generate our transactions. + """ + self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"], + ["-blockmaxsize=17000", "-maxorphantx=1000", "-deprecatedrpc=estimatefee"], + ["-blockmaxsize=8000", "-maxorphantx=1000"]]) + # Use node0 to mine blocks for input splitting + # Node1 mines small blocks but that are bigger than the expected transaction rate. 
+        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes
+        # (17k is room enough for 110 or so transactions).
+        # Node2 is a stingy miner that
+        # produces too-small blocks (room for only 55 or so transactions)
+
+
+    def transact_and_mine(self, numblocks, mining_node):
+        min_fee = Decimal("0.00001")
+        # We will now mine numblocks blocks, generating on average 100 transactions between each block.
+        # We shuffle our confirmed txout set before each set of transactions.
+        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible,
+        # resorting to txs that depend on the mempool when those run out
+        for i in range(numblocks):
+            random.shuffle(self.confutxo)
+            for j in range(random.randrange(100-50,100+50)):
+                from_index = random.randint(1,2)
+                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
+                                                      self.memutxo, Decimal("0.005"), min_fee, min_fee)
+                tx_kbytes = (len(txhex) // 2) / 1000.0
+                self.fees_per_kb.append(float(fee)/tx_kbytes)
+            sync_mempools(self.nodes[0:3], wait=.1)
+            mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
+            sync_blocks(self.nodes[0:3], wait=.1)
+            # update which txouts are confirmed
+            newmem = []
+            for utx in self.memutxo:
+                if utx["txid"] in mined:
+                    self.confutxo.append(utx)
+                else:
+                    newmem.append(utx)
+            self.memutxo = newmem
+
+    def run_test(self):
+        self.log.info("This test is time consuming, please be patient")
+        self.log.info("Splitting inputs so we can generate txs")
+
+        # Make log handler available to helper functions
+        global log
+        log = self.log
+
+        # Start node0
+        self.start_node(0)
+        self.txouts = []
+        self.txouts2 = []
+        # Split a coinbase into two transaction puzzle outputs
+        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
+
+        # Mine
+        while (len(self.nodes[0].getrawmempool()) > 0):
+            self.nodes[0].generate(1)
+
+        # Repeatedly split those 2 outputs, doubling twice for each rep
+        # Use txouts to monitor the available utxo, since these won't be tracked in wallet
+        reps = 0
+        while (reps < 5):
+            # Double txouts to txouts2
+            while (len(self.txouts) > 0):
+                split_inputs(self.nodes[0], self.txouts, self.txouts2)
+            while (len(self.nodes[0].getrawmempool()) > 0):
+                self.nodes[0].generate(1)
+            # Double txouts2 to txouts
+            while (len(self.txouts2) > 0):
+                split_inputs(self.nodes[0], self.txouts2, self.txouts)
+            while (len(self.nodes[0].getrawmempool()) > 0):
+                self.nodes[0].generate(1)
+            reps += 1
+        self.log.info("Finished splitting")
+
+        # Now we can connect the other nodes; we didn't want to connect them earlier,
+        # so that the estimates would not be affected by the splitting transactions
+        self.start_node(1)
+        self.start_node(2)
+        connect_nodes(self.nodes[1], 0)
+        connect_nodes(self.nodes[0], 2)
+        connect_nodes(self.nodes[2], 1)
+
+        self.sync_all()
+
+        self.fees_per_kb = []
+        self.memutxo = []
+        self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
+        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
+
+        for i in range(2):
+            self.log.info("Creating transactions and mining them with a block size that can't keep up")
+            # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
+            self.transact_and_mine(10, self.nodes[2])
+            check_estimates(self.nodes[1], self.fees_per_kb, 14)

+        self.log.info("Creating transactions and mining them at a block size that is just big enough")
+        # Generate transactions while mining 10 more blocks, this time with node1
+        # which mines blocks
with capacity just above the rate that transactions are being created
+        self.transact_and_mine(10, self.nodes[1])
+        check_estimates(self.nodes[1], self.fees_per_kb, 2)
+
+        # Finish by mining a normal-sized block:
+        while len(self.nodes[1].getrawmempool()) > 0:
+            self.nodes[1].generate(1)
+
+        sync_blocks(self.nodes[0:3], wait=.1)
+        self.log.info("Final estimates after emptying mempools")
+        check_estimates(self.nodes[1], self.fees_per_kb, 2)
+
+if __name__ == '__main__':
+    EstimateFeeTest().main()
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
new file mode 100755
index 0000000000..45336ee801
--- /dev/null
+++ b/test/functional/feature_maxuploadtarget.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test behavior of -maxuploadtarget.
+
+* Verify that getdata requests for old blocks (>1 week old) are dropped
+if the upload target has been reached.
+* Verify that getdata requests for recent blocks are respected even
+if the upload target has been reached.
+* Verify that the upload counters are reset after 24 hours.
+"""
+from collections import defaultdict
+import time
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class TestNode(P2PInterface):
+    def __init__(self):
+        super().__init__()
+        self.block_receive_map = defaultdict(int)
+
+    def on_inv(self, message):
+        pass
+
+    def on_block(self, message):
+        message.block.calc_sha256()
+        self.block_receive_map[message.block.sha256] += 1
+
+class MaxUploadTest(BitcoinTestFramework):
+
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+        self.extra_args = [["-maxuploadtarget=800", "-blockmaxsize=999000"]]
+
+        # Cache for utxos, as listunspent may take a long time later in the test
+        self.utxo_cache = []
+
+    def run_test(self):
+        # Before we connect anything, we first set the time on the node
+        # to be in the past, otherwise things break because the CNode
+        # time counters can't be reset backward after initialization
+        old_time = int(time.time() - 2*60*60*24*7)
+        self.nodes[0].setmocktime(old_time)
+
+        # Generate some old blocks
+        self.nodes[0].generate(130)
+
+        # p2p_conns[0] will only request old blocks
+        # p2p_conns[1] will only request new blocks
+        # p2p_conns[2] will test resetting the counters
+        p2p_conns = []
+
+        for _ in range(3):
+            p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode()))
+
+        network_thread_start()
+        for p2pc in p2p_conns:
+            p2pc.wait_for_verack()
+
+        # Test logic begins here
+
+        # Now mine a big block
+        mine_large_block(self.nodes[0], self.utxo_cache)
+
+        # Store the hash; we'll request this later
+        big_old_block = self.nodes[0].getbestblockhash()
+        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
+        big_old_block = int(big_old_block, 16)
+
+        # Advance to two days ago
+        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
+
+        # Mine one more block, so that the prior block looks old
+        mine_large_block(self.nodes[0], self.utxo_cache)
+
+        # We'll be requesting this new block too
+        big_new_block = self.nodes[0].getbestblockhash()
+        big_new_block = int(big_new_block, 16)
+
+        # p2p_conns[0] will test what happens if we just keep requesting the
+        # same big old block too many times (expect: disconnect)
+
+        getdata_request = msg_getdata()
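+        # (Editorial gloss: inv type 2 is MSG_BLOCK, i.e. a request for the
+        # full serialized block; see CInv in test_framework/mininode.py.)
+        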
getdata_request.inv.append(CInv(2, big_old_block)) + + max_bytes_per_day = 800*1024*1024 + daily_buffer = 144 * 4000000 + max_bytes_available = max_bytes_per_day - daily_buffer + success_count = max_bytes_available // old_block_size + + # 576MB will be reserved for relaying new blocks, so expect this to + # succeed for ~235 tries. + for i in range(success_count): + p2p_conns[0].send_message(getdata_request) + p2p_conns[0].sync_with_ping() + assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1) + + assert_equal(len(self.nodes[0].getpeerinfo()), 3) + # At most a couple more tries should succeed (depending on how long + # the test has been running so far). + for i in range(3): + p2p_conns[0].send_message(getdata_request) + p2p_conns[0].wait_for_disconnect() + assert_equal(len(self.nodes[0].getpeerinfo()), 2) + self.log.info("Peer 0 disconnected after downloading old block too many times") + + # Requesting the current block on p2p_conns[1] should succeed indefinitely, + # even when over the max upload target. + # We'll try 800 times + getdata_request.inv = [CInv(2, big_new_block)] + for i in range(800): + p2p_conns[1].send_message(getdata_request) + p2p_conns[1].sync_with_ping() + assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1) + + self.log.info("Peer 1 able to repeatedly download new block") + + # But if p2p_conns[1] tries for an old block, it gets disconnected too. + getdata_request.inv = [CInv(2, big_old_block)] + p2p_conns[1].send_message(getdata_request) + p2p_conns[1].wait_for_disconnect() + assert_equal(len(self.nodes[0].getpeerinfo()), 1) + + self.log.info("Peer 1 disconnected after trying to download old block") + + self.log.info("Advancing system time on node to clear counters...") + + # If we advance the time by 24 hours, then the counters should reset, + # and p2p_conns[2] should be able to retrieve the old block. 
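+        # (Gloss: mocktime currently sits two days in the past, so setting it
+        # back to the present advances the node's clock well past the 24-hour
+        # window used for upload accounting.)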
+ self.nodes[0].setmocktime(int(time.time())) + p2p_conns[2].sync_with_ping() + p2p_conns[2].send_message(getdata_request) + p2p_conns[2].sync_with_ping() + assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) + + self.log.info("Peer 2 able to download old block") + + self.nodes[0].disconnect_p2ps() + + #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 + self.log.info("Restarting nodes with -whitelist=127.0.0.1") + self.stop_node(0) + self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) + + # Reconnect to self.nodes[0] + self.nodes[0].add_p2p_connection(TestNode()) + + network_thread_start() + self.nodes[0].p2p.wait_for_verack() + + #retrieve 20 blocks which should be enough to break the 1MB limit + getdata_request.inv = [CInv(2, big_new_block)] + for i in range(20): + self.nodes[0].p2p.send_message(getdata_request) + self.nodes[0].p2p.sync_with_ping() + assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1) + + getdata_request.inv = [CInv(2, big_old_block)] + self.nodes[0].p2p.send_and_ping(getdata_request) + assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist + + self.log.info("Peer still connected after trying to download old block (whitelisted)") + +if __name__ == '__main__': + MaxUploadTest().main() diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py new file mode 100755 index 0000000000..90a3de0e0d --- /dev/null +++ b/test/functional/feature_minchainwork.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test logic for setting nMinimumChainWork on command line. + +Nodes don't consider themselves out of "initial block download" until +their active chain has more work than nMinimumChainWork. + +Nodes don't download blocks from a peer unless the peer's best known block +has more work than nMinimumChainWork. + +While in initial block download, nodes won't relay blocks to their peers, so +test that this parameter functions as intended by verifying that block relay +only succeeds past a given node once its nMinimumChainWork has been exceeded. +""" + +import time + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import connect_nodes, assert_equal + +# 2 hashes required per regtest block (with no difficulty adjustment) +REGTEST_WORK_PER_BLOCK = 2 + +class MinimumChainWorkTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + + self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] + self.node_min_work = [0, 101, 101] + + def setup_network(self): + # This test relies on the chain setup being: + # node0 <- node1 <- node2 + # Before leaving IBD, nodes prefer to download blocks from outbound + # peers, so ensure that we're mining on an outbound peer and testing + # block relay to inbound peers. + self.setup_nodes() + for i in range(self.num_nodes-1): + connect_nodes(self.nodes[i+1], i) + + def run_test(self): + # Start building a chain on node0. 
node2 shouldn't be able to sync until node1's + # minchainwork is exceeded + starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work + self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1]) + + starting_blockcount = self.nodes[2].getblockcount() + + num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) + self.log.info("Generating %d blocks on node0", num_blocks_to_generate) + hashes = self.nodes[0].generate(num_blocks_to_generate) + + self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork']) + + # Sleep a few seconds and verify that node2 didn't get any new blocks + # or headers. We sleep, rather than sync_blocks(node0, node1) because + # it's reasonable either way for node1 to get the blocks, or not get + # them (since they're below node1's minchainwork). + time.sleep(3) + + self.log.info("Verifying node 2 has no more blocks than before") + self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) + # Node2 shouldn't have any new headers yet, because node1 should not + # have relayed anything. + assert_equal(len(self.nodes[2].getchaintips()), 1) + assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) + + assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash() + assert_equal(self.nodes[2].getblockcount(), starting_blockcount) + + self.log.info("Generating one more block") + self.nodes[0].generate(1) + + self.log.info("Verifying nodes are all synced") + + # Because nodes in regtest are all manual connections (eg using + # addnode), node1 should not have disconnected node0. If not for that, + # we'd expect node1 to have disconnected node0 for serving an + # insufficient work chain, in which case we'd need to reconnect them to + # continue the test. + + self.sync_all() + self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) + +if __name__ == '__main__': + MinimumChainWorkTest().main() diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py new file mode 100755 index 0000000000..980bef5fc8 --- /dev/null +++ b/test/functional/feature_notifications.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the -alertnotify, -blocknotify and -walletnotify options.""" +import os + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, wait_until, connect_nodes_bi + +class NotificationsTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + + def setup_network(self): + self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") + self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt") + self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt") + + # -alertnotify and -blocknotify on node0, walletnotify on node1 + self.extra_args = [["-blockversion=2", + "-alertnotify=echo %%s >> %s" % self.alert_filename, + "-blocknotify=echo %%s >> %s" % self.block_filename], + ["-blockversion=211", + "-rescan", + "-walletnotify=echo %%s >> %s" % self.tx_filename]] + super().setup_network() + + def run_test(self): + self.log.info("test -blocknotify") + block_count = 10 + blocks = self.nodes[1].generate(block_count) + + # wait at most 10 seconds for expected file size before reading the content + wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10) + + # file content should equal the generated blocks hashes + with open(self.block_filename, 'r') as f: + assert_equal(sorted(blocks), sorted(f.read().splitlines())) + + self.log.info("test -walletnotify") + # wait at most 10 seconds for expected file size before reading the content + wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) + + # file content should equal the generated transaction hashes + txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) + with open(self.tx_filename, 'r') as f: + assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) + os.remove(self.tx_filename) + + self.log.info("test -walletnotify after rescan") + # restart node to rescan to force wallet notifications + self.restart_node(1) + connect_nodes_bi(self.nodes, 0, 1) + + wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) + + # file content should equal the generated transaction hashes + txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) + with open(self.tx_filename, 'r') as f: + assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) + + # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st. 
+ self.log.info("test -alertnotify") + self.nodes[1].generate(41) + self.sync_all() + + # Give bitcoind 10 seconds to write the alert notification + wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10) + + with open(self.alert_filename, 'r', encoding='utf8') as f: + alert_text = f.read() + + # Mine more up-version blocks, should not get more alerts: + self.nodes[1].generate(2) + self.sync_all() + + with open(self.alert_filename, 'r', encoding='utf8') as f: + alert_text2 = f.read() + + self.log.info("-alertnotify should not continue notifying for more unknown version blocks") + assert_equal(alert_text, alert_text2) + +if __name__ == '__main__': + NotificationsTest().main() diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py new file mode 100755 index 0000000000..e4f413cc2a --- /dev/null +++ b/test/functional/feature_nulldummy.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test NULLDUMMY softfork. + +Connect to a single node. +Generate 2 blocks (save the coinbases for later). +Generate 427 more blocks. +[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. +[Policy] Check that non-NULLDUMMY transactions are rejected before activation. +[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. +[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import CTransaction, network_thread_start +from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment +from test_framework.script import CScript +from io import BytesIO +import time + +NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" + +def trueDummy(tx): + scriptSig = CScript(tx.vin[0].scriptSig) + newscript = [] + for i in scriptSig: + if (len(newscript) == 0): + assert(len(i) == 0) + newscript.append(b'\x51') + else: + newscript.append(i) + tx.vin[0].scriptSig = CScript(newscript) + tx.rehash() + +class NULLDUMMYTest(BitcoinTestFramework): + + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through + # normal segwit activation here (and don't use the default always-on behaviour). 
+ self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]] + + def run_test(self): + self.address = self.nodes[0].getnewaddress() + self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])['address'] + self.wit_address = self.nodes[0].addwitnessaddress(self.address) + self.wit_ms_address = self.nodes[0].addmultisigaddress(1, [self.address], '', 'p2sh-segwit')['address'] + + network_thread_start() + self.coinbase_blocks = self.nodes[0].generate(2) # Block 2 + coinbase_txid = [] + for i in self.coinbase_blocks: + coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) + self.nodes[0].generate(427) # Block 429 + self.lastblockhash = self.nodes[0].getbestblockhash() + self.tip = int("0x" + self.lastblockhash, 0) + self.lastblockheight = 429 + self.lastblocktime = int(time.time()) + 429 + + self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") + test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)] + txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True) + test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48)) + txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True) + test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49)) + txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True) + self.block_submit(self.nodes[0], test1txs, False, True) + + self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") + test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47) + trueDummy(test2tx) + assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True) + + self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") + self.block_submit(self.nodes[0], [test2tx], False, True) + + self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation") + test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46) + test6txs=[CTransaction(test4tx)] + trueDummy(test4tx) + assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True) + self.block_submit(self.nodes[0], [test4tx]) + + self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation") + test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48) + test6txs.append(CTransaction(test5tx)) + test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01' + assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True) + self.block_submit(self.nodes[0], [test5tx], True) + + self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]") + for i in test6txs: + self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True) + self.block_submit(self.nodes[0], test6txs, True, True) + + + def create_transaction(self, node, txid, to_address, amount): + inputs = [{ "txid" : txid, "vout" : 0}] + outputs = { 
to_address : amount }
+        rawtx = node.createrawtransaction(inputs, outputs)
+        signresult = node.signrawtransaction(rawtx)
+        tx = CTransaction()
+        f = BytesIO(hex_str_to_bytes(signresult['hex']))
+        tx.deserialize(f)
+        return tx
+
+
+    def block_submit(self, node, txs, witness=False, accept=False):
+        block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
+        block.nVersion = 4
+        for tx in txs:
+            tx.rehash()
+            block.vtx.append(tx)
+        block.hashMerkleRoot = block.calc_merkle_root()
+        if witness:
+            add_witness_commitment(block)
+        block.rehash()
+        block.solve()
+        node.submitblock(bytes_to_hex_str(block.serialize(True)))
+        if accept:
+            assert_equal(node.getbestblockhash(), block.hash)
+            self.tip = block.sha256
+            self.lastblockhash = block.hash
+            self.lastblocktime += 1
+            self.lastblockheight += 1
+        else:
+            assert_equal(node.getbestblockhash(), self.lastblockhash)
+
+if __name__ == '__main__':
+    NULLDUMMYTest().main()
diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py
new file mode 100755
index 0000000000..2eb1be47a5
--- /dev/null
+++ b/test/functional/feature_proxy.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test bitcoind with different proxy configurations.
+
+Test plan:
+- Start bitcoinds with different proxy configurations
+- Use addnode to initiate connections
+- Verify that proxies are connected to, and the right connection command is given
+- Proxy configurations to test on bitcoind side:
+    - `-proxy` (proxy everything)
+    - `-onion` (proxy just onions)
+    - `-proxyrandomize` (circuit randomization)
+- Proxy configurations to test on proxy side:
+    - support no authentication (other proxy)
+    - support no authentication + user/pass authentication (Tor)
+    - proxy on IPv6
+
+- Create various proxies (as threads)
+- Create bitcoinds that connect to them
+- Manipulate the bitcoinds using addnode (onetry) and observe effects
+
+addnode connect to IPv4
+addnode connect to IPv6
+addnode connect to onion
+addnode connect to generic DNS name
+"""
+
+import socket
+import os
+
+from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    PORT_MIN,
+    PORT_RANGE,
+    assert_equal,
+)
+from test_framework.netutil import test_ipv6_local
+
+RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
+
+class ProxyTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 4
+
+    def setup_nodes(self):
+        self.have_ipv6 = test_ipv6_local()
+        # Create two proxies on different ports
+        # ... one unauthenticated
+        self.conf1 = Socks5Configuration()
+        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
+        self.conf1.unauth = True
+        self.conf1.auth = False
+        # ... one supporting authenticated and unauthenticated (Tor)
+        self.conf2 = Socks5Configuration()
+        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
+        self.conf2.unauth = True
+        self.conf2.auth = True
+        if self.have_ipv6:
+            # ... one on IPv6 with similar configuration
+            self.conf3 = Socks5Configuration()
+            self.conf3.af = socket.AF_INET6
+            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
+            self.conf3.unauth = True
+            self.conf3.auth = True
+        else:
+            self.log.warning("Testing without local IPv6 support")
+
+        self.serv1 = Socks5Server(self.conf1)
+        self.serv1.start()
+        self.serv2 = Socks5Server(self.conf2)
+        self.serv2.start()
+        if self.have_ipv6:
+            self.serv3 = Socks5Server(self.conf3)
+            self.serv3.start()
+
+        # Note: proxies are not used to connect to local nodes;
+        # the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
+        args = [
+            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
+            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
+            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
+            []
+        ]
+        if self.have_ipv6:
+            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
+        self.add_nodes(self.num_nodes, extra_args=args)
+        self.start_nodes()
+
+    def node_test(self, node, proxies, auth, test_onion=True):
+        rv = []
+        # Test: outgoing IPv4 connection through node
+        node.addnode("15.61.23.23:1234", "onetry")
+        cmd = proxies[0].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, b"15.61.23.23")
+        assert_equal(cmd.port, 1234)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        if self.have_ipv6:
+            # Test: outgoing IPv6 connection through node
+            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
+            cmd = proxies[1].queue.get()
+            assert(isinstance(cmd, Socks5Command))
+            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
+            assert_equal(cmd.port, 5443)
+            if not auth:
+                assert_equal(cmd.username, None)
+                assert_equal(cmd.password, None)
+            rv.append(cmd)
+
+        if test_onion:
+            # Test: outgoing onion connection through node
+            node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
+            cmd = proxies[2].queue.get()
+            assert(isinstance(cmd, Socks5Command))
+            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
+            assert_equal(cmd.port, 8333)
+            if not auth:
+                assert_equal(cmd.username, None)
+                assert_equal(cmd.password, None)
+            rv.append(cmd)
+
+        # Test: outgoing DNS name connection through node
+        node.addnode("node.noumenon:8333", "onetry")
+        cmd = proxies[3].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, b"node.noumenon")
+        assert_equal(cmd.port, 8333)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        return rv
+
+    def run_test(self):
+        # basic -proxy
+        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
+
+        # -proxy plus -onion
+        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
+
+        # -proxy plus -onion, -proxyrandomize
+        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
+        # Check that credentials used for
-proxyrandomize connections are unique + credentials = set((x.username,x.password) for x in rv) + assert_equal(len(credentials), len(rv)) + + if self.have_ipv6: + # proxy on IPv6 localhost + self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) + + def networks_dict(d): + r = {} + for x in d['networks']: + r[x['name']] = x + return r + + # test RPC getnetworkinfo + n0 = networks_dict(self.nodes[0].getnetworkinfo()) + for net in ['ipv4','ipv6','onion']: + assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) + assert_equal(n0[net]['proxy_randomize_credentials'], True) + assert_equal(n0['onion']['reachable'], True) + + n1 = networks_dict(self.nodes[1].getnetworkinfo()) + for net in ['ipv4','ipv6']: + assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) + assert_equal(n1[net]['proxy_randomize_credentials'], False) + assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) + assert_equal(n1['onion']['proxy_randomize_credentials'], False) + assert_equal(n1['onion']['reachable'], True) + + n2 = networks_dict(self.nodes[2].getnetworkinfo()) + for net in ['ipv4','ipv6','onion']: + assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) + assert_equal(n2[net]['proxy_randomize_credentials'], True) + assert_equal(n2['onion']['reachable'], True) + + if self.have_ipv6: + n3 = networks_dict(self.nodes[3].getnetworkinfo()) + for net in ['ipv4','ipv6']: + assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) + assert_equal(n3[net]['proxy_randomize_credentials'], False) + assert_equal(n3['onion']['reachable'], False) + +if __name__ == '__main__': + ProxyTest().main() + diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py new file mode 100755 index 0000000000..49ad7f838c --- /dev/null +++ b/test/functional/feature_pruning.py @@ -0,0 +1,454 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the pruning code. + +WARNING: +This test uses 4GB of disk space. +This test takes 30 mins or more (up to 2 hours) +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +import time +import os + +MIN_BLOCKS_TO_KEEP = 288 + +# Rescans start at the earliest block up to 2 hours before a key timestamp, so +# the manual prune RPC avoids pruning blocks in the same window to be +# compatible with pruning based on key creation time. +TIMESTAMP_WINDOW = 2 * 60 * 60 + + +def calc_usage(blockdir): + return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) + +class PruneTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 6 + + # Create nodes 0 and 1 to mine. + # Create node 2 to test pruning. 
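+        # -prune=550 (used for nodes 2 and 5 below) is the smallest allowed
+        # pruning target: 550 MiB of block files.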
+        self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
+        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
+        # Create node 5 to test wallet in prune mode, but do not connect
+        self.extra_args = [self.full_node_default_args,
+                           self.full_node_default_args,
+                           ["-maxreceivebuffer=20000", "-prune=550"],
+                           ["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
+                           ["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
+                           ["-prune=550"]]
+
+    def setup_network(self):
+        self.setup_nodes()
+
+        self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/"
+
+        connect_nodes(self.nodes[0], 1)
+        connect_nodes(self.nodes[1], 2)
+        connect_nodes(self.nodes[2], 0)
+        connect_nodes(self.nodes[0], 3)
+        connect_nodes(self.nodes[0], 4)
+        sync_blocks(self.nodes[0:5])
+
+    def setup_nodes(self):
+        self.add_nodes(self.num_nodes, self.extra_args, timewait=900)
+        self.start_nodes()
+
+    def create_big_chain(self):
+        # Start by creating some coinbases we can spend later
+        self.nodes[1].generate(200)
+        sync_blocks(self.nodes[0:2])
+        self.nodes[0].generate(150)
+        # Then mine enough full blocks to create more than 550MiB of data
+        for i in range(645):
+            mine_large_block(self.nodes[0], self.utxo_cache_0)
+
+        sync_blocks(self.nodes[0:5])
+
+    def test_height_min(self):
+        if not os.path.isfile(self.prunedir + "blk00000.dat"):
+            raise AssertionError("blk00000.dat is missing, pruning too early")
+        self.log.info("Success")
+        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
+        self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
+        # Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
+        for i in range(25):
+            mine_large_block(self.nodes[0], self.utxo_cache_0)
+
+        waitstart = time.time()
+        while os.path.isfile(self.prunedir + "blk00000.dat"):
+            time.sleep(0.1)
+            if time.time() - waitstart > 30:
+                raise AssertionError("blk00000.dat not pruned when it should be")
+
+        self.log.info("Success")
+        usage = calc_usage(self.prunedir)
+        self.log.info("Usage should be below target: %d" % usage)
+        if usage > 550:
+            raise AssertionError("Pruning target not being met")
+
+    def create_chain_with_staleblocks(self):
+        # Create stale blocks in manageable sized chunks
+        self.log.info("Mine 24 (stale) blocks on Node 1, followed by a 25 block (main chain) reorg from Node 0, for 12 rounds")
+
+        for j in range(12):
+            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
+            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node0 reconnects
+            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
+            self.stop_node(0)
+            self.start_node(0, extra_args=self.full_node_default_args)
+            # Mine 24 blocks in node 1
+            for i in range(24):
+                if j == 0:
+                    mine_large_block(self.nodes[1], self.utxo_cache_1)
+                else:
+                    # Add node1's wallet transactions back to the mempool, to
+                    # keep the mined blocks from being too small.
+                    self.nodes[1].resendwallettransactions()
+                    self.nodes[1].generate(1) # tx's already in mempool from previous disconnects
+
+            # Reorg back with a 25 block chain from node 0
+            for i in range(25):
+                mine_large_block(self.nodes[0], self.utxo_cache_0)
+
+            # Create connections in this order so both nodes can see the reorg at the same time
+            connect_nodes(self.nodes[1], 0)
+            connect_nodes(self.nodes[2], 0)
+            sync_blocks(self.nodes[0:3])
+
+        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
+
+    def reorg_test(self):
+        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
+        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
+        # Reboot node 1 to clear its mempool (hopefully making the invalidate faster)
+        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
+        self.stop_node(1)
+        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
+
+        height = self.nodes[1].getblockcount()
+        self.log.info("Current block height: %d" % height)
+
+        invalidheight = height - 287
+        badhash = self.nodes[1].getblockhash(invalidheight)
+        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
+        self.nodes[1].invalidateblock(badhash)
+
+        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
+        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
+        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
+        curhash = self.nodes[1].getblockhash(invalidheight - 1)
+        while curhash != mainchainhash:
+            self.nodes[1].invalidateblock(curhash)
+            curhash = self.nodes[1].getblockhash(invalidheight - 1)
+
+        assert(self.nodes[1].getblockcount() == invalidheight - 1)
+        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
+
+        # Reboot node1 to clear those giant tx's from mempool
+        self.stop_node(1)
+        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
+
+        self.log.info("Generating new longer chain of 300 more blocks")
+        self.nodes[1].generate(300)
+
+        self.log.info("Reconnect nodes")
+        connect_nodes(self.nodes[0], 1)
+        connect_nodes(self.nodes[2], 1)
+        sync_blocks(self.nodes[0:3], timeout=120)
+
+        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
+        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
+
+        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+
+        # Get node0's wallet transactions back in its mempool, to keep the
+        # mined blocks from being too small.
+        self.nodes[0].resendwallettransactions()
+
+        for i in range(22):
+            # This can be slow, so do this in multiple RPC calls to avoid
+            # RPC timeouts.
+            self.nodes[0].generate(10) # node 0 has many large tx's in its mempool from the disconnects
+        sync_blocks(self.nodes[0:3], timeout=300)
+
+        usage = calc_usage(self.prunedir)
+        self.log.info("Usage should be below target: %d" % usage)
+        if usage > 550:
+            raise AssertionError("Pruning target not being met")
+
+        return invalidheight, badhash
+
+    def reorg_back(self):
+        # Verify that a block on the old main chain fork has been pruned away
+        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
+        self.log.info("Will need to redownload block %d" % self.forkheight)
+
+        # Verify that we have enough history to reorg back to the fork point.
+        # Although this is more than 288 blocks, this chain was written more recently,
+        # and only its other 299 small and 220 large blocks are in the block files after it,
+        # so it's expected to still be retained
+        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
+
+        first_reorg_height = self.nodes[2].getblockcount()
+        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
+        self.nodes[2].invalidateblock(curchainhash)
+        goalbestheight = self.mainchainheight
+        goalbesthash = self.mainchainhash2
+
+        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
+        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
+        # redownload its missing blocks.
+        # Invalidate the reorg_test chain in node 0 as well; it can successfully switch to the original chain
+        # because it has all the block data.
+        # However, it must mine enough blocks to have a more work chain than the reorg_test chain in order
+        # to trigger node 2's block download logic.
+        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
+        if self.nodes[2].getblockcount() < self.mainchainheight:
+            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
+            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload.
Blocks needed: %d" % blocks_to_mine) + self.nodes[0].invalidateblock(curchainhash) + assert(self.nodes[0].getblockcount() == self.mainchainheight) + assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) + goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] + goalbestheight = first_reorg_height + 1 + + self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") + waitstart = time.time() + while self.nodes[2].getblockcount() < goalbestheight: + time.sleep(0.1) + if time.time() - waitstart > 900: + raise AssertionError("Node 2 didn't reorg to proper height") + assert(self.nodes[2].getbestblockhash() == goalbesthash) + # Verify we can now have the data for a block previously pruned + assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) + + def manual_test(self, node_number, use_timestamp): + # at this point, node has 995 blocks and has not yet run in prune mode + self.start_node(node_number) + node = self.nodes[node_number] + assert_equal(node.getblockcount(), 995) + assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) + + # now re-start in manual pruning mode + self.stop_node(node_number) + self.start_node(node_number, extra_args=["-prune=1"]) + node = self.nodes[node_number] + assert_equal(node.getblockcount(), 995) + + def height(index): + if use_timestamp: + return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW + else: + return index + + def prune(index, expected_ret=None): + ret = node.pruneblockchain(height(index)) + # Check the return value. When use_timestamp is True, just check + # that the return value is less than or equal to the expected + # value, because when more than one block is generated per second, + # a timestamp will not be granular enough to uniquely identify an + # individual block. + if expected_ret is None: + expected_ret = index + if use_timestamp: + assert_greater_than(ret, 0) + assert_greater_than(expected_ret + 1, ret) + else: + assert_equal(ret, expected_ret) + + def has_block(index): + return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) + + # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000) + assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) + + # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) + node.generate(6) + assert_equal(node.getblockchaininfo()["blocks"], 1001) + + # negative heights should raise an exception + assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) + + # height=100 too low to prune first block file so this is a no-op + prune(100) + if not has_block(0): + raise AssertionError("blk00000.dat is missing when should still be there") + + # Does nothing + node.pruneblockchain(height(0)) + if not has_block(0): + raise AssertionError("blk00000.dat is missing when should still be there") + + # height=500 should prune first file + prune(500) + if has_block(0): + raise AssertionError("blk00000.dat is still there, should be pruned by now") + if not has_block(1): + raise AssertionError("blk00001.dat is missing when should still be there") + + # height=650 should prune second file + prune(650) + if has_block(1): + raise AssertionError("blk00001.dat is still there, should be pruned by now") + + # height=1000 should not prune anything more, because tip-288 is in blk00002.dat. 
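+        # The expected return value below is 1001 - 288 = 713, the last height
+        # that may be pruned while still keeping MIN_BLOCKS_TO_KEEP blocks.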
+        prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
+        if not has_block(2):
+            raise AssertionError("blk00002.dat is missing when it should still be there")
+
+        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
+        node.generate(288)
+        prune(1000)
+        if has_block(2):
+            raise AssertionError("blk00002.dat is still there, should be pruned by now")
+        if has_block(3):
+            raise AssertionError("blk00003.dat is still there, should be pruned by now")
+
+        # stop node, start back up with auto-prune at 550MB, make sure still runs
+        self.stop_node(node_number)
+        self.start_node(node_number, extra_args=["-prune=550"])
+
+        self.log.info("Success")
+
+    def wallet_test(self):
+        # check that the pruning node's wallet is still in good shape
+        self.log.info("Stop and start pruning node to trigger wallet rescan")
+        self.stop_node(2)
+        self.start_node(2, extra_args=["-prune=550"])
+        self.log.info("Success")
+
+        # Check that the wallet loads successfully when restarting a pruned node after IBD.
+        # This was reported to fail in #7494.
+        self.log.info("Syncing node 5 to test wallet")
+        connect_nodes(self.nodes[0], 5)
+        nds = [self.nodes[0], self.nodes[5]]
+        sync_blocks(nds, wait=5, timeout=300)
+        self.stop_node(5) # stop and start to trigger rescan
+        self.start_node(5, extra_args=["-prune=550"])
+        self.log.info("Success")
+
+    def run_test(self):
+        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
+        self.log.info("Mining a big blockchain of 995 blocks")
+
+        # Determine default relay fee
+        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
+
+        # Cache for utxos, as the listunspent may take a long time later in the test
+        self.utxo_cache_0 = []
+        self.utxo_cache_1 = []
+
+        self.create_big_chain()
+        # Chain diagram key:
+        # *   blocks on main chain
+        # +,&,$,@ blocks on other forks
+        # X   invalidated block
+        # N1  Node 1
+        #
+        # Start by mining a simple chain that all nodes have
+        # N0=N1=N2 **...*(995)
+
+        # stop manual-pruning node with 995 blocks
+        self.stop_node(3)
+        self.stop_node(4)
+
+        self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
+        self.test_height_min()
+        # Extend this chain past the PruneAfterHeight
+        # N0=N1=N2 **...*(1020)
+
+        self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
+        self.create_chain_with_staleblocks()
+        # Disconnect N0
+        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
+        # N1=N2 **...*+...+(1044)
+        # N0    **...**...**(1045)
+        #
+        # reconnect nodes causing reorg on N1 and N2
+        # N1=N2 **...*(1020) *...**(1045)
+        #                   \
+        #                    +...+(1044)
+        #
+        # repeat this process until you have 12 stale forks hanging off the
+        # main chain on N1 and N2
+        # N0    *************************...***************************(1320)
+        #
+        # N1=N2 **...*(1020) *...**(1045) *..  ..**(1295) *...**(1320)
+        #                   \            \                \
+        #                    +...+(1044)  &..              $...$(1319)
+
+        # Save some current chain state for later use
+        self.mainchainheight = self.nodes[2].getblockcount() # 1320
+        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
+
+        self.log.info("Check that we can still survive a 288 block reorg")
+        (self.forkheight, self.forkhash) = self.reorg_test() # (1033, )
+        # Now create a 288 block reorg by mining a longer chain on N1
+        # First disconnect N1
+        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
+        # N1   **...*(1020) **...**(1032)X..
+        #                  \
+        #                   ++...+(1031)X..
+ # + # Now mine 300 more blocks on N1 + # N1 **...*(1020) **...**(1032) @@...@(1332) + # \ \ + # \ X... + # \ \ + # ++...+(1031)X.. .. + # + # Reconnect nodes and mine 220 more blocks on N1 + # N1 **...*(1020) **...**(1032) @@...@@@(1552) + # \ \ + # \ X... + # \ \ + # ++...+(1031)X.. .. + # + # N2 **...*(1020) **...**(1032) @@...@@@(1552) + # \ \ + # \ *...**(1320) + # \ \ + # ++...++(1044) .. + # + # N0 ********************(1032) @@...@@@(1552) + # \ + # *...**(1320) + + self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg") + self.reorg_back() + # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) + # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to + # original main chain (*), but will require redownload of some blocks + # In order to have a peer we think we can download from, must also perform this invalidation + # on N0 and mine a new longest chain to trigger. + # Final result: + # N0 ********************(1032) **...****(1553) + # \ + # X@...@@@(1552) + # + # N2 **...*(1020) **...**(1032) **...****(1553) + # \ \ + # \ X@...@@@(1552) + # \ + # +.. + # + # N1 doesn't change because 1033 on main chain (*) is invalid + + self.log.info("Test manual pruning with block indices") + self.manual_test(3, use_timestamp=False) + + self.log.info("Test manual pruning with timestamps") + self.manual_test(4, use_timestamp=True) + + self.log.info("Test wallet re-scan") + self.wallet_test() + + self.log.info("Done") + +if __name__ == '__main__': + PruneTest().main() diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py new file mode 100755 index 0000000000..6b7ab0f43e --- /dev/null +++ b/test/functional/feature_rbf.py @@ -0,0 +1,565 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the RBF code.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.script import * +from test_framework.mininode import * + +MAX_REPLACEMENT_LIMIT = 100 + +def txToHex(tx): + return bytes_to_hex_str(tx.serialize()) + +def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): + """Create a txout with a given amount and scriptPubKey + + Mines coins as needed. + + confirmed - txouts created will be confirmed in the blockchain; + unconfirmed otherwise. + """ + fee = 1*COIN + while node.getbalance() < satoshi_round((amount + fee)/COIN): + node.generate(100) + + new_addr = node.getnewaddress() + txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) + tx1 = node.getrawtransaction(txid, 1) + txid = int(txid, 16) + i = None + + for i, txout in enumerate(tx1['vout']): + if txout['scriptPubKey']['addresses'] == [new_addr]: + break + assert i is not None + + tx2 = CTransaction() + tx2.vin = [CTxIn(COutPoint(txid, i))] + tx2.vout = [CTxOut(amount, scriptPubKey)] + tx2.rehash() + + signed_tx = node.signrawtransaction(txToHex(tx2)) + + txid = node.sendrawtransaction(signed_tx['hex'], True) + + # If requested, ensure txouts are confirmed. + if confirmed: + mempool_size = len(node.getrawmempool()) + while mempool_size > 0: + node.generate(1) + new_size = len(node.getrawmempool()) + # Error out if we have something stuck in the mempool, as this + # would likely be a bug. 
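+            # Each generated block should confirm at least one of the remaining
+            # mempool transactions, so the mempool must shrink every iteration.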
+ assert(new_size < mempool_size) + mempool_size = new_size + + return COutPoint(int(txid, 16), 0) + +class ReplaceByFeeTest(BitcoinTestFramework): + + def set_test_params(self): + self.num_nodes = 2 + self.extra_args= [["-maxorphantx=1000", + "-whitelist=127.0.0.1", + "-limitancestorcount=50", + "-limitancestorsize=101", + "-limitdescendantcount=200", + "-limitdescendantsize=101"], + ["-mempoolreplacement=0"]] + + def run_test(self): + # Leave IBD + self.nodes[0].generate(1) + + make_utxo(self.nodes[0], 1*COIN) + + # Ensure nodes are synced + self.sync_all() + + self.log.info("Running test simple doublespend...") + self.test_simple_doublespend() + + self.log.info("Running test doublespend chain...") + self.test_doublespend_chain() + + self.log.info("Running test doublespend tree...") + self.test_doublespend_tree() + + self.log.info("Running test replacement feeperkb...") + self.test_replacement_feeperkb() + + self.log.info("Running test spends of conflicting outputs...") + self.test_spends_of_conflicting_outputs() + + self.log.info("Running test new unconfirmed inputs...") + self.test_new_unconfirmed_inputs() + + self.log.info("Running test too many replacements...") + self.test_too_many_replacements() + + self.log.info("Running test opt-in...") + self.test_opt_in() + + self.log.info("Running test RPC...") + self.test_rpc() + + self.log.info("Running test prioritised transactions...") + self.test_prioritised_transactions() + + self.log.info("Passed") + + def test_simple_doublespend(self): + """Simple doublespend""" + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + # make_utxo may have generated a bunch of blocks, so we need to sync + # before we can spend the coins generated, or else the resulting + # transactions might not be accepted by our peers. 
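+        # (make_utxo above mines up to 100 blocks at a time whenever the wallet
+        # balance is too low, so peers may be several blocks behind at this point.)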
+ self.sync_all() + + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + self.sync_all() + + # Should fail because we haven't changed the fee + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] + tx1b_hex = txToHex(tx1b) + + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) + # This will raise an exception due to transaction replacement being disabled + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) + + # Extra 0.1 BTC fee + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] + tx1b_hex = txToHex(tx1b) + # Replacement still disabled even with "enough fee" + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) + # Works when enabled + tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) + + mempool = self.nodes[0].getrawmempool() + + assert (tx1a_txid not in mempool) + assert (tx1b_txid in mempool) + + assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) + + # Second node is running mempoolreplacement=0, will not replace originally-seen txn + mempool = self.nodes[1].getrawmempool() + assert tx1a_txid in mempool + assert tx1b_txid not in mempool + + def test_doublespend_chain(self): + """Doublespend of a long chain""" + + initial_nValue = 50*COIN + tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) + + prevout = tx0_outpoint + remaining_value = initial_nValue + chain_txids = [] + while remaining_value > 10*COIN: + remaining_value -= 1*COIN + tx = CTransaction() + tx.vin = [CTxIn(prevout, nSequence=0)] + tx.vout = [CTxOut(remaining_value, CScript([1]))] + tx_hex = txToHex(tx) + txid = self.nodes[0].sendrawtransaction(tx_hex, True) + chain_txids.append(txid) + prevout = COutPoint(int(txid, 16), 0) + + # Whether the double-spend is allowed is evaluated by including all + # child fees - 40 BTC - so this attempt is rejected. 
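+        # The chain built above holds 40 transactions paying a 1 BTC fee each
+        # (40 BTC total), while this replacement pays only a 30 BTC fee.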
+ dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) + + # Accepted with sufficient fee + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + self.nodes[0].sendrawtransaction(dbl_tx_hex, True) + + mempool = self.nodes[0].getrawmempool() + for doublespent_txid in chain_txids: + assert(doublespent_txid not in mempool) + + def test_doublespend_tree(self): + """Doublespend of a big tree of transactions""" + + initial_nValue = 50*COIN + tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) + + def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): + if _total_txs is None: + _total_txs = [0] + if _total_txs[0] >= max_txs: + return + + txout_value = (initial_value - fee) // tree_width + if txout_value < fee: + return + + vout = [CTxOut(txout_value, CScript([i+1])) + for i in range(tree_width)] + tx = CTransaction() + tx.vin = [CTxIn(prevout, nSequence=0)] + tx.vout = vout + tx_hex = txToHex(tx) + + assert(len(tx.serialize()) < 100000) + txid = self.nodes[0].sendrawtransaction(tx_hex, True) + yield tx + _total_txs[0] += 1 + + txid = int(txid, 16) + + for i, txout in enumerate(tx.vout): + for x in branch(COutPoint(txid, i), txout_value, + max_txs, + tree_width=tree_width, fee=fee, + _total_txs=_total_txs): + yield x + + fee = int(0.0001*COIN) + n = MAX_REPLACEMENT_LIMIT + tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) + assert_equal(len(tree_txs), n) + + # Attempt double-spend, will fail because too little fee paid + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) + + # 1 BTC fee is enough + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + self.nodes[0].sendrawtransaction(dbl_tx_hex, True) + + mempool = self.nodes[0].getrawmempool() + + for tx in tree_txs: + tx.rehash() + assert (tx.hash not in mempool) + + # Try again, but with more total transactions than the "max txs + # double-spent at once" anti-DoS limit. 
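+        # MAX_REPLACEMENT_LIMIT (100) mirrors BIP125's cap on how many mempool
+        # transactions a single replacement is allowed to evict.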
+ for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): + fee = int(0.0001*COIN) + tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) + tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) + assert_equal(len(tree_txs), n) + + dbl_tx = CTransaction() + dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))] + dbl_tx_hex = txToHex(dbl_tx) + # This will raise an exception + assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) + + for tx in tree_txs: + tx.rehash() + self.nodes[0].getrawtransaction(tx.hash) + + def test_replacement_feeperkb(self): + """Replacement requires fee-per-KB to be higher""" + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + self.nodes[0].sendrawtransaction(tx1a_hex, True) + + # Higher fee, but the fee per KB is much lower, so the replacement is + # rejected. + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] + tx1b_hex = txToHex(tx1b) + + # This will raise an exception due to insufficient fee + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) + + def test_spends_of_conflicting_outputs(self): + """Replacements that spend conflicting tx outputs are rejected""" + utxo1 = make_utxo(self.nodes[0], int(1.2*COIN)) + utxo2 = make_utxo(self.nodes[0], 3*COIN) + + tx1a = CTransaction() + tx1a.vin = [CTxIn(utxo1, nSequence=0)] + tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + tx1a_txid = int(tx1a_txid, 16) + + # Direct spend an output of the transaction we're replacing. + tx2 = CTransaction() + tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] + tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) + tx2.vout = tx1a.vout + tx2_hex = txToHex(tx2) + + # This will raise an exception + assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) + + # Spend tx1a's output to test the indirect case. 
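+        # tx1b spends tx1a's output, so the tx2 below conflicts with tx1a while
+        # also spending an output of tx1a's in-mempool child tx1b.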
+ tx1b = CTransaction() + tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] + tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1b_hex = txToHex(tx1b) + tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) + tx1b_txid = int(tx1b_txid, 16) + + tx2 = CTransaction() + tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), + CTxIn(COutPoint(tx1b_txid, 0))] + tx2.vout = tx1a.vout + tx2_hex = txToHex(tx2) + + # This will raise an exception + assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) + + def test_new_unconfirmed_inputs(self): + """Replacements that add new unconfirmed inputs are rejected""" + confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN)) + unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False) + + tx1 = CTransaction() + tx1.vin = [CTxIn(confirmed_utxo)] + tx1.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1_hex = txToHex(tx1) + self.nodes[0].sendrawtransaction(tx1_hex, True) + + tx2 = CTransaction() + tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] + tx2.vout = tx1.vout + tx2_hex = txToHex(tx2) + + # This will raise an exception + assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True) + + def test_too_many_replacements(self): + """Replacements that evict too many transactions are rejected""" + # Try directly replacing more than MAX_REPLACEMENT_LIMIT + # transactions + + # Start by creating a single transaction with many outputs + initial_nValue = 10*COIN + utxo = make_utxo(self.nodes[0], initial_nValue) + fee = int(0.0001*COIN) + split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) + + outputs = [] + for i in range(MAX_REPLACEMENT_LIMIT+1): + outputs.append(CTxOut(split_value, CScript([1]))) + + splitting_tx = CTransaction() + splitting_tx.vin = [CTxIn(utxo, nSequence=0)] + splitting_tx.vout = outputs + splitting_tx_hex = txToHex(splitting_tx) + + txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True) + txid = int(txid, 16) + + # Now spend each of those outputs individually + for i in range(MAX_REPLACEMENT_LIMIT+1): + tx_i = CTransaction() + tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] + tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))] + tx_i_hex = txToHex(tx_i) + self.nodes[0].sendrawtransaction(tx_i_hex, True) + + # Now create doublespend of the whole lot; should fail. 
+ # Need a big enough fee to cover all spending transactions and have + # a higher fee rate + double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) + inputs = [] + for i in range(MAX_REPLACEMENT_LIMIT+1): + inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) + double_tx = CTransaction() + double_tx.vin = inputs + double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] + double_tx_hex = txToHex(double_tx) + + # This will raise an exception + assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True) + + # If we remove an input, it should pass + double_tx = CTransaction() + double_tx.vin = inputs[0:-1] + double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] + double_tx_hex = txToHex(double_tx) + self.nodes[0].sendrawtransaction(double_tx_hex, True) + + def test_opt_in(self): + """Replacing should only work if orig tx opted in""" + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + # Create a non-opting in transaction + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + # Shouldn't be able to double-spend + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] + tx1b_hex = txToHex(tx1b) + + # This will raise an exception + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True) + + tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + # Create a different non-opting in transaction + tx2a = CTransaction() + tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] + tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx2a_hex = txToHex(tx2a) + tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) + + # Still shouldn't be able to double-spend + tx2b = CTransaction() + tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] + tx2b_hex = txToHex(tx2b) + + # This will raise an exception + assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True) + + # Now create a new transaction that spends from tx1a and tx2a + # opt-in on one of the inputs + # Transaction should be replaceable on either input + + tx1a_txid = int(tx1a_txid, 16) + tx2a_txid = int(tx2a_txid, 16) + + tx3a = CTransaction() + tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), + CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] + tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] + tx3a_hex = txToHex(tx3a) + + self.nodes[0].sendrawtransaction(tx3a_hex, True) + + tx3b = CTransaction() + tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] + tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))] + tx3b_hex = txToHex(tx3b) + + tx3c = CTransaction() + tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] + tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))] + tx3c_hex = txToHex(tx3c) + + self.nodes[0].sendrawtransaction(tx3b_hex, True) + # If tx3b was accepted, tx3c won't look like a replacement, + # but make sure it is accepted anyway + self.nodes[0].sendrawtransaction(tx3c_hex, True) + + def test_prioritised_transactions(self): + # Ensure that fee deltas used via prioritisetransaction are + # correctly used by replacement logic + + # 1. 
Check that feeperkb uses modified fees + tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + tx1a = CTransaction() + tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx1a_hex = txToHex(tx1a) + tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) + + # Higher fee, but the actual fee per KB is much lower. + tx1b = CTransaction() + tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] + tx1b_hex = txToHex(tx1b) + + # Verify tx1b cannot replace tx1a. + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) + + # Use prioritisetransaction to set tx1a's fee to 0. + self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN)) + + # Now tx1b should be able to replace tx1a + tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) + + assert(tx1b_txid in self.nodes[0].getrawmempool()) + + # 2. Check that absolute fee checks use modified fee. + tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + + tx2a = CTransaction() + tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] + tx2a_hex = txToHex(tx2a) + self.nodes[0].sendrawtransaction(tx2a_hex, True) + + # Lower fee, but we'll prioritise it + tx2b = CTransaction() + tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))] + tx2b.rehash() + tx2b_hex = txToHex(tx2b) + + # Verify tx2b cannot replace tx2a. + assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True) + + # Now prioritise tx2b to have a higher modified fee + self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN)) + + # tx2b should now be accepted + tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) + + assert(tx2b_txid in self.nodes[0].getrawmempool()) + + def test_rpc(self): + us0 = self.nodes[0].listunspent()[0] + ins = [us0] + outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)} + rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True) + rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False) + json0 = self.nodes[0].decoderawtransaction(rawtx0) + json1 = self.nodes[0].decoderawtransaction(rawtx1) + assert_equal(json0["vin"][0]["sequence"], 4294967293) + assert_equal(json1["vin"][0]["sequence"], 4294967295) + + rawtx2 = self.nodes[0].createrawtransaction([], outs) + frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) + frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) + + json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) + json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) + assert_equal(json0["vin"][0]["sequence"], 4294967293) + assert_equal(json1["vin"][0]["sequence"], 4294967294) + +if __name__ == '__main__': + ReplaceByFeeTest().main() diff --git a/test/functional/feature_reindex.py b/test/functional/feature_reindex.py new file mode 100755 index 0000000000..ac67e6e9ba --- /dev/null +++ b/test/functional/feature_reindex.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test running bitcoind with -reindex and -reindex-chainstate options. + +- Start a single node and generate 3 blocks. +- Stop the node and restart it with -reindex. 
Verify that the node has reindexed up to block 3. +- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3. +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal +import time + +class ReindexTest(BitcoinTestFramework): + + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def reindex(self, justchainstate=False): + self.nodes[0].generate(3) + blockcount = self.nodes[0].getblockcount() + self.stop_nodes() + extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]] + self.start_nodes(extra_args) + while self.nodes[0].getblockcount() < blockcount: + time.sleep(0.1) + assert_equal(self.nodes[0].getblockcount(), blockcount) + self.log.info("Success") + + def run_test(self): + self.reindex(False) + self.reindex(True) + self.reindex(False) + self.reindex(True) + +if __name__ == '__main__': + ReindexTest().main() diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py new file mode 100755 index 0000000000..ba6373fa33 --- /dev/null +++ b/test/functional/feature_segwit.py @@ -0,0 +1,638 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the SegWit changeover logic.""" + +from test_framework.address import ( + key_to_p2sh_p2wpkh, + key_to_p2wpkh, + program_to_witness, + script_to_p2sh_p2wsh, + script_to_p2wsh, +) +from test_framework.blocktools import witness_script, send_to_witness +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex +from test_framework.address import script_to_p2sh, key_to_p2pkh +from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE +from io import BytesIO + +NODE_0 = 0 +NODE_2 = 2 +WIT_V0 = 0 +WIT_V1 = 1 + +def getutxo(txid): + utxo = {} + utxo["vout"] = 0 + utxo["txid"] = txid + return utxo + +def find_unspent(node, min_value): + for utxo in node.listunspent(): + if utxo['amount'] >= min_value: + return utxo + +class SegWitTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. 
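+        # -vbparams=segwit:0:999999999999 keeps segwit on the normal BIP9
+        # signalling lifecycle instead of the default always-active behaviour.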
+ self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], + ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], + ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]] + + def setup_network(self): + super().setup_network() + connect_nodes(self.nodes[0], 2) + self.sync_all() + + def success_mine(self, node, txid, sign, redeem_script=""): + send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + block = node.generate(1) + assert_equal(len(node.getblock(block[0])["tx"]), 2) + sync_blocks(self.nodes) + + def skip_mine(self, node, txid, sign, redeem_script=""): + send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + block = node.generate(1) + assert_equal(len(node.getblock(block[0])["tx"]), 1) + sync_blocks(self.nodes) + + def fail_accept(self, node, error_msg, txid, sign, redeem_script=""): + assert_raises_rpc_error(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + + def fail_mine(self, node, txid, sign, redeem_script=""): + send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) + assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1) + sync_blocks(self.nodes) + + def run_test(self): + self.nodes[0].generate(161) #block 161 + + self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork") + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) + tmpl = self.nodes[0].getblocktemplate({}) + assert(tmpl['sizelimit'] == 1000000) + assert('weightlimit' not in tmpl) + assert(tmpl['sigoplimit'] == 20000) + assert(tmpl['transactions'][0]['hash'] == txid) + assert(tmpl['transactions'][0]['sigops'] == 2) + tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) + assert(tmpl['sizelimit'] == 1000000) + assert('weightlimit' not in tmpl) + assert(tmpl['sigoplimit'] == 20000) + assert(tmpl['transactions'][0]['hash'] == txid) + assert(tmpl['transactions'][0]['sigops'] == 2) + self.nodes[0].generate(1) #block 162 + + balance_presetup = self.nodes[0].getbalance() + self.pubkey = [] + p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh + wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness + for i in range(3): + newaddress = self.nodes[i].getnewaddress() + self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"]) + multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG]) + p2sh_addr = self.nodes[i].addwitnessaddress(newaddress) + bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False) + p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address'] + bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address'] + assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1])) + assert_equal(bip173_addr, 
key_to_p2wpkh(self.pubkey[-1]))
+            assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
+            assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
+            p2sh_ids.append([])
+            wit_ids.append([])
+            for v in range(2):
+                p2sh_ids[i].append([])
+                wit_ids[i].append([])
+
+        for i in range(5):
+            for n in range(3):
+                for v in range(2):
+                    wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
+                    p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
+
+        self.nodes[0].generate(1) #block 163
+        sync_blocks(self.nodes)
+
+        # Make sure all nodes recognize the transactions as theirs
+        assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
+        assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
+        assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
+
+        self.nodes[0].generate(260) #block 423
+        sync_blocks(self.nodes)
+
+        self.log.info("Verify default node can't accept any witness format txs before fork")
+        # unsigned, no scriptsig
+        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
+        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
+        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
+        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
+        # unsigned with redeem script
+        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
+        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
+        # signed
+        self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True)
+        self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True)
+        self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True)
+        self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True)
+
+        self.log.info("Verify witness txs are skipped for mining before the fork")
+        self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
+        self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
+        self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
+        self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
+
+        # TODO: An old node would see these txs without witnesses and be able to mine them
+
+        self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
+        self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
+        self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
+
+        self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
+        self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
+        self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
+
+        self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-setting blocks are valid before the fork")
+        self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430
+        self.success_mine(self.nodes[2],
p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431
+
+        self.log.info("Verify previous witness txs skipped for mining can now be mined")
+        assert_equal(len(self.nodes[2].getrawmempool()), 4)
+        block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
+        sync_blocks(self.nodes)
+        assert_equal(len(self.nodes[2].getrawmempool()), 0)
+        segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
+        assert_equal(len(segwit_tx_list), 5)
+
+        self.log.info("Verify block and transaction serialization RPCs return differing serializations depending on the rpc serialization flag")
+        assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
+        assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
+        for i in range(len(segwit_tx_list)):
+            tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+            assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
+            assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
+            assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+            assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+            assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
+
+        self.log.info("Verify witness txs without witness data are invalid after the fork")
+        self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
+        self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
+        self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2]))
+        self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2]))
+
+        self.log.info("Verify default node can now use witness txs")
+        self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 433
+        self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 434
+        self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 435
+        self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 436
+
+        self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
+        assert(tmpl['sizelimit'] >= 3999577)  # actual maximum size is lower due to minimum mandatory non-witness data
+        assert(tmpl['weightlimit'] == 4000000)
+        assert(tmpl['sigoplimit'] == 80000)
+        assert(tmpl['transactions'][0]['txid'] == txid)
+        assert(tmpl['transactions'][0]['sigops'] == 8)
+
+        self.nodes[0].generate(1)  # Mine a block to clear the gbt cache
+
+        self.log.info("Non-segwit miners are able to use GBT response after activation.")
+        # Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
+        #                      tx2 (segwit input, paying to a non-segwit output) ->
+        #                      tx3 (non-segwit input, paying to a non-segwit output).
+        # tx1 is allowed to appear in the block, but no others.
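Conceptually, the rule exercised by this transaction chain is that a template built without the "segwit" rule may only include witness-free transactions, and therefore also nothing that descends from an excluded transaction. A rough sketch of that filter (editor's illustration using the framework's CTransaction fields; this is not the actual getblocktemplate code):

    def allowed_in_presegwit_template(tx, excluded_txids):
        # tx2 fails the first check (its input carries witness data);
        # tx3 fails the second (it spends tx2's output).
        no_witness = tx.wit.is_null()
        deps_met = all(vin.prevout.hash not in excluded_txids for vin in tx.vin)
        return no_witness and deps_met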
+        txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
+        hex_tx = self.nodes[0].gettransaction(txid1)['hex']
+        tx = FromHex(CTransaction(), hex_tx)
+        assert(tx.wit.is_null())  # This should not be a segwit input
+        assert(txid1 in self.nodes[0].getrawmempool())
+
+        # Now create tx2, which will spend from txid1.
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
+        tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))
+        tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex']
+        txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
+        tx = FromHex(CTransaction(), tx2_hex)
+        assert(not tx.wit.is_null())
+
+        # Now create tx3, which will spend from txid2
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
+        tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE])))  # Huge fee
+        tx.calc_sha256()
+        txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
+        assert(tx.wit.is_null())
+        assert(txid3 in self.nodes[0].getrawmempool())
+
+        # Now try calling getblocktemplate() without segwit support.
+        template = self.nodes[0].getblocktemplate()
+
+        # Check that tx1 is the only transaction of the 3 in the template.
+        template_txids = [ t['txid'] for t in template['transactions'] ]
+        assert(txid2 not in template_txids and txid3 not in template_txids)
+        assert(txid1 in template_txids)
+
+        # Check that running with segwit support results in all 3 being included.
+        template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
+        template_txids = [ t['txid'] for t in template['transactions'] ]
+        assert(txid1 in template_txids)
+        assert(txid2 in template_txids)
+        assert(txid3 in template_txids)
+
+        # Check that wtxid is properly reported in mempool entry
+        assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
+
+        # Mine a block to clear the gbt cache again.
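An aside on the wtxid assertion a few lines up: the wtxid hashes the full serialization (including witnesses) while the txid hashes the stripped one, so for a witness-free transaction like tx3 the two coincide. Expressed with the framework's CTransaction helpers (editor's sketch, not part of the test):

    def wtxid_equals_txid_when_witnessless(tx):
        tx.rehash()  # caches the witness-stripped hash in tx.sha256
        # calc_sha256(with_witness=True) returns the hash of the full
        # serialization; with a null witness both serializations are identical.
        return tx.calc_sha256(with_witness=True) == tx.sha256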
+ self.nodes[0].generate(1) + + self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent") + + # Some public keys to be used later + pubkeys = [ + "0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb + "02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97 + "04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV + "02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd + "036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66 + "0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K + "0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ + ] + + # Import a compressed key and an uncompressed key, generate some multisig addresses + self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn") + uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"] + self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR") + compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"] + assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False)) + assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True)) + + self.nodes[0].importpubkey(pubkeys[0]) + compressed_solvable_address = [key_to_p2pkh(pubkeys[0])] + self.nodes[0].importpubkey(pubkeys[1]) + compressed_solvable_address.append(key_to_p2pkh(pubkeys[1])) + self.nodes[0].importpubkey(pubkeys[2]) + uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])] + + spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress + spendable_after_importaddress = [] # These outputs should be seen after importaddress + solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable + unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress + solvable_anytime = [] # These outputs should be solvable after importpubkey + unseen_anytime = [] # These outputs should never be seen + + uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address']) + uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address']) + compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address']) + uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address']) + compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address']) + compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], 
compressed_solvable_address[1]])['address']) + unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"] + + # Test multisig_without_privkey + # We have 2 public keys without private keys, use addmultisigaddress to add to wallet. + # Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address. + + multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address'] + script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG]) + solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL])) + + for i in compressed_spendable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # bare and p2sh multisig with compressed keys should always be spendable + spendable_anytime.extend([bare, p2sh]) + # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress + spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH and P2PK with compressed keys should always be spendable + spendable_anytime.extend([p2pkh, p2pk]) + # P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress + spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + # P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable + spendable_anytime.extend([p2wpkh, p2sh_p2wpkh]) + + for i in uncompressed_spendable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # bare and p2sh multisig with uncompressed keys should always be spendable + spendable_anytime.extend([bare, p2sh]) + # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen + unseen_anytime.extend([p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH and P2PK with uncompressed keys should always be spendable + spendable_anytime.extend([p2pkh, p2pk]) + # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress + spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) + # Witness output types with uncompressed keys are never seen + unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + + for i in compressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + # Multisig without private is not seen after addmultisigaddress, but seen after importaddress + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen + solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh]) + # P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress + solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, 
p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + + for i in uncompressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) + # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress + solvable_after_importaddress.extend([bare, p2sh]) + # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen + unseen_anytime.extend([p2wsh, p2sh_p2wsh]) + else: + [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) + # normal P2PKH and P2PK with uncompressed keys should always be seen + solvable_anytime.extend([p2pkh, p2pk]) + # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress + solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) + # Witness output types with uncompressed keys are never seen + unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) + + op1 = CScript([OP_1]) + op0 = CScript([OP_0]) + # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V + unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)] + unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D") + unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG]) + unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)]) + p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL]) + p2wshop1 = CScript([OP_0, sha256(op1)]) + unsolvable_after_importaddress.append(unsolvablep2pkh) + unsolvable_after_importaddress.append(unsolvablep2wshp2pkh) + unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script + unsolvable_after_importaddress.append(p2wshop1) + unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided + unsolvable_after_importaddress.append(p2shop0) + + spendable_txid = [] + solvable_txid = [] + spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2)) + solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1)) + self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0) + + importlist = [] + for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address: + v = self.nodes[0].validateaddress(i) + if (v['isscript']): + bare = hex_str_to_bytes(v['hex']) + importlist.append(bytes_to_hex_str(bare)) + importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)]))) + else: + pubkey = hex_str_to_bytes(v['pubkey']) + p2pk = CScript([pubkey, OP_CHECKSIG]) + p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG]) + importlist.append(bytes_to_hex_str(p2pk)) + importlist.append(bytes_to_hex_str(p2pkh)) + importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)]))) + importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)]))) + importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)]))) + + importlist.append(bytes_to_hex_str(unsolvablep2pkh)) + importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh)) + importlist.append(bytes_to_hex_str(op1)) + 
importlist.append(bytes_to_hex_str(p2wshop1))
+
+        for i in importlist:
+            # Import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
+            # exceptions and continue.
+            try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
+
+        self.nodes[0].importaddress(script_to_p2sh(op0))  # import OP_0 as address only
+        self.nodes[0].importaddress(multisig_without_privkey_address)  # Test multisig_without_privkey
+
+        spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
+        solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
+        self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
+        self.mine_and_test_listunspent(unseen_anytime, 0)
+
+        # addwitnessaddress should refuse to return a witness address if an uncompressed key is used
+        # note that no witness address should be returned for unsolvable addresses
+        for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
+            assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
+
+        # addwitnessaddress should return a witness address even if the keys are not in the wallet
+        self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
+
+        for i in compressed_spendable_address + compressed_solvable_address:
+            witaddress = self.nodes[0].addwitnessaddress(i)
+            # addwitnessaddress should return the same address if it is a known P2SH-witness address
+            assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
+
+        spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
+        solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
+        self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
+        self.mine_and_test_listunspent(unseen_anytime, 0)
+
+        # Repeat some tests.
This time we don't add witness scripts with importaddress
+        # Import a compressed key and an uncompressed key, generate some multisig addresses
+        self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
+        uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
+        self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
+        compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
+
+        self.nodes[0].importpubkey(pubkeys[5])
+        compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
+        self.nodes[0].importpubkey(pubkeys[6])
+        uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
+
+        spendable_after_addwitnessaddress = []  # These outputs should be seen after addwitnessaddress
+        solvable_after_addwitnessaddress = []   # These outputs should be seen after addwitnessaddress but not spendable
+        solvable_anytime = []                   # These outputs should be solvable after importpubkey
+        unseen_anytime = []                     # These outputs should never be seen
+
+        uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
+        uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
+        compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
+        uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
+        compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
+
+        premature_witaddress = []
+
+        for i in compressed_spendable_address:
+            v = self.nodes[0].validateaddress(i)
+            if (v['isscript']):
+                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+                # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
+                spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
+                premature_witaddress.append(script_to_p2sh(p2wsh))
+            else:
+                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+                # P2WPKH, P2SH_P2WPKH are always spendable
+                spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
+
+        for i in uncompressed_spendable_address + uncompressed_solvable_address:
+            v = self.nodes[0].validateaddress(i)
+            if (v['isscript']):
+                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+                # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
+                unseen_anytime.extend([p2wsh, p2sh_p2wsh])
+            else:
+                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+                # P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
+                unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
+
+        for i in compressed_solvable_address:
+            v = self.nodes[0].validateaddress(i)
+            if (v['isscript']):
+                # P2WSH multisig without private keys are seen after addwitnessaddress
+                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+                solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
+                premature_witaddress.append(script_to_p2sh(p2wsh))
+            else:
+                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk,
p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+                # P2WPKH and P2SH_P2WPKH with compressed keys are always solvable after importpubkey
+                solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
+
+        self.mine_and_test_listunspent(spendable_anytime, 2)
+        self.mine_and_test_listunspent(solvable_anytime, 1)
+        self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
+
+        # addwitnessaddress should refuse to return a witness address if an uncompressed key is used
+        # note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
+        # premature_witaddress are not accepted until the script is added with addwitnessaddress first
+        for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
+            # This will raise an exception
+            assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
+
+        # after importaddress it should pass addwitnessaddress
+        v = self.nodes[0].validateaddress(compressed_solvable_address[1])
+        self.nodes[0].importaddress(v['hex'], "", False, True)
+        for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
+            witaddress = self.nodes[0].addwitnessaddress(i)
+            assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
+
+        spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
+        solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
+        self.mine_and_test_listunspent(unseen_anytime, 0)
+
+        # Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
+        v1_addr = program_to_witness(1, [3,5])
+        v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])], {v1_addr: 1})
+        v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
+        assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
+        assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
+
+        # Check that spendable outputs are really spendable
+        self.create_and_mine_tx_from_txids(spendable_txid)
+
+        # Import all the private keys so solvable addresses become spendable
+        self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
+        self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
+        self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
+        self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
+        self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
+        self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
+        self.create_and_mine_tx_from_txids(solvable_txid)
+
+        # Test that importing native P2WPKH/P2WSH scripts works
+        for use_p2wsh in [False, True]:
+            if use_p2wsh:
+                scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
+                transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
+            else:
+                scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
+                transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
+
+            self.nodes[1].importaddress(scriptPubKey, "", False)
+            rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
+            rawtxfund = self.nodes[1].signrawtransaction(rawtxfund)["hex"]
+ txid = self.nodes[1].sendrawtransaction(rawtxfund) + + assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) + assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) + + # Assert it is properly saved + self.stop_node(1) + self.start_node(1) + assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) + assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) + + def mine_and_test_listunspent(self, script_list, ismine): + utxo = find_unspent(self.nodes[0], 50) + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout']))) + for i in script_list: + tx.vout.append(CTxOut(10000000, i)) + tx.rehash() + signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] + txid = self.nodes[0].sendrawtransaction(signresults, True) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + watchcount = 0 + spendcount = 0 + for i in self.nodes[0].listunspent(): + if (i['txid'] == txid): + watchcount += 1 + if (i['spendable'] == True): + spendcount += 1 + if (ismine == 2): + assert_equal(spendcount, len(script_list)) + elif (ismine == 1): + assert_equal(watchcount, len(script_list)) + assert_equal(spendcount, 0) + else: + assert_equal(watchcount, 0) + return txid + + def p2sh_address_to_script(self,v): + bare = CScript(hex_str_to_bytes(v['hex'])) + p2sh = CScript(hex_str_to_bytes(v['scriptPubKey'])) + p2wsh = CScript([OP_0, sha256(bare)]) + p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL]) + return([bare, p2sh, p2wsh, p2sh_p2wsh]) + + def p2pkh_address_to_script(self,v): + pubkey = hex_str_to_bytes(v['pubkey']) + p2wpkh = CScript([OP_0, hash160(pubkey)]) + p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL]) + p2pk = CScript([pubkey, OP_CHECKSIG]) + p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey'])) + p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL]) + p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL]) + p2wsh_p2pk = CScript([OP_0, sha256(p2pk)]) + p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)]) + p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL]) + p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL]) + return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] + + def create_and_mine_tx_from_txids(self, txids, success = True): + tx = CTransaction() + for i in txids: + txtmp = CTransaction() + txraw = self.nodes[0].getrawtransaction(i) + f = BytesIO(hex_str_to_bytes(txraw)) + txtmp.deserialize(f) + for j in range(len(txtmp.vout)): + tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j))) + tx.vout.append(CTxOut(0, CScript())) + tx.rehash() + signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] + self.nodes[0].sendrawtransaction(signresults, True) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + +if __name__ == '__main__': + SegWitTest().main() diff --git a/test/functional/feature_uacomment.py b/test/functional/feature_uacomment.py new file mode 100755 index 0000000000..0b2c64ab69 --- /dev/null +++ b/test/functional/feature_uacomment.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the -uacomment option.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class UacommentTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def run_test(self): + self.log.info("test multiple -uacomment") + test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1] + assert_equal(test_uacomment, "(testnode0)") + + self.restart_node(0, ["-uacomment=foo"]) + foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1] + assert_equal(foo_uacomment, "(testnode0; foo)") + + self.log.info("test -uacomment max length") + self.stop_node(0) + expected = "Total length of network version string (286) exceeds maximum length (256). Reduce the number or size of uacomments." + self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected) + + self.log.info("test -uacomment unsafe characters") + for unsafe_char in ['/', ':', '(', ')']: + expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters" + self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected) + +if __name__ == '__main__': + UacommentTest().main() diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py new file mode 100755 index 0000000000..0bfe94622f --- /dev/null +++ b/test/functional/feature_versionbits_warning.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016-2017 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test version bits warning system. + +Generate chains with block versions that appear to be signalling unknown +soft-forks, and test that warning alerts are generated. +""" + +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +import re +from test_framework.blocktools import create_block, create_coinbase + +VB_PERIOD = 144 # versionbits period length for regtest +VB_THRESHOLD = 108 # versionbits activation threshold for regtest +VB_TOP_BITS = 0x20000000 +VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment + +WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect" +WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT) +VB_PATTERN = re.compile("^Warning.*versionbit") + +class TestNode(P2PInterface): + def on_inv(self, message): + pass + +class VersionBitsWarningTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def setup_network(self): + self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") + # Open and close to create zero-length file + with open(self.alert_filename, 'w', encoding='utf8') as _: + pass + self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]] + self.setup_nodes() + + # Send numblocks blocks via peer with nVersionToUse set. 
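A note on the constants above: a regtest version-bits period is 144 blocks and activation requires 108 of them signalling, i.e. a 75% threshold, which is why the test below drives the node with VB_THRESHOLD-1 versus VB_THRESHOLD signalling blocks (editor's worked check):

    assert 108 == 144 * 3 // 4  # VB_THRESHOLD is 75% of VB_PERIOD on regtest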
+    def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
+        tip = self.nodes[0].getbestblockhash()
+        height = self.nodes[0].getblockcount()
+        block_time = self.nodes[0].getblockheader(tip)["time"]+1
+        tip = int(tip, 16)
+
+        for _ in range(numblocks):
+            block = create_block(tip, create_coinbase(height+1), block_time)
+            block.nVersion = nVersionToUse
+            block.solve()
+            peer.send_message(msg_block(block))
+            block_time += 1
+            height += 1
+            tip = block.sha256
+        peer.sync_with_ping()
+
+    def test_versionbits_in_alert_file(self):
+        with open(self.alert_filename, 'r', encoding='utf8') as f:
+            alert_text = f.read()
+        assert(VB_PATTERN.match(alert_text))
+
+    def run_test(self):
+        # Set up the p2p connection and start up the network thread.
+        self.nodes[0].add_p2p_connection(TestNode())
+
+        network_thread_start()
+
+        # Test logic begins here
+        self.nodes[0].p2p.wait_for_verack()
+
+        # 1. Have the node mine one period worth of blocks
+        self.nodes[0].generate(VB_PERIOD)
+
+        # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
+        # blocks signaling some unknown bit.
+        nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
+        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD-1, nVersion)
+
+        # Fill rest of period with regular version blocks
+        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
+        # Check that we're not getting any versionbit-related errors in
+        # get*info()
+        assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["warnings"]))
+        assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
+
+        # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
+        # some unknown bit
+        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD, nVersion)
+        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
+        # Might not get a versionbits-related alert yet, as we should
+        # have gotten a different alert due to at least 51 of the last 100
+        # blocks being of unexpected version.
+        # Check that get*info() shows some kind of error.
+        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
+
+        # Mine a period worth of expected blocks so the generic block-version warning
+        # is cleared, and restart the node. This should move the versionbit state
+        # to ACTIVE.
+        self.nodes[0].generate(VB_PERIOD)
+        self.stop_nodes()
+        # Empty out the alert file
+        with open(self.alert_filename, 'w', encoding='utf8') as _:
+            pass
+        self.start_nodes()
+
+        # Connecting one block should be enough to generate an error.
+        self.nodes[0].generate(1)
+        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["warnings"])
+        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
+        self.stop_nodes()
+        self.test_versionbits_in_alert_file()
+
+        # Test framework expects the node to still be running...
+        self.start_nodes()
+
+if __name__ == '__main__':
+    VersionBitsWarningTest().main()
diff --git a/test/functional/maxuploadtarget.py b/test/functional/maxuploadtarget.py
deleted file mode 100755
index 45336ee801..0000000000
--- a/test/functional/maxuploadtarget.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test behavior of -maxuploadtarget.
-
-* Verify that getdata requests for old blocks (>1 week) are dropped
-if uploadtarget has been reached.
-* Verify that getdata requests for recent blocks are respected even
-if uploadtarget has been reached.
-* Verify that the upload counters are reset after 24 hours.
-""" -from collections import defaultdict -import time - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * - -class TestNode(P2PInterface): - def __init__(self): - super().__init__() - self.block_receive_map = defaultdict(int) - - def on_inv(self, message): - pass - - def on_block(self, message): - message.block.calc_sha256() - self.block_receive_map[message.block.sha256] += 1 - -class MaxUploadTest(BitcoinTestFramework): - - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [["-maxuploadtarget=800", "-blockmaxsize=999000"]] - - # Cache for utxos, as the listunspent may take a long time later in the test - self.utxo_cache = [] - - def run_test(self): - # Before we connect anything, we first set the time on the node - # to be in the past, otherwise things break because the CNode - # time counters can't be reset backward after initialization - old_time = int(time.time() - 2*60*60*24*7) - self.nodes[0].setmocktime(old_time) - - # Generate some old blocks - self.nodes[0].generate(130) - - # p2p_conns[0] will only request old blocks - # p2p_conns[1] will only request new blocks - # p2p_conns[2] will test resetting the counters - p2p_conns = [] - - for _ in range(3): - p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode())) - - network_thread_start() - for p2pc in p2p_conns: - p2pc.wait_for_verack() - - # Test logic begins here - - # Now mine a big block - mine_large_block(self.nodes[0], self.utxo_cache) - - # Store the hash; we'll request this later - big_old_block = self.nodes[0].getbestblockhash() - old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] - big_old_block = int(big_old_block, 16) - - # Advance to two days ago - self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24) - - # Mine one more block, so that the prior block looks old - mine_large_block(self.nodes[0], self.utxo_cache) - - # We'll be requesting this new block too - big_new_block = self.nodes[0].getbestblockhash() - big_new_block = int(big_new_block, 16) - - # p2p_conns[0] will test what happens if we just keep requesting the - # the same big old block too many times (expect: disconnect) - - getdata_request = msg_getdata() - getdata_request.inv.append(CInv(2, big_old_block)) - - max_bytes_per_day = 800*1024*1024 - daily_buffer = 144 * 4000000 - max_bytes_available = max_bytes_per_day - daily_buffer - success_count = max_bytes_available // old_block_size - - # 576MB will be reserved for relaying new blocks, so expect this to - # succeed for ~235 tries. - for i in range(success_count): - p2p_conns[0].send_message(getdata_request) - p2p_conns[0].sync_with_ping() - assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1) - - assert_equal(len(self.nodes[0].getpeerinfo()), 3) - # At most a couple more tries should succeed (depending on how long - # the test has been running so far). - for i in range(3): - p2p_conns[0].send_message(getdata_request) - p2p_conns[0].wait_for_disconnect() - assert_equal(len(self.nodes[0].getpeerinfo()), 2) - self.log.info("Peer 0 disconnected after downloading old block too many times") - - # Requesting the current block on p2p_conns[1] should succeed indefinitely, - # even when over the max upload target. 
- # We'll try 800 times - getdata_request.inv = [CInv(2, big_new_block)] - for i in range(800): - p2p_conns[1].send_message(getdata_request) - p2p_conns[1].sync_with_ping() - assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1) - - self.log.info("Peer 1 able to repeatedly download new block") - - # But if p2p_conns[1] tries for an old block, it gets disconnected too. - getdata_request.inv = [CInv(2, big_old_block)] - p2p_conns[1].send_message(getdata_request) - p2p_conns[1].wait_for_disconnect() - assert_equal(len(self.nodes[0].getpeerinfo()), 1) - - self.log.info("Peer 1 disconnected after trying to download old block") - - self.log.info("Advancing system time on node to clear counters...") - - # If we advance the time by 24 hours, then the counters should reset, - # and p2p_conns[2] should be able to retrieve the old block. - self.nodes[0].setmocktime(int(time.time())) - p2p_conns[2].sync_with_ping() - p2p_conns[2].send_message(getdata_request) - p2p_conns[2].sync_with_ping() - assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) - - self.log.info("Peer 2 able to download old block") - - self.nodes[0].disconnect_p2ps() - - #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 - self.log.info("Restarting nodes with -whitelist=127.0.0.1") - self.stop_node(0) - self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) - - # Reconnect to self.nodes[0] - self.nodes[0].add_p2p_connection(TestNode()) - - network_thread_start() - self.nodes[0].p2p.wait_for_verack() - - #retrieve 20 blocks which should be enough to break the 1MB limit - getdata_request.inv = [CInv(2, big_new_block)] - for i in range(20): - self.nodes[0].p2p.send_message(getdata_request) - self.nodes[0].p2p.sync_with_ping() - assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1) - - getdata_request.inv = [CInv(2, big_old_block)] - self.nodes[0].p2p.send_and_ping(getdata_request) - assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist - - self.log.info("Peer still connected after trying to download old block (whitelisted)") - -if __name__ == '__main__': - MaxUploadTest().main() diff --git a/test/functional/minchainwork.py b/test/functional/minchainwork.py deleted file mode 100755 index 90a3de0e0d..0000000000 --- a/test/functional/minchainwork.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test logic for setting nMinimumChainWork on command line. - -Nodes don't consider themselves out of "initial block download" until -their active chain has more work than nMinimumChainWork. - -Nodes don't download blocks from a peer unless the peer's best known block -has more work than nMinimumChainWork. - -While in initial block download, nodes won't relay blocks to their peers, so -test that this parameter functions as intended by verifying that block relay -only succeeds past a given node once its nMinimumChainWork has been exceeded. 
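To put numbers on this (editor's worked example using the regtest figures from the test body below): 0x65 is 101 units of work, each regtest block contributes 2, and genesis already supplies 2, so node0 first mines just enough blocks to stay below node1's threshold:

    REGTEST_WORK_PER_BLOCK = 2
    min_work = 0x65  # 101
    num_blocks = int((min_work - REGTEST_WORK_PER_BLOCK) / REGTEST_WORK_PER_BLOCK)
    assert num_blocks == 49
    assert 2 + num_blocks * 2 == 100        # still below 101: no relay yet
    assert 2 + (num_blocks + 1) * 2 > 101   # one more block crosses the threshold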
-""" - -import time - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import connect_nodes, assert_equal - -# 2 hashes required per regtest block (with no difficulty adjustment) -REGTEST_WORK_PER_BLOCK = 2 - -class MinimumChainWorkTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - - self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] - self.node_min_work = [0, 101, 101] - - def setup_network(self): - # This test relies on the chain setup being: - # node0 <- node1 <- node2 - # Before leaving IBD, nodes prefer to download blocks from outbound - # peers, so ensure that we're mining on an outbound peer and testing - # block relay to inbound peers. - self.setup_nodes() - for i in range(self.num_nodes-1): - connect_nodes(self.nodes[i+1], i) - - def run_test(self): - # Start building a chain on node0. node2 shouldn't be able to sync until node1's - # minchainwork is exceeded - starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work - self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1]) - - starting_blockcount = self.nodes[2].getblockcount() - - num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) - self.log.info("Generating %d blocks on node0", num_blocks_to_generate) - hashes = self.nodes[0].generate(num_blocks_to_generate) - - self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork']) - - # Sleep a few seconds and verify that node2 didn't get any new blocks - # or headers. We sleep, rather than sync_blocks(node0, node1) because - # it's reasonable either way for node1 to get the blocks, or not get - # them (since they're below node1's minchainwork). - time.sleep(3) - - self.log.info("Verifying node 2 has no more blocks than before") - self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) - # Node2 shouldn't have any new headers yet, because node1 should not - # have relayed anything. - assert_equal(len(self.nodes[2].getchaintips()), 1) - assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) - - assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash() - assert_equal(self.nodes[2].getblockcount(), starting_blockcount) - - self.log.info("Generating one more block") - self.nodes[0].generate(1) - - self.log.info("Verifying nodes are all synced") - - # Because nodes in regtest are all manual connections (eg using - # addnode), node1 should not have disconnected node0. If not for that, - # we'd expect node1 to have disconnected node0 for serving an - # insufficient work chain, in which case we'd need to reconnect them to - # continue the test. - - self.sync_all() - self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes]) - -if __name__ == '__main__': - MinimumChainWorkTest().main() diff --git a/test/functional/notifications.py b/test/functional/notifications.py deleted file mode 100755 index 980bef5fc8..0000000000 --- a/test/functional/notifications.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the -alertnotify, -blocknotify and -walletnotify options.""" -import os - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, wait_until, connect_nodes_bi - -class NotificationsTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 2 - self.setup_clean_chain = True - - def setup_network(self): - self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") - self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt") - self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt") - - # -alertnotify and -blocknotify on node0, walletnotify on node1 - self.extra_args = [["-blockversion=2", - "-alertnotify=echo %%s >> %s" % self.alert_filename, - "-blocknotify=echo %%s >> %s" % self.block_filename], - ["-blockversion=211", - "-rescan", - "-walletnotify=echo %%s >> %s" % self.tx_filename]] - super().setup_network() - - def run_test(self): - self.log.info("test -blocknotify") - block_count = 10 - blocks = self.nodes[1].generate(block_count) - - # wait at most 10 seconds for expected file size before reading the content - wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10) - - # file content should equal the generated blocks hashes - with open(self.block_filename, 'r') as f: - assert_equal(sorted(blocks), sorted(f.read().splitlines())) - - self.log.info("test -walletnotify") - # wait at most 10 seconds for expected file size before reading the content - wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) - - # file content should equal the generated transaction hashes - txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) - with open(self.tx_filename, 'r') as f: - assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) - os.remove(self.tx_filename) - - self.log.info("test -walletnotify after rescan") - # restart node to rescan to force wallet notifications - self.restart_node(1) - connect_nodes_bi(self.nodes, 0, 1) - - wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10) - - # file content should equal the generated transaction hashes - txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) - with open(self.tx_filename, 'r') as f: - assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) - - # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st. 
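For reference, the "51st" above comes from the legacy unexpected-version warning rather than version bits (editor's aside): the node alerts once more than half of the last 100 blocks carry an unexpected version, and node1 already mined 10 such blocks at the start of this test:

    blocks_mined_earlier = 10  # the initial generate() with -blockversion=211
    assert blocks_mined_earlier + 41 == 51  # 51 > 100/2, so -alertnotify fires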
- self.log.info("test -alertnotify") - self.nodes[1].generate(41) - self.sync_all() - - # Give bitcoind 10 seconds to write the alert notification - wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10) - - with open(self.alert_filename, 'r', encoding='utf8') as f: - alert_text = f.read() - - # Mine more up-version blocks, should not get more alerts: - self.nodes[1].generate(2) - self.sync_all() - - with open(self.alert_filename, 'r', encoding='utf8') as f: - alert_text2 = f.read() - - self.log.info("-alertnotify should not continue notifying for more unknown version blocks") - assert_equal(alert_text, alert_text2) - -if __name__ == '__main__': - NotificationsTest().main() diff --git a/test/functional/nulldummy.py b/test/functional/nulldummy.py deleted file mode 100755 index e4f413cc2a..0000000000 --- a/test/functional/nulldummy.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test NULLDUMMY softfork. - -Connect to a single node. -Generate 2 blocks (save the coinbases for later). -Generate 427 more blocks. -[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. -[Policy] Check that non-NULLDUMMY transactions are rejected before activation. -[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. -[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment -from test_framework.script import CScript -from io import BytesIO -import time - -NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" - -def trueDummy(tx): - scriptSig = CScript(tx.vin[0].scriptSig) - newscript = [] - for i in scriptSig: - if (len(newscript) == 0): - assert(len(i) == 0) - newscript.append(b'\x51') - else: - newscript.append(i) - tx.vin[0].scriptSig = CScript(newscript) - tx.rehash() - -class NULLDUMMYTest(BitcoinTestFramework): - - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through - # normal segwit activation here (and don't use the default always-on behaviour). 
- self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]] - - def run_test(self): - self.address = self.nodes[0].getnewaddress() - self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])['address'] - self.wit_address = self.nodes[0].addwitnessaddress(self.address) - self.wit_ms_address = self.nodes[0].addmultisigaddress(1, [self.address], '', 'p2sh-segwit')['address'] - - network_thread_start() - self.coinbase_blocks = self.nodes[0].generate(2) # Block 2 - coinbase_txid = [] - for i in self.coinbase_blocks: - coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) - self.nodes[0].generate(427) # Block 429 - self.lastblockhash = self.nodes[0].getbestblockhash() - self.tip = int("0x" + self.lastblockhash, 0) - self.lastblockheight = 429 - self.lastblocktime = int(time.time()) + 429 - - self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") - test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)] - txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True) - test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48)) - txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True) - test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49)) - txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True) - self.block_submit(self.nodes[0], test1txs, False, True) - - self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") - test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47) - trueDummy(test2tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True) - - self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") - self.block_submit(self.nodes[0], [test2tx], False, True) - - self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation") - test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46) - test6txs=[CTransaction(test4tx)] - trueDummy(test4tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True) - self.block_submit(self.nodes[0], [test4tx]) - - self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation") - test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48) - test6txs.append(CTransaction(test5tx)) - test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01' - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True) - self.block_submit(self.nodes[0], [test5tx], True) - - self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]") - for i in test6txs: - self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True) - self.block_submit(self.nodes[0], test6txs, True, True) - - - def create_transaction(self, node, txid, to_address, amount): - inputs = [{ "txid" : txid, "vout" : 0}] - outputs = { 
to_address : amount }
-        rawtx = node.createrawtransaction(inputs, outputs)
-        signresult = node.signrawtransaction(rawtx)
-        tx = CTransaction()
-        f = BytesIO(hex_str_to_bytes(signresult['hex']))
-        tx.deserialize(f)
-        return tx
-
-
-    def block_submit(self, node, txs, witness = False, accept = False):
-        block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
-        block.nVersion = 4
-        for tx in txs:
-            tx.rehash()
-            block.vtx.append(tx)
-        block.hashMerkleRoot = block.calc_merkle_root()
-        witness and add_witness_commitment(block)
-        block.rehash()
-        block.solve()
-        node.submitblock(bytes_to_hex_str(block.serialize(True)))
-        if (accept):
-            assert_equal(node.getbestblockhash(), block.hash)
-            self.tip = block.sha256
-            self.lastblockhash = block.hash
-            self.lastblocktime += 1
-            self.lastblockheight += 1
-        else:
-            assert_equal(node.getbestblockhash(), self.lastblockhash)
-
-if __name__ == '__main__':
-    NULLDUMMYTest().main()
diff --git a/test/functional/p2p-fullblocktest.py b/test/functional/p2p-fullblocktest.py
deleted file mode 100755
index fe9bbda14b..0000000000
--- a/test/functional/p2p-fullblocktest.py
+++ /dev/null
@@ -1,1293 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test block processing.
-
-This reimplements tests from the bitcoinj/FullBlockTestGenerator used
-by the pull-tester.
-
-We use the testing framework in which we expect a particular answer from
-each test.
-"""
-
-from test_framework.test_framework import ComparisonTestFramework
-from test_framework.util import *
-from test_framework.comptool import TestManager, TestInstance, RejectResult
-from test_framework.blocktools import *
-import time
-from test_framework.key import CECKey
-from test_framework.script import *
-from test_framework.mininode import network_thread_start
-import struct
-
-class PreviousSpendableOutput():
-    def __init__(self, tx = CTransaction(), n = -1):
-        self.tx = tx
-        self.n = n  # the output we're spending
-
-# Use this class for tests that require behavior other than normal "mininode" behavior.
-# For now, it is used to serialize a bloated varint (b64).
-class CBrokenBlock(CBlock):
-    def __init__(self, header=None):
-        super(CBrokenBlock, self).__init__(header)
-
-    def initialize(self, base_block):
-        self.vtx = copy.deepcopy(base_block.vtx)
-        self.hashMerkleRoot = self.calc_merkle_root()
-
-    def serialize(self, with_witness=False):
-        r = b""
-        r += super(CBlock, self).serialize()
-        r += struct.pack("<BQ", 255, len(self.vtx))
-        for tx in self.vtx:
-            if with_witness:
-                r += tx.serialize_with_witness()
-            else:
-                r += tx.serialize_without_witness()
-        return r
-
-    def normal_serialize(self):
-        r = b""
-        r += super(CBrokenBlock, self).serialize()
-        return r
-
-class FullBlockTest(ComparisonTestFramework):
-    # Can either run this test as 1 node with expected answers, or two and compare them.
-    # Change the "outcome" variable from each TestInstance object to only do the comparison.
-    def set_test_params(self):
-        self.num_nodes = 1
-        self.setup_clean_chain = True
-        self.block_heights = {}
-        self.coinbase_key = CECKey()
-        self.coinbase_key.set_secretbytes(b"horsebattery")
-        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
-        self.tip = None
-        self.blocks = {}
-
-    def add_options(self, parser):
-        super().add_options(parser)
-        parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
-
-    def run_test(self):
-        self.test = TestManager(self, self.options.tmpdir)
-        self.test.add_all_connections(self.nodes)
-        network_thread_start()
-        self.test.run()
-
-    def add_transactions_to_block(self, block, tx_list):
-        [ tx.rehash() for tx in tx_list ]
-        block.vtx.extend(tx_list)
-
-    # this is a little handier to use than the version in blocktools.py
-    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
-        tx = create_transaction(spend_tx, n, b"", value, script)
-        return tx
-
-    # sign a transaction, using the key we know about
-    # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
-    def sign_tx(self, tx, spend_tx, n):
-        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
-        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
-            tx.vin[0].scriptSig = CScript()
-            return
-        (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
-        tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
-
-    def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
-        tx = self.create_tx(spend_tx, n, value, script)
-        self.sign_tx(tx, spend_tx, n)
-        tx.rehash()
-        return tx
-
-    def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
-        if self.tip == None:
-            base_block_hash = self.genesis_hash
-            block_time = int(time.time()) + 1
-        else:
-            base_block_hash = self.tip.sha256
-            block_time = self.tip.nTime + 1
-        # First create the coinbase
-        height = self.block_heights[base_block_hash] + 1
-        coinbase = create_coinbase(height, self.coinbase_pubkey)
-        coinbase.vout[0].nValue += additional_coinbase_value
-        coinbase.rehash()
-        if spend == None:
-            block = create_block(base_block_hash, coinbase, block_time)
-        else:
-            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1  # all but one satoshi to fees
-            coinbase.rehash()
-            block = create_block(base_block_hash, coinbase, block_time)
-            tx = create_transaction(spend.tx, spend.n, b"", 1, script)  # spend 1 satoshi
-            self.sign_tx(tx, spend.tx, spend.n)
-            self.add_transactions_to_block(block, [tx])
-            block.hashMerkleRoot = block.calc_merkle_root()
-        if solve:
-            block.solve()
-        self.tip = block
-        self.block_heights[block.sha256] = height
-        assert number not in self.blocks
-        self.blocks[number] = block
-        return block
-
-    def get_tests(self):
-        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
-        self.block_heights[self.genesis_hash] = 0
-        spendable_outputs = []
-
-        # save the current tip so it can be spent by a later block
-        def save_spendable_output():
-            spendable_outputs.append(self.tip)
-
-        # get an output that we previously marked as spendable
-        def get_spendable_output():
-            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
-
-        # returns a test case that asserts that the current tip was accepted
-        def accepted():
-            return TestInstance([[self.tip, True]])
-
-        # returns a test case that asserts that the current tip was rejected
-        def rejected(reject = None):
-            if reject is None:
-                return TestInstance([[self.tip, False]])
-            else:
-                return TestInstance([[self.tip, reject]])
-
-        # move the tip back to a previous block
-        def tip(number):
-            self.tip = self.blocks[number]
-
-        # adds transactions to the block and updates state
-        def update_block(block_number, new_transactions):
-            block = self.blocks[block_number]
-            self.add_transactions_to_block(block, new_transactions)
-            old_sha256 = block.sha256
-            block.hashMerkleRoot = block.calc_merkle_root()
-            block.solve()
-            # Update the internal state just like in next_block
-            self.tip = block
-            if block.sha256 != old_sha256:
-                self.block_heights[block.sha256] = self.block_heights[old_sha256]
-                del self.block_heights[old_sha256]
-            self.blocks[block_number] = block
-            return block
-
-        # shorthand for functions
-        block = self.next_block
-        create_tx = self.create_tx
-        create_and_sign_tx = self.create_and_sign_transaction
-
-        # these must be updated if consensus changes
-        MAX_BLOCK_SIGOPS = 20000
-
-        # Create a new block
-        block(0)
-        save_spendable_output()
-        yield accepted()
-
-        # Now we need that block to mature so we can spend the coinbase.
-        test = TestInstance(sync_every_block=False)
-        for i in range(99):
-            block(5000 + i)
-            test.blocks_and_transactions.append([self.tip, True])
-            save_spendable_output()
-        yield test
-
-        # collect spendable outputs now to avoid cluttering the code later on
-        out = []
-        for i in range(33):
-            out.append(get_spendable_output())
-
-        # Start by building a couple of blocks on top (which output is spent is
-        # in parentheses):
-        #     genesis -> b1 (0) -> b2 (1)
-        block(1, spend=out[0])
-        save_spendable_output()
-        yield accepted()
-
-        block(2, spend=out[1])
-        yield accepted()
-        save_spendable_output()
-
-        # so fork like this:
-        #
-        #     genesis -> b1 (0) -> b2 (1)
-        #                      \-> b3 (1)
-        #
-        # Nothing should happen at this point. We saw b2 first so it takes priority.
-        tip(1)
-        b3 = block(3, spend=out[1])
-        txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
-        yield rejected()
-
-        # Now we add another block to make the alternative chain longer.
-        #
-        #     genesis -> b1 (0) -> b2 (1)
-        #                      \-> b3 (1) -> b4 (2)
-        block(4, spend=out[2])
-        yield accepted()
-
-        # ... and back to the first chain.
- # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b3 (1) -> b4 (2) - tip(2) - block(5, spend=out[2]) - save_spendable_output() - yield rejected() - - block(6, spend=out[3]) - yield accepted() - - # Try to create a fork that double-spends - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b7 (2) -> b8 (4) - # \-> b3 (1) -> b4 (2) - tip(5) - block(7, spend=out[2]) - yield rejected() - - block(8, spend=out[4]) - yield rejected() - - # Try to create a block that has too much fee - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b9 (4) - # \-> b3 (1) -> b4 (2) - tip(6) - block(9, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) - - # Create a fork that ends in a block with too much fee (the one that causes the reorg) - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b10 (3) -> b11 (4) - # \-> b3 (1) -> b4 (2) - tip(5) - block(10, spend=out[3]) - yield rejected() - - block(11, spend=out[4], additional_coinbase_value=1) - yield rejected(RejectResult(16, b'bad-cb-amount')) - - - # Try again, but with a valid fork first - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b14 (5) - # (b12 added last) - # \-> b3 (1) -> b4 (2) - tip(5) - b12 = block(12, spend=out[3]) - save_spendable_output() - b13 = block(13, spend=out[4]) - # Deliver the block header for b12, and the block b13. - # b13 should be accepted but the tip won't advance until b12 is delivered. - yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) - - save_spendable_output() - # b14 is invalid, but the node won't know that until it tries to connect - # Tip still can't advance because b12 is missing - block(14, spend=out[5], additional_coinbase_value=1) - yield rejected() - - yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13. 
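A note on the serialization trick set up by the CBrokenBlock helper above: Bitcoin encodes a block's transaction count as a CompactSize varint, and the canonical form is the shortest valid encoding. The sketch below is editorial, not part of the patch; ser_compact_size mirrors the helper of the same name in test_framework/mininode.py. It shows why the always-9-byte encoding used later for b64a comes out exactly 8 bytes larger than the canonical one:

import struct

def ser_compact_size(n):
    # Canonical CompactSize: the shortest valid encoding of n.
    if n < 253:
        return struct.pack("B", n)
    elif n <= 0xffff:
        return b'\xfd' + struct.pack("<H", n)
    elif n <= 0xffffffff:
        return b'\xfe' + struct.pack("<I", n)
    return b'\xff' + struct.pack("<Q", n)

def ser_bloated_size(n):
    # Non-canonical form from CBrokenBlock.serialize(): a 0xff marker
    # followed by a full 8-byte count, no matter how small n is.
    return struct.pack("<BQ", 255, n)

# With the handful of transactions in these test blocks, the canonical form
# is 1 byte and the bloated form is 9, which is why b64a serializes to
# MAX_BLOCK_BASE_SIZE + 8 while the canonically encoded b64 just fits.
assert len(ser_bloated_size(2)) - len(ser_compact_size(2)) == 8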
- - # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) - # \-> b3 (1) -> b4 (2) - - # Test that a block with a lot of checksigs is okay - lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1)) - tip(13) - block(15, spend=out[5], script=lots_of_checksigs) - yield accepted() - save_spendable_output() - - - # Test that a block with too many checksigs is rejected - too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) - block(16, spend=out[6], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # Attempt to spend a transaction created on a different fork - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) - # \-> b3 (1) -> b4 (2) - tip(15) - block(17, spend=txout_b3) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # Attempt to spend a transaction created on a different fork (on a fork this time) - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) - # \-> b18 (b3.vtx[1]) -> b19 (6) - # \-> b3 (1) -> b4 (2) - tip(13) - block(18, spend=txout_b3) - yield rejected() - - block(19, spend=out[6]) - yield rejected() - - # Attempt to spend a coinbase at depth too low - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) - # \-> b3 (1) -> b4 (2) - tip(15) - block(20, spend=out[7]) - yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) - - # Attempt to spend a coinbase at depth too low (on a fork this time) - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) - # \-> b21 (6) -> b22 (5) - # \-> b3 (1) -> b4 (2) - tip(13) - block(21, spend=out[6]) - yield rejected() - - block(22, spend=out[5]) - yield rejected() - - # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) - # \-> b24 (6) -> b25 (7) - # \-> b3 (1) -> b4 (2) - tip(15) - b23 = block(23, spend=out[6]) - tx = CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) - tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) - b23 = update_block(23, [tx]) - # Make sure the math above worked out to produce a max-sized block - assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE) - yield accepted() - save_spendable_output() - - # Make the next block one byte bigger and check that it fails - tip(15) - b24 = block(24, spend=out[6]) - script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69 - script_output = CScript([b'\x00' * (script_length+1)]) - tx.vout = [CTxOut(0, script_output)] - b24 = update_block(24, [tx]) - assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1) - yield rejected(RejectResult(16, b'bad-blk-length')) - - block(25, spend=out[7]) - yield rejected() - - # Create blocks with a coinbase input script size out of range - # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) - # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) - # \-> ... (6) -> ... 
(7) - # \-> b3 (1) -> b4 (2) - tip(15) - b26 = block(26, spend=out[6]) - b26.vtx[0].vin[0].scriptSig = b'\x00' - b26.vtx[0].rehash() - # update_block causes the merkle root to get updated, even with no new - # transactions, and updates the required state. - b26 = update_block(26, []) - yield rejected(RejectResult(16, b'bad-cb-length')) - - # Extend the b26 chain to make sure bitcoind isn't accepting b26 - block(27, spend=out[7]) - yield rejected(False) - - # Now try a too-large-coinbase script - tip(15) - b28 = block(28, spend=out[6]) - b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 - b28.vtx[0].rehash() - b28 = update_block(28, []) - yield rejected(RejectResult(16, b'bad-cb-length')) - - # Extend the b28 chain to make sure bitcoind isn't accepting b28 - block(29, spend=out[7]) - yield rejected(False) - - # b30 has a max-sized coinbase scriptSig. - tip(23) - b30 = block(30) - b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 - b30.vtx[0].rehash() - b30 = update_block(30, []) - yield accepted() - save_spendable_output() - - # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY - # - # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) - # \-> b36 (11) - # \-> b34 (10) - # \-> b32 (9) - # - - # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end. - lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - b31 = block(31, spend=out[8], script=lots_of_multisigs) - assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS) - yield accepted() - save_spendable_output() - - # this goes over the limit because the coinbase has one sigop - too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20)) - b32 = block(32, spend=out[9], script=too_many_multisigs) - assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # CHECKMULTISIGVERIFY - tip(31) - lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) - block(33, spend=out[9], script=lots_of_multisigs) - yield accepted() - save_spendable_output() - - too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20)) - block(34, spend=out[10], script=too_many_multisigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # CHECKSIGVERIFY - tip(33) - lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1)) - b35 = block(35, spend=out[10], script=lots_of_checksigs) - yield accepted() - save_spendable_output() - - too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS)) - block(36, spend=out[11], script=too_many_checksigs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - - # Check spending of a transaction in a block which failed to connect - # - # b6 (3) - # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) - # \-> b37 (11) - # \-> b38 (11/37) - # - - # save 37's spendable output, but then double-spend out11 to invalidate the block - tip(35) - b37 = block(37, spend=out[11]) - txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) - tx = create_and_sign_tx(out[11].tx, out[11].n, 0) - b37 = update_block(37, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid - tip(35) - block(38, spend=txout_b37) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - 
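Before the P2SH sigop section that follows, it may help to spell out the legacy sigop arithmetic that b31-b36 above rely on: each CHECKSIG/CHECKSIGVERIFY counts as 1 sigop, each CHECKMULTISIG/CHECKMULTISIGVERIFY counts as a flat 20, and a block may carry at most MAX_BLOCK_SIGOPS = 20000. A minimal standalone sketch of that accounting (editorial; the test itself uses get_legacy_sigopcount_block from the framework):

MAX_BLOCK_SIGOPS = 20000

# Flat legacy weights. (The "accurate" counting applied inside P2SH redeem
# scripts, exercised by b39-b41 below, is a separate mechanism.)
SIGOP_COST = {"OP_CHECKSIG": 1, "OP_CHECKSIGVERIFY": 1,
              "OP_CHECKMULTISIG": 20, "OP_CHECKMULTISIGVERIFY": 20}

def legacy_sigops(opcodes):
    return sum(SIGOP_COST.get(op, 0) for op in opcodes)

# b31: 999 CHECKMULTISIGs (999 * 20 = 19980) plus 19 CHECKSIGs = 19999 sigops;
# the coinbase's single sigop lands the block exactly on the 20000 limit.
b31_script = ["OP_CHECKMULTISIG"] * ((MAX_BLOCK_SIGOPS - 1) // 20) + ["OP_CHECKSIG"] * 19
assert legacy_sigops(b31_script) + 1 == MAX_BLOCK_SIGOPS

# b32: without the -1 the multisigs alone reach 20000, so the coinbase
# sigop pushes the block one over, triggering 'bad-blk-sigops'.
b32_script = ["OP_CHECKMULTISIG"] * (MAX_BLOCK_SIGOPS // 20)
assert legacy_sigops(b32_script) + 1 == MAX_BLOCK_SIGOPS + 1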
# Check P2SH SigOp counting - # - # - # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12) - # \-> b40 (12) - # - # b39 - create some P2SH outputs that will require 6 sigops to spend: - # - # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG - # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL - # - tip(35) - b39 = block(39) - b39_outputs = 0 - b39_sigops_per_output = 6 - - # Build the redeem script, hash it, use hash to create the p2sh script - redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG]) - redeem_script_hash = hash160(redeem_script) - p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) - - # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE - # This must be signed because it is spending a coinbase - spend = out[11] - tx = create_tx(spend.tx, spend.n, 1, p2sh_script) - tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) - self.sign_tx(tx, spend.tx, spend.n) - tx.rehash() - b39 = update_block(39, [tx]) - b39_outputs += 1 - - # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE - tx_new = None - tx_last = tx - total_size=len(b39.serialize()) - while(total_size < MAX_BLOCK_BASE_SIZE): - tx_new = create_tx(tx_last, 1, 1, p2sh_script) - tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) - tx_new.rehash() - total_size += len(tx_new.serialize()) - if total_size >= MAX_BLOCK_BASE_SIZE: - break - b39.vtx.append(tx_new) # add tx to block - tx_last = tx_new - b39_outputs += 1 - - b39 = update_block(39, []) - yield accepted() - save_spendable_output() - - - # Test sigops in P2SH redeem scripts - # - # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops. - # The first tx has one sigop and then at the end we add 2 more to put us just over the max. - # - # b41 does the same, less one, so it has the maximum sigops permitted. 
- # - tip(39) - b40 = block(40, spend=out[12]) - sigops = get_legacy_sigopcount_block(b40) - numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output - assert_equal(numTxes <= b39_outputs, True) - - lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) - new_txs = [] - for i in range(1, numTxes+1): - tx = CTransaction() - tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) - tx.vin.append(CTxIn(lastOutpoint, b'')) - # second input is corresponding P2SH output from b39 - tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) - # Note: must pass the redeem_script (not p2sh_script) to the signature hash function - (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL) - sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL])) - scriptSig = CScript([sig, redeem_script]) - - tx.vin[1].scriptSig = scriptSig - tx.rehash() - new_txs.append(tx) - lastOutpoint = COutPoint(tx.sha256, 0) - - b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1 - tx = CTransaction() - tx.vin.append(CTxIn(lastOutpoint, b'')) - tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) - tx.rehash() - new_txs.append(tx) - update_block(40, new_txs) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - # same as b40, but one less sigop - tip(39) - block(41, spend=None) - update_block(41, b40.vtx[1:-1]) - b41_sigops_to_fill = b40_sigops_to_fill - 1 - tx = CTransaction() - tx.vin.append(CTxIn(lastOutpoint, b'')) - tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) - tx.rehash() - update_block(41, [tx]) - yield accepted() - - # Fork off of b39 to create a constant base again - # - # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) - # \-> b41 (12) - # - tip(39) - block(42, spend=out[12]) - yield rejected() - save_spendable_output() - - block(43, spend=out[13]) - yield accepted() - save_spendable_output() - - - # Test a number of really invalid scenarios - # - # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) - # \-> ??? (15) - - # The next few blocks are going to be created "by hand" since they'll do funky things, such as having - # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works. 
- height = self.block_heights[self.tip.sha256] + 1 - coinbase = create_coinbase(height, self.coinbase_pubkey) - b44 = CBlock() - b44.nTime = self.tip.nTime + 1 - b44.hashPrevBlock = self.tip.sha256 - b44.nBits = 0x207fffff - b44.vtx.append(coinbase) - b44.hashMerkleRoot = b44.calc_merkle_root() - b44.solve() - self.tip = b44 - self.block_heights[b44.sha256] = height - self.blocks[44] = b44 - yield accepted() - - # A block with a non-coinbase as the first tx - non_coinbase = create_tx(out[15].tx, out[15].n, 1) - b45 = CBlock() - b45.nTime = self.tip.nTime + 1 - b45.hashPrevBlock = self.tip.sha256 - b45.nBits = 0x207fffff - b45.vtx.append(non_coinbase) - b45.hashMerkleRoot = b45.calc_merkle_root() - b45.calc_sha256() - b45.solve() - self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1 - self.tip = b45 - self.blocks[45] = b45 - yield rejected(RejectResult(16, b'bad-cb-missing')) - - # A block with no txns - tip(44) - b46 = CBlock() - b46.nTime = b44.nTime+1 - b46.hashPrevBlock = b44.sha256 - b46.nBits = 0x207fffff - b46.vtx = [] - b46.hashMerkleRoot = 0 - b46.solve() - self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1 - self.tip = b46 - assert 46 not in self.blocks - self.blocks[46] = b46 - s = ser_uint256(b46.hashMerkleRoot) - yield rejected(RejectResult(16, b'bad-blk-length')) - - # A block with invalid work - tip(44) - b47 = block(47, solve=False) - target = uint256_from_compact(b47.nBits) - while b47.sha256 < target: #changed > to < - b47.nNonce += 1 - b47.rehash() - yield rejected(RejectResult(16, b'high-hash')) - - # A block with timestamp > 2 hrs in the future - tip(44) - b48 = block(48, solve=False) - b48.nTime = int(time.time()) + 60 * 60 * 3 - b48.solve() - yield rejected(RejectResult(16, b'time-too-new')) - - # A block with an invalid merkle hash - tip(44) - b49 = block(49) - b49.hashMerkleRoot += 1 - b49.solve() - yield rejected(RejectResult(16, b'bad-txnmrklroot')) - - # A block with an incorrect POW limit - tip(44) - b50 = block(50) - b50.nBits = b50.nBits - 1 - b50.solve() - yield rejected(RejectResult(16, b'bad-diffbits')) - - # A block with two coinbase txns - tip(44) - b51 = block(51) - cb2 = create_coinbase(51, self.coinbase_pubkey) - b51 = update_block(51, [cb2]) - yield rejected(RejectResult(16, b'bad-cb-multiple')) - - # A block w/ duplicate txns - # Note: txns have to be in the right position in the merkle tree to trigger this error - tip(44) - b52 = block(52, spend=out[15]) - tx = create_tx(b52.vtx[1], 0, 1) - b52 = update_block(52, [tx, tx]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - - # Test block timestamps - # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) - # \-> b54 (15) - # - tip(43) - block(53, spend=out[14]) - yield rejected() # rejected since b44 is at same height - save_spendable_output() - - # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast) - b54 = block(54, spend=out[15]) - b54.nTime = b35.nTime - 1 - b54.solve() - yield rejected(RejectResult(16, b'time-too-old')) - - # valid timestamp - tip(53) - b55 = block(55, spend=out[15]) - b55.nTime = b35.nTime - update_block(55, []) - yield accepted() - save_spendable_output() - - - # Test CVE-2012-2459 - # - # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) - # \-> b57 (16) - # \-> b56p2 (16) - # \-> b56 (16) - # - # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without - # affecting the merkle root of a block, while still invalidating 
it. - # See: src/consensus/merkle.h - # - # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. - # Result: OK - # - # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle - # root but duplicate transactions. - # Result: Fails - # - # b57p2 has six transactions in its merkle tree: - # - coinbase, tx, tx1, tx2, tx3, tx4 - # Merkle root calculation will duplicate as necessary. - # Result: OK. - # - # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches - # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates - # that the error was caught early, avoiding a DOS vulnerability.) - - # b57 - a good block with 2 txs, don't submit until end - tip(55) - b57 = block(57) - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - b57 = update_block(57, [tx, tx1]) - - # b56 - copy b57, add a duplicate tx - tip(55) - b56 = copy.deepcopy(b57) - self.blocks[56] = b56 - assert_equal(len(b56.vtx),3) - b56 = update_block(56, [tx1]) - assert_equal(b56.hash, b57.hash) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - - # b57p2 - a good block with 6 tx'es, don't submit until end - tip(55) - b57p2 = block("57p2") - tx = create_and_sign_tx(out[16].tx, out[16].n, 1) - tx1 = create_tx(tx, 0, 1) - tx2 = create_tx(tx1, 0, 1) - tx3 = create_tx(tx2, 0, 1) - tx4 = create_tx(tx3, 0, 1) - b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) - - # b56p2 - copy b57p2, duplicate two non-consecutive tx's - tip(55) - b56p2 = copy.deepcopy(b57p2) - self.blocks["b56p2"] = b56p2 - assert_equal(b56p2.hash, b57p2.hash) - assert_equal(len(b56p2.vtx),6) - b56p2 = update_block("b56p2", [tx3, tx4]) - yield rejected(RejectResult(16, b'bad-txns-duplicate')) - - tip("57p2") - yield accepted() - - tip(57) - yield rejected() #rejected because 57p2 seen first - save_spendable_output() - - # Test a few invalid tx types - # - # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> ??? (17) - # - - # tx with prevout.n out of range - tip(57) - b58 = block(58, spend=out[17]) - tx = CTransaction() - assert(len(out[17].tx.vout) < 42) - tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) - tx.vout.append(CTxOut(0, b"")) - tx.calc_sha256() - b58 = update_block(58, [tx]) - yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) - - # tx with output value > input value out of range - tip(57) - b59 = block(59) - tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN) - b59 = update_block(59, [tx]) - yield rejected(RejectResult(16, b'bad-txns-in-belowout')) - - # reset to good chain - tip(57) - b60 = block(60, spend=out[17]) - yield accepted() - save_spendable_output() - - # Test BIP30 - # - # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> b61 (18) - # - # Blocks are not allowed to contain a transaction whose id matches that of an earlier, - # not-fully-spent transaction in the same chain. To test, make identical coinbases; - # the second one should be rejected. 
- # - tip(60) - b61 = block(61, spend=out[18]) - b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases - b61.vtx[0].rehash() - b61 = update_block(61, []) - assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) - yield rejected(RejectResult(16, b'bad-txns-BIP30')) - - - # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) - # - # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> b62 (18) - # - tip(60) - b62 = block(62) - tx = CTransaction() - tx.nLockTime = 0xffffffff #this locktime is non-final - assert(out[18].n < len(out[18].tx.vout)) - tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence - tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) - assert(tx.vin[0].nSequence < 0xffffffff) - tx.calc_sha256() - b62 = update_block(62, [tx]) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - - - # Test a non-final coinbase is also rejected - # - # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) - # \-> b63 (-) - # - tip(60) - b63 = block(63) - b63.vtx[0].nLockTime = 0xffffffff - b63.vtx[0].vin[0].nSequence = 0xDEADBEEF - b63.vtx[0].rehash() - b63 = update_block(63, []) - yield rejected(RejectResult(16, b'bad-txns-nonfinal')) - - - # This checks that a block with a bloated VARINT between the block_header and the array of tx such that - # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint, - # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not - # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. - # - # What matters is that the receiving node should not reject the bloated block, and then reject the canonical - # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
-        #
-        # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
-        #                                                                                        \
-        #                                                                                         b64a (18)
-        # b64a is a bloated block (non-canonical varint)
-        # b64 is a good block (same as b64a but w/ canonical varint)
-        #
-        tip(60)
-        regular_block = block("64a", spend=out[18])
-
-        # make it a "broken_block," with non-canonical serialization
-        b64a = CBrokenBlock(regular_block)
-        b64a.initialize(regular_block)
-        self.blocks["64a"] = b64a
-        self.tip = b64a
-        tx = CTransaction()
-
-        # use canonical serialization to calculate size
-        script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
-        script_output = CScript([b'\x00' * script_length])
-        tx.vout.append(CTxOut(0, script_output))
-        tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
-        b64a = update_block("64a", [tx])
-        assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
-        yield TestInstance([[self.tip, None]])
-
-        # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
-        self.test.block_store.erase(b64a.sha256)
-
-        tip(60)
-        b64 = CBlock(b64a)
-        b64.vtx = copy.deepcopy(b64a.vtx)
-        assert_equal(b64.hash, b64a.hash)
-        assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
-        self.blocks[64] = b64
-        update_block(64, [])
-        yield accepted()
-        save_spendable_output()
-
-        # Spend an output created in the block itself
-        #
-        # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
-        #
-        tip(64)
-        block(65)
-        tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
-        tx2 = create_and_sign_tx(tx1, 0, 0)
-        update_block(65, [tx1, tx2])
-        yield accepted()
-        save_spendable_output()
-
-        # Attempt to spend an output created later in the same block
-        #
-        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
-        #                                                                          \-> b66 (20)
-        tip(65)
-        block(66)
-        tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
-        tx2 = create_and_sign_tx(tx1, 0, 1)
-        update_block(66, [tx2, tx1])
-        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
-
-        # Attempt to double-spend a transaction created in a block
-        #
-        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
-        #                                                                          \-> b67 (20)
-        #
-        #
-        tip(65)
-        block(67)
-        tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
-        tx2 = create_and_sign_tx(tx1, 0, 1)
-        tx3 = create_and_sign_tx(tx1, 0, 2)
-        update_block(67, [tx1, tx2, tx3])
-        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
-
-        # More tests of block subsidy
-        #
-        # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
-        #                                                                          \-> b68 (20)
-        #
-        # b68 - coinbase with an extra 10 satoshis,
-        #       creates a tx that has 9 satoshis from out[20] go to fees
-        #       this fails because the coinbase is trying to claim 1 satoshi too much in fees
-        #
-        # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
-        #       this succeeds
-        #
-        tip(65)
-        block(68, additional_coinbase_value=10)
-        tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9)
-        update_block(68, [tx])
-        yield rejected(RejectResult(16, b'bad-cb-amount'))
-
-        tip(65)
-        b69 = block(69, additional_coinbase_value=10)
-        tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10)
-        update_block(69, [tx])
-        yield accepted()
-        save_spendable_output()
-
-        # Test spending the outpoint of a non-existent transaction
-        #
-        # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
-        #                                                                          \-> b70 (21)
-        #
-        tip(69)
-        block(70, spend=out[21])
-        bogus_tx = CTransaction()
-        bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
-        tx = CTransaction()
-        tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
-        tx.vout.append(CTxOut(1, b""))
-        update_block(70, [tx])
-        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
-
-        # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
-        #
-        # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
-        #                                                                                      \-> b71 (21)
-        #
-        # b72 is a good block.
-        # b71 is a copy of b72, but re-adds one of its transactions. However, it has the same hash as b72.
-        #
-        tip(69)
-        b72 = block(72)
-        tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
-        tx2 = create_and_sign_tx(tx1, 0, 1)
-        b72 = update_block(72, [tx1, tx2])  # now tip is 72
-        b71 = copy.deepcopy(b72)
-        b71.vtx.append(tx2)  # add duplicate tx2
-        self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1  # b71 builds off b69
-        self.blocks[71] = b71
-
-        assert_equal(len(b71.vtx), 4)
-        assert_equal(len(b72.vtx), 3)
-        assert_equal(b72.sha256, b71.sha256)
-
-        tip(71)
-        yield rejected(RejectResult(16, b'bad-txns-duplicate'))
-        tip(72)
-        yield accepted()
-        save_spendable_output()
-
-        # Test some invalid scripts and MAX_BLOCK_SIGOPS
-        #
-        # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
-        #                                                                          \-> b** (22)
-        #
-
-        # b73 - tx with excessive sigops that are placed after an excessively large script element.
-        #       The purpose of the test is to make sure those sigops are counted.
-        #
-        #       script is a bytearray of size 20,526
-        #
-        #       bytearray[0-19,998]     : OP_CHECKSIG
-        #       bytearray[19,999]       : OP_PUSHDATA4
-        #       bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
-        #       bytearray[20,004-20,524]: unread data (script_element)
-        #       bytearray[20,525]       : OP_CHECKSIG (this puts us over the limit)
-        #
-        tip(72)
-        b73 = block(73)
-        size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
-        a = bytearray([OP_CHECKSIG] * size)
-        a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16)  # OP_PUSHDATA4
-
-        element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
-        a[MAX_BLOCK_SIGOPS] = element_size % 256
-        a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
-        a[MAX_BLOCK_SIGOPS + 2] = 0
-        a[MAX_BLOCK_SIGOPS + 3] = 0
-
-        tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
-        b73 = update_block(73, [tx])
-        assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
-        yield rejected(RejectResult(16, b'bad-blk-sigops'))
-
-        # b74/75 - if we push an invalid script element, all previous sigops are counted,
-        #          but sigops after the element are not counted.
-        #
-        #          The invalid script element is a push_data that indicates a huge amount
-        #          of data will follow (on the order of 0xffffffff bytes), but we only
-        #          provide a much smaller number. These bytes are CHECKSIGS so they would
-        #          cause b75 to fail for excessive sigops, if those bytes were counted.
- # - # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element - # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element - # - # - tip(72) - b74 = block(74) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 - a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS] = 0x4e - a[MAX_BLOCK_SIGOPS+1] = 0xfe - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - a[MAX_BLOCK_SIGOPS+4] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b74 = update_block(74, [tx]) - yield rejected(RejectResult(16, b'bad-blk-sigops')) - - tip(72) - b75 = block(75) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 - a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e - a[MAX_BLOCK_SIGOPS] = 0xff - a[MAX_BLOCK_SIGOPS+1] = 0xff - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) - b75 = update_block(75, [tx]) - yield accepted() - save_spendable_output() - - # Check that if we push an element filled with CHECKSIGs, they are not counted - tip(75) - b76 = block(76) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 - a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs - tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) - b76 = update_block(76, [tx]) - yield accepted() - save_spendable_output() - - # Test transaction resurrection - # - # -> b77 (24) -> b78 (25) -> b79 (26) - # \-> b80 (25) -> b81 (26) -> b82 (27) - # - # b78 creates a tx, which is spent in b79. After b82, both should be in mempool - # - # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the - # rather obscure reason that the Python signature code does not distinguish between - # Low-S and High-S values (whereas the bitcoin code has custom code which does so); - # as a result of which, the odds are 50% that the python code will use the right - # value and the transaction will be accepted into the mempool. Until we modify the - # test framework to support low-S signing, we are out of luck. - # - # To get around this issue, we construct transactions which are not signed and which - # spend to OP_TRUE. If the standard-ness rules change, this test would need to be - # updated. (Perhaps to spend to a P2SH OP_TRUE script) - # - tip(76) - block(77) - tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN) - update_block(77, [tx77]) - yield accepted() - save_spendable_output() - - block(78) - tx78 = create_tx(tx77, 0, 9*COIN) - update_block(78, [tx78]) - yield accepted() - - block(79) - tx79 = create_tx(tx78, 0, 8*COIN) - update_block(79, [tx79]) - yield accepted() - - # mempool should be empty - assert_equal(len(self.nodes[0].getrawmempool()), 0) - - tip(77) - block(80, spend=out[25]) - yield rejected() - save_spendable_output() - - block(81, spend=out[26]) - yield rejected() # other chain is same length - save_spendable_output() - - block(82, spend=out[27]) - yield accepted() # now this chain is longer, triggers re-org - save_spendable_output() - - # now check that tx78 and tx79 have been put back into the peer's mempool - mempool = self.nodes[0].getrawmempool() - assert_equal(len(mempool), 2) - assert(tx78.hash in mempool) - assert(tx79.hash in mempool) - - - # Test invalid opcodes in dead execution paths. 
- # - # -> b81 (26) -> b82 (27) -> b83 (28) - # - block(83) - op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] - script = CScript(op_codes) - tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) - - tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) - tx2.vin[0].scriptSig = CScript([OP_FALSE]) - tx2.rehash() - - update_block(83, [tx1, tx2]) - yield accepted() - save_spendable_output() - - - # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) - # - # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) - # \-> b85 (29) -> b86 (30) \-> b89a (32) - # - # - block(84) - tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.calc_sha256() - self.sign_tx(tx1, out[29].tx, out[29].n) - tx1.rehash() - tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) - tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) - tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) - tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) - tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) - - update_block(84, [tx1,tx2,tx3,tx4,tx5]) - yield accepted() - save_spendable_output() - - tip(83) - block(85, spend=out[29]) - yield rejected() - - block(86, spend=out[30]) - yield accepted() - - tip(84) - block(87, spend=out[30]) - yield rejected() - save_spendable_output() - - block(88, spend=out[31]) - yield accepted() - save_spendable_output() - - # trying to spend the OP_RETURN output is rejected - block("89a", spend=out[32]) - tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) - update_block("89a", [tx]) - yield rejected() - - - # Test re-org of a week's worth of blocks (1088 blocks) - # This test takes a minute or two and can be accomplished in memory - # - if self.options.runbarelyexpensive: - tip(88) - LARGE_REORG_SIZE = 1088 - test1 = TestInstance(sync_every_block=False) - spend=out[32] - for i in range(89, LARGE_REORG_SIZE + 89): - b = block(i, spend) - tx = CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 - script_output = CScript([b'\x00' * script_length]) - tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) - b = update_block(i, [tx]) - assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) - test1.blocks_and_transactions.append([self.tip, True]) - save_spendable_output() - spend = get_spendable_output() - - yield test1 - chain1_tip = i - - # now create alt chain of same length - tip(88) - test2 = TestInstance(sync_every_block=False) - for i in range(89, LARGE_REORG_SIZE + 89): - block("alt"+str(i)) - test2.blocks_and_transactions.append([self.tip, False]) - yield test2 - - # extend alt chain to trigger re-org - block("alt" + str(chain1_tip + 1)) - yield accepted() - - # ... 
and re-org back to the first chain - tip(chain1_tip) - block(chain1_tip + 1) - yield rejected() - block(chain1_tip + 2) - yield accepted() - - chain1_tip += 2 - - - -if __name__ == '__main__': - FullBlockTest().main() diff --git a/test/functional/p2p-versionbits-warning.py b/test/functional/p2p-versionbits-warning.py deleted file mode 100755 index 0bfe94622f..0000000000 --- a/test/functional/p2p-versionbits-warning.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test version bits warning system. - -Generate chains with block versions that appear to be signalling unknown -soft-forks, and test that warning alerts are generated. -""" - -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -import re -from test_framework.blocktools import create_block, create_coinbase - -VB_PERIOD = 144 # versionbits period length for regtest -VB_THRESHOLD = 108 # versionbits activation threshold for regtest -VB_TOP_BITS = 0x20000000 -VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment - -WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect" -WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT) -VB_PATTERN = re.compile("^Warning.*versionbit") - -class TestNode(P2PInterface): - def on_inv(self, message): - pass - -class VersionBitsWarningTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def setup_network(self): - self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") - # Open and close to create zero-length file - with open(self.alert_filename, 'w', encoding='utf8') as _: - pass - self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]] - self.setup_nodes() - - # Send numblocks blocks via peer with nVersionToUse set. - def send_blocks_with_version(self, peer, numblocks, nVersionToUse): - tip = self.nodes[0].getbestblockhash() - height = self.nodes[0].getblockcount() - block_time = self.nodes[0].getblockheader(tip)["time"]+1 - tip = int(tip, 16) - - for _ in range(numblocks): - block = create_block(tip, create_coinbase(height+1), block_time) - block.nVersion = nVersionToUse - block.solve() - peer.send_message(msg_block(block)) - block_time += 1 - height += 1 - tip = block.sha256 - peer.sync_with_ping() - - def test_versionbits_in_alert_file(self): - with open(self.alert_filename, 'r', encoding='utf8') as f: - alert_text = f.read() - assert(VB_PATTERN.match(alert_text)) - - def run_test(self): - # Setup the p2p connection and start up the network thread. - self.nodes[0].add_p2p_connection(TestNode()) - - network_thread_start() - - # Test logic begins here - self.nodes[0].p2p.wait_for_verack() - - # 1. Have the node mine one period worth of blocks - self.nodes[0].generate(VB_PERIOD) - - # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD - # blocks signaling some unknown bit. 
-        nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
-        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD - 1, nVersion)
-
-        # Fill rest of period with regular version blocks
-        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
-        # Check that we're not getting any versionbit-related errors in
-        # get*info()
-        assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["warnings"]))
-        assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
-
-        # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
-        # some unknown bit
-        self.send_blocks_with_version(self.nodes[0].p2p, VB_THRESHOLD, nVersion)
-        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
-        # Might not get a versionbits-related alert yet, as we should
-        # have gotten a different alert due to more than 51/100 blocks
-        # being of unexpected version.
-        # Check that get*info() shows some kind of error.
-        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["warnings"])
-        assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
-
-        # Mine a period worth of expected blocks so the generic block-version warning
-        # is cleared, and restart the node. This should move the versionbit state
-        # to ACTIVE.
-        self.nodes[0].generate(VB_PERIOD)
-        self.stop_nodes()
-        # Empty out the alert file
-        with open(self.alert_filename, 'w', encoding='utf8') as _:
-            pass
-        self.start_nodes()
-
-        # Connecting one block should be enough to generate an error.
-        self.nodes[0].generate(1)
-        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["warnings"])
-        assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
-        self.stop_nodes()
-        self.test_versionbits_in_alert_file()
-
-        # Test framework expects the node to still be running...
-        self.start_nodes()
-
-if __name__ == '__main__':
-    VersionBitsWarningTest().main()
diff --git a/test/functional/proxy_test.py b/test/functional/proxy_test.py
deleted file mode 100755
index 2eb1be47a5..0000000000
--- a/test/functional/proxy_test.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2017 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test bitcoind with different proxy configuration.
-
-Test plan:
-- Start bitcoind's with different proxy configurations
-- Use addnode to initiate connections
-- Verify that proxies are connected to, and the right connection command is given
-- Proxy configurations to test on bitcoind side:
-    - `-proxy` (proxy everything)
-    - `-onion` (proxy just onions)
-    - `-proxyrandomize` Circuit randomization
-- Proxy configurations to test on proxy side:
-    - support no authentication (other proxy)
-    - support no authentication + user/pass authentication (Tor)
-    - proxy on IPv6
-
-- Create various proxies (as threads)
-- Create bitcoinds that connect to them
-- Manipulate the bitcoinds using addnode (onetry) and observe effects
-
-addnode connect to IPv4
-addnode connect to IPv6
-addnode connect to onion
-addnode connect to generic DNS name
-"""
-
-import socket
-import os
-
-from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    PORT_MIN,
-    PORT_RANGE,
-    assert_equal,
-)
-from test_framework.netutil import test_ipv6_local
-
-RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE  # Start after p2p and rpc ports
-
-class ProxyTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.num_nodes = 4
-
-    def setup_nodes(self):
-        self.have_ipv6 = test_ipv6_local()
-        # Create two proxies on different ports
-        # ... one unauthenticated
-        self.conf1 = Socks5Configuration()
-        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
-        self.conf1.unauth = True
-        self.conf1.auth = False
-        # ...
one supporting authenticated and unauthenticated (Tor) - self.conf2 = Socks5Configuration() - self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) - self.conf2.unauth = True - self.conf2.auth = True - if self.have_ipv6: - # ... one on IPv6 with similar configuration - self.conf3 = Socks5Configuration() - self.conf3.af = socket.AF_INET6 - self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) - self.conf3.unauth = True - self.conf3.auth = True - else: - self.log.warning("Testing without local IPv6 support") - - self.serv1 = Socks5Server(self.conf1) - self.serv1.start() - self.serv2 = Socks5Server(self.conf2) - self.serv2.start() - if self.have_ipv6: - self.serv3 = Socks5Server(self.conf3) - self.serv3.start() - - # Note: proxies are not used to connect to local nodes - # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost - args = [ - ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], - ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], - ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], - [] - ] - if self.have_ipv6: - args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion'] - self.add_nodes(self.num_nodes, extra_args=args) - self.start_nodes() - - def node_test(self, node, proxies, auth, test_onion=True): - rv = [] - # Test: outgoing IPv4 connection through node - node.addnode("15.61.23.23:1234", "onetry") - cmd = proxies[0].queue.get() - assert(isinstance(cmd, Socks5Command)) - # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"15.61.23.23") - assert_equal(cmd.port, 1234) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - if self.have_ipv6: - # Test: outgoing IPv6 connection through node - node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") - cmd = proxies[1].queue.get() - assert(isinstance(cmd, Socks5Command)) - # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") - assert_equal(cmd.port, 5443) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - if test_onion: - # Test: outgoing onion connection through node - node.addnode("bitcoinostk4e4re.onion:8333", "onetry") - cmd = proxies[2].queue.get() - assert(isinstance(cmd, Socks5Command)) - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") - assert_equal(cmd.port, 8333) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - # Test: outgoing DNS name connection through node - node.addnode("node.noumenon:8333", "onetry") - cmd = proxies[3].queue.get() - assert(isinstance(cmd, Socks5Command)) - assert_equal(cmd.atyp, AddressType.DOMAINNAME) - assert_equal(cmd.addr, b"node.noumenon") - assert_equal(cmd.port, 8333) - if not auth: - assert_equal(cmd.username, None) - assert_equal(cmd.password, None) - rv.append(cmd) - - return rv - - def run_test(self): - # basic -proxy - self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) - - # -proxy plus -onion - 
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) - - # -proxy plus -onion, -proxyrandomize - rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) - # Check that credentials as used for -proxyrandomize connections are unique - credentials = set((x.username,x.password) for x in rv) - assert_equal(len(credentials), len(rv)) - - if self.have_ipv6: - # proxy on IPv6 localhost - self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) - - def networks_dict(d): - r = {} - for x in d['networks']: - r[x['name']] = x - return r - - # test RPC getnetworkinfo - n0 = networks_dict(self.nodes[0].getnetworkinfo()) - for net in ['ipv4','ipv6','onion']: - assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) - assert_equal(n0[net]['proxy_randomize_credentials'], True) - assert_equal(n0['onion']['reachable'], True) - - n1 = networks_dict(self.nodes[1].getnetworkinfo()) - for net in ['ipv4','ipv6']: - assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) - assert_equal(n1[net]['proxy_randomize_credentials'], False) - assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) - assert_equal(n1['onion']['proxy_randomize_credentials'], False) - assert_equal(n1['onion']['reachable'], True) - - n2 = networks_dict(self.nodes[2].getnetworkinfo()) - for net in ['ipv4','ipv6','onion']: - assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) - assert_equal(n2[net]['proxy_randomize_credentials'], True) - assert_equal(n2['onion']['reachable'], True) - - if self.have_ipv6: - n3 = networks_dict(self.nodes[3].getnetworkinfo()) - for net in ['ipv4','ipv6']: - assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) - assert_equal(n3[net]['proxy_randomize_credentials'], False) - assert_equal(n3['onion']['reachable'], False) - -if __name__ == '__main__': - ProxyTest().main() - diff --git a/test/functional/pruning.py b/test/functional/pruning.py deleted file mode 100755 index 49ad7f838c..0000000000 --- a/test/functional/pruning.py +++ /dev/null @@ -1,454 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the pruning code. - -WARNING: -This test uses 4GB of disk space. -This test takes 30 mins or more (up to 2 hours) -""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -import time -import os - -MIN_BLOCKS_TO_KEEP = 288 - -# Rescans start at the earliest block up to 2 hours before a key timestamp, so -# the manual prune RPC avoids pruning blocks in the same window to be -# compatible with pruning based on key creation time. -TIMESTAMP_WINDOW = 2 * 60 * 60 - - -def calc_usage(blockdir): - return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) - -class PruneTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 6 - - # Create nodes 0 and 1 to mine. - # Create node 2 to test pruning. 
- self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ] - # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) - # Create nodes 5 to test wallet in prune mode, but do not connect - self.extra_args = [self.full_node_default_args, - self.full_node_default_args, - ["-maxreceivebuffer=20000", "-prune=550"], - ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], - ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], - ["-prune=550"]] - - def setup_network(self): - self.setup_nodes() - - self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/" - - connect_nodes(self.nodes[0], 1) - connect_nodes(self.nodes[1], 2) - connect_nodes(self.nodes[2], 0) - connect_nodes(self.nodes[0], 3) - connect_nodes(self.nodes[0], 4) - sync_blocks(self.nodes[0:5]) - - def setup_nodes(self): - self.add_nodes(self.num_nodes, self.extra_args, timewait=900) - self.start_nodes() - - def create_big_chain(self): - # Start by creating some coinbases we can spend later - self.nodes[1].generate(200) - sync_blocks(self.nodes[0:2]) - self.nodes[0].generate(150) - # Then mine enough full blocks to create more than 550MiB of data - for i in range(645): - mine_large_block(self.nodes[0], self.utxo_cache_0) - - sync_blocks(self.nodes[0:5]) - - def test_height_min(self): - if not os.path.isfile(self.prunedir+"blk00000.dat"): - raise AssertionError("blk00000.dat is missing, pruning too early") - self.log.info("Success") - self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir)) - self.log.info("Mining 25 more blocks should cause the first block file to be pruned") - # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this - for i in range(25): - mine_large_block(self.nodes[0], self.utxo_cache_0) - - waitstart = time.time() - while os.path.isfile(self.prunedir+"blk00000.dat"): - time.sleep(0.1) - if time.time() - waitstart > 30: - raise AssertionError("blk00000.dat not pruned when it should be") - - self.log.info("Success") - usage = calc_usage(self.prunedir) - self.log.info("Usage should be below target: %d" % usage) - if (usage > 550): - raise AssertionError("Pruning target not being met") - - def create_chain_with_staleblocks(self): - # Create stale blocks in manageable sized chunks - self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") - - for j in range(12): - # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain - # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects - # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine - self.stop_node(0) - self.start_node(0, extra_args=self.full_node_default_args) - # Mine 24 blocks in node 1 - for i in range(24): - if j == 0: - mine_large_block(self.nodes[1], self.utxo_cache_1) - else: - # Add node1's wallet transactions back to the mempool, to - # avoid the mined blocks from being too small. 
-                    self.nodes[1].resendwallettransactions()
-                    self.nodes[1].generate(1)  # txs already in mempool from previous disconnects
-
-            # Reorg back with a 25-block chain from node 0
-            for i in range(25):
-                mine_large_block(self.nodes[0], self.utxo_cache_0)
-
-            # Create connections in this order so both nodes can see the reorg at the same time
-            connect_nodes(self.nodes[1], 0)
-            connect_nodes(self.nodes[2], 0)
-            sync_blocks(self.nodes[0:3])
-
-        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
-
-    def reorg_test(self):
-        # Node 1 will mine a 300-block chain starting 287 blocks back from Node 0 and Node 2's tip
-        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
-        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
-        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
-
-        height = self.nodes[1].getblockcount()
-        self.log.info("Current block height: %d" % height)
-
-        invalidheight = height - 287
-        badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
-        self.nodes[1].invalidateblock(badhash)
-
-        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
-        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
-        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
-        curhash = self.nodes[1].getblockhash(invalidheight - 1)
-        while curhash != mainchainhash:
-            self.nodes[1].invalidateblock(curhash)
-            curhash = self.nodes[1].getblockhash(invalidheight - 1)
-
-        assert(self.nodes[1].getblockcount() == invalidheight - 1)
-        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
-
-        # Reboot node 1 to clear those giant txs from its mempool
-        self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
-
-        self.log.info("Generating new longer chain of 300 more blocks")
-        self.nodes[1].generate(300)
-
-        self.log.info("Reconnect nodes")
-        connect_nodes(self.nodes[0], 1)
-        connect_nodes(self.nodes[2], 1)
-        sync_blocks(self.nodes[0:3], timeout=120)
-
-        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
-
-        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
-
-        # Get node 0's wallet transactions back in its mempool, to keep the
-        # mined blocks from being too small.
-        self.nodes[0].resendwallettransactions()
-
-        for i in range(22):
-            # This can be slow, so do this in multiple RPC calls to avoid
-            # RPC timeouts.
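-            # (22 rounds of generate(10) produce the 220 blocks announced
-            # above while keeping each individual RPC call short.)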
-            self.nodes[0].generate(10)  # node 0 has many large txs in its mempool from the disconnects
-        sync_blocks(self.nodes[0:3], timeout=300)
-
-        usage = calc_usage(self.prunedir)
-        self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
-
-        return invalidheight, badhash
-
-    def reorg_back(self):
-        # Verify that a block on the old main chain fork has been pruned away
-        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
-        self.log.info("Will need to redownload block %d" % self.forkheight)
-
-        # Verify that we have enough history to reorg back to the fork point.
-        # Although this is more than 288 blocks, because this chain was written more recently
-        # and only its other 299 small and 220 large blocks are in the block files after it,
-        # it's expected to still be retained.
-        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
-
-        first_reorg_height = self.nodes[2].getblockcount()
-        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
-        self.nodes[2].invalidateblock(curchainhash)
-        goalbestheight = self.mainchainheight
-        goalbesthash = self.mainchainhash2
-
-        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
-        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
-        # redownload its missing blocks.
-        # Invalidate the reorg_test chain in node 0 as well; it can successfully switch to the original chain
-        # because it has all the block data.
-        # However, it must mine enough blocks to have a higher-work chain than the reorg_test chain in order
-        # to trigger node 2's block download logic.
-        # At this point node 2 is within 288 blocks of the fork point, so it will preserve its ability to reorg.
-        if self.nodes[2].getblockcount() < self.mainchainheight:
-            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
-            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
-            self.nodes[0].invalidateblock(curchainhash)
-            assert(self.nodes[0].getblockcount() == self.mainchainheight)
-            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
-            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
-            goalbestheight = first_reorg_height + 1
-
-        self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
-        waitstart = time.time()
-        while self.nodes[2].getblockcount() < goalbestheight:
-            time.sleep(0.1)
-            if time.time() - waitstart > 900:
-                raise AssertionError("Node 2 didn't reorg to proper height")
-        assert(self.nodes[2].getbestblockhash() == goalbesthash)
-        # Verify that we now have the data for a previously pruned block
-        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
-
-    def manual_test(self, node_number, use_timestamp):
-        # At this point, the node has 995 blocks and has not yet run in prune mode
-        self.start_node(node_number)
-        node = self.nodes[node_number]
-        assert_equal(node.getblockcount(), 995)
-        assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
-
-        # Now restart in manual pruning mode
-        self.stop_node(node_number)
-        self.start_node(node_number, extra_args=["-prune=1"])
-        node = self.nodes[node_number]
-        assert_equal(node.getblockcount(), 995)
-
-        def height(index):
-            if use_timestamp:
-                return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
-            else:
-                return index
-
-        def prune(index, expected_ret=None):
-            ret = node.pruneblockchain(height(index))
-            # Check the return value. When use_timestamp is True, just check
-            # that the return value is less than or equal to the expected
-            # value, because when more than one block is generated per second,
-            # a timestamp will not be granular enough to uniquely identify an
-            # individual block.
-            if expected_ret is None:
-                expected_ret = index
-            if use_timestamp:
-                assert_greater_than(ret, 0)
-                assert_greater_than(expected_ret + 1, ret)
-            else:
-                assert_equal(ret, expected_ret)
-
-        def has_block(index):
-            return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
-
-        # Should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
-        assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
-
-        # Mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
-        node.generate(6)
-        assert_equal(node.getblockchaininfo()["blocks"], 1001)
-
-        # Negative heights should raise an exception
-        assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
-
-        # height=100 is too low to prune the first block file, so this is a no-op
-        prune(100)
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when it should still be there")
-
-        # Does nothing
-        node.pruneblockchain(height(0))
-        if not has_block(0):
-            raise AssertionError("blk00000.dat is missing when it should still be there")
-
-        # height=500 should prune the first file
-        prune(500)
-        if has_block(0):
-            raise AssertionError("blk00000.dat is still there, should be pruned by now")
-        if not has_block(1):
-            raise AssertionError("blk00001.dat is missing when it should still be there")
-
-        # height=650 should prune the second file
-        prune(650)
-        if has_block(1):
-            raise AssertionError("blk00001.dat is still there, should be pruned by now")
-
-        # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
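-        # (Worked example: the tip is at height 1001 and MIN_BLOCKS_TO_KEEP is
-        # 288, so the highest prunable block is 1001 - 288 = 713, which is the
-        # expected return value passed to prune() below.)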
-        prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
-        if not has_block(2):
-            raise AssertionError("blk00002.dat is missing when it should still be there")
-
-        # Advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
-        node.generate(288)
-        prune(1000)
-        if has_block(2):
-            raise AssertionError("blk00002.dat is still there, should be pruned by now")
-        if has_block(3):
-            raise AssertionError("blk00003.dat is still there, should be pruned by now")
-
-        # Stop the node, start it back up with auto-prune at 550MiB, and make sure it still runs
-        self.stop_node(node_number)
-        self.start_node(node_number, extra_args=["-prune=550"])
-
-        self.log.info("Success")
-
-    def wallet_test(self):
-        # Check that the pruning node's wallet is still in good shape
-        self.log.info("Stop and start pruning node to trigger wallet rescan")
-        self.stop_node(2)
-        self.start_node(2, extra_args=["-prune=550"])
-        self.log.info("Success")
-
-        # Check that the wallet loads successfully when restarting a pruned node after IBD.
-        # This was reported to fail in #7494.
-        self.log.info("Syncing node 5 to test wallet")
-        connect_nodes(self.nodes[0], 5)
-        nds = [self.nodes[0], self.nodes[5]]
-        sync_blocks(nds, wait=5, timeout=300)
-        self.stop_node(5)  # stop and start to trigger rescan
-        self.start_node(5, extra_args=["-prune=550"])
-        self.log.info("Success")
-
-    def run_test(self):
-        self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
-        self.log.info("Mining a big blockchain of 995 blocks")
-
-        # Determine default relay fee
-        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
-        # Cache for utxos, as listunspent may take a long time later in the test
-        self.utxo_cache_0 = []
-        self.utxo_cache_1 = []
-
-        self.create_big_chain()
-        # Chain diagram key:
-        # *   blocks on main chain
-        # +,&,$,@ blocks on other forks
-        # X   invalidated block
-        # N1  Node 1
-        #
-        # Start by mining a simple chain that all nodes have
-        # N0=N1=N2 **...*(995)
-
-        # Stop the manual-pruning nodes with 995 blocks
-        self.stop_node(3)
-        self.stop_node(4)
-
-        self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
-        self.test_height_min()
-        # Extend this chain past the PruneAfterHeight
-        # N0=N1=N2 **...*(1020)
-
-        self.log.info("Check that we'll exceed the disk space target if we have a very high stale block rate")
-        self.create_chain_with_staleblocks()
-        # Disconnect N0
-        # And mine a 24-block chain on N1 and a separate 25-block chain on N0
-        # N1=N2 **...*+...+(1044)
-        # N0    **...**...**(1045)
-        #
-        # Reconnect the nodes, causing a reorg on N1 and N2
-        # N1=N2 **...*(1020) *...**(1045)
-        #                   \
-        #                    +...+(1044)
-        #
-        # Repeat this process until there are 12 stale forks hanging off the
-        # main chain on N1 and N2
-        # N0    *************************...***************************(1320)
-        #
-        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
-        #                   \            \                      \
-        #                    +...+(1044)  &..                    $...$(1319)
-
-        # Save some current chain state for later use
-        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
-        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
-
-        self.log.info("Check that we can still survive a 288-block reorg")
-        (self.forkheight, self.forkhash) = self.reorg_test()  # (1033, )
-        # Now create a 288-block reorg by mining a longer chain on N1
-        # First disconnect N1
-        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
-        # N1   **...*(1020) **...**(1032)X..
-        #                  \
-        #                   ++...+(1031)X..
- # - # Now mine 300 more blocks on N1 - # N1 **...*(1020) **...**(1032) @@...@(1332) - # \ \ - # \ X... - # \ \ - # ++...+(1031)X.. .. - # - # Reconnect nodes and mine 220 more blocks on N1 - # N1 **...*(1020) **...**(1032) @@...@@@(1552) - # \ \ - # \ X... - # \ \ - # ++...+(1031)X.. .. - # - # N2 **...*(1020) **...**(1032) @@...@@@(1552) - # \ \ - # \ *...**(1320) - # \ \ - # ++...++(1044) .. - # - # N0 ********************(1032) @@...@@@(1552) - # \ - # *...**(1320) - - self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg") - self.reorg_back() - # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) - # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to - # original main chain (*), but will require redownload of some blocks - # In order to have a peer we think we can download from, must also perform this invalidation - # on N0 and mine a new longest chain to trigger. - # Final result: - # N0 ********************(1032) **...****(1553) - # \ - # X@...@@@(1552) - # - # N2 **...*(1020) **...**(1032) **...****(1553) - # \ \ - # \ X@...@@@(1552) - # \ - # +.. - # - # N1 doesn't change because 1033 on main chain (*) is invalid - - self.log.info("Test manual pruning with block indices") - self.manual_test(3, use_timestamp=False) - - self.log.info("Test manual pruning with timestamps") - self.manual_test(4, use_timestamp=True) - - self.log.info("Test wallet re-scan") - self.wallet_test() - - self.log.info("Done") - -if __name__ == '__main__': - PruneTest().main() diff --git a/test/functional/reindex.py b/test/functional/reindex.py deleted file mode 100755 index ac67e6e9ba..0000000000 --- a/test/functional/reindex.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test running bitcoind with -reindex and -reindex-chainstate options. - -- Start a single node and generate 3 blocks. -- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3. -- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3. 
-""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal -import time - -class ReindexTest(BitcoinTestFramework): - - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - - def reindex(self, justchainstate=False): - self.nodes[0].generate(3) - blockcount = self.nodes[0].getblockcount() - self.stop_nodes() - extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]] - self.start_nodes(extra_args) - while self.nodes[0].getblockcount() < blockcount: - time.sleep(0.1) - assert_equal(self.nodes[0].getblockcount(), blockcount) - self.log.info("Success") - - def run_test(self): - self.reindex(False) - self.reindex(True) - self.reindex(False) - self.reindex(True) - -if __name__ == '__main__': - ReindexTest().main() diff --git a/test/functional/replace-by-fee.py b/test/functional/replace-by-fee.py deleted file mode 100755 index 6b7ab0f43e..0000000000 --- a/test/functional/replace-by-fee.py +++ /dev/null @@ -1,565 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the RBF code.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.script import * -from test_framework.mininode import * - -MAX_REPLACEMENT_LIMIT = 100 - -def txToHex(tx): - return bytes_to_hex_str(tx.serialize()) - -def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): - """Create a txout with a given amount and scriptPubKey - - Mines coins as needed. - - confirmed - txouts created will be confirmed in the blockchain; - unconfirmed otherwise. - """ - fee = 1*COIN - while node.getbalance() < satoshi_round((amount + fee)/COIN): - node.generate(100) - - new_addr = node.getnewaddress() - txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) - tx1 = node.getrawtransaction(txid, 1) - txid = int(txid, 16) - i = None - - for i, txout in enumerate(tx1['vout']): - if txout['scriptPubKey']['addresses'] == [new_addr]: - break - assert i is not None - - tx2 = CTransaction() - tx2.vin = [CTxIn(COutPoint(txid, i))] - tx2.vout = [CTxOut(amount, scriptPubKey)] - tx2.rehash() - - signed_tx = node.signrawtransaction(txToHex(tx2)) - - txid = node.sendrawtransaction(signed_tx['hex'], True) - - # If requested, ensure txouts are confirmed. - if confirmed: - mempool_size = len(node.getrawmempool()) - while mempool_size > 0: - node.generate(1) - new_size = len(node.getrawmempool()) - # Error out if we have something stuck in the mempool, as this - # would likely be a bug. 
- assert(new_size < mempool_size) - mempool_size = new_size - - return COutPoint(int(txid, 16), 0) - -class ReplaceByFeeTest(BitcoinTestFramework): - - def set_test_params(self): - self.num_nodes = 2 - self.extra_args= [["-maxorphantx=1000", - "-whitelist=127.0.0.1", - "-limitancestorcount=50", - "-limitancestorsize=101", - "-limitdescendantcount=200", - "-limitdescendantsize=101"], - ["-mempoolreplacement=0"]] - - def run_test(self): - # Leave IBD - self.nodes[0].generate(1) - - make_utxo(self.nodes[0], 1*COIN) - - # Ensure nodes are synced - self.sync_all() - - self.log.info("Running test simple doublespend...") - self.test_simple_doublespend() - - self.log.info("Running test doublespend chain...") - self.test_doublespend_chain() - - self.log.info("Running test doublespend tree...") - self.test_doublespend_tree() - - self.log.info("Running test replacement feeperkb...") - self.test_replacement_feeperkb() - - self.log.info("Running test spends of conflicting outputs...") - self.test_spends_of_conflicting_outputs() - - self.log.info("Running test new unconfirmed inputs...") - self.test_new_unconfirmed_inputs() - - self.log.info("Running test too many replacements...") - self.test_too_many_replacements() - - self.log.info("Running test opt-in...") - self.test_opt_in() - - self.log.info("Running test RPC...") - self.test_rpc() - - self.log.info("Running test prioritised transactions...") - self.test_prioritised_transactions() - - self.log.info("Passed") - - def test_simple_doublespend(self): - """Simple doublespend""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - # make_utxo may have generated a bunch of blocks, so we need to sync - # before we can spend the coins generated, or else the resulting - # transactions might not be accepted by our peers. 
- self.sync_all() - - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - self.sync_all() - - # Should fail because we haven't changed the fee - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] - tx1b_hex = txToHex(tx1b) - - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) - # This will raise an exception due to transaction replacement being disabled - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) - - # Extra 0.1 BTC fee - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx1b_hex = txToHex(tx1b) - # Replacement still disabled even with "enough fee" - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) - # Works when enabled - tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) - - mempool = self.nodes[0].getrawmempool() - - assert (tx1a_txid not in mempool) - assert (tx1b_txid in mempool) - - assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) - - # Second node is running mempoolreplacement=0, will not replace originally-seen txn - mempool = self.nodes[1].getrawmempool() - assert tx1a_txid in mempool - assert tx1b_txid not in mempool - - def test_doublespend_chain(self): - """Doublespend of a long chain""" - - initial_nValue = 50*COIN - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - - prevout = tx0_outpoint - remaining_value = initial_nValue - chain_txids = [] - while remaining_value > 10*COIN: - remaining_value -= 1*COIN - tx = CTransaction() - tx.vin = [CTxIn(prevout, nSequence=0)] - tx.vout = [CTxOut(remaining_value, CScript([1]))] - tx_hex = txToHex(tx) - txid = self.nodes[0].sendrawtransaction(tx_hex, True) - chain_txids.append(txid) - prevout = COutPoint(int(txid, 16), 0) - - # Whether the double-spend is allowed is evaluated by including all - # child fees - 40 BTC - so this attempt is rejected. 
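-        # (Worked numbers: the loop above spent 50 BTC down to 10 BTC in 1 BTC
-        # steps, creating 40 transactions that pay 40 BTC in fees in total.
-        # The first double-spend below leaves a 20 BTC output, i.e. a 30 BTC
-        # fee, less than the 40 BTC it would evict, so it is rejected; the
-        # second attempt pays a 49 BTC fee and is accepted.)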
- dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) - - # Accepted with sufficient fee - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - self.nodes[0].sendrawtransaction(dbl_tx_hex, True) - - mempool = self.nodes[0].getrawmempool() - for doublespent_txid in chain_txids: - assert(doublespent_txid not in mempool) - - def test_doublespend_tree(self): - """Doublespend of a big tree of transactions""" - - initial_nValue = 50*COIN - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - - def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): - if _total_txs is None: - _total_txs = [0] - if _total_txs[0] >= max_txs: - return - - txout_value = (initial_value - fee) // tree_width - if txout_value < fee: - return - - vout = [CTxOut(txout_value, CScript([i+1])) - for i in range(tree_width)] - tx = CTransaction() - tx.vin = [CTxIn(prevout, nSequence=0)] - tx.vout = vout - tx_hex = txToHex(tx) - - assert(len(tx.serialize()) < 100000) - txid = self.nodes[0].sendrawtransaction(tx_hex, True) - yield tx - _total_txs[0] += 1 - - txid = int(txid, 16) - - for i, txout in enumerate(tx.vout): - for x in branch(COutPoint(txid, i), txout_value, - max_txs, - tree_width=tree_width, fee=fee, - _total_txs=_total_txs): - yield x - - fee = int(0.0001*COIN) - n = MAX_REPLACEMENT_LIMIT - tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) - assert_equal(len(tree_txs), n) - - # Attempt double-spend, will fail because too little fee paid - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) - - # 1 BTC fee is enough - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - self.nodes[0].sendrawtransaction(dbl_tx_hex, True) - - mempool = self.nodes[0].getrawmempool() - - for tx in tree_txs: - tx.rehash() - assert (tx.hash not in mempool) - - # Try again, but with more total transactions than the "max txs - # double-spent at once" anti-DoS limit. 
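-        # (MAX_REPLACEMENT_LIMIT is 100, mirroring BIP125 rule 5; both 101 and
-        # 200 conflicting transactions must fail with "too many potential
-        # replacements", and the original txs must remain retrievable.)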
- for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): - fee = int(0.0001*COIN) - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) - assert_equal(len(tree_txs), n) - - dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) - # This will raise an exception - assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) - - for tx in tree_txs: - tx.rehash() - self.nodes[0].getrawtransaction(tx.hash) - - def test_replacement_feeperkb(self): - """Replacement requires fee-per-KB to be higher""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - self.nodes[0].sendrawtransaction(tx1a_hex, True) - - # Higher fee, but the fee per KB is much lower, so the replacement is - # rejected. - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] - tx1b_hex = txToHex(tx1b) - - # This will raise an exception due to insufficient fee - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) - - def test_spends_of_conflicting_outputs(self): - """Replacements that spend conflicting tx outputs are rejected""" - utxo1 = make_utxo(self.nodes[0], int(1.2*COIN)) - utxo2 = make_utxo(self.nodes[0], 3*COIN) - - tx1a = CTransaction() - tx1a.vin = [CTxIn(utxo1, nSequence=0)] - tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - tx1a_txid = int(tx1a_txid, 16) - - # Direct spend an output of the transaction we're replacing. - tx2 = CTransaction() - tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] - tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) - tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) - - # This will raise an exception - assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) - - # Spend tx1a's output to test the indirect case. 
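-        # (The rule is transitive: a replacement may not spend an output
-        # created by any transaction it would evict. tx2 below conflicts with
-        # tx1a via utxo1 while spending the output of tx1a's child tx1b, which
-        # would also be evicted, so it fails like the direct case above.)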
- tx1b = CTransaction() - tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] - tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1b_hex = txToHex(tx1b) - tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) - tx1b_txid = int(tx1b_txid, 16) - - tx2 = CTransaction() - tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), - CTxIn(COutPoint(tx1b_txid, 0))] - tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) - - # This will raise an exception - assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) - - def test_new_unconfirmed_inputs(self): - """Replacements that add new unconfirmed inputs are rejected""" - confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN)) - unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False) - - tx1 = CTransaction() - tx1.vin = [CTxIn(confirmed_utxo)] - tx1.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1_hex = txToHex(tx1) - self.nodes[0].sendrawtransaction(tx1_hex, True) - - tx2 = CTransaction() - tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] - tx2.vout = tx1.vout - tx2_hex = txToHex(tx2) - - # This will raise an exception - assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True) - - def test_too_many_replacements(self): - """Replacements that evict too many transactions are rejected""" - # Try directly replacing more than MAX_REPLACEMENT_LIMIT - # transactions - - # Start by creating a single transaction with many outputs - initial_nValue = 10*COIN - utxo = make_utxo(self.nodes[0], initial_nValue) - fee = int(0.0001*COIN) - split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) - - outputs = [] - for i in range(MAX_REPLACEMENT_LIMIT+1): - outputs.append(CTxOut(split_value, CScript([1]))) - - splitting_tx = CTransaction() - splitting_tx.vin = [CTxIn(utxo, nSequence=0)] - splitting_tx.vout = outputs - splitting_tx_hex = txToHex(splitting_tx) - - txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True) - txid = int(txid, 16) - - # Now spend each of those outputs individually - for i in range(MAX_REPLACEMENT_LIMIT+1): - tx_i = CTransaction() - tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] - tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))] - tx_i_hex = txToHex(tx_i) - self.nodes[0].sendrawtransaction(tx_i_hex, True) - - # Now create doublespend of the whole lot; should fail. 
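-        # (This conflicts with all MAX_REPLACEMENT_LIMIT+1 = 101 spends at
-        # once, one over the limit; dropping a single input later brings the
-        # eviction count down to 100, which is allowed.)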
- # Need a big enough fee to cover all spending transactions and have - # a higher fee rate - double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) - inputs = [] - for i in range(MAX_REPLACEMENT_LIMIT+1): - inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) - double_tx = CTransaction() - double_tx.vin = inputs - double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) - - # This will raise an exception - assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True) - - # If we remove an input, it should pass - double_tx = CTransaction() - double_tx.vin = inputs[0:-1] - double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) - self.nodes[0].sendrawtransaction(double_tx_hex, True) - - def test_opt_in(self): - """Replacing should only work if orig tx opted in""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - # Create a non-opting in transaction - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - # Shouldn't be able to double-spend - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx1b_hex = txToHex(tx1b) - - # This will raise an exception - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True) - - tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - # Create a different non-opting in transaction - tx2a = CTransaction() - tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] - tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx2a_hex = txToHex(tx2a) - tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) - - # Still shouldn't be able to double-spend - tx2b = CTransaction() - tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] - tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx2b_hex = txToHex(tx2b) - - # This will raise an exception - assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True) - - # Now create a new transaction that spends from tx1a and tx2a - # opt-in on one of the inputs - # Transaction should be replaceable on either input - - tx1a_txid = int(tx1a_txid, 16) - tx2a_txid = int(tx2a_txid, 16) - - tx3a = CTransaction() - tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), - CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] - tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] - tx3a_hex = txToHex(tx3a) - - self.nodes[0].sendrawtransaction(tx3a_hex, True) - - tx3b = CTransaction() - tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] - tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))] - tx3b_hex = txToHex(tx3b) - - tx3c = CTransaction() - tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] - tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))] - tx3c_hex = txToHex(tx3c) - - self.nodes[0].sendrawtransaction(tx3b_hex, True) - # If tx3b was accepted, tx3c won't look like a replacement, - # but make sure it is accepted anyway - self.nodes[0].sendrawtransaction(tx3c_hex, True) - - def test_prioritised_transactions(self): - # Ensure that fee deltas used via prioritisetransaction are - # correctly used by replacement logic - - # 1. 
Check that feeperkb uses modified fees - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) - tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) - - # Higher fee, but the actual fee per KB is much lower. - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] - tx1b_hex = txToHex(tx1b) - - # Verify tx1b cannot replace tx1a. - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) - - # Use prioritisetransaction to set tx1a's fee to 0. - self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN)) - - # Now tx1b should be able to replace tx1a - tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) - - assert(tx1b_txid in self.nodes[0].getrawmempool()) - - # 2. Check that absolute fee checks use modified fee. - tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) - - tx2a = CTransaction() - tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] - tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx2a_hex = txToHex(tx2a) - self.nodes[0].sendrawtransaction(tx2a_hex, True) - - # Lower fee, but we'll prioritise it - tx2b = CTransaction() - tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] - tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))] - tx2b.rehash() - tx2b_hex = txToHex(tx2b) - - # Verify tx2b cannot replace tx2a. - assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True) - - # Now prioritise tx2b to have a higher modified fee - self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN)) - - # tx2b should now be accepted - tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) - - assert(tx2b_txid in self.nodes[0].getrawmempool()) - - def test_rpc(self): - us0 = self.nodes[0].listunspent()[0] - ins = [us0] - outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)} - rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True) - rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False) - json0 = self.nodes[0].decoderawtransaction(rawtx0) - json1 = self.nodes[0].decoderawtransaction(rawtx1) - assert_equal(json0["vin"][0]["sequence"], 4294967293) - assert_equal(json1["vin"][0]["sequence"], 4294967295) - - rawtx2 = self.nodes[0].createrawtransaction([], outs) - frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) - frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) - - json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) - json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) - assert_equal(json0["vin"][0]["sequence"], 4294967293) - assert_equal(json1["vin"][0]["sequence"], 4294967294) - -if __name__ == '__main__': - ReplaceByFeeTest().main() diff --git a/test/functional/segwit.py b/test/functional/segwit.py deleted file mode 100755 index ba6373fa33..0000000000 --- a/test/functional/segwit.py +++ /dev/null @@ -1,638 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the SegWit changeover logic.""" - -from test_framework.address import ( - key_to_p2sh_p2wpkh, - key_to_p2wpkh, - program_to_witness, - script_to_p2sh_p2wsh, - script_to_p2wsh, -) -from test_framework.blocktools import witness_script, send_to_witness -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex -from test_framework.address import script_to_p2sh, key_to_p2pkh -from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE -from io import BytesIO - -NODE_0 = 0 -NODE_2 = 2 -WIT_V0 = 0 -WIT_V1 = 1 - -def getutxo(txid): - utxo = {} - utxo["vout"] = 0 - utxo["txid"] = txid - return utxo - -def find_unspent(node, min_value): - for utxo in node.listunspent(): - if utxo['amount'] >= min_value: - return utxo - -class SegWitTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. - self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], - ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"], - ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]] - - def setup_network(self): - super().setup_network() - connect_nodes(self.nodes[0], 2) - self.sync_all() - - def success_mine(self, node, txid, sign, redeem_script=""): - send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - block = node.generate(1) - assert_equal(len(node.getblock(block[0])["tx"]), 2) - sync_blocks(self.nodes) - - def skip_mine(self, node, txid, sign, redeem_script=""): - send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - block = node.generate(1) - assert_equal(len(node.getblock(block[0])["tx"]), 1) - sync_blocks(self.nodes) - - def fail_accept(self, node, error_msg, txid, sign, redeem_script=""): - assert_raises_rpc_error(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - - def fail_mine(self, node, txid, sign, redeem_script=""): - send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) - assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1) - sync_blocks(self.nodes) - - def run_test(self): - self.nodes[0].generate(161) #block 161 - - self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork") - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) - tmpl = self.nodes[0].getblocktemplate({}) - assert(tmpl['sizelimit'] == 1000000) - assert('weightlimit' not in tmpl) - assert(tmpl['sigoplimit'] == 20000) - assert(tmpl['transactions'][0]['hash'] == txid) - assert(tmpl['transactions'][0]['sigops'] == 2) - tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) - assert(tmpl['sizelimit'] == 1000000) - assert('weightlimit' not in 
tmpl) - assert(tmpl['sigoplimit'] == 20000) - assert(tmpl['transactions'][0]['hash'] == txid) - assert(tmpl['transactions'][0]['sigops'] == 2) - self.nodes[0].generate(1) #block 162 - - balance_presetup = self.nodes[0].getbalance() - self.pubkey = [] - p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh - wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness - for i in range(3): - newaddress = self.nodes[i].getnewaddress() - self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"]) - multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG]) - p2sh_addr = self.nodes[i].addwitnessaddress(newaddress) - bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False) - p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address'] - bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address'] - assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1])) - assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1])) - assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript)) - assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript)) - p2sh_ids.append([]) - wit_ids.append([]) - for v in range(2): - p2sh_ids[i].append([]) - wit_ids[i].append([]) - - for i in range(5): - for n in range(3): - for v in range(2): - wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999"))) - p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999"))) - - self.nodes[0].generate(1) #block 163 - sync_blocks(self.nodes) - - # Make sure all nodes recognize the transactions as theirs - assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50) - assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999")) - assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999")) - - self.nodes[0].generate(260) #block 423 - sync_blocks(self.nodes) - - self.log.info("Verify default node can't accept any witness format txs before fork") - # unsigned, no scriptsig - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False) - # unsigned with redeem script - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0])) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0])) - # signed - self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True) - self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True) - self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True) - self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True) - - self.log.info("Verify witness txs are skipped for mining before the fork") - self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) 
#block 424 - self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425 - self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426 - self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427 - - # TODO: An old node would see these txs without witnesses and be able to mine them - - self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork") - self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428 - self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429 - - self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid") - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False) - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False) - - self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork") - self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430 - self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431 - - self.log.info("Verify previous witness txs skipped for mining can now be mined") - assert_equal(len(self.nodes[2].getrawmempool()), 4) - block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3) - sync_blocks(self.nodes) - assert_equal(len(self.nodes[2].getrawmempool()), 0) - segwit_tx_list = self.nodes[2].getblock(block[0])["tx"] - assert_equal(len(segwit_tx_list), 5) - - self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag") - assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False)) - assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False)) - for i in range(len(segwit_tx_list)): - tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) - assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i])) - assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i])) - assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) - assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) - assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness())) - - self.log.info("Verify witness txs without witness data are invalid after the fork") - self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False) - self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False) - self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2])) - self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2])) - - self.log.info("Verify default node can now use witness txs") - self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432 - self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433 - self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434 - self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435 - - 
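        # (Under BIP141 the getblocktemplate limits are the legacy values
-        # scaled by the witness factor of 4: sigoplimit goes from 20000 to
-        # 80000 and the 1MB size limit becomes a 4M weight limit, so the same
-        # P2PKH spend that counted 2 sigops before the fork now counts 8.)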
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork") - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) - tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) - assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data - assert(tmpl['weightlimit'] == 4000000) - assert(tmpl['sigoplimit'] == 80000) - assert(tmpl['transactions'][0]['txid'] == txid) - assert(tmpl['transactions'][0]['sigops'] == 8) - - self.nodes[0].generate(1) # Mine a block to clear the gbt cache - - self.log.info("Non-segwit miners are able to use GBT response after activation.") - # Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) -> - # tx2 (segwit input, paying to a non-segwit output) -> - # tx3 (non-segwit input, paying to a non-segwit output). - # tx1 is allowed to appear in the block, but no others. - txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996")) - hex_tx = self.nodes[0].gettransaction(txid)['hex'] - tx = FromHex(CTransaction(), hex_tx) - assert(tx.wit.is_null()) # This should not be a segwit input - assert(txid1 in self.nodes[0].getrawmempool()) - - # Now create tx2, which will spend from txid1. - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b'')) - tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE]))) - tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex'] - txid2 = self.nodes[0].sendrawtransaction(tx2_hex) - tx = FromHex(CTransaction(), tx2_hex) - assert(not tx.wit.is_null()) - - # Now create tx3, which will spend from txid2 - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b"")) - tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee - tx.calc_sha256() - txid3 = self.nodes[0].sendrawtransaction(ToHex(tx)) - assert(tx.wit.is_null()) - assert(txid3 in self.nodes[0].getrawmempool()) - - # Now try calling getblocktemplate() without segwit support. - template = self.nodes[0].getblocktemplate() - - # Check that tx1 is the only transaction of the 3 in the template. - template_txids = [ t['txid'] for t in template['transactions'] ] - assert(txid2 not in template_txids and txid3 not in template_txids) - assert(txid1 in template_txids) - - # Check that running with segwit support results in all 3 being included. - template = self.nodes[0].getblocktemplate({"rules": ["segwit"]}) - template_txids = [ t['txid'] for t in template['transactions'] ] - assert(txid1 in template_txids) - assert(txid2 in template_txids) - assert(txid3 in template_txids) - - # Check that wtxid is properly reported in mempool entry - assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True)) - - # Mine a block to clear the gbt cache again. 
- self.nodes[0].generate(1) - - self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent") - - # Some public keys to be used later - pubkeys = [ - "0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb - "02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97 - "04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV - "02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd - "036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66 - "0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K - "0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ - ] - - # Import a compressed key and an uncompressed key, generate some multisig addresses - self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn") - uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"] - self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR") - compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"] - assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False)) - assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True)) - - self.nodes[0].importpubkey(pubkeys[0]) - compressed_solvable_address = [key_to_p2pkh(pubkeys[0])] - self.nodes[0].importpubkey(pubkeys[1]) - compressed_solvable_address.append(key_to_p2pkh(pubkeys[1])) - self.nodes[0].importpubkey(pubkeys[2]) - uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])] - - spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress - spendable_after_importaddress = [] # These outputs should be seen after importaddress - solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable - unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress - solvable_anytime = [] # These outputs should be solvable after importpubkey - unseen_anytime = [] # These outputs should never be seen - - uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address']) - uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address']) - compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address']) - uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address']) - compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address']) - compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], 
compressed_solvable_address[1]])['address'])
-        unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
-
-        # Test multisig_without_privkey:
-        # We have 2 public keys without private keys; use addmultisigaddress to add them to the wallet.
-        # Money sent to the P2SH of this multisig should only be seen after importaddress with the base58 P2SH address.
-
-        multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
-        script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
-        solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
-
-        for i in compressed_spendable_address:
-            v = self.nodes[0].validateaddress(i)
-            if (v['isscript']):
-                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
-                # Bare and P2SH multisig with compressed keys should always be spendable
-                spendable_anytime.extend([bare, p2sh])
-                # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
-                spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
-            else:
-                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
-                # Normal P2PKH and P2PK with compressed keys should always be spendable
-                spendable_anytime.extend([p2pkh, p2pk])
-                # P2SH_P2PK and P2SH_P2PKH with compressed keys are spendable after direct importaddress
-                spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
-                # P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
-                spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
-
-        for i in uncompressed_spendable_address:
-            v = self.nodes[0].validateaddress(i)
-            if (v['isscript']):
-                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
-                # Bare and P2SH multisig with uncompressed keys should always be spendable
-                spendable_anytime.extend([bare, p2sh])
-                # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
-                unseen_anytime.extend([p2wsh, p2sh_p2wsh])
-            else:
-                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
-                # Normal P2PKH and P2PK with uncompressed keys should always be spendable
-                spendable_anytime.extend([p2pkh, p2pk])
-                # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
-                spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
-                # Witness output types with uncompressed keys are never seen
-                unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
-
-        for i in compressed_solvable_address:
-            v = self.nodes[0].validateaddress(i)
-            if (v['isscript']):
-                # Multisig without private keys is not seen after addmultisigaddress, but seen after importaddress
-                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
-                solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
-            else:
-                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
-                # Normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
-                solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
-                # P2SH_P2PK and P2SH_P2PKH with compressed keys are seen after direct importaddress
-                solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk,
p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) - - for i in uncompressed_solvable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress - solvable_after_importaddress.extend([bare, p2sh]) - # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen - unseen_anytime.extend([p2wsh, p2sh_p2wsh]) - else: - [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) - # normal P2PKH and P2PK with uncompressed keys should always be seen - solvable_anytime.extend([p2pkh, p2pk]) - # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress - solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) - # Witness output types with uncompressed keys are never seen - unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) - - op1 = CScript([OP_1]) - op0 = CScript([OP_0]) - # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V - unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)] - unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D") - unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG]) - unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)]) - p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL]) - p2wshop1 = CScript([OP_0, sha256(op1)]) - unsolvable_after_importaddress.append(unsolvablep2pkh) - unsolvable_after_importaddress.append(unsolvablep2wshp2pkh) - unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script - unsolvable_after_importaddress.append(p2wshop1) - unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided - unsolvable_after_importaddress.append(p2shop0) - - spendable_txid = [] - solvable_txid = [] - spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2)) - solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1)) - self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0) - - importlist = [] - for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address: - v = self.nodes[0].validateaddress(i) - if (v['isscript']): - bare = hex_str_to_bytes(v['hex']) - importlist.append(bytes_to_hex_str(bare)) - importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)]))) - else: - pubkey = hex_str_to_bytes(v['pubkey']) - p2pk = CScript([pubkey, OP_CHECKSIG]) - p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG]) - importlist.append(bytes_to_hex_str(p2pk)) - importlist.append(bytes_to_hex_str(p2pkh)) - importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)]))) - importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)]))) - importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)]))) - - importlist.append(bytes_to_hex_str(unsolvablep2pkh)) - importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh)) - importlist.append(bytes_to_hex_str(op1)) - 
-        importlist.append(bytes_to_hex_str(p2wshop1))
-
-        for i in importlist:
-            # Import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
-            # exceptions and continue.
-            try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
-
-        self.nodes[0].importaddress(script_to_p2sh(op0))  # import OP_0 as address only
-        self.nodes[0].importaddress(multisig_without_privkey_address)  # Test multisig_without_privkey
-
-        spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
-        solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
-        self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
-        self.mine_and_test_listunspent(unseen_anytime, 0)
-
-        # addwitnessaddress should refuse to return a witness address if an uncompressed key is used
-        # note that no witness address should be returned for unsolvable addresses
-        for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
-            assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
-
-        # addwitnessaddress should return a witness address even if the keys are not in the wallet
-        self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
-
-        for i in compressed_spendable_address + compressed_solvable_address:
-            witaddress = self.nodes[0].addwitnessaddress(i)
-            # addwitnessaddress should return the same address if it is a known P2SH-witness address
-            assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
-
-        spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
-        solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
-        self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
-        self.mine_and_test_listunspent(unseen_anytime, 0)
-
-        # Repeat some tests. This time we don't add witness scripts with importaddress
-        # Import a compressed key and an uncompressed key, then generate some multisig addresses
-        self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
-        uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
-        self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
-        compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
-
-        self.nodes[0].importpubkey(pubkeys[5])
-        compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
-        self.nodes[0].importpubkey(pubkeys[6])
-        uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
-
-        spendable_after_addwitnessaddress = []  # These outputs should be spendable after addwitnessaddress
-        solvable_after_addwitnessaddress = []   # These outputs should be seen after addwitnessaddress but not spendable
-        solvable_anytime = []                   # These outputs should be solvable after importpubkey
-        unseen_anytime = []                     # These outputs should never be seen
-
-        uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
-        uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
-        compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
-        uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
-        compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
-
-        premature_witaddress = []
-
-        for i in compressed_spendable_address:
-            v = self.nodes[0].validateaddress(i)
-            if (v['isscript']):
-                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
-                # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
-                spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
-                premature_witaddress.append(script_to_p2sh(p2wsh))
-            else:
-                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
-                # P2WPKH and P2SH_P2WPKH with compressed keys are always spendable
-                spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
-
-        for i in uncompressed_spendable_address + uncompressed_solvable_address:
-            v = self.nodes[0].validateaddress(i)
-            if (v['isscript']):
-                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
-                # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
-                unseen_anytime.extend([p2wsh, p2sh_p2wsh])
-            else:
-                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
-                # P2WPKH and P2SH_P2WPKH with uncompressed keys are never seen
-                unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
-
-        for i in compressed_solvable_address:
-            v = self.nodes[0].validateaddress(i)
-            if (v['isscript']):
-                # P2WSH multisig without private keys is seen after addwitnessaddress
-                [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
-                solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
-                premature_witaddress.append(script_to_p2sh(p2wsh))
-            else:
-                [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
-                # P2WPKH and P2SH_P2WPKH with compressed keys are always solvable
-                solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
-
-        self.mine_and_test_listunspent(spendable_anytime, 2)
-        self.mine_and_test_listunspent(solvable_anytime, 1)
-        self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
-
-        # addwitnessaddress should refuse to return a witness address if an uncompressed key is used
-        # note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
-        # premature_witaddress entries are not accepted until the underlying script has first been added with addwitnessaddress
-        for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
-            # This will raise an exception
-            assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
-
-        # After importaddress it should pass addwitnessaddress
-        v = self.nodes[0].validateaddress(compressed_solvable_address[1])
-        self.nodes[0].importaddress(v['hex'], "", False, True)
-        for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
-            witaddress = self.nodes[0].addwitnessaddress(i)
-            assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
-
-        spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
-        solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
-        self.mine_and_test_listunspent(unseen_anytime, 0)
-
-        # Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
-        v1_addr = program_to_witness(1, [3, 5])
-        v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])], {v1_addr: 1})
-        v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
-        assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
-        assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
-
-        # Check that spendable outputs are really spendable
-        self.create_and_mine_tx_from_txids(spendable_txid)
-
-        # Import all the private keys so the solvable addresses become spendable
-        self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
-        self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
-        self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
-        self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
-        self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
-        self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
-        self.create_and_mine_tx_from_txids(solvable_txid)
-
-        # Test that importing native P2WPKH/P2WSH scripts works
-        for use_p2wsh in [False, True]:
-            if use_p2wsh:
-                scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
-                transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
-            else:
-                scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
-                transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
-
-            self.nodes[1].importaddress(scriptPubKey, "", False)
-            rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
-            rawtxfund = self.nodes[1].signrawtransaction(rawtxfund)["hex"]
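            # (Annotation, not in the original file: fundrawtransaction adds
            # wallet inputs and a change output to the hand-built transaction
            # paying the imported script, and signrawtransaction signs those
            # inputs, so the broadcast below succeeds even though the watched
            # output itself is watch-only.)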
- txid = self.nodes[1].sendrawtransaction(rawtxfund) - - assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) - assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) - - # Assert it is properly saved - self.stop_node(1) - self.start_node(1) - assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid) - assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid) - - def mine_and_test_listunspent(self, script_list, ismine): - utxo = find_unspent(self.nodes[0], 50) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout']))) - for i in script_list: - tx.vout.append(CTxOut(10000000, i)) - tx.rehash() - signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] - txid = self.nodes[0].sendrawtransaction(signresults, True) - self.nodes[0].generate(1) - sync_blocks(self.nodes) - watchcount = 0 - spendcount = 0 - for i in self.nodes[0].listunspent(): - if (i['txid'] == txid): - watchcount += 1 - if (i['spendable'] == True): - spendcount += 1 - if (ismine == 2): - assert_equal(spendcount, len(script_list)) - elif (ismine == 1): - assert_equal(watchcount, len(script_list)) - assert_equal(spendcount, 0) - else: - assert_equal(watchcount, 0) - return txid - - def p2sh_address_to_script(self,v): - bare = CScript(hex_str_to_bytes(v['hex'])) - p2sh = CScript(hex_str_to_bytes(v['scriptPubKey'])) - p2wsh = CScript([OP_0, sha256(bare)]) - p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL]) - return([bare, p2sh, p2wsh, p2sh_p2wsh]) - - def p2pkh_address_to_script(self,v): - pubkey = hex_str_to_bytes(v['pubkey']) - p2wpkh = CScript([OP_0, hash160(pubkey)]) - p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL]) - p2pk = CScript([pubkey, OP_CHECKSIG]) - p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey'])) - p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL]) - p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL]) - p2wsh_p2pk = CScript([OP_0, sha256(p2pk)]) - p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)]) - p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL]) - p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL]) - return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] - - def create_and_mine_tx_from_txids(self, txids, success = True): - tx = CTransaction() - for i in txids: - txtmp = CTransaction() - txraw = self.nodes[0].getrawtransaction(i) - f = BytesIO(hex_str_to_bytes(txraw)) - txtmp.deserialize(f) - for j in range(len(txtmp.vout)): - tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j))) - tx.vout.append(CTxOut(0, CScript())) - tx.rehash() - signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] - self.nodes[0].sendrawtransaction(signresults, True) - self.nodes[0].generate(1) - sync_blocks(self.nodes) - - -if __name__ == '__main__': - SegWitTest().main() diff --git a/test/functional/smartfees.py b/test/functional/smartfees.py deleted file mode 100755 index 68453e50f4..0000000000 --- a/test/functional/smartfees.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
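# An aside on the fee randomization used below (illustrative note, not part of
# the original file): small_txpuzzle_randfee draws fee_increment * 1.1892**k
# with k in 0..28. Since 1.1892 is approximately 2**0.25, the multiplier spans
# 1x up to 1.1892**28 ~= 2**7 = 128x, which is where the module's
# "1-128 * fee_increment" comment comes from.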
-"""Test fee estimation code."""
-
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
-from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
-from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
-
-# Construct two trivial P2SH scripts and the scriptSigs that spend them,
-# so we can create many transactions without needing to spend time signing.
-redeem_script_1 = CScript([OP_1, OP_DROP])
-redeem_script_2 = CScript([OP_2, OP_DROP])
-P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
-P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
-
-# The associated scriptSigs that satisfy P2SH_1 and P2SH_2
-SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
-
-log = None  # Set by EstimateFeeTest.run_test so the helper functions can log
-
-def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
-    """
-    Create and send a transaction with a random fee.
-    The transaction pays to a trivial P2SH script, and assumes that its inputs
-    are of the same form.
-    The function takes a list of confirmed outputs and a list of unconfirmed
-    outputs and attempts to use the confirmed list first for its inputs.
-    It adds the newly created outputs to the unconfirmed list.
-    Returns (raw transaction, fee).
-    """
-    # It's best to exponentially distribute our random fees
-    # because the buckets are exponentially spaced.
-    # Exponentially distributed from 1-128 * fee_increment
-    rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
-    # Total fee ranges from min_fee to min_fee + 127*fee_increment
-    fee = min_fee - fee_increment + satoshi_round(rand_fee)
-    tx = CTransaction()
-    total_in = Decimal("0.00000000")
-    while total_in <= (amount + fee) and len(conflist) > 0:
-        t = conflist.pop(0)
-        total_in += t["amount"]
-        tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
-    if total_in <= amount + fee:
-        while total_in <= (amount + fee) and len(unconflist) > 0:
-            t = unconflist.pop(0)
-            total_in += t["amount"]
-            tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
-        if total_in <= amount + fee:
-            raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
-    tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1))
-    tx.vout.append(CTxOut(int(amount*COIN), P2SH_2))
-    # These transactions don't need to be signed, but we still have to insert
-    # the scriptSig that will satisfy the scriptPubKey.
-    for inp in tx.vin:
-        inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
-    txid = from_node.sendrawtransaction(ToHex(tx), True)
-    unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
-    unconflist.append({"txid": txid, "vout": 1, "amount": amount})
-
-    return (ToHex(tx), fee)
-
-def split_inputs(from_node, txins, txouts, initial_split = False):
-    """
-    We need to generate a lot of inputs so we can generate a ton of transactions.
-    This function takes an input from txins, then creates and sends a transaction
-    that splits the value into two outputs, which are appended to txouts.
-    Previously this was designed to create small inputs so they wouldn't have
-    a high coin age back when the notion of priority still existed.
-    """
-    prevtxout = txins.pop()
-    tx = CTransaction()
-    tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
-
-    half_change = satoshi_round(prevtxout["amount"]/2)
-    rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
-    tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1))
-    tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2))
-
-    # If this is the initial split we actually need to sign the transaction;
-    # otherwise we just need to insert the proper scriptSig.
-    if initial_split:
-        completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
-    else:
-        tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
-        completetx = ToHex(tx)
-    txid = from_node.sendrawtransaction(completetx, True)
-    txouts.append({"txid": txid, "vout": 0, "amount": half_change})
-    txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
-
-def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
-    """
-    This function calls estimatefee and verifies that the estimates
-    meet certain invariants.
-    """
-    all_estimates = [node.estimatefee(i) for i in range(1, 26)]
-    if print_estimates:
-        log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
-    delta = 1.0e-6  # account for rounding error
-    last_e = max(fees_seen)
-    for e in [x for x in all_estimates if x >= 0]:
-        # Estimates should be within the bounds of what transaction fees actually were:
-        if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
-            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
-                                 %(float(e), min(fees_seen), max(fees_seen)))
-        # Estimates should be monotonically decreasing
-        if float(e)-delta > last_e:
-            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
-                                 %(float(e),float(last_e)))
-        last_e = e
-    valid_estimate = False
-    invalid_estimates = 0
-    for i, e in enumerate(all_estimates):  # estimate is for i+1
-        if e >= 0:
-            valid_estimate = True
-            if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
-                assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta)
-
-        else:
-            invalid_estimates += 1
-
-            # estimatesmartfee should still be valid
-            approx_estimate = node.estimatesmartfee(i+1)["feerate"]
-            answer_found = node.estimatesmartfee(i+1)["blocks"]
-            assert(approx_estimate > 0)
-            assert(answer_found > i+1)
-
-            # Once we're at a high enough confirmation count to give an estimate,
-            # we should have estimates for all higher confirmation counts
-            if valid_estimate:
-                raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
-
-    # Check on the expected number of different confirmation counts
-    # that we might not have valid estimates for
-    if invalid_estimates > max_invalid:
-        raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
-    return all_estimates
-
-
-class EstimateFeeTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.num_nodes = 3
-
-    def setup_network(self):
-        """
-        We'll set up the network to have 3 nodes that all mine with different parameters.
-        But first we need to use one node to create a lot of outputs
-        which we will use to generate our transactions.
-        """
-        self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
-                                      ["-blockmaxsize=17000", "-maxorphantx=1000", "-deprecatedrpc=estimatefee"],
-                                      ["-blockmaxsize=8000", "-maxorphantx=1000"]])
-        # Use node0 to mine blocks for input splitting.
-        # Node1 mines small blocks, but ones that are big enough to keep up with the
-        # expected transaction rate. NOTE: the CreateNewBlock code starts counting
-        # block size at 1,000 bytes, so 17k leaves room for 110 or so transactions.
-        # Node2 is a stingy miner that produces blocks which are too small
-        # (room for only 55 or so transactions).
-
-
-    def transact_and_mine(self, numblocks, mining_node):
-        min_fee = Decimal("0.00001")
-        # We will now mine numblocks blocks, generating on average 100 transactions between each block.
-        # We shuffle our confirmed txout set before each set of transactions.
-        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible,
-        # resorting to txs that depend on the mempool when those run out.
-        for i in range(numblocks):
-            random.shuffle(self.confutxo)
-            for j in range(random.randrange(100-50,100+50)):
-                from_index = random.randint(1,2)
-                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
-                                                      self.memutxo, Decimal("0.005"), min_fee, min_fee)
-                tx_kbytes = (len(txhex) // 2) / 1000.0
-                self.fees_per_kb.append(float(fee)/tx_kbytes)
-            sync_mempools(self.nodes[0:3], wait=.1)
-            mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
-            sync_blocks(self.nodes[0:3], wait=.1)
-            # update which txouts are confirmed
-            newmem = []
-            for utx in self.memutxo:
-                if utx["txid"] in mined:
-                    self.confutxo.append(utx)
-                else:
-                    newmem.append(utx)
-            self.memutxo = newmem
-
-    def run_test(self):
-        self.log.info("This test is time-consuming, please be patient")
-        self.log.info("Splitting inputs so we can generate txs")
-
-        # Make log handler available to helper functions
-        global log
-        log = self.log
-
-        # Start node0
-        self.start_node(0)
-        self.txouts = []
-        self.txouts2 = []
-        # Split a coinbase into two transaction puzzle outputs
-        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
-
-        # Mine
-        while (len(self.nodes[0].getrawmempool()) > 0):
-            self.nodes[0].generate(1)
-
-        # Repeatedly split those 2 outputs, doubling twice for each rep.
-        # Use txouts to monitor the available utxos, since these won't be tracked in the wallet.
-        reps = 0
-        while (reps < 5):
-            # Double txouts to txouts2
-            while (len(self.txouts)>0):
-                split_inputs(self.nodes[0], self.txouts, self.txouts2)
-            while (len(self.nodes[0].getrawmempool()) > 0):
-                self.nodes[0].generate(1)
-            # Double txouts2 to txouts
-            while (len(self.txouts2)>0):
-                split_inputs(self.nodes[0], self.txouts2, self.txouts)
-            while (len(self.nodes[0].getrawmempool()) > 0):
-                self.nodes[0].generate(1)
-            reps += 1
-        self.log.info("Finished splitting")
-
-        # Now we can connect the other nodes; we didn't want to connect them earlier
-        # so that the estimates would not be affected by the splitting transactions.
-        self.start_node(1)
-        self.start_node(2)
-        connect_nodes(self.nodes[1], 0)
-        connect_nodes(self.nodes[0], 2)
-        connect_nodes(self.nodes[2], 1)
-
-        self.sync_all()
-
-        self.fees_per_kb = []
-        self.memutxo = []
-        self.confutxo = self.txouts  # Start with the set of confirmed txouts after splitting
-        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
-
-        for i in range(2):
-            self.log.info("Creating transactions and mining them with a block size that can't keep up")
-            # Create transactions and mine 10 small blocks with
node 2, but create txs faster than we can mine - self.transact_and_mine(10, self.nodes[2]) - check_estimates(self.nodes[1], self.fees_per_kb, 14) - - self.log.info("Creating transactions and mining them at a block size that is just big enough") - # Generate transactions while mining 10 more blocks, this time with node1 - # which mines blocks with capacity just above the rate that transactions are being created - self.transact_and_mine(10, self.nodes[1]) - check_estimates(self.nodes[1], self.fees_per_kb, 2) - - # Finish by mining a normal-sized block: - while len(self.nodes[1].getrawmempool()) > 0: - self.nodes[1].generate(1) - - sync_blocks(self.nodes[0:3], wait=.1) - self.log.info("Final estimates after emptying mempools") - check_estimates(self.nodes[1], self.fees_per_kb, 2) - -if __name__ == '__main__': - EstimateFeeTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index b8b6ee98bf..6aad0f9b9d 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -58,10 +58,10 @@ BASE_SCRIPTS= [ 'wallet-hd.py', 'walletbackup.py', # vv Tests less than 5m vv - 'p2p-fullblocktest.py', + 'feature_block.py', 'fundrawtransaction.py', 'p2p-compactblocks.py', - 'segwit.py', + 'feature_segwit.py', # vv Tests less than 2m vv 'wallet.py', 'wallet-accounts.py', @@ -76,10 +76,10 @@ BASE_SCRIPTS= [ 'merkle_blocks.py', 'receivedby.py', 'abandonconflict.py', - 'bip68-112-113-p2p.py', + 'feature_csv_activation.py', 'rawtransactions.py', 'address_types.py', - 'reindex.py', + 'feature_reindex.py', # vv Tests less than 30s vv 'keypool-topup.py', 'zmq_test.py', @@ -97,7 +97,7 @@ BASE_SCRIPTS= [ 'multiwallet.py --usecli', 'httpbasics.py', 'multi_rpc.py', - 'proxy_test.py', + 'feature_proxy.py', 'signrawtransactions.py', 'disconnect_ban.py', 'decodescript.py', @@ -110,11 +110,11 @@ BASE_SCRIPTS= [ 'prioritise_transaction.py', 'invalidblockrequest.py', 'invalidtxrequest.py', - 'p2p-versionbits-warning.py', + 'feature_versionbits_warning.py', 'preciousblock.py', 'importprunedfunds.py', 'signmessages.py', - 'nulldummy.py', + 'feature_nulldummy.py', 'import-rescan.py', 'mining.py', 'bumpfee.py', @@ -122,17 +122,17 @@ BASE_SCRIPTS= [ 'listsinceblock.py', 'p2p-leaktests.py', 'wallet-encryption.py', - 'bipdersig-p2p.py', - 'bip65-cltv-p2p.py', + 'feature_dersig.py', + 'feature_cltv.py', 'uptime.py', 'resendwallettransactions.py', - 'minchainwork.py', + 'feature_minchainwork.py', 'p2p-fingerprint.py', - 'uacomment.py', + 'feature_uacomment.py', 'p2p-acceptblock.py', 'feature_logging.py', 'node_network_limited.py', - 'conf_args.py', + 'feature_config_args.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] @@ -140,29 +140,29 @@ BASE_SCRIPTS= [ EXTENDED_SCRIPTS = [ # These tests are not run by the travis build process. 
# Longest test should go first, to favor running tests in parallel - 'pruning.py', + 'feature_pruning.py', # vv Tests less than 20m vv - 'smartfees.py', + 'feature_fee_estimation.py', # vv Tests less than 5m vv - 'maxuploadtarget.py', + 'feature_maxuploadtarget.py', 'mempool_packages.py', - 'dbcrash.py', + 'feature_dbcrash.py', # vv Tests less than 2m vv - 'bip68-sequence.py', + 'feature_bip68_sequence.py', 'getblocktemplate_longpoll.py', 'p2p-timeouts.py', # vv Tests less than 60s vv - 'bip9-softforks.py', + 'feature_bip9_softforks.py', 'p2p-feefilter.py', 'rpcbind_test.py', # vv Tests less than 30s vv - 'assumevalid.py', + 'feature_assumevalid.py', 'example_test.py', 'txn_doublespend.py', 'txn_clone.py --mineblock', - 'notifications.py', + 'feature_notifications.py', 'invalidateblock.py', - 'replace-by-fee.py', + 'feature_rbf.py', ] # Place EXTENDED_SCRIPTS first since it has the 3 longest running tests @@ -474,7 +474,7 @@ class TestResult(): def check_script_prefixes(): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" - EXPECTED_VIOLATION_COUNT = 77 + EXPECTED_VIOLATION_COUNT = 60 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming diff --git a/test/functional/uacomment.py b/test/functional/uacomment.py deleted file mode 100755 index 0b2c64ab69..0000000000 --- a/test/functional/uacomment.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2017 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the -uacomment option.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - -class UacommentTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - - def run_test(self): - self.log.info("test multiple -uacomment") - test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1] - assert_equal(test_uacomment, "(testnode0)") - - self.restart_node(0, ["-uacomment=foo"]) - foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1] - assert_equal(foo_uacomment, "(testnode0; foo)") - - self.log.info("test -uacomment max length") - self.stop_node(0) - expected = "Total length of network version string (286) exceeds maximum length (256). Reduce the number or size of uacomments." - self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected) - - self.log.info("test -uacomment unsafe characters") - for unsafe_char in ['/', ':', '(', ')']: - expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters" - self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected) - -if __name__ == '__main__': - UacommentTest().main() -- cgit v1.2.3
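A note on the subversion slicing in the uacomment test above: the user agent
string has the form "/Satoshi:<version>(<comment1>; <comment2>)/", with all
-uacomment values joined by "; " inside a single pair of parentheses before
the trailing "/". That is why the test slices [-12:-1] for the 11-character
"(testnode0)" and [-17:-1] for the 16-character "(testnode0; foo)". A minimal
illustration (the version number here is made up; only the comment format
matters):

    subversion = "/Satoshi:0.15.99(testnode0; foo)/"
    assert subversion[-17:-1] == "(testnode0; foo)"   # 16 chars before the final "/"
    assert "/Satoshi:0.15.99(testnode0)/"[-12:-1] == "(testnode0)"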