Diffstat (limited to 'test')
46 files changed, 1091 insertions, 260 deletions
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index 3e882f47b8..2842d82d80 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -11,13 +11,8 @@ The assumeutxo value generated and used here is committed to in ## Possible test improvements -- TODO: test what happens with -reindex and -reindex-chainstate before the - snapshot is validated, and make sure it's deleted successfully. - Interesting test cases could be loading an assumeutxo snapshot file with: -- TODO: Valid hash but invalid snapshot file (bad coin height or - bad other serialization) - TODO: Valid snapshot file, but referencing a snapshot block that turns out to be invalid, or has an invalid parent - TODO: Valid snapshot file and snapshot block, but the block is not on the @@ -101,18 +96,23 @@ class AssumeutxoTest(BitcoinTestFramework): self.log.info(" - snapshot file with alternated UTXO data") cases = [ - [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b"], # wrong outpoint hash - [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad"], # wrong outpoint index - [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8"], # wrong coin code VARINT((coinbase ? 1 : 0) | (height << 1)) - [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5"], # another wrong coin code + # (content, offset, wrong_hash, custom_message) + [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b", None], # wrong outpoint hash + [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad", None], # wrong outpoint index + [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8", None], # wrong coin code VARINT + [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5", None], # another wrong coin code + [b"\x84\x58", 36, None, "[snapshot] bad snapshot data after deserializing 0 coins"], # wrong coin case with height 364 and coinbase 0 + [b"\xCA\xD2\x8F\x5A", 41, None, "[snapshot] bad snapshot data after deserializing 0 coins - bad tx out value"], # Amount exceeds MAX_MONEY ] - for content, offset, wrong_hash in cases: + for content, offset, wrong_hash, custom_message in cases: with open(bad_snapshot_path, "wb") as f: f.write(valid_snapshot_contents[:(32 + 8 + offset)]) f.write(content) f.write(valid_snapshot_contents[(32 + 8 + offset + len(content)):]) - expected_error(log_msg=f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}") + + log_msg = custom_message if custom_message is not None else f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}" + expected_error(log_msg=log_msg) def test_headers_not_synced(self, valid_snapshot_path): for node in self.nodes[1:]: @@ -379,6 +379,17 @@ class AssumeutxoTest(BitcoinTestFramework): assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT) assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT) + for reindex_arg in ['-reindex=1', '-reindex-chainstate=1']: + self.log.info(f"Check that restarting with {reindex_arg} will delete the snapshot chainstate") + self.restart_node(2, extra_args=[reindex_arg, *self.extra_args[2]]) + assert_equal(1, len(n2.getchainstates()["chainstates"])) + for i in range(1, 300): + block = 
n0.getblock(n0.getblockhash(i), 0) + n2.submitheader(block) + loaded = n2.loadtxoutset(dump_output['path']) + assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT) + assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT) + normal, snapshot = n2.getchainstates()['chainstates'] assert_equal(normal['blocks'], START_HEIGHT) assert_equal(normal.get('snapshot_blockhash'), None) diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index 613d2eab14..982fa79915 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -159,7 +159,7 @@ class AssumeValidTest(BitcoinTestFramework): for i in range(2202): p2p1.send_message(msg_block(self.blocks[i])) # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. - p2p1.sync_with_ping(960) + p2p1.sync_with_ping(timeout=960) assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) diff --git a/test/functional/feature_framework_unit_tests.py b/test/functional/feature_framework_unit_tests.py new file mode 100755 index 0000000000..c9754e083c --- /dev/null +++ b/test/functional/feature_framework_unit_tests.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017-2024 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Framework unit tests + +Unit tests for the test framework. +""" + +import sys +import unittest + +from test_framework.test_framework import TEST_EXIT_PASSED, TEST_EXIT_FAILED + +# List of framework modules containing unit tests. Should be kept in sync with +# the output of `git grep unittest.TestCase ./test/functional/test_framework` +TEST_FRAMEWORK_MODULES = [ + "address", + "crypto.bip324_cipher", + "blocktools", + "crypto.chacha20", + "crypto.ellswift", + "key", + "messages", + "crypto.muhash", + "crypto.poly1305", + "crypto.ripemd160", + "script", + "segwit_addr", + "wallet_util", +] + + +def run_unit_tests(): + test_framework_tests = unittest.TestSuite() + for module in TEST_FRAMEWORK_MODULES: + test_framework_tests.addTest( + unittest.TestLoader().loadTestsFromName(f"test_framework.{module}") + ) + result = unittest.TextTestRunner(stream=sys.stdout, verbosity=1, failfast=True).run( + test_framework_tests + ) + if not result.wasSuccessful(): + sys.exit(TEST_EXIT_FAILED) + sys.exit(TEST_EXIT_PASSED) + + +if __name__ == "__main__": + run_unit_tests() + diff --git a/test/functional/feature_index_prune.py b/test/functional/feature_index_prune.py index b3bf35b524..66c0a4f615 100755 --- a/test/functional/feature_index_prune.py +++ b/test/functional/feature_index_prune.py @@ -31,7 +31,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework): expected_stats = { 'coinstatsindex': {'synced': True, 'best_block_height': height} } - self.wait_until(lambda: self.nodes[1].getindexinfo() == expected_stats) + self.wait_until(lambda: self.nodes[1].getindexinfo() == expected_stats, timeout=150) expected = {**expected_filter, **expected_stats} self.wait_until(lambda: self.nodes[2].getindexinfo() == expected) diff --git a/test/functional/feature_maxtipage.py b/test/functional/feature_maxtipage.py index 51f37ef1e0..a1774a5395 100755 --- a/test/functional/feature_maxtipage.py +++ b/test/functional/feature_maxtipage.py @@ -43,6 +43,10 @@ class MaxTipAgeTest(BitcoinTestFramework): self.generate(node_miner, 1) 
assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], False) + # reset time to system time so we don't have a time offset with the ibd node the next + # time we connect to it, ensuring TimeOffsets::WarnIfOutOfSync() doesn't output to stderr + node_miner.setmocktime(0) + def run_test(self): self.log.info("Test IBD with maximum tip age of 24 hours (default).") self.test_maxtipage(DEFAULT_MAX_TIP_AGE, set_parameter=False) diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index e85541d0ec..e7d65b4539 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -10,7 +10,6 @@ from test_framework.blocktools import ( create_block, add_witness_commitment, MAX_BLOCK_SIGOPS_WEIGHT, - WITNESS_SCALE_FACTOR, ) from test_framework.messages import ( COutPoint, @@ -20,6 +19,7 @@ from test_framework.messages import ( CTxOut, SEQUENCE_FINAL, tx_from_hex, + WITNESS_SCALE_FACTOR, ) from test_framework.script import ( ANNEX_TAG, diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index 3c3ff1e4a0..9f6f8919de 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -3,7 +3,9 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the ZMQ notification interface.""" +import os import struct +import tempfile from time import sleep from io import BytesIO @@ -30,7 +32,7 @@ from test_framework.util import ( from test_framework.wallet import ( MiniWallet, ) -from test_framework.netutil import test_ipv6_local +from test_framework.netutil import test_ipv6_local, test_unix_socket # Test may be skipped and not have zmq installed @@ -119,6 +121,10 @@ class ZMQTest (BitcoinTestFramework): self.ctx = zmq.Context() try: self.test_basic() + if test_unix_socket(): + self.test_basic(unix=True) + else: + self.log.info("Skipping ipc test, because UNIX sockets are not supported.") self.test_sequence() self.test_mempool_sync() self.test_reorg() @@ -139,7 +145,7 @@ class ZMQTest (BitcoinTestFramework): socket.setsockopt(zmq.IPV6, 1) subscribers.append(ZMQSubscriber(socket, topic.encode())) - self.restart_node(0, [f"-zmqpub{topic}={address}" for topic, address in services]) + self.restart_node(0, [f"-zmqpub{topic}={address.replace('ipc://', 'unix:')}" for topic, address in services]) for i, sub in enumerate(subscribers): sub.socket.connect(services[i][1]) @@ -176,12 +182,19 @@ class ZMQTest (BitcoinTestFramework): return subscribers - def test_basic(self): + def test_basic(self, unix = False): + self.log.info(f"Running basic test with {'ipc' if unix else 'tcp'} protocol") # Invalid zmq arguments don't take down the node, see #17185. 
self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"]) address = f"tcp://127.0.0.1:{self.zmq_port_base}" + + if unix: + # Use the shortest temp path possible since paths may have as little as 92-char limit + socket_path = tempfile.NamedTemporaryFile().name + address = f"ipc://{socket_path}" + subs = self.setup_zmq_test([(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]]) hashblock = subs[0] @@ -247,6 +260,8 @@ class ZMQTest (BitcoinTestFramework): ]) assert_equal(self.nodes[1].getzmqnotifications(), []) + if unix: + os.unlink(socket_path) def test_reorg(self): diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 272e932fcc..b00be5f4f0 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -28,6 +28,8 @@ from test_framework.script import ( OP_HASH160, OP_RETURN, OP_TRUE, + SIGHASH_ALL, + sign_input_legacy, ) from test_framework.script_util import ( DUMMY_MIN_OP_RETURN_SCRIPT, @@ -386,5 +388,24 @@ class MempoolAcceptanceTest(BitcoinTestFramework): maxfeerate=0, ) + self.log.info('Spending a confirmed bare multisig is okay') + address = self.wallet.get_address() + tx = tx_from_hex(raw_tx_reference) + privkey, pubkey = generate_keypair() + tx.vout[0].scriptPubKey = keys_to_multisig_script([pubkey] * 3, k=1) # Some bare multisig script (1-of-3) + tx.rehash() + self.generateblock(node, address, [tx.serialize().hex()]) + tx_spend = CTransaction() + tx_spend.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx_spend.vout.append(CTxOut(tx.vout[0].nValue - int(fee*COIN), script_to_p2wsh_script(CScript([OP_TRUE])))) + tx_spend.rehash() + sign_input_legacy(tx_spend, 0, tx.vout[0].scriptPubKey, privkey, sighash_type=SIGHASH_ALL) + tx_spend.vin[0].scriptSig = bytes(CScript([OP_0])) + tx_spend.vin[0].scriptSig + self.check_mempool_result( + result_expected=[{'txid': tx_spend.rehash(), 'allowed': True, 'vsize': tx_spend.get_vsize(), 'fees': { 'base': Decimal('0.00000700')}}], + rawtxs=[tx_spend.serialize().hex()], + maxfeerate=0, + ) + if __name__ == '__main__': MempoolAcceptanceTest().main() diff --git a/test/functional/mempool_accept_v3.py b/test/functional/mempool_accept_v3.py index 1b55cd0a0d..8285b82c19 100755 --- a/test/functional/mempool_accept_v3.py +++ b/test/functional/mempool_accept_v3.py @@ -533,10 +533,10 @@ class MempoolAcceptV3(BitcoinTestFramework): tx_unrelated_replacee = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=utxo_unrelated_conflict) assert tx_unrelated_replacee["txid"] in node.getrawmempool() - fee_to_beat_child2 = int(tx_v3_child_2["fee"] * COIN) + fee_to_beat = max(int(tx_v3_child_2["fee"] * COIN), int(tx_unrelated_replacee["fee"]*COIN)) tx_v3_child_3 = self.wallet.create_self_transfer_multi( - utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat_child2*5, version=3 + utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat*2, version=3 ) node.sendrawtransaction(tx_v3_child_3["hex"]) self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_3["txid"]]) diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index 6215610c31..e8a568f7ab 100755 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -6,7 +6,6 @@ from decimal import Decimal -from test_framework.blocktools import COINBASE_MATURITY from test_framework.p2p import P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from 
test_framework.util import ( @@ -14,8 +13,7 @@ from test_framework.util import ( assert_fee_amount, assert_greater_than, assert_raises_rpc_error, - create_lots_of_big_transactions, - gen_return_txouts, + fill_mempool, ) from test_framework.wallet import ( COIN, @@ -34,50 +32,6 @@ class MempoolLimitTest(BitcoinTestFramework): ]] self.supports_cli = False - def fill_mempool(self): - """Fill mempool until eviction.""" - self.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises") - txouts = gen_return_txouts() - node = self.nodes[0] - miniwallet = self.wallet - relayfee = node.getnetworkinfo()['relayfee'] - - tx_batch_size = 1 - num_of_batches = 75 - # Generate UTXOs to flood the mempool - # 1 to create a tx initially that will be evicted from the mempool later - # 75 transactions each with a fee rate higher than the previous one - # And 1 more to verify that this tx does not get added to the mempool with a fee rate less than the mempoolminfee - # And 2 more for the package cpfp test - self.generate(miniwallet, 1 + (num_of_batches * tx_batch_size)) - - # Mine 99 blocks so that the UTXOs are allowed to be spent - self.generate(node, COINBASE_MATURITY - 1) - - self.log.debug("Create a mempool tx that will be evicted") - tx_to_be_evicted_id = miniwallet.send_self_transfer(from_node=node, fee_rate=relayfee)["txid"] - - # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool - # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB) - # by 130 should result in a fee that corresponds to 2x of that fee rate - base_fee = relayfee * 130 - - self.log.debug("Fill up the mempool with txs with higher fee rate") - with node.assert_debug_log(["rolling minimum fee bumped"]): - for batch_of_txid in range(num_of_batches): - fee = (batch_of_txid + 1) * base_fee - create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts) - - self.log.debug("The tx should be evicted by now") - # The number of transactions created should be greater than the ones present in the mempool - assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool())) - # Initial tx created should not be present in the mempool anymore as it had a lower fee rate - assert tx_to_be_evicted_id not in node.getrawmempool() - - self.log.debug("Check that mempoolminfee is larger than minrelaytxfee") - assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) - assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) - def test_rbf_carveout_disallowed(self): node = self.nodes[0] @@ -139,7 +93,7 @@ class MempoolLimitTest(BitcoinTestFramework): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) - self.fill_mempool() + fill_mempool(self, node, self.wallet) current_info = node.getmempoolinfo() mempoolmin_feerate = current_info["mempoolminfee"] @@ -229,7 +183,7 @@ class MempoolLimitTest(BitcoinTestFramework): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) - self.fill_mempool() + fill_mempool(self, node, self.wallet) current_info = node.getmempoolinfo() mempoolmin_feerate = current_info["mempoolminfee"] @@ -303,7 +257,7 @@ class MempoolLimitTest(BitcoinTestFramework): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) 
assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) - self.fill_mempool() + fill_mempool(self, node, self.wallet) # Deliberately try to create a tx with a fee less than the minimum mempool fee to assert that it does not get added to the mempool self.log.info('Create a mempool tx that will not pass mempoolminfee') diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py index da796d3f70..5f2dde8eac 100755 --- a/test/functional/mining_basic.py +++ b/test/functional/mining_basic.py @@ -308,7 +308,7 @@ class MiningTest(BitcoinTestFramework): # Should ask for the block from a p2p node, if they announce the header as well: peer = node.add_p2p_connection(P2PDataStore()) - peer.wait_for_getheaders(timeout=5) # Drop the first getheaders + peer.wait_for_getheaders(timeout=5, block_hash=block.hashPrevBlock) peer.send_blocks_and_test(blocks=[block], node=node) # Must be active now: assert chain_tip(block.hash, status='active', branchlen=0) in node.getchaintips() diff --git a/test/functional/mocks/signer.py b/test/functional/mocks/signer.py index 5f4fad6380..23d163aac3 100755 --- a/test/functional/mocks/signer.py +++ b/test/functional/mocks/signer.py @@ -25,35 +25,36 @@ def getdescriptors(args): sys.stdout.write(json.dumps({ "receive": [ - "pkh([00000001/44'/1'/" + args.account + "']" + xpub + "/0/*)#vt6w3l3j", - "sh(wpkh([00000001/49'/1'/" + args.account + "']" + xpub + "/0/*))#r0grqw5x", - "wpkh([00000001/84'/1'/" + args.account + "']" + xpub + "/0/*)#x30uthjs", - "tr([00000001/86'/1'/" + args.account + "']" + xpub + "/0/*)#sng9rd4t" + "pkh([00000001/44h/1h/" + args.account + "']" + xpub + "/0/*)#aqllu46s", + "sh(wpkh([00000001/49h/1h/" + args.account + "']" + xpub + "/0/*))#5dh56mgg", + "wpkh([00000001/84h/1h/" + args.account + "']" + xpub + "/0/*)#h62dxaej", + "tr([00000001/86h/1h/" + args.account + "']" + xpub + "/0/*)#pcd5w87f" ], "internal": [ - "pkh([00000001/44'/1'/" + args.account + "']" + xpub + "/1/*)#all0v2p2", - "sh(wpkh([00000001/49'/1'/" + args.account + "']" + xpub + "/1/*))#kwx4c3pe", - "wpkh([00000001/84'/1'/" + args.account + "']" + xpub + "/1/*)#h92akzzg", - "tr([00000001/86'/1'/" + args.account + "']" + xpub + "/1/*)#p8dy7c9n" + "pkh([00000001/44h/1h/" + args.account + "']" + xpub + "/1/*)#v567pq2g", + "sh(wpkh([00000001/49h/1h/" + args.account + "']" + xpub + "/1/*))#pvezzyah", + "wpkh([00000001/84h/1h/" + args.account + "']" + xpub + "/1/*)#xw0vmgf2", + "tr([00000001/86h/1h/" + args.account + "']" + xpub + "/1/*)#svg4njw3" ] })) def displayaddress(args): - # Several descriptor formats are acceptable, so allowing for potential - # changes to InferDescriptor: if args.fingerprint != "00000001": return sys.stdout.write(json.dumps({"error": "Unexpected fingerprint", "fingerprint": args.fingerprint})) - expected_desc = [ - "wpkh([00000001/84'/1'/0'/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#0yneg42r", - "tr([00000001/86'/1'/0'/0/0]c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#4vdj9jqk", - ] + expected_desc = { + "wpkh([00000001/84h/1h/0h/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#3te6hhy7": "bcrt1qm90ugl4d48jv8n6e5t9ln6t9zlpm5th68x4f8g", + "sh(wpkh([00000001/49h/1h/0h/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7))#kz9y5w82": "2N2gQKzjUe47gM8p1JZxaAkTcoHPXV6YyVp", + "pkh([00000001/44h/1h/0h/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#q3pqd8wh": "n1LKejAadN6hg2FrBXoU1KrwX4uK16mco9", + 
"tr([00000001/86h/1h/0h/0/0]c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#puqqa90m": "tb1phw4cgpt6cd30kz9k4wkpwm872cdvhss29jga2xpmftelhqll62mscq0k4g", + "wpkh([00000001/84h/1h/0h/0/1]03a20a46308be0b8ded6dff0a22b10b4245c587ccf23f3b4a303885be3a524f172)#aqpjv5xr": "wrong_address", + } if args.desc not in expected_desc: return sys.stdout.write(json.dumps({"error": "Unexpected descriptor", "desc": args.desc})) - return sys.stdout.write(json.dumps({"address": "bcrt1qm90ugl4d48jv8n6e5t9ln6t9zlpm5th68x4f8g"})) + return sys.stdout.write(json.dumps({"address": expected_desc[args.desc]})) def signtx(args): if args.fingerprint != "00000001": diff --git a/test/functional/p2p_1p1c_network.py b/test/functional/p2p_1p1c_network.py new file mode 100755 index 0000000000..e88c826962 --- /dev/null +++ b/test/functional/p2p_1p1c_network.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Test that 1p1c package submission allows a 1p1c package to propagate in a "network" of nodes. Send +various packages from different nodes on a network in which some nodes have already received some of +the transactions (and submitted them to mempool, kept them as orphans or rejected them as +too-low-feerate transactions). The packages should be received and accepted by all nodes. +""" + +from decimal import Decimal +from math import ceil + +from test_framework.messages import ( + msg_tx, +) +from test_framework.p2p import ( + P2PInterface, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_greater_than, + fill_mempool, +) +from test_framework.wallet import ( + MiniWallet, + MiniWalletMode, +) + +# 1sat/vB feerate denominated in BTC/KvB +FEERATE_1SAT_VB = Decimal("0.00001000") + +class PackageRelayTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 4 + # hugely speeds up the test, as it involves multiple hops of tx relay. 
+ self.noban_tx_relay = True + self.extra_args = [[ + "-datacarriersize=100000", + "-maxmempool=5", + ]] * self.num_nodes + self.supports_cli = False + + def raise_network_minfee(self): + filler_wallet = MiniWallet(self.nodes[0]) + fill_mempool(self, self.nodes[0], filler_wallet) + + self.log.debug("Wait for the network to sync mempools") + self.sync_mempools() + + self.log.debug("Check that all nodes' mempool minimum feerates are above min relay feerate") + for node in self.nodes: + assert_equal(node.getmempoolinfo()['minrelaytxfee'], FEERATE_1SAT_VB) + assert_greater_than(node.getmempoolinfo()['mempoolminfee'], FEERATE_1SAT_VB) + + def create_basic_1p1c(self, wallet): + low_fee_parent = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB, confirmed_only=True) + high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB) + package_hex_basic = [low_fee_parent["hex"], high_fee_child["hex"]] + return package_hex_basic, low_fee_parent["tx"], high_fee_child["tx"] + + def create_package_2outs(self, wallet): + # First create a tester tx to see the vsize, and then adjust the fees + utxo_for_2outs = wallet.get_utxo(confirmed_only=True) + + low_fee_parent_2outs_tester = wallet.create_self_transfer_multi( + utxos_to_spend=[utxo_for_2outs], + num_outputs=2, + ) + + # Target 1sat/vB so the number of satoshis is equal to the vsize. + # Round up. The goal is to be between min relay feerate and mempool min feerate. + fee_2outs = ceil(low_fee_parent_2outs_tester["tx"].get_vsize() / 2) + + low_fee_parent_2outs = wallet.create_self_transfer_multi( + utxos_to_spend=[utxo_for_2outs], + num_outputs=2, + fee_per_output=fee_2outs, + ) + + # Now create the child + high_fee_child_2outs = wallet.create_self_transfer_multi( + utxos_to_spend=low_fee_parent_2outs["new_utxos"][::-1], + fee_per_output=fee_2outs*100, + ) + return [low_fee_parent_2outs["hex"], high_fee_child_2outs["hex"]], low_fee_parent_2outs["tx"], high_fee_child_2outs["tx"] + + def create_package_2p1c(self, wallet): + parent1 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*10, confirmed_only=True) + parent2 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*20, confirmed_only=True) + child = wallet.create_self_transfer_multi( + utxos_to_spend=[parent1["new_utxo"], parent2["new_utxo"]], + fee_per_output=999*parent1["tx"].get_vsize(), + ) + return [parent1["hex"], parent2["hex"], child["hex"]], parent1["tx"], parent2["tx"], child["tx"] + + def create_packages(self): + # 1: Basic 1-parent-1-child package, parent 1sat/vB, child 999sat/vB + package_hex_1, parent_1, child_1 = self.create_basic_1p1c(self.wallet) + + # 2: same as 1, parent's txid is the same as its wtxid. + package_hex_2, parent_2, child_2 = self.create_basic_1p1c(self.wallet_nonsegwit) + + # 3: 2-parent-1-child package. Both parents are above mempool min feerate. No package submission happens. + # We require packages to be child-with-unconfirmed-parents and only allow 1-parent-1-child packages. + package_hex_3, parent_31, parent_32, child_3 = self.create_package_2p1c(self.wallet) + + # 4: parent + child package where the child spends 2 different outputs from the parent. + package_hex_4, parent_4, child_4 = self.create_package_2outs(self.wallet) + + # Assemble return results + packages_to_submit = [package_hex_1, package_hex_2, package_hex_3, package_hex_4] + # node0: sender + # node1: pre-received the children (orphan) + # node3: pre-received the parents (too low fee) + # All nodes receive parent_31 ahead of time. 
+ txns_to_send = [ + [], + [child_1, child_2, parent_31, child_3, child_4], + [parent_31], + [parent_1, parent_2, parent_31, parent_4] + ] + + return packages_to_submit, txns_to_send + + def run_test(self): + self.wallet = MiniWallet(self.nodes[1]) + self.wallet_nonsegwit = MiniWallet(self.nodes[2], mode=MiniWalletMode.RAW_P2PK) + self.generate(self.wallet_nonsegwit, 10) + self.generate(self.wallet, 120) + + self.log.info("Fill mempools with large transactions to raise mempool minimum feerates") + self.raise_network_minfee() + + # Create the transactions. + self.wallet.rescan_utxos(include_mempool=True) + packages_to_submit, transactions_to_presend = self.create_packages() + + self.peers = [self.nodes[i].add_p2p_connection(P2PInterface()) for i in range(self.num_nodes)] + + self.log.info("Pre-send some transactions to nodes") + for (i, peer) in enumerate(self.peers): + for tx in transactions_to_presend[i]: + peer.send_and_ping(msg_tx(tx)) + # This disconnect removes any sent orphans from the orphanage (EraseForPeer) and times + # out the in-flight requests. It is currently required for the test to pass right now, + # because the node will not reconsider an orphan tx and will not (re)try requesting + # orphan parents from multiple peers if the first one didn't respond. + # TODO: remove this in the future if the node tries orphan resolution with multiple peers. + peer.peer_disconnect() + + self.log.info("Submit full packages to node0") + for package_hex in packages_to_submit: + submitpackage_result = self.nodes[0].submitpackage(package_hex) + assert_equal(submitpackage_result["package_msg"], "success") + + self.log.info("Wait for mempools to sync") + self.sync_mempools(timeout=20) + + +if __name__ == '__main__': + PackageRelayTest().main() diff --git a/test/functional/p2p_addrv2_relay.py b/test/functional/p2p_addrv2_relay.py index f9a8c44be2..ea114e7d70 100755 --- a/test/functional/p2p_addrv2_relay.py +++ b/test/functional/p2p_addrv2_relay.py @@ -11,6 +11,7 @@ import time from test_framework.messages import ( CAddress, msg_addrv2, + msg_sendaddrv2, ) from test_framework.p2p import ( P2PInterface, @@ -75,6 +76,12 @@ class AddrTest(BitcoinTestFramework): self.extra_args = [["-whitelist=addr@127.0.0.1"]] def run_test(self): + self.log.info('Check disconnection when sending sendaddrv2 after verack') + conn = self.nodes[0].add_p2p_connection(P2PInterface()) + with self.nodes[0].assert_debug_log(['sendaddrv2 received after verack from peer=0; disconnecting']): + conn.send_message(msg_sendaddrv2()) + conn.wait_for_disconnect() + self.log.info('Create connection that sends addrv2 messages') addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) msg = msg_addrv2() @@ -89,8 +96,8 @@ class AddrTest(BitcoinTestFramework): msg.addrs = ADDRS msg_size = calc_addrv2_msg_size(ADDRS) with self.nodes[0].assert_debug_log([ - f'received: addrv2 ({msg_size} bytes) peer=0', - f'sending addrv2 ({msg_size} bytes) peer=1', + f'received: addrv2 ({msg_size} bytes) peer=1', + f'sending addrv2 ({msg_size} bytes) peer=2', ]): addr_source.send_and_ping(msg) self.nodes[0].setmocktime(int(time.time()) + 30 * 60) diff --git a/test/functional/p2p_block_sync.py b/test/functional/p2p_block_sync.py index d821edc1b1..6c7f08364e 100755 --- a/test/functional/p2p_block_sync.py +++ b/test/functional/p2p_block_sync.py @@ -22,7 +22,7 @@ class BlockSyncTest(BitcoinTestFramework): # node0 -> node1 -> node2 # So node1 has both an inbound and outbound peer. 
# In our test, we will mine a block on node0, and ensure that it makes - # to to both node1 and node2. + # to both node1 and node2. self.connect_nodes(0, 1) self.connect_nodes(1, 2) diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index d6c06fdeed..9e314db110 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -139,7 +139,7 @@ class TestP2PConn(P2PInterface): This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) - self.wait_for_disconnect(timeout) + self.wait_for_disconnect(timeout=timeout) class CompactBlocksTest(BitcoinTestFramework): def set_test_params(self): @@ -387,7 +387,7 @@ class CompactBlocksTest(BitcoinTestFramework): if announce == "inv": test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)])) - test_node.wait_until(lambda: "getheaders" in test_node.last_message, timeout=30) + test_node.wait_for_getheaders(timeout=30) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) diff --git a/test/functional/p2p_compactblocks_hb.py b/test/functional/p2p_compactblocks_hb.py index c985a1f98d..023b33ff6d 100755 --- a/test/functional/p2p_compactblocks_hb.py +++ b/test/functional/p2p_compactblocks_hb.py @@ -32,10 +32,15 @@ class CompactBlocksConnectionTest(BitcoinTestFramework): self.connect_nodes(peer, 0) self.generate(self.nodes[0], 1) self.disconnect_nodes(peer, 0) - status_to = [self.peer_info(1, i)['bip152_hb_to'] for i in range(2, 6)] - status_from = [self.peer_info(i, 1)['bip152_hb_from'] for i in range(2, 6)] - assert_equal(status_to, status_from) - return status_to + + def status_to(): + return [self.peer_info(1, i)['bip152_hb_to'] for i in range(2, 6)] + + def status_from(): + return [self.peer_info(i, 1)['bip152_hb_from'] for i in range(2, 6)] + + self.wait_until(lambda: status_to() == status_from()) + return status_to() def run_test(self): self.log.info("Testing reserved high-bandwidth mode slot for outbound peer...") diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py index c389ff732f..678b006886 100755 --- a/test/functional/p2p_disconnect_ban.py +++ b/test/functional/p2p_disconnect_ban.py @@ -77,6 +77,7 @@ class DisconnectBanTest(BitcoinTestFramework): self.nodes[1].setmocktime(old_time) self.nodes[1].setban("127.0.0.0/32", "add") self.nodes[1].setban("127.0.0.0/24", "add") + self.nodes[1].setban("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", "add") self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds listBeforeShutdown = self.nodes[1].listbanned() @@ -85,13 +86,13 @@ class DisconnectBanTest(BitcoinTestFramework): self.log.info("setban: test banning with absolute timestamp") self.nodes[1].setban("192.168.0.2", "add", old_time + 120, True) - # Move time forward by 3 seconds so the third ban has expired + # Move time forward by 3 seconds so the fourth ban has expired self.nodes[1].setmocktime(old_time + 3) - assert_equal(len(self.nodes[1].listbanned()), 4) + assert_equal(len(self.nodes[1].listbanned()), 5) self.log.info("Test ban_duration and time_remaining") for ban in self.nodes[1].listbanned(): - if ban["address"] in ["127.0.0.0/32", "127.0.0.0/24"]: + if ban["address"] in ["127.0.0.0/32", "127.0.0.0/24", "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion"]: 
assert_equal(ban["ban_duration"], 86400) assert_equal(ban["time_remaining"], 86397) elif ban["address"] == "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19": @@ -108,6 +109,7 @@ class DisconnectBanTest(BitcoinTestFramework): assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) assert_equal("192.168.0.2/32", listAfterShutdown[2]['address']) assert_equal("/19" in listAfterShutdown[3]['address'], True) + assert_equal("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", listAfterShutdown[4]['address']) # Clear ban lists self.nodes[1].clearbanned() diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index f0b62e291d..dd19fe9333 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -41,6 +41,7 @@ class P2PHandshakeTest(BitcoinTestFramework): peer.sync_with_ping() peer.peer_disconnect() peer.wait_for_disconnect() + self.wait_until(lambda: len(node.getpeerinfo()) == 0) def test_desirable_service_flags(self, node, service_flag_tests, desirable_service_flags, expect_disconnect): """Check that connecting to a peer either fails or succeeds depending on its offered diff --git a/test/functional/p2p_initial_headers_sync.py b/test/functional/p2p_initial_headers_sync.py index e67c384da7..bc6e0fb355 100755 --- a/test/functional/p2p_initial_headers_sync.py +++ b/test/functional/p2p_initial_headers_sync.py @@ -38,9 +38,10 @@ class HeadersSyncTest(BitcoinTestFramework): def run_test(self): self.log.info("Adding a peer to node0") peer1 = self.nodes[0].add_p2p_connection(P2PInterface()) + best_block_hash = int(self.nodes[0].getbestblockhash(), 16) # Wait for peer1 to receive a getheaders - peer1.wait_for_getheaders() + peer1.wait_for_getheaders(block_hash=best_block_hash) # An empty reply will clear the outstanding getheaders request, # allowing additional getheaders requests to be sent to this peer in # the future. 
@@ -60,17 +61,12 @@ class HeadersSyncTest(BitcoinTestFramework): assert "getheaders" not in peer2.last_message assert "getheaders" not in peer3.last_message - with p2p_lock: - peer1.last_message.pop("getheaders", None) - self.log.info("Have all peers announce a new block") self.announce_random_block(all_peers) self.log.info("Check that peer1 receives a getheaders in response") - peer1.wait_for_getheaders() + peer1.wait_for_getheaders(block_hash=best_block_hash) peer1.send_message(msg_headers()) # Send empty response, see above - with p2p_lock: - peer1.last_message.pop("getheaders", None) self.log.info("Check that exactly 1 of {peer2, peer3} received a getheaders in response") count = 0 @@ -80,7 +76,6 @@ class HeadersSyncTest(BitcoinTestFramework): if "getheaders" in p.last_message: count += 1 peer_receiving_getheaders = p - p.last_message.pop("getheaders", None) p.send_message(msg_headers()) # Send empty response, see above assert_equal(count, 1) @@ -89,14 +84,14 @@ class HeadersSyncTest(BitcoinTestFramework): self.announce_random_block(all_peers) self.log.info("Check that peer1 receives a getheaders in response") - peer1.wait_for_getheaders() + peer1.wait_for_getheaders(block_hash=best_block_hash) self.log.info("Check that the remaining peer received a getheaders as well") expected_peer = peer2 if peer2 == peer_receiving_getheaders: expected_peer = peer3 - expected_peer.wait_for_getheaders() + expected_peer.wait_for_getheaders(block_hash=best_block_hash) self.log.info("Success!") diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py index 467bbad09c..8b63d8ee26 100755 --- a/test/functional/p2p_node_network_limited.py +++ b/test/functional/p2p_node_network_limited.py @@ -92,7 +92,8 @@ class NodeNetworkLimitedTest(BitcoinTestFramework): # Wait until the full_node is headers-wise sync best_block_hash = pruned_node.getbestblockhash() - self.wait_until(lambda: next(filter(lambda x: x['hash'] == best_block_hash, full_node.getchaintips()))['status'] == "headers-only") + default_value = {'status': ''} # No status + self.wait_until(lambda: next(filter(lambda x: x['hash'] == best_block_hash, full_node.getchaintips()), default_value)['status'] == "headers-only") # Now, since the node aims to download a window of 1024 blocks, # ensure it requests the blocks below the threshold only (with a @@ -137,7 +138,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework): self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).") node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit - node.wait_for_disconnect(5) + node.wait_for_disconnect(timeout=5) self.nodes[0].disconnect_p2ps() # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer diff --git a/test/functional/p2p_opportunistic_1p1c.py b/test/functional/p2p_opportunistic_1p1c.py new file mode 100755 index 0000000000..e07acd5481 --- /dev/null +++ b/test/functional/p2p_opportunistic_1p1c.py @@ -0,0 +1,414 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Test opportunistic 1p1c package submission logic. 
+""" + +from decimal import Decimal +import time +from test_framework.messages import ( + CInv, + CTxInWitness, + MAX_BIP125_RBF_SEQUENCE, + MSG_WTX, + msg_inv, + msg_tx, + tx_from_hex, +) +from test_framework.p2p import ( + P2PInterface, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_greater_than, + fill_mempool, +) +from test_framework.wallet import ( + MiniWallet, + MiniWalletMode, +) + +# 1sat/vB feerate denominated in BTC/KvB +FEERATE_1SAT_VB = Decimal("0.00001000") +# Number of seconds to wait to ensure no getdata is received +GETDATA_WAIT = 60 + +def cleanup(func): + def wrapper(self, *args, **kwargs): + try: + func(self, *args, **kwargs) + finally: + self.nodes[0].disconnect_p2ps() + # Do not clear the node's mempool, as each test requires mempool min feerate > min + # relay feerate. However, do check that this is the case. + assert self.nodes[0].getmempoolinfo()["mempoolminfee"] > self.nodes[0].getnetworkinfo()["relayfee"] + # Ensure we do not try to spend the same UTXOs in subsequent tests, as they will look like RBF attempts. + self.wallet.rescan_utxos(include_mempool=True) + + # Resets if mocktime was used + self.nodes[0].setmocktime(0) + return wrapper + +class PackageRelayTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [[ + "-datacarriersize=100000", + "-maxmempool=5", + ]] + self.supports_cli = False + + def create_tx_below_mempoolminfee(self, wallet): + """Create a 1-input 1sat/vB transaction using a confirmed UTXO. Decrement and use + self.sequence so that subsequent calls to this function result in unique transactions.""" + + self.sequence -= 1 + assert_greater_than(self.nodes[0].getmempoolinfo()["mempoolminfee"], FEERATE_1SAT_VB) + + return wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB, sequence=self.sequence, confirmed_only=True) + + @cleanup + def test_basic_child_then_parent(self): + node = self.nodes[0] + self.log.info("Check that opportunistic 1p1c logic works when child is received before parent") + + low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet) + high_fee_child = self.wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=20*FEERATE_1SAT_VB) + + peer_sender = node.add_p2p_connection(P2PInterface()) + + # 1. Child is received first (perhaps the low feerate parent didn't meet feefilter or the requests were sent to different nodes). It is missing an input. + high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16) + peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)])) + peer_sender.wait_for_getdata([high_child_wtxid_int]) + peer_sender.send_and_ping(msg_tx(high_fee_child["tx"])) + + # 2. Node requests the missing parent by txid. + parent_txid_int = int(low_fee_parent["txid"], 16) + peer_sender.wait_for_getdata([parent_txid_int]) + + # 3. Sender relays the parent. Parent+Child are evaluated as a package and accepted. + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + # 4. Both transactions should now be in mempool. 
+ node_mempool = node.getrawmempool() + assert low_fee_parent["txid"] in node_mempool + assert high_fee_child["txid"] in node_mempool + + node.disconnect_p2ps() + + @cleanup + def test_basic_parent_then_child(self, wallet): + node = self.nodes[0] + low_fee_parent = self.create_tx_below_mempoolminfee(wallet) + high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=20*FEERATE_1SAT_VB) + + peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay") + peer_ignored = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=2, connection_type="outbound-full-relay") + + # 1. Parent is relayed first. It is too low feerate. + parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16) + peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)])) + peer_sender.wait_for_getdata([parent_wtxid_int]) + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + assert low_fee_parent["txid"] not in node.getrawmempool() + + # Send again from peer_ignored, check that it is ignored + peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)])) + assert "getdata" not in peer_ignored.last_message + + # 2. Child is relayed next. It is missing an input. + high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16) + peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)])) + peer_sender.wait_for_getdata([high_child_wtxid_int]) + peer_sender.send_and_ping(msg_tx(high_fee_child["tx"])) + + # 3. Node requests the missing parent by txid. + # It should do so even if it has previously rejected that parent for being too low feerate. + parent_txid_int = int(low_fee_parent["txid"], 16) + peer_sender.wait_for_getdata([parent_txid_int]) + + # 4. Sender re-relays the parent. Parent+Child are evaluated as a package and accepted. + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + # 5. Both transactions should now be in mempool. + node_mempool = node.getrawmempool() + assert low_fee_parent["txid"] in node_mempool + assert high_fee_child["txid"] in node_mempool + + @cleanup + def test_low_and_high_child(self, wallet): + node = self.nodes[0] + low_fee_parent = self.create_tx_below_mempoolminfee(wallet) + # This feerate is above mempoolminfee, but not enough to also bump the low feerate parent. + feerate_just_above = node.getmempoolinfo()["mempoolminfee"] + med_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=feerate_just_above) + high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB) + + peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay") + peer_ignored = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=2, connection_type="outbound-full-relay") + + self.log.info("Check that tx caches low fee parent + low fee child package rejections") + + # 1. Send parent, rejected for being low feerate. + parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16) + peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)])) + peer_sender.wait_for_getdata([parent_wtxid_int]) + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + assert low_fee_parent["txid"] not in node.getrawmempool() + + # Send again from peer_ignored, check that it is ignored + peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)])) + assert "getdata" not in peer_ignored.last_message + + # 2. 
Send an (orphan) child that has a higher feerate, but not enough to bump the parent. + med_child_wtxid_int = int(med_fee_child["tx"].getwtxid(), 16) + peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=med_child_wtxid_int)])) + peer_sender.wait_for_getdata([med_child_wtxid_int]) + peer_sender.send_and_ping(msg_tx(med_fee_child["tx"])) + + # 3. Node requests the orphan's missing parent. + parent_txid_int = int(low_fee_parent["txid"], 16) + peer_sender.wait_for_getdata([parent_txid_int]) + + # 4. The low parent + low child are submitted as a package. They are not accepted due to low package feerate. + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + assert low_fee_parent["txid"] not in node.getrawmempool() + assert med_fee_child["txid"] not in node.getrawmempool() + + # If peer_ignored announces the low feerate child, it should be ignored + peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=med_child_wtxid_int)])) + assert "getdata" not in peer_ignored.last_message + # If either peer sends the parent again, package evaluation should not be attempted + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + peer_ignored.send_and_ping(msg_tx(low_fee_parent["tx"])) + + assert low_fee_parent["txid"] not in node.getrawmempool() + assert med_fee_child["txid"] not in node.getrawmempool() + + # 5. Send the high feerate (orphan) child + high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16) + peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)])) + peer_sender.wait_for_getdata([high_child_wtxid_int]) + peer_sender.send_and_ping(msg_tx(high_fee_child["tx"])) + + # 6. Node requests the orphan's parent, even though it has already been rejected, both by + # itself and with a child. This is necessary, otherwise high_fee_child can be censored. + parent_txid_int = int(low_fee_parent["txid"], 16) + peer_sender.wait_for_getdata([parent_txid_int]) + + # 7. The low feerate parent + high feerate child are submitted as a package. + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + # 8. Both transactions should now be in mempool + node_mempool = node.getrawmempool() + assert low_fee_parent["txid"] in node_mempool + assert high_fee_child["txid"] in node_mempool + assert med_fee_child["txid"] not in node_mempool + + @cleanup + def test_orphan_consensus_failure(self): + self.log.info("Check opportunistic 1p1c logic with consensus-invalid orphan causes disconnect of the correct peer") + node = self.nodes[0] + low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet) + coin = low_fee_parent["new_utxo"] + address = node.get_deterministic_priv_key().address + # Create raw transaction spending the parent, but with no signature (a consensus error). + hex_orphan_no_sig = node.createrawtransaction([{"txid": coin["txid"], "vout": coin["vout"]}], {address : coin["value"] - Decimal("0.0001")}) + tx_orphan_bad_wit = tx_from_hex(hex_orphan_no_sig) + tx_orphan_bad_wit.wit.vtxinwit.append(CTxInWitness()) + tx_orphan_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage'] + + bad_orphan_sender = node.add_p2p_connection(P2PInterface()) + parent_sender = node.add_p2p_connection(P2PInterface()) + + # 1. Child is received first. It is missing an input. + child_wtxid_int = int(tx_orphan_bad_wit.getwtxid(), 16) + bad_orphan_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=child_wtxid_int)])) + bad_orphan_sender.wait_for_getdata([child_wtxid_int]) + bad_orphan_sender.send_and_ping(msg_tx(tx_orphan_bad_wit)) + + # 2. Node requests the missing parent by txid. 
+ parent_txid_int = int(low_fee_parent["txid"], 16) + bad_orphan_sender.wait_for_getdata([parent_txid_int]) + + # 3. A different peer relays the parent. Parent+Child are evaluated as a package and rejected. + parent_sender.send_message(msg_tx(low_fee_parent["tx"])) + + # 4. Transactions should not be in mempool. + node_mempool = node.getrawmempool() + assert low_fee_parent["txid"] not in node_mempool + assert tx_orphan_bad_wit.rehash() not in node_mempool + + # 5. Peer that sent a consensus-invalid transaction should be disconnected. + bad_orphan_sender.wait_for_disconnect() + + # The peer that didn't provide the orphan should not be disconnected. + parent_sender.sync_with_ping() + + @cleanup + def test_parent_consensus_failure(self): + self.log.info("Check opportunistic 1p1c logic with consensus-invalid parent causes disconnect of the correct peer") + node = self.nodes[0] + low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet) + high_fee_child = self.wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB) + + # Create invalid version of parent with a bad signature. + tx_parent_bad_wit = tx_from_hex(low_fee_parent["hex"]) + tx_parent_bad_wit.wit.vtxinwit.append(CTxInWitness()) + tx_parent_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage'] + + package_sender = node.add_p2p_connection(P2PInterface()) + fake_parent_sender = node.add_p2p_connection(P2PInterface()) + + # 1. Child is received first. It is missing an input. + child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16) + package_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=child_wtxid_int)])) + package_sender.wait_for_getdata([child_wtxid_int]) + package_sender.send_and_ping(msg_tx(high_fee_child["tx"])) + + # 2. Node requests the missing parent by txid. + parent_txid_int = int(tx_parent_bad_wit.rehash(), 16) + package_sender.wait_for_getdata([parent_txid_int]) + + # 3. A different node relays the parent. The parent is first evaluated by itself and + # rejected for being too low feerate. Then it is evaluated as a package and, after passing + # feerate checks, rejected for having a bad signature (consensus error). + fake_parent_sender.send_message(msg_tx(tx_parent_bad_wit)) + + # 4. Transactions should not be in mempool. + node_mempool = node.getrawmempool() + assert tx_parent_bad_wit.rehash() not in node_mempool + assert high_fee_child["txid"] not in node_mempool + + # 5. Peer sent a consensus-invalid transaction. + fake_parent_sender.wait_for_disconnect() + + self.log.info("Check that fake parent does not cause orphan to be deleted and real package can still be submitted") + # 6. Child-sending should not have been punished and the orphan should remain in orphanage. + # It can send the "real" parent transaction, and the package is accepted. 
+ parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16) + package_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)])) + package_sender.wait_for_getdata([parent_wtxid_int]) + package_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + node_mempool = node.getrawmempool() + assert low_fee_parent["txid"] in node_mempool + assert high_fee_child["txid"] in node_mempool + + @cleanup + def test_multiple_parents(self): + self.log.info("Check that node does not request more than 1 previously-rejected low feerate parent") + + node = self.nodes[0] + node.setmocktime(int(time.time())) + + # 2-parent-1-child package where both parents are below mempool min feerate + parent_low_1 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit) + parent_low_2 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit) + child_bumping = self.wallet_nonsegwit.create_self_transfer_multi( + utxos_to_spend=[parent_low_1["new_utxo"], parent_low_2["new_utxo"]], + fee_per_output=999*parent_low_1["tx"].get_vsize(), + ) + + peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay") + + # 1. Send both parents. Each should be rejected for being too low feerate. + # Send unsolicited so that we can later check that no "getdata" was ever received. + peer_sender.send_and_ping(msg_tx(parent_low_1["tx"])) + peer_sender.send_and_ping(msg_tx(parent_low_2["tx"])) + + # parent_low_1 and parent_low_2 are rejected for being low feerate. + assert parent_low_1["txid"] not in node.getrawmempool() + assert parent_low_2["txid"] not in node.getrawmempool() + + # 2. Send child. + peer_sender.send_and_ping(msg_tx(child_bumping["tx"])) + + # 3. Node should not request any parents, as it should recognize that it will not accept + # multi-parent-1-child packages. + node.bumpmocktime(GETDATA_WAIT) + peer_sender.sync_with_ping() + assert "getdata" not in peer_sender.last_message + + @cleanup + def test_other_parent_in_mempool(self): + self.log.info("Check opportunistic 1p1c fails if child already has another parent in mempool") + node = self.nodes[0] + + # This parent needs CPFP + parent_low = self.create_tx_below_mempoolminfee(self.wallet) + # This parent does not need CPFP and can be submitted alone ahead of time + parent_high = self.wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*10, confirmed_only=True) + child = self.wallet.create_self_transfer_multi( + utxos_to_spend=[parent_high["new_utxo"], parent_low["new_utxo"]], + fee_per_output=999*parent_low["tx"].get_vsize(), + ) + + peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay") + + # 1. Send first parent which will be accepted. + peer_sender.send_and_ping(msg_tx(parent_high["tx"])) + assert parent_high["txid"] in node.getrawmempool() + + # 2. Send child. + peer_sender.send_and_ping(msg_tx(child["tx"])) + + # 3. Node requests parent_low. 
However, 1p1c fails because package-not-child-with-unconfirmed-parents + parent_low_txid_int = int(parent_low["txid"], 16) + peer_sender.wait_for_getdata([parent_low_txid_int]) + peer_sender.send_and_ping(msg_tx(parent_low["tx"])) + + node_mempool = node.getrawmempool() + assert parent_high["txid"] in node_mempool + assert parent_low["txid"] not in node_mempool + assert child["txid"] not in node_mempool + + # Same error if submitted through submitpackage without parent_high + package_hex_missing_parent = [parent_low["hex"], child["hex"]] + result_missing_parent = node.submitpackage(package_hex_missing_parent) + assert_equal(result_missing_parent["package_msg"], "package-not-child-with-unconfirmed-parents") + + def run_test(self): + node = self.nodes[0] + # To avoid creating transactions with the same txid (can happen if we set the same feerate + # and reuse the same input as a previous transaction that wasn't successfully submitted), + # we give each subtest a different nSequence for its transactions. + self.sequence = MAX_BIP125_RBF_SEQUENCE + + self.wallet = MiniWallet(node) + self.wallet_nonsegwit = MiniWallet(node, mode=MiniWalletMode.RAW_P2PK) + self.generate(self.wallet_nonsegwit, 10) + self.generate(self.wallet, 20) + + filler_wallet = MiniWallet(node) + fill_mempool(self, node, filler_wallet) + + self.log.info("Check opportunistic 1p1c logic when parent (txid != wtxid) is received before child") + self.test_basic_parent_then_child(self.wallet) + + self.log.info("Check opportunistic 1p1c logic when parent (txid == wtxid) is received before child") + self.test_basic_parent_then_child(self.wallet_nonsegwit) + + self.log.info("Check opportunistic 1p1c logic when child is received before parent") + self.test_basic_child_then_parent() + + self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid != wtxid)") + self.test_low_and_high_child(self.wallet) + + self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid == wtxid)") + self.test_low_and_high_child(self.wallet_nonsegwit) + + self.test_orphan_consensus_failure() + self.test_parent_consensus_failure() + self.test_multiple_parents() + self.test_other_parent_in_mempool() + + +if __name__ == '__main__': + PackageRelayTest().main() diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 1c0c11d74c..45bbd7f1c3 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -191,22 +191,21 @@ class TestP2PConn(P2PInterface): def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): with p2p_lock: self.last_message.pop("getdata", None) - self.last_message.pop("getheaders", None) msg = msg_headers() msg.headers = [CBlockHeader(block)] if use_header: self.send_message(msg) else: self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)])) - self.wait_for_getheaders() + self.wait_for_getheaders(block_hash=block.hashPrevBlock, timeout=timeout) self.send_message(msg) - self.wait_for_getdata([block.sha256]) + self.wait_for_getdata([block.sha256], timeout=timeout) def request_block(self, blockhash, inv_type, timeout=60): with p2p_lock: self.last_message.pop("block", None) self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) - self.wait_for_block(blockhash, timeout) + self.wait_for_block(blockhash, timeout=timeout) return self.last_message["block"].block class SegWitTest(BitcoinTestFramework): @@ -1055,7 +1054,7 @@ class SegWitTest(BitcoinTestFramework): @subtest def test_max_witness_push_length(self): - 
"""Test that witness stack can only allow up to 520 byte pushes.""" + """Test that witness stack can only allow up to MAX_SCRIPT_ELEMENT_SIZE byte pushes.""" block = self.build_next_block() @@ -2056,7 +2055,7 @@ class SegWitTest(BitcoinTestFramework): test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False) # Expect a request for parent (tx) by txid despite use of WTX peer - self.wtx_node.wait_for_getdata([tx.sha256], 60) + self.wtx_node.wait_for_getdata([tx.sha256], timeout=60) with p2p_lock: lgd = self.wtx_node.lastgetdata[:] assert_equal(lgd, [CInv(MSG_WITNESS_TX, tx.sha256)]) diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py index 508d6fe403..27a3aa8fb9 100755 --- a/test/functional/p2p_sendheaders.py +++ b/test/functional/p2p_sendheaders.py @@ -311,6 +311,7 @@ class SendHeadersTest(BitcoinTestFramework): # Now that we've synced headers, headers announcements should work tip = self.mine_blocks(1) + expected_hash = tip inv_node.check_last_inv_announcement(inv=[tip]) test_node.check_last_headers_announcement(headers=[tip]) @@ -334,7 +335,10 @@ class SendHeadersTest(BitcoinTestFramework): if j == 0: # Announce via inv test_node.send_block_inv(tip) - test_node.wait_for_getheaders() + if i == 0: + test_node.wait_for_getheaders(block_hash=expected_hash) + else: + assert "getheaders" not in test_node.last_message # Should have received a getheaders now test_node.send_header_for_blocks(blocks) # Test that duplicate inv's won't result in duplicate @@ -521,6 +525,7 @@ class SendHeadersTest(BitcoinTestFramework): self.log.info("Part 5: Testing handling of unconnecting headers") # First we test that receipt of an unconnecting header doesn't prevent # chain sync. + expected_hash = tip for i in range(10): self.log.debug("Part 5.{}: starting...".format(i)) test_node.last_message.pop("getdata", None) @@ -533,15 +538,14 @@ class SendHeadersTest(BitcoinTestFramework): block_time += 1 height += 1 # Send the header of the second block -> this won't connect. - with p2p_lock: - test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[1]]) - test_node.wait_for_getheaders() + test_node.wait_for_getheaders(block_hash=expected_hash) test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) + expected_hash = blocks[1].sha256 blocks = [] # Now we test that if we repeatedly don't send connecting headers, we @@ -556,13 +560,12 @@ class SendHeadersTest(BitcoinTestFramework): for i in range(1, MAX_NUM_UNCONNECTING_HEADERS_MSGS): # Send a header that doesn't connect, check that we get a getheaders. - with p2p_lock: - test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i]]) - test_node.wait_for_getheaders() + test_node.wait_for_getheaders(block_hash=expected_hash) # Next header will connect, should re-set our count: test_node.send_header_for_blocks([blocks[0]]) + expected_hash = blocks[0].sha256 # Remove the first two entries (blocks[1] would connect): blocks = blocks[2:] @@ -571,10 +574,8 @@ class SendHeadersTest(BitcoinTestFramework): # before we get disconnected. Should be 5*MAX_NUM_UNCONNECTING_HEADERS_MSGS for i in range(5 * MAX_NUM_UNCONNECTING_HEADERS_MSGS - 1): # Send a header that doesn't connect, check that we get a getheaders. 
- with p2p_lock: - test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i % len(blocks)]]) - test_node.wait_for_getheaders() + test_node.wait_for_getheaders(block_hash=expected_hash) # Eventually this stops working. test_node.send_header_for_blocks([blocks[-1]]) diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 0e463c5072..7a50f1e605 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -5,6 +5,7 @@ """ Test transaction download behavior """ +from decimal import Decimal import time from test_framework.messages import ( @@ -14,6 +15,7 @@ from test_framework.messages import ( MSG_WTX, msg_inv, msg_notfound, + msg_tx, ) from test_framework.p2p import ( P2PInterface, @@ -22,6 +24,7 @@ from test_framework.p2p import ( from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, + fill_mempool, ) from test_framework.wallet import MiniWallet @@ -54,6 +57,7 @@ MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RE class TxDownloadTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 + self.extra_args= [['-datacarriersize=100000', '-maxmempool=5', '-persistmempool=0']] * self.num_nodes def test_tx_requests(self): self.log.info("Test that we request transactions from all our peers, eventually") @@ -241,6 +245,29 @@ class TxDownloadTest(BitcoinTestFramework): self.log.info('Check that spurious notfound is ignored') self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)])) + def test_rejects_filter_reset(self): + self.log.info('Check that rejected tx is not requested again') + node = self.nodes[0] + fill_mempool(self, node, self.wallet) + self.wallet.rescan_utxos() + mempoolminfee = node.getmempoolinfo()['mempoolminfee'] + peer = node.add_p2p_connection(TestP2PConn()) + low_fee_tx = self.wallet.create_self_transfer(fee_rate=Decimal("0.9")*mempoolminfee) + assert_equal(node.testmempoolaccept([low_fee_tx['hex']])[0]["reject-reason"], "mempool min fee not met") + peer.send_and_ping(msg_tx(low_fee_tx['tx'])) + peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(low_fee_tx['wtxid'], 16))])) + node.setmocktime(int(time.time())) + node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT) + peer.sync_with_ping() + assert_equal(peer.tx_getdata_count, 0) + + self.log.info('Check that rejection filter is cleared after new block comes in') + self.generate(self.wallet, 1, sync_fun=self.no_op) + peer.sync_with_ping() + peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(low_fee_tx['wtxid'], 16))])) + node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT) + peer.wait_for_getdata([int(low_fee_tx['wtxid'], 16)]) + def run_test(self): self.wallet = MiniWallet(self.nodes[0]) @@ -257,16 +284,22 @@ class TxDownloadTest(BitcoinTestFramework): # Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when # the next trickle relay event happens. 
- for test in [self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]: + for test, with_inbounds in [ + (self.test_in_flight_max, True), + (self.test_inv_block, True), + (self.test_tx_requests, True), + (self.test_rejects_filter_reset, False), + ]: self.stop_nodes() self.start_nodes() self.connect_nodes(1, 0) # Setup the p2p connections self.peers = [] - for node in self.nodes: - for _ in range(NUM_INBOUND): - self.peers.append(node.add_p2p_connection(TestP2PConn())) - self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND)) + if with_inbounds: + for node in self.nodes: + for _ in range(NUM_INBOUND): + self.peers.append(node.add_p2p_connection(TestP2PConn())) + self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND)) test() diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index 029e368166..37c42f2533 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -18,6 +18,7 @@ from test_framework.util import ( assert_equal, assert_fee_amount, assert_raises_rpc_error, + fill_mempool, ) from test_framework.wallet import ( DEFAULT_FEE, @@ -82,7 +83,8 @@ class RPCPackagesTest(BitcoinTestFramework): self.test_conflicting() self.test_rbf() self.test_submitpackage() - self.test_maxfeerate_maxburn_submitpackage() + self.test_maxfeerate_submitpackage() + self.test_maxburn_submitpackage() def test_independent(self, coin): self.log.info("Test multiple independent transactions in a package") @@ -358,7 +360,7 @@ class RPCPackagesTest(BitcoinTestFramework): assert_equal(res["tx-results"][sec_wtxid]["error"], "version") peer.wait_for_broadcast([first_wtxid]) - def test_maxfeerate_maxburn_submitpackage(self): + def test_maxfeerate_submitpackage(self): node = self.nodes[0] # clear mempool deterministic_address = node.get_deterministic_priv_key().address @@ -369,23 +371,78 @@ class RPCPackagesTest(BitcoinTestFramework): minrate_btc_kvb = min([chained_txn["fee"] / chained_txn["tx"].get_vsize() * 1000 for chained_txn in chained_txns]) chain_hex = [t["hex"] for t in chained_txns] pkg_result = node.submitpackage(chain_hex, maxfeerate=minrate_btc_kvb - Decimal("0.00000001")) + + # First tx failed in single transaction evaluation, so package message is generic + assert_equal(pkg_result["package_msg"], "transaction failed") assert_equal(pkg_result["tx-results"][chained_txns[0]["wtxid"]]["error"], "max feerate exceeded") assert_equal(pkg_result["tx-results"][chained_txns[1]["wtxid"]]["error"], "bad-txns-inputs-missingorspent") assert_equal(node.getrawmempool(), []) + # Make chain of two transactions where parent doesn't make minfee threshold + # but child is too high fee + # Lower mempool limit to make it easier to fill_mempool + self.restart_node(0, extra_args=[ + "-datacarriersize=100000", + "-maxmempool=5", + "-persistmempool=0", + ]) + self.wallet.rescan_utxos() + + fill_mempool(self, node, self.wallet) + + minrelay = node.getmempoolinfo()["minrelaytxfee"] + parent = self.wallet.create_self_transfer( + fee_rate=minrelay, + confirmed_only=True, + ) + + child = self.wallet.create_self_transfer( + fee_rate=DEFAULT_FEE, + utxo_to_spend=parent["new_utxo"], + ) + + pkg_result = node.submitpackage([parent["hex"], child["hex"]], maxfeerate=DEFAULT_FEE - Decimal("0.00000001")) + + # Child is connected even though parent is invalid and still reports fee exceeded + # this implies sub-package evaluation of both entries together. 
+ assert_equal(pkg_result["package_msg"], "transaction failed") + assert "mempool min fee not met" in pkg_result["tx-results"][parent["wtxid"]]["error"] + assert_equal(pkg_result["tx-results"][child["wtxid"]]["error"], "max feerate exceeded") + assert parent["txid"] not in node.getrawmempool() + assert child["txid"] not in node.getrawmempool() + + # Reset maxmempool, datacarriersize, reset dynamic mempool minimum feerate, and empty mempool. + self.restart_node(0) + self.wallet.rescan_utxos() + + assert_equal(node.getrawmempool(), []) + + def test_maxburn_submitpackage(self): + node = self.nodes[0] + + assert_equal(node.getrawmempool(), []) + self.log.info("Submitpackage maxburnamount arg testing") - tx = tx_from_hex(chain_hex[1]) + chained_txns_burn = self.wallet.create_self_transfer_chain( + chain_length=2, + utxo_to_spend=self.wallet.get_utxo(confirmed_only=True), + ) + chained_burn_hex = [t["hex"] for t in chained_txns_burn] + + tx = tx_from_hex(chained_burn_hex[1]) tx.vout[-1].scriptPubKey = b'a' * 10001 # scriptPubKey bigger than 10k IsUnspendable - chain_hex = [chain_hex[0], tx.serialize().hex()] + chained_burn_hex = [chained_burn_hex[0], tx.serialize().hex()] # burn test is run before any package evaluation; nothing makes it in and we get broader exception - assert_raises_rpc_error(-25, "Unspendable output exceeds maximum configured by user", node.submitpackage, chain_hex, 0, chained_txns[1]["new_utxo"]["value"] - Decimal("0.00000001")) + assert_raises_rpc_error(-25, "Unspendable output exceeds maximum configured by user", node.submitpackage, chained_burn_hex, 0, chained_txns_burn[1]["new_utxo"]["value"] - Decimal("0.00000001")) assert_equal(node.getrawmempool(), []) + minrate_btc_kvb_burn = min([chained_txn_burn["fee"] / chained_txn_burn["tx"].get_vsize() * 1000 for chained_txn_burn in chained_txns_burn]) + # Relax the restrictions for both and send it; parent gets through as own subpackage - pkg_result = node.submitpackage(chain_hex, maxfeerate=minrate_btc_kvb, maxburnamount=chained_txns[1]["new_utxo"]["value"]) - assert "error" not in pkg_result["tx-results"][chained_txns[0]["wtxid"]] + pkg_result = node.submitpackage(chained_burn_hex, maxfeerate=minrate_btc_kvb_burn, maxburnamount=chained_txns_burn[1]["new_utxo"]["value"]) + assert "error" not in pkg_result["tx-results"][chained_txns_burn[0]["wtxid"]] assert_equal(pkg_result["tx-results"][tx.getwtxid()]["error"], "scriptpubkey") - assert_equal(node.getrawmempool(), [chained_txns[0]["txid"]]) + assert_equal(node.getrawmempool(), [chained_txns_burn[0]["txid"]]) if __name__ == "__main__": RPCPackagesTest().main() diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 016aa3ba11..6ee7e56886 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -16,8 +16,6 @@ from test_framework.messages import ( CTxIn, CTxOut, MAX_BIP125_RBF_SEQUENCE, - WITNESS_SCALE_FACTOR, - ser_compact_size, ) from test_framework.psbt import ( PSBT, @@ -42,6 +40,7 @@ from test_framework.util import ( find_vout_for_address, ) from test_framework.wallet_util import ( + calculate_input_weight, generate_keypair, get_generate_key, ) @@ -752,17 +751,9 @@ class PSBTTest(BitcoinTestFramework): input_idx = i break psbt_in = dec["inputs"][input_idx] - # Calculate the input weight - # (prevout + sequence + length of scriptSig + scriptsig) * WITNESS_SCALE_FACTOR + len of num scriptWitness stack items + (length of stack item + stack item) * N stack items - # Note that occasionally this weight estimate may be slightly larger or 
smaller than the real weight - # as sometimes ECDSA signatures are one byte shorter than expected with a probability of 1/128 - len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0 - len_scriptsig += len(ser_compact_size(len_scriptsig)) - len_scriptwitness = (sum([(len(x) // 2) + len(ser_compact_size(len(x) // 2)) for x in psbt_in["final_scriptwitness"]]) + len(ser_compact_size(len(psbt_in["final_scriptwitness"])))) if "final_scriptwitness" in psbt_in else 0 - len_prevout_txid = 32 - len_prevout_index = 4 - len_sequence = 4 - input_weight = ((len_prevout_txid + len_prevout_index + len_sequence + len_scriptsig) * WITNESS_SCALE_FACTOR) + len_scriptwitness + scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else "" + witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None + input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex) low_input_weight = input_weight // 2 high_input_weight = input_weight * 2 @@ -886,7 +877,7 @@ class PSBTTest(BitcoinTestFramework): assert_equal(comb_psbt, psbt) self.log.info("Test walletprocesspsbt raises if an invalid sighashtype is passed") - assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[0].walletprocesspsbt, psbt, sighashtype="all") + assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].walletprocesspsbt, psbt, sighashtype="all") self.log.info("Test decoding PSBT with per-input preimage types") # note that the decodepsbt RPC doesn't check whether preimages and hashes match @@ -992,7 +983,7 @@ class PSBTTest(BitcoinTestFramework): self.nodes[2].sendrawtransaction(processed_psbt['hex']) self.log.info("Test descriptorprocesspsbt raises if an invalid sighashtype is passed") - assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[2].descriptorprocesspsbt, psbt, [descriptor], sighashtype="all") + assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[2].descriptorprocesspsbt, psbt, [descriptor], sighashtype="all") if __name__ == '__main__': diff --git a/test/functional/rpc_setban.py b/test/functional/rpc_setban.py index bc426d7371..ba86b278bd 100755 --- a/test/functional/rpc_setban.py +++ b/test/functional/rpc_setban.py @@ -64,20 +64,10 @@ class SetBanTests(BitcoinTestFramework): assert self.is_banned(node, tor_addr) assert not self.is_banned(node, ip_addr) - self.log.info("Test the ban list is preserved through restart") - - self.restart_node(1) - assert self.is_banned(node, tor_addr) - assert not self.is_banned(node, ip_addr) - node.setban(tor_addr, "remove") assert not self.is_banned(self.nodes[1], tor_addr) assert not self.is_banned(node, ip_addr) - self.restart_node(1) - assert not self.is_banned(node, tor_addr) - assert not self.is_banned(node, ip_addr) - self.log.info("Test -bantime") self.restart_node(1, ["-bantime=1234"]) self.nodes[1].setban("127.0.0.1", "add") diff --git a/test/functional/rpc_signrawtransactionwithkey.py b/test/functional/rpc_signrawtransactionwithkey.py index 0913f5057e..268584331e 100755 --- a/test/functional/rpc_signrawtransactionwithkey.py +++ b/test/functional/rpc_signrawtransactionwithkey.py @@ -124,7 +124,7 @@ class SignRawTransactionWithKeyTest(BitcoinTestFramework): self.log.info("Test signing transaction with invalid sighashtype") tx = self.nodes[0].createrawtransaction(INPUTS, OUTPUTS) privkeys = [self.nodes[0].get_deterministic_priv_key().key] - assert_raises_rpc_error(-8, "all is 
not a valid sighash parameter.", self.nodes[0].signrawtransactionwithkey, tx, privkeys, sighashtype="all") + assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].signrawtransactionwithkey, tx, privkeys, sighashtype="all") def run_test(self): self.successful_signing_test() diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py index cb99e483ec..f8df59d02a 100755 --- a/test/functional/rpc_uptime.py +++ b/test/functional/rpc_uptime.py @@ -23,7 +23,7 @@ class UptimeTest(BitcoinTestFramework): self._test_uptime() def _test_negative_time(self): - assert_raises_rpc_error(-8, "Mocktime cannot be negative: -1.", self.nodes[0].setmocktime, -1) + assert_raises_rpc_error(-8, "Mocktime must be in the range [0, 9223372036], not -1.", self.nodes[0].setmocktime, -1) def _test_uptime(self): wait_time = 10 diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index cfd923bab3..f0dc866f69 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -28,6 +28,7 @@ from .messages import ( ser_uint256, tx_from_hex, uint256_from_str, + WITNESS_SCALE_FACTOR, ) from .script import ( CScript, @@ -45,7 +46,6 @@ from .script_util import ( ) from .util import assert_equal -WITNESS_SCALE_FACTOR = 4 MAX_BLOCK_SIGOPS = 20000 MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index dc04696114..00bd1e4017 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -585,22 +585,22 @@ class P2PInterface(P2PConnection): wait_until_helper_internal(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor) - def wait_for_connect(self, timeout=60): + def wait_for_connect(self, *, timeout=60): test_function = lambda: self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) - def wait_for_disconnect(self, timeout=60): + def wait_for_disconnect(self, *, timeout=60): test_function = lambda: not self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) - def wait_for_reconnect(self, timeout=60): + def wait_for_reconnect(self, *, timeout=60): def test_function(): return self.is_connected and self.last_message.get('version') and not self.supports_v2_p2p self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods - def wait_for_tx(self, txid, timeout=60): + def wait_for_tx(self, txid, *, timeout=60): def test_function(): if not self.last_message.get('tx'): return False @@ -608,13 +608,13 @@ class P2PInterface(P2PConnection): self.wait_until(test_function, timeout=timeout) - def wait_for_block(self, blockhash, timeout=60): + def wait_for_block(self, blockhash, *, timeout=60): def test_function(): return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash self.wait_until(test_function, timeout=timeout) - def wait_for_header(self, blockhash, timeout=60): + def wait_for_header(self, blockhash, *, timeout=60): def test_function(): last_headers = self.last_message.get('headers') if not last_headers: @@ -623,7 +623,7 @@ class P2PInterface(P2PConnection): self.wait_until(test_function, timeout=timeout) - def wait_for_merkleblock(self, blockhash, timeout=60): + def wait_for_merkleblock(self, blockhash, *, timeout=60): def test_function(): last_filtered_block = 
self.last_message.get('merkleblock') if not last_filtered_block: @@ -632,7 +632,7 @@ class P2PInterface(P2PConnection): self.wait_until(test_function, timeout=timeout) - def wait_for_getdata(self, hash_list, timeout=60): + def wait_for_getdata(self, hash_list, *, timeout=60): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" @@ -644,19 +644,21 @@ class P2PInterface(P2PConnection): self.wait_until(test_function, timeout=timeout) - def wait_for_getheaders(self, timeout=60): - """Waits for a getheaders message. + def wait_for_getheaders(self, block_hash=None, *, timeout=60): + """Waits for a getheaders message containing a specific block hash. - Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"] - value must be explicitly cleared before calling this method, or this will return - immediately with success. TODO: change this method to take a hash value and only - return true if the correct block header has been requested.""" + If no block hash is provided, checks whether any getheaders message has been received by the node.""" def test_function(): - return self.last_message.get("getheaders") + last_getheaders = self.last_message.pop("getheaders", None) + if block_hash is None: + return last_getheaders + if last_getheaders is None: + return False + return block_hash == last_getheaders.locator.vHave[0] self.wait_until(test_function, timeout=timeout) - def wait_for_inv(self, expected_inv, timeout=60): + def wait_for_inv(self, expected_inv, *, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError("wait_for_inv() will only verify the first inv object") @@ -668,7 +670,7 @@ class P2PInterface(P2PConnection): self.wait_until(test_function, timeout=timeout) - def wait_for_verack(self, timeout=60): + def wait_for_verack(self, *, timeout=60): def test_function(): return "verack" in self.last_message @@ -681,11 +683,11 @@ class P2PInterface(P2PConnection): self.send_message(self.on_connection_send_msg) self.on_connection_send_msg = None # Never used again - def send_and_ping(self, message, timeout=60): + def send_and_ping(self, message, *, timeout=60): self.send_message(message) self.sync_with_ping(timeout=timeout) - def sync_with_ping(self, timeout=60): + def sync_with_ping(self, *, timeout=60): """Ensure ProcessMessages and SendMessages is called on this connection""" # Sending two pings back-to-back, requires that the node calls # `ProcessMessage` twice, and thus ensures `SendMessages` must have @@ -726,7 +728,7 @@ class NetworkThread(threading.Thread): """Start the network thread.""" self.network_event_loop.run_forever() - def close(self, timeout=10): + def close(self, *, timeout=10): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) wait_until_helper_internal(lambda: not self.network_event_loop.is_running(), timeout=timeout) @@ -933,7 +935,7 @@ class P2PTxInvStore(P2PInterface): with p2p_lock: return list(self.tx_invs_received.keys()) - def wait_for_broadcast(self, txns, timeout=60): + def wait_for_broadcast(self, txns, *, timeout=60): """Waits for the txns (list of txids) to complete initial broadcast. The mempool should mark unbroadcast=False for these transactions. 
""" diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index 3275517888..7b19d31e17 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -483,7 +483,7 @@ class CScript(bytes): i = 0 while i < len(self): sop_idx = i - opcode = self[i] + opcode = CScriptOp(self[i]) i += 1 if opcode > OP_PUSHDATA4: @@ -590,7 +590,7 @@ class CScript(bytes): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): - n += opcode.decode_op_n() + n += lastOpcode.decode_op_n() else: n += 20 lastOpcode = opcode @@ -782,6 +782,20 @@ class TestFrameworkScript(unittest.TestCase): for value in values: self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value) + def test_legacy_sigopcount(self): + # test repeated single sig ops + for n_ops in range(1, 100, 10): + for singlesig_op in (OP_CHECKSIG, OP_CHECKSIGVERIFY): + singlesigs_script = CScript([singlesig_op]*n_ops) + self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=False), n_ops) + self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=True), n_ops) + # test multisig op (including accurate counting, i.e. BIP16) + for n in range(1, 16+1): + for multisig_op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): + multisig_script = CScript([CScriptOp.encode_op_n(n), multisig_op]) + self.assertEqual(multisig_script.GetSigOpCount(fAccurate=False), 20) + self.assertEqual(multisig_script.GetSigOpCount(fAccurate=True), n) + def BIP341_sha_prevouts(txTo): return sha256(b"".join(i.prevout.serialize() for i in txTo.vin)) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index c3884270da..a2f767cc98 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -164,7 +164,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): help="Don't stop bitcoinds after the test execution") parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs (default: %(default)s)") - parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs") + parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs (must not exist)") parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index c5b69a3954..0de09b6440 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -496,6 +496,65 @@ def check_node_connections(*, node, num_in, num_out): assert_equal(info["connections_in"], num_in) assert_equal(info["connections_out"], num_out) +def fill_mempool(test_framework, node, miniwallet): + """Fill mempool until eviction. + + Allows for simpler testing of scenarios with floating mempoolminfee > minrelay + Requires -datacarriersize=100000 and + -maxmempool=5. 
+ It will not ensure mempools become synced as it + is based on a single node and assumes -minrelaytxfee + is 1 sat/vbyte. + To avoid unintentional tx dependencies, it is recommended to use separate miniwallets for + mempool filling vs transactions in tests. + """ + test_framework.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises") + txouts = gen_return_txouts() + relayfee = node.getnetworkinfo()['relayfee'] + + assert_equal(relayfee, Decimal('0.00001000')) + + tx_batch_size = 1 + num_of_batches = 75 + # Generate UTXOs to flood the mempool + # 1 to create a tx initially that will be evicted from the mempool later + # 75 transactions each with a fee rate higher than the previous one + test_framework.generate(miniwallet, 1 + (num_of_batches * tx_batch_size)) + + # Mine COINBASE_MATURITY - 1 blocks so that the UTXOs are allowed to be spent + test_framework.generate(node, 100 - 1) + + # Get all UTXOs up front to ensure none of the transactions spend from each other, as that may + # change their effective feerate and thus the order in which they are selected for eviction. + confirmed_utxos = [miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)] + assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1) + + test_framework.log.debug("Create a mempool tx that will be evicted") + tx_to_be_evicted_id = miniwallet.send_self_transfer(from_node=node, utxo_to_spend=confirmed_utxos[0], fee_rate=relayfee)["txid"] + del confirmed_utxos[0] + + # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool + # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB) + # by 130 should result in a fee that corresponds to 2x of that fee rate + base_fee = relayfee * 130 + + test_framework.log.debug("Fill up the mempool with txs with higher fee rate") + with node.assert_debug_log(["rolling minimum fee bumped"]): + for batch_of_txid in range(num_of_batches): + fee = (batch_of_txid + 1) * base_fee + utxos = confirmed_utxos[:tx_batch_size] + create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts, utxos) + del confirmed_utxos[:tx_batch_size] + + test_framework.log.debug("The tx should be evicted by now") + # The number of transactions created should be greater than the ones present in the mempool + assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool())) + # Initial tx created should not be present in the mempool anymore as it had a lower fee rate + assert tx_to_be_evicted_id not in node.getrawmempool() + + test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee") + assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) + assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) # Transaction/Block functions ############################# diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py index 44811918bf..2168e607b2 100755 --- a/test/functional/test_framework/wallet_util.py +++ b/test/functional/test_framework/wallet_util.py @@ -4,6 +4,7 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Useful util functions for testing the wallet""" from collections import namedtuple +import unittest from test_framework.address import ( byte_to_base58, @@ -15,6 +16,11 @@ from test_framework.address import ( script_to_p2wsh, ) from test_framework.key import ECKey +from test_framework.messages import ( + CTxIn, + CTxInWitness, + WITNESS_SCALE_FACTOR, +) from test_framework.script_util import ( key_to_p2pkh_script, key_to_p2wpkh_script, @@ -123,6 +129,19 @@ def generate_keypair(compressed=True, wif=False): privkey = bytes_to_wif(privkey.get_bytes(), compressed) return privkey, pubkey +def calculate_input_weight(scriptsig_hex, witness_stack_hex=None): + """Given a scriptSig and a list of witness stack items for an input in hex format, + calculate the total input weight. If the input has no witness data, + `witness_stack_hex` can be set to None.""" + tx_in = CTxIn(scriptSig=bytes.fromhex(scriptsig_hex)) + witness_size = 0 + if witness_stack_hex is not None: + tx_inwit = CTxInWitness() + for witness_item_hex in witness_stack_hex: + tx_inwit.scriptWitness.stack.append(bytes.fromhex(witness_item_hex)) + witness_size = len(tx_inwit.serialize()) + return len(tx_in.serialize()) * WITNESS_SCALE_FACTOR + witness_size + class WalletUnlock(): """ A context manager for unlocking a wallet with a passphrase and automatically locking it afterward. @@ -141,3 +160,42 @@ class WalletUnlock(): def __exit__(self, *args): _ = args self.wallet.walletlock() + + +class TestFrameworkWalletUtil(unittest.TestCase): + def test_calculate_input_weight(self): + SKELETON_BYTES = 32 + 4 + 4 # prevout-txid, prevout-index, sequence + SMALL_LEN_BYTES = 1 # bytes needed for encoding scriptSig / witness item lengths < 253 + LARGE_LEN_BYTES = 3 # bytes needed for encoding scriptSig / witness item lengths >= 253 + + # empty scriptSig, no witness + self.assertEqual(calculate_input_weight(""), + (SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + self.assertEqual(calculate_input_weight("", None), + (SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + # small scriptSig, no witness + scriptSig_small = "00"*252 + self.assertEqual(calculate_input_weight(scriptSig_small, None), + (SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR) + # small scriptSig, empty witness stack + self.assertEqual(calculate_input_weight(scriptSig_small, []), + (SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR + SMALL_LEN_BYTES) + # large scriptSig, no witness + scriptSig_large = "00"*253 + self.assertEqual(calculate_input_weight(scriptSig_large, None), + (SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR) + # large scriptSig, empty witness stack + self.assertEqual(calculate_input_weight(scriptSig_large, []), + (SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR + SMALL_LEN_BYTES) + # empty scriptSig, 5 small witness stack items + self.assertEqual(calculate_input_weight("", ["00", "11", "22", "33", "44"]), + ((SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 5 * SMALL_LEN_BYTES + 5) + # empty scriptSig, 253 small witness stack items + self.assertEqual(calculate_input_weight("", ["00"]*253), + ((SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + LARGE_LEN_BYTES + 253 * SMALL_LEN_BYTES + 253) + # small scriptSig, 3 large witness stack items + self.assertEqual(calculate_input_weight(scriptSig_small, ["00"*253]*3), + ((SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 3 * LARGE_LEN_BYTES + 3*253) + # large scriptSig, 3 large 
witness stack items + self.assertEqual(calculate_input_weight(scriptSig_large, ["00"*253]*3), + ((SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 3 * LARGE_LEN_BYTES + 3*253) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 3f6e47d410..2b0b24ec05 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -26,7 +26,6 @@ import sys import tempfile import re import logging -import unittest os.environ["REQUIRE_WALLET_TYPE_SET"] = "1" @@ -70,22 +69,7 @@ if platform.system() != 'Windows' or sys.getwindowsversion() >= (10, 0, 14393): TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 -# List of framework modules containing unit tests. Should be kept in sync with -# the output of `git grep unittest.TestCase ./test/functional/test_framework` -TEST_FRAMEWORK_MODULES = [ - "address", - "crypto.bip324_cipher", - "blocktools", - "crypto.chacha20", - "crypto.ellswift", - "key", - "messages", - "crypto.muhash", - "crypto.poly1305", - "crypto.ripemd160", - "script", - "segwit_addr", -] +TEST_FRAMEWORK_UNIT_TESTS = 'feature_framework_unit_tests.py' EXTENDED_SCRIPTS = [ # These tests are not run by default. @@ -199,6 +183,8 @@ BASE_SCRIPTS = [ 'wallet_txn_clone.py --segwit', 'rpc_getchaintips.py', 'rpc_misc.py', + 'p2p_1p1c_network.py', + 'p2p_opportunistic_1p1c.py', 'interface_rest.py', 'mempool_spend_coinbase.py', 'wallet_avoid_mixing_output_types.py --descriptors', @@ -254,6 +240,7 @@ BASE_SCRIPTS = [ 'wallet_keypool.py --descriptors', 'wallet_descriptor.py --descriptors', 'p2p_nobloomfilter_messages.py', + TEST_FRAMEWORK_UNIT_TESTS, 'p2p_filter.py', 'rpc_setban.py --v1transport', 'rpc_setban.py --v2transport', @@ -439,7 +426,6 @@ def main(): parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs") parser.add_argument('--failfast', '-F', action='store_true', help='stop execution after the first test failure') parser.add_argument('--filter', help='filter scripts to run by regular expression') - parser.add_argument('--skipunit', '-u', action='store_true', help='skip unit tests for the test framework') args, unknown_args = parser.parse_known_args() @@ -551,10 +537,9 @@ def main(): combined_logs_len=args.combinedlogslen, failfast=args.failfast, use_term_control=args.ansi, - skipunit=args.skipunit, ) -def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control, skipunit=False): +def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control): args = args or [] # Warn if bitcoind is already running @@ -577,15 +562,6 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage= # a hard link or a copy on any platform. See https://github.com/bitcoin/bitcoin/pull/27561. 
sys.path.append(tests_dir) - if not skipunit: - print("Running Unit Tests for Test Framework Modules") - test_framework_tests = unittest.TestSuite() - for module in TEST_FRAMEWORK_MODULES: - test_framework_tests.addTest(unittest.TestLoader().loadTestsFromName("test_framework.{}".format(module))) - result = unittest.TextTestRunner(verbosity=1, failfast=True).run(test_framework_tests) - if not result.wasSuccessful(): - sys.exit("Early exiting after failure in TestFramework unit tests") - flags = ['--cachedir={}'.format(cache_dir)] + args if enable_coverage: diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py index 3b407c285d..26477131cf 100755 --- a/test/functional/wallet_groups.py +++ b/test/functional/wallet_groups.py @@ -42,11 +42,6 @@ class WalletGroupTest(BitcoinTestFramework): def run_test(self): self.log.info("Setting up") - # To take full use of immediate tx relay, all nodes need to be reachable - # via inbound peers, i.e. connect first to last to close the circle - # (the default test network topology looks like this: - # node0 <-- node1 <-- node2 <-- node3 <-- node4 <-- node5) - self.connect_nodes(0, self.num_nodes - 1) # Mine some coins self.generate(self.nodes[0], COINBASE_MATURITY + 1) diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py index 420bdffc49..f9d05a2fe4 100755 --- a/test/functional/wallet_importdescriptors.py +++ b/test/functional/wallet_importdescriptors.py @@ -688,7 +688,7 @@ class ImportDescriptorsTest(BitcoinTestFramework): encrypted_wallet.walletpassphrase("passphrase", 99999) with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread: - with self.nodes[0].assert_debug_log(expected_msgs=["Rescan started from block 0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206... (slow variant inspecting all blocks)"], timeout=5): + with self.nodes[0].assert_debug_log(expected_msgs=["Rescan started from block 0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206... 
(slow variant inspecting all blocks)"], timeout=10): importing = thread.submit(encrypted_wallet.importdescriptors, requests=[descriptor]) # Set the passphrase timeout to 1 to test that the wallet remains unlocked during the rescan diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py index e4ca341b49..0a0a8dba0d 100755 --- a/test/functional/wallet_send.py +++ b/test/functional/wallet_send.py @@ -9,10 +9,6 @@ from itertools import product from test_framework.authproxy import JSONRPCException from test_framework.descriptors import descsum_create -from test_framework.messages import ( - ser_compact_size, - WITNESS_SCALE_FACTOR, -) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -21,7 +17,10 @@ from test_framework.util import ( assert_raises_rpc_error, count_bytes, ) -from test_framework.wallet_util import generate_keypair +from test_framework.wallet_util import ( + calculate_input_weight, + generate_keypair, +) class WalletSendTest(BitcoinTestFramework): @@ -543,17 +542,9 @@ class WalletSendTest(BitcoinTestFramework): input_idx = i break psbt_in = dec["inputs"][input_idx] - # Calculate the input weight - # (prevout + sequence + length of scriptSig + scriptsig) * WITNESS_SCALE_FACTOR + len of num scriptWitness stack items + (length of stack item + stack item) * N stack items - # Note that occasionally this weight estimate may be slightly larger or smaller than the real weight - # as sometimes ECDSA signatures are one byte shorter than expected with a probability of 1/128 - len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0 - len_scriptsig += len(ser_compact_size(len_scriptsig)) - len_scriptwitness = (sum([(len(x) // 2) + len(ser_compact_size(len(x) // 2)) for x in psbt_in["final_scriptwitness"]]) + len(ser_compact_size(len(psbt_in["final_scriptwitness"])))) if "final_scriptwitness" in psbt_in else 0 - len_prevout_txid = 32 - len_prevout_index = 4 - len_sequence = 4 - input_weight = ((len_prevout_txid + len_prevout_index + len_sequence + len_scriptsig) * WITNESS_SCALE_FACTOR) + len_scriptwitness + scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else "" + witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None + input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex) # Input weight error conditions assert_raises_rpc_error( diff --git a/test/functional/wallet_signer.py b/test/functional/wallet_signer.py index 32a1887153..abfc3c1ba1 100755 --- a/test/functional/wallet_signer.py +++ b/test/functional/wallet_signer.py @@ -130,8 +130,9 @@ class WalletSignerTest(BitcoinTestFramework): assert_equal(address_info['hdkeypath'], "m/86h/1h/0h/0/0") self.log.info('Test walletdisplayaddress') - result = hww.walletdisplayaddress(address1) - assert_equal(result, {"address": address1}) + for address in [address1, address2, address3]: + result = hww.walletdisplayaddress(address) + assert_equal(result, {"address": address}) # Handle error thrown by script self.set_mock_result(self.nodes[1], "2") @@ -140,6 +141,13 @@ class WalletSignerTest(BitcoinTestFramework): ) self.clear_mock_result(self.nodes[1]) + # Returned address MUST match: + address_fail = hww.getnewaddress(address_type="bech32") + assert_equal(address_fail, "bcrt1ql7zg7ukh3dwr25ex2zn9jse926f27xy2jz58tm") + assert_raises_rpc_error(-1, 'Signer echoed unexpected address wrong_address', + hww.walletdisplayaddress, address_fail + ) + 
self.log.info('Prepare mock PSBT') self.nodes[0].sendtoaddress(address4, 1) self.generate(self.nodes[0], 1) diff --git a/test/functional/wallet_signrawtransactionwithwallet.py b/test/functional/wallet_signrawtransactionwithwallet.py index b0517f951d..612a2542e7 100755 --- a/test/functional/wallet_signrawtransactionwithwallet.py +++ b/test/functional/wallet_signrawtransactionwithwallet.py @@ -55,7 +55,7 @@ class SignRawTransactionWithWalletTest(BitcoinTestFramework): def test_with_invalid_sighashtype(self): self.log.info("Test signrawtransactionwithwallet raises if an invalid sighashtype is passed") - assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[0].signrawtransactionwithwallet, hexstring=RAW_TX, sighashtype="all") + assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].signrawtransactionwithwallet, hexstring=RAW_TX, sighashtype="all") def script_verification_error_test(self): """Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script. diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py index b3edb0e253..a635175e7c 100755 --- a/test/fuzz/test_runner.py +++ b/test/fuzz/test_runner.py @@ -11,6 +11,7 @@ import argparse import configparser import logging import os +import platform import random import subprocess import sys @@ -18,7 +19,7 @@ import sys def get_fuzz_env(*, target, source_dir): symbolizer = os.environ.get('LLVM_SYMBOLIZER_PATH', "/usr/bin/llvm-symbolizer") - return { + fuzz_env = { 'FUZZ': target, 'UBSAN_OPTIONS': f'suppressions={source_dir}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1', @@ -27,6 +28,10 @@ def get_fuzz_env(*, target, source_dir): 'ASAN_SYMBOLIZER_PATH':symbolizer, 'MSAN_SYMBOLIZER_PATH':symbolizer, } + if platform.system() == "Windows": + # On Windows, `env` option must include valid `SystemRoot`. 
+ fuzz_env = {**fuzz_env, 'SystemRoot': os.environ.get('SystemRoot')} + return fuzz_env def main(): @@ -104,9 +109,11 @@ def main(): logging.error("Must have fuzz executable built") sys.exit(1) + fuzz_bin=os.getenv("BITCOINFUZZ", default=os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz')) + # Build list of tests test_list_all = parse_test_list( - fuzz_bin=os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz'), + fuzz_bin=fuzz_bin, source_dir=config['environment']['SRCDIR'], ) @@ -151,7 +158,7 @@ def main(): try: help_output = subprocess.run( args=[ - os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz'), + fuzz_bin, '-help=1', ], env=get_fuzz_env(target=test_list_selection[0], source_dir=config['environment']['SRCDIR']), @@ -173,7 +180,7 @@ def main(): return generate_corpus( fuzz_pool=fuzz_pool, src_dir=config['environment']['SRCDIR'], - build_dir=config["environment"]["BUILDDIR"], + fuzz_bin=fuzz_bin, corpus_dir=args.corpus_dir, targets=test_list_selection, ) @@ -184,7 +191,7 @@ def main(): corpus=args.corpus_dir, test_list=test_list_selection, src_dir=config['environment']['SRCDIR'], - build_dir=config["environment"]["BUILDDIR"], + fuzz_bin=fuzz_bin, merge_dirs=[Path(m_dir) for m_dir in args.m_dir], ) return @@ -194,7 +201,7 @@ def main(): corpus=args.corpus_dir, test_list=test_list_selection, src_dir=config['environment']['SRCDIR'], - build_dir=config["environment"]["BUILDDIR"], + fuzz_bin=fuzz_bin, using_libfuzzer=using_libfuzzer, use_valgrind=args.valgrind, empty_min_time=args.empty_min_time, @@ -237,7 +244,7 @@ def transform_rpc_target(targets, src_dir): return targets -def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets): +def generate_corpus(*, fuzz_pool, src_dir, fuzz_bin, corpus_dir, targets): """Generates new corpus. Run {targets} without input, and outputs the generated corpus to @@ -270,7 +277,7 @@ def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets): os.makedirs(target_corpus_dir, exist_ok=True) use_value_profile = int(random.random() < .3) command = [ - os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'), + fuzz_bin, "-rss_limit_mb=8000", "-max_total_time=6000", "-reload=0", @@ -283,12 +290,12 @@ def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets): future.result() -def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dirs): +def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, fuzz_bin, merge_dirs): logging.info(f"Merge the inputs from the passed dir into the corpus_dir. 
Passed dirs {merge_dirs}") jobs = [] for t in test_list: args = [ - os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'), + fuzz_bin, '-rss_limit_mb=8000', '-set_cover_merge=1', # set_cover_merge is used instead of -merge=1 to reduce the overall @@ -325,13 +332,13 @@ def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dirs future.result() -def run_once(*, fuzz_pool, corpus, test_list, src_dir, build_dir, using_libfuzzer, use_valgrind, empty_min_time): +def run_once(*, fuzz_pool, corpus, test_list, src_dir, fuzz_bin, using_libfuzzer, use_valgrind, empty_min_time): jobs = [] for t in test_list: corpus_path = corpus / t os.makedirs(corpus_path, exist_ok=True) args = [ - os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'), + fuzz_bin, ] empty_dir = not any(corpus_path.iterdir()) if using_libfuzzer: diff --git a/test/lint/commit-script-check.sh b/test/lint/commit-script-check.sh index 55c9528dea..fe845ed19e 100755 --- a/test/lint/commit-script-check.sh +++ b/test/lint/commit-script-check.sh @@ -22,6 +22,11 @@ if ! sed --help 2>&1 | grep -q 'GNU'; then exit 1; fi +if ! grep --help 2>&1 | grep -q 'GNU'; then + echo "Error: the installed grep package is not compatible. Please make sure you have GNU grep installed in your system."; + exit 1; +fi + RET=0 PREV_BRANCH=$(git name-rev --name-only HEAD) PREV_HEAD=$(git rev-parse HEAD) diff --git a/test/lint/lint-includes.py b/test/lint/lint-includes.py index 81ed4c0840..90884299d5 100755 --- a/test/lint/lint-includes.py +++ b/test/lint/lint-includes.py @@ -30,7 +30,6 @@ EXPECTED_BOOST_INCLUDES = ["boost/date_time/posix_time/posix_time.hpp", "boost/multi_index/tag.hpp", "boost/multi_index_container.hpp", "boost/operators.hpp", - "boost/process.hpp", "boost/signals2/connection.hpp", "boost/signals2/optional_last_value.hpp", "boost/signals2/signal.hpp", diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs index e22e047e4b..f054f99011 100644 --- a/test/lint/test_runner/src/main.rs +++ b/test/lint/test_runner/src/main.rs @@ -137,9 +137,9 @@ fn lint_trailing_whitespace() -> LintResult { if trailing_space { Err(r#" ^^^ -Trailing whitespace is problematic, because git may warn about it, or editors may remove it by -default, forcing developers in the future to either undo the changes manually or spend time on -review. +Trailing whitespace (including Windows line endings [CR LF]) is problematic, because git may warn +about it, or editors may remove it by default, forcing developers in the future to either undo the +changes manually or spend time on review. Thus, it is best to remove the trailing space now. |