diff options
Diffstat (limited to 'test')
35 files changed, 1516 insertions, 197 deletions
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index 19cbbcffdb..0d6c92c9fa 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -13,8 +13,6 @@ The assumeutxo value generated and used here is committed to in Interesting test cases could be loading an assumeutxo snapshot file with: -- TODO: Valid hash but invalid snapshot file (bad coin height or - bad other serialization) - TODO: Valid snapshot file, but referencing a snapshot block that turns out to be invalid, or has an invalid parent - TODO: Valid snapshot file and snapshot block, but the block is not on the @@ -98,18 +96,23 @@ class AssumeutxoTest(BitcoinTestFramework): self.log.info(" - snapshot file with alternated UTXO data") cases = [ - [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b"], # wrong outpoint hash - [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad"], # wrong outpoint index - [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8"], # wrong coin code VARINT((coinbase ? 
1 : 0) | (height << 1)) - [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5"], # another wrong coin code + # (content, offset, wrong_hash, custom_message) + [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b", None], # wrong outpoint hash + [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad", None], # wrong outpoint index + [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8", None], # wrong coin code VARINT + [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5", None], # another wrong coin code + [b"\x84\x58", 36, None, "[snapshot] bad snapshot data after deserializing 0 coins"], # wrong coin case with height 364 and coinbase 0 + [b"\xCA\xD2\x8F\x5A", 41, None, "[snapshot] bad snapshot data after deserializing 0 coins - bad tx out value"], # Amount exceeds MAX_MONEY ] - for content, offset, wrong_hash in cases: + for content, offset, wrong_hash, custom_message in cases: with open(bad_snapshot_path, "wb") as f: f.write(valid_snapshot_contents[:(32 + 8 + offset)]) f.write(content) f.write(valid_snapshot_contents[(32 + 8 + offset + len(content)):]) - expected_error(log_msg=f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}") + + log_msg = custom_message if custom_message is not None else f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}" + expected_error(log_msg=log_msg) def test_headers_not_synced(self, valid_snapshot_path): for node in self.nodes[1:]: @@ -152,6 +155,12 @@ class AssumeutxoTest(BitcoinTestFramework): self.restart_node(2, extra_args=self.extra_args[2]) + def test_invalid_file_path(self): + self.log.info("Test bitcoind should fail when file path is invalid.") + node = self.nodes[0] + path = node.datadir_path / node.chain / 
"invalid" / "path" + assert_raises_rpc_error(-8, "Couldn't open file {} for reading.".format(path), node.loadtxoutset, path) + def run_test(self): """ Bring up two (disconnected) nodes, mine some new blocks on the first, @@ -236,6 +245,7 @@ class AssumeutxoTest(BitcoinTestFramework): self.test_invalid_mempool_state(dump_output['path']) self.test_invalid_snapshot_scenarios(dump_output['path']) self.test_invalid_chainstate_scenarios() + self.test_invalid_file_path() self.log.info(f"Loading snapshot into second node from {dump_output['path']}") loaded = n1.loadtxoutset(dump_output['path']) @@ -395,6 +405,10 @@ class AssumeutxoTest(BitcoinTestFramework): assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash']) assert_equal(snapshot['validated'], False) + self.log.info("Check that loading the snapshot again will fail because there is already an active snapshot.") + with n2.assert_debug_log(expected_msgs=["[snapshot] can't activate a snapshot-based chainstate more than once"]): + assert_raises_rpc_error(-32603, "Unable to load UTXO snapshot", n2.loadtxoutset, dump_output['path']) + self.connect_nodes(0, 2) self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT) self.sync_blocks() diff --git a/test/functional/feature_framework_unit_tests.py b/test/functional/feature_framework_unit_tests.py new file mode 100755 index 0000000000..c9754e083c --- /dev/null +++ b/test/functional/feature_framework_unit_tests.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017-2024 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Framework unit tests + +Unit tests for the test framework. +""" + +import sys +import unittest + +from test_framework.test_framework import TEST_EXIT_PASSED, TEST_EXIT_FAILED + +# List of framework modules containing unit tests. 
Should be kept in sync with +# the output of `git grep unittest.TestCase ./test/functional/test_framework` +TEST_FRAMEWORK_MODULES = [ + "address", + "crypto.bip324_cipher", + "blocktools", + "crypto.chacha20", + "crypto.ellswift", + "key", + "messages", + "crypto.muhash", + "crypto.poly1305", + "crypto.ripemd160", + "script", + "segwit_addr", + "wallet_util", +] + + +def run_unit_tests(): + test_framework_tests = unittest.TestSuite() + for module in TEST_FRAMEWORK_MODULES: + test_framework_tests.addTest( + unittest.TestLoader().loadTestsFromName(f"test_framework.{module}") + ) + result = unittest.TextTestRunner(stream=sys.stdout, verbosity=1, failfast=True).run( + test_framework_tests + ) + if not result.wasSuccessful(): + sys.exit(TEST_EXIT_FAILED) + sys.exit(TEST_EXIT_PASSED) + + +if __name__ == "__main__": + run_unit_tests() + diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py index 268009b0f4..22ae0c307b 100755 --- a/test/functional/feature_init.py +++ b/test/functional/feature_init.py @@ -85,7 +85,7 @@ class InitStressTest(BitcoinTestFramework): for terminate_line in lines_to_terminate_after: self.log.info(f"Starting node and will exit after line {terminate_line}") - with node.wait_for_debug_log([terminate_line]): + with node.busy_wait_for_debug_log([terminate_line]): node.start(extra_args=['-txindex=1', '-blockfilterindex=1', '-coinstatsindex=1']) self.log.debug("Terminating node after terminate line was found") sigterm_node() diff --git a/test/functional/feature_maxtipage.py b/test/functional/feature_maxtipage.py index 51f37ef1e0..a1774a5395 100755 --- a/test/functional/feature_maxtipage.py +++ b/test/functional/feature_maxtipage.py @@ -43,6 +43,10 @@ class MaxTipAgeTest(BitcoinTestFramework): self.generate(node_miner, 1) assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], False) + # reset time to system time so we don't have a time offset with the ibd node the next + # time we connect to it, ensuring 
TimeOffsets::WarnIfOutOfSync() doesn't output to stderr + node_miner.setmocktime(0) + def run_test(self): self.log.info("Test IBD with maximum tip age of 24 hours (default).") self.test_maxtipage(DEFAULT_MAX_TIP_AGE, set_parameter=False) diff --git a/test/functional/feature_reindex_readonly.py b/test/functional/feature_reindex_readonly.py index 25cff87a3b..52c0bb26a6 100755 --- a/test/functional/feature_reindex_readonly.py +++ b/test/functional/feature_reindex_readonly.py @@ -75,7 +75,7 @@ class BlockstoreReindexTest(BitcoinTestFramework): if undo_immutable: self.log.debug("Attempt to restart and reindex the node with the unwritable block file") - with self.nodes[0].wait_for_debug_log([b"Reindexing finished"]): + with self.nodes[0].assert_debug_log(["Reindexing finished"], timeout=60): self.start_node(0, extra_args=['-reindex', '-fastprune']) assert block_count == self.nodes[0].getblockcount() undo_immutable() diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index e85541d0ec..e7d65b4539 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -10,7 +10,6 @@ from test_framework.blocktools import ( create_block, add_witness_commitment, MAX_BLOCK_SIGOPS_WEIGHT, - WITNESS_SCALE_FACTOR, ) from test_framework.messages import ( COutPoint, @@ -20,6 +19,7 @@ from test_framework.messages import ( CTxOut, SEQUENCE_FINAL, tx_from_hex, + WITNESS_SCALE_FACTOR, ) from test_framework.script import ( ANNEX_TAG, diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py index 073d3de812..2c330eb681 100755 --- a/test/functional/feature_versionbits_warning.py +++ b/test/functional/feature_versionbits_warning.py @@ -73,8 +73,8 @@ class VersionBitsWarningTest(BitcoinTestFramework): self.generatetoaddress(node, VB_PERIOD - VB_THRESHOLD + 1, node_deterministic_address) # Check that we're not getting any versionbit-related errors in get*info() - assert not 
VB_PATTERN.match(node.getmininginfo()["warnings"]) - assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"]) + assert not VB_PATTERN.match(",".join(node.getmininginfo()["warnings"])) + assert not VB_PATTERN.match(",".join(node.getnetworkinfo()["warnings"])) # Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit self.send_blocks_with_version(peer, VB_THRESHOLD, VB_UNKNOWN_VERSION) @@ -94,8 +94,8 @@ class VersionBitsWarningTest(BitcoinTestFramework): # Generating one more block will be enough to generate an error. self.generatetoaddress(node, 1, node_deterministic_address) # Check that get*info() shows the versionbits unknown rules warning - assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"] - assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"] + assert WARN_UNKNOWN_RULES_ACTIVE in ",".join(node.getmininginfo()["warnings"]) + assert WARN_UNKNOWN_RULES_ACTIVE in ",".join(node.getnetworkinfo()["warnings"]) # Check that the alert file shows the versionbits unknown rules warning self.wait_until(lambda: self.versionbits_in_alert_file()) diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py index e873e2da0b..b08ca42796 100755 --- a/test/functional/interface_rpc.py +++ b/test/functional/interface_rpc.py @@ -4,22 +4,80 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Tests some generic aspects of the RPC interface.""" +import json import os -from test_framework.authproxy import JSONRPCException +from dataclasses import dataclass from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_greater_than_or_equal from threading import Thread +from typing import Optional import subprocess -def expect_http_status(expected_http_status, expected_rpc_code, - fcn, *args): - try: - fcn(*args) - raise AssertionError(f"Expected RPC error {expected_rpc_code}, got none") - except JSONRPCException as exc: - assert_equal(exc.error["code"], expected_rpc_code) - assert_equal(exc.http_status, expected_http_status) +RPC_INVALID_ADDRESS_OR_KEY = -5 +RPC_INVALID_PARAMETER = -8 +RPC_METHOD_NOT_FOUND = -32601 +RPC_INVALID_REQUEST = -32600 +RPC_PARSE_ERROR = -32700 + + +@dataclass +class BatchOptions: + version: Optional[int] = None + notification: bool = False + request_fields: Optional[dict] = None + response_fields: Optional[dict] = None + + +def format_request(options, idx, fields): + request = {} + if options.version == 1: + request.update(version="1.1") + elif options.version == 2: + request.update(jsonrpc="2.0") + elif options.version is not None: + raise NotImplementedError(f"Unknown JSONRPC version {options.version}") + if not options.notification: + request.update(id=idx) + request.update(fields) + if options.request_fields: + request.update(options.request_fields) + return request + + +def format_response(options, idx, fields): + if options.version == 2 and options.notification: + return None + response = {} + if not options.notification: + response.update(id=idx) + if options.version == 2: + response.update(jsonrpc="2.0") + else: + response.update(result=None, error=None) + response.update(fields) + if options.response_fields: + response.update(options.response_fields) + return response + + +def send_raw_rpc(node, raw_body: bytes) -> tuple[object, int]: + return node._request("POST", 
"/", raw_body) + + +def send_json_rpc(node, body: object) -> tuple[object, int]: + raw = json.dumps(body).encode("utf-8") + return send_raw_rpc(node, raw) + + +def expect_http_rpc_status(expected_http_status, expected_rpc_error_code, node, method, params, version=1, notification=False): + req = format_request(BatchOptions(version, notification), 0, {"method": method, "params": params}) + response, status = send_json_rpc(node, req) + + if expected_rpc_error_code is not None: + assert_equal(response["error"]["code"], expected_rpc_error_code) + + assert_equal(status, expected_http_status) def test_work_queue_getblock(node, got_exceeded_error): @@ -48,37 +106,126 @@ class RPCInterfaceTest(BitcoinTestFramework): assert_greater_than_or_equal(command['duration'], 0) assert_equal(info['logpath'], os.path.join(self.nodes[0].chain_path, 'debug.log')) - def test_batch_request(self): - self.log.info("Testing basic JSON-RPC batch request...") - - results = self.nodes[0].batch([ + def test_batch_request(self, call_options): + calls = [ # A basic request that will work fine. - {"method": "getblockcount", "id": 1}, + {"method": "getblockcount"}, # Request that will fail. The whole batch request should still # work fine. - {"method": "invalidmethod", "id": 2}, + {"method": "invalidmethod"}, # Another call that should succeed. 
- {"method": "getblockhash", "id": 3, "params": [0]}, - ]) - - result_by_id = {} - for res in results: - result_by_id[res["id"]] = res - - assert_equal(result_by_id[1]['error'], None) - assert_equal(result_by_id[1]['result'], 0) - - assert_equal(result_by_id[2]['error']['code'], -32601) - assert_equal(result_by_id[2]['result'], None) - - assert_equal(result_by_id[3]['error'], None) - assert result_by_id[3]['result'] is not None + {"method": "getblockhash", "params": [0]}, + # Invalid request format + {"pizza": "sausage"} + ] + results = [ + {"result": 0}, + {"error": {"code": RPC_METHOD_NOT_FOUND, "message": "Method not found"}}, + {"result": "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"}, + {"error": {"code": RPC_INVALID_REQUEST, "message": "Missing method"}}, + ] + + request = [] + response = [] + for idx, (call, result) in enumerate(zip(calls, results), 1): + options = call_options(idx) + if options is None: + continue + request.append(format_request(options, idx, call)) + r = format_response(options, idx, result) + if r is not None: + response.append(r) + + rpc_response, http_status = send_json_rpc(self.nodes[0], request) + if len(response) == 0 and len(request) > 0: + assert_equal(http_status, 204) + assert_equal(rpc_response, None) + else: + assert_equal(http_status, 200) + assert_equal(rpc_response, response) + + def test_batch_requests(self): + self.log.info("Testing empty batch request...") + self.test_batch_request(lambda idx: None) + + self.log.info("Testing basic JSON-RPC 2.0 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=2)) + + self.log.info("Testing JSON-RPC 2.0 batch with notifications...") + self.test_batch_request(lambda idx: BatchOptions(version=2, notification=idx < 2)) + + self.log.info("Testing JSON-RPC 2.0 batch of ALL notifications...") + self.test_batch_request(lambda idx: BatchOptions(version=2, notification=True)) + + # JSONRPC 1.1 does not support batch requests, but test them for 
backwards compatibility. + self.log.info("Testing nonstandard JSON-RPC 1.1 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=1)) + + self.log.info("Testing nonstandard mixed JSON-RPC 1.1/2.0 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=2 if idx % 2 else 1)) + + self.log.info("Testing nonstandard batch request without version numbers...") + self.test_batch_request(lambda idx: BatchOptions()) + + self.log.info("Testing nonstandard batch request without version numbers or ids...") + self.test_batch_request(lambda idx: BatchOptions(notification=True)) + + self.log.info("Testing nonstandard jsonrpc 1.0 version number is accepted...") + self.test_batch_request(lambda idx: BatchOptions(request_fields={"jsonrpc": "1.0"})) + + self.log.info("Testing unrecognized jsonrpc version number is rejected...") + self.test_batch_request(lambda idx: BatchOptions( + request_fields={"jsonrpc": "2.1"}, + response_fields={"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}})) def test_http_status_codes(self): - self.log.info("Testing HTTP status codes for JSON-RPC requests...") - - expect_http_status(404, -32601, self.nodes[0].invalidmethod) - expect_http_status(500, -8, self.nodes[0].getblockhash, 42) + self.log.info("Testing HTTP status codes for JSON-RPC 1.1 requests...") + # OK + expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0]) + # Errors + expect_http_rpc_status(404, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", []) + expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42]) + # force-send empty request + response, status = send_raw_rpc(self.nodes[0], b"") + assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}}) + assert_equal(status, 500) + # force-send invalidly formatted request + response, status = send_raw_rpc(self.nodes[0], b"this is bad") + 
assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}}) + assert_equal(status, 500) + + self.log.info("Testing HTTP status codes for JSON-RPC 2.0 requests...") + # OK + expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0], 2, False) + # RPC errors but not HTTP errors + expect_http_rpc_status(200, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", [], 2, False) + expect_http_rpc_status(200, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False) + # force-send invalidly formatted requests + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": 2, "method": "getblockcount"}) + assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "jsonrpc field must be a string"}}) + assert_equal(status, 400) + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "3.0", "method": "getblockcount"}) + assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}}) + assert_equal(status, 400) + + self.log.info("Testing HTTP status codes for JSON-RPC 2.0 notifications...") + # Not notification: id exists + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "2.0", "id": None, "method": "getblockcount"}) + assert_equal(response["result"], 0) + assert_equal(status, 200) + # Not notification: JSON 1.1 + expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 1) + # Not notification: has "id" field + expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 2, False) + block_count = self.nodes[0].getblockcount() + # Notification response status code: HTTP_NO_CONTENT + expect_http_rpc_status(204, None, self.nodes[0], "generatetoaddress", [1, "bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqdku202"], 2, True) + # The command worked even though there was no response + assert_equal(block_count + 1, self.nodes[0].getblockcount()) + # No error response for notifications even 
if they are invalid + expect_http_rpc_status(204, None, self.nodes[0], "generatetoaddress", [1, "invalid_address"], 2, True) + # Sanity check: command was not executed + assert_equal(block_count + 1, self.nodes[0].getblockcount()) def test_work_queue_exceeded(self): self.log.info("Testing work queue exceeded...") @@ -94,7 +241,7 @@ class RPCInterfaceTest(BitcoinTestFramework): def run_test(self): self.test_getrpcinfo() - self.test_batch_request() + self.test_batch_requests() self.test_http_status_codes() self.test_work_queue_exceeded() diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 272e932fcc..b00be5f4f0 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -28,6 +28,8 @@ from test_framework.script import ( OP_HASH160, OP_RETURN, OP_TRUE, + SIGHASH_ALL, + sign_input_legacy, ) from test_framework.script_util import ( DUMMY_MIN_OP_RETURN_SCRIPT, @@ -386,5 +388,24 @@ class MempoolAcceptanceTest(BitcoinTestFramework): maxfeerate=0, ) + self.log.info('Spending a confirmed bare multisig is okay') + address = self.wallet.get_address() + tx = tx_from_hex(raw_tx_reference) + privkey, pubkey = generate_keypair() + tx.vout[0].scriptPubKey = keys_to_multisig_script([pubkey] * 3, k=1) # Some bare multisig script (1-of-3) + tx.rehash() + self.generateblock(node, address, [tx.serialize().hex()]) + tx_spend = CTransaction() + tx_spend.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx_spend.vout.append(CTxOut(tx.vout[0].nValue - int(fee*COIN), script_to_p2wsh_script(CScript([OP_TRUE])))) + tx_spend.rehash() + sign_input_legacy(tx_spend, 0, tx.vout[0].scriptPubKey, privkey, sighash_type=SIGHASH_ALL) + tx_spend.vin[0].scriptSig = bytes(CScript([OP_0])) + tx_spend.vin[0].scriptSig + self.check_mempool_result( + result_expected=[{'txid': tx_spend.rehash(), 'allowed': True, 'vsize': tx_spend.get_vsize(), 'fees': { 'base': Decimal('0.00000700')}}], + rawtxs=[tx_spend.serialize().hex()], + maxfeerate=0, + ) + 
if __name__ == '__main__': MempoolAcceptanceTest().main() diff --git a/test/functional/mempool_accept_v3.py b/test/functional/mempool_accept_v3.py index 1b55cd0a0d..8285b82c19 100755 --- a/test/functional/mempool_accept_v3.py +++ b/test/functional/mempool_accept_v3.py @@ -533,10 +533,10 @@ class MempoolAcceptV3(BitcoinTestFramework): tx_unrelated_replacee = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=utxo_unrelated_conflict) assert tx_unrelated_replacee["txid"] in node.getrawmempool() - fee_to_beat_child2 = int(tx_v3_child_2["fee"] * COIN) + fee_to_beat = max(int(tx_v3_child_2["fee"] * COIN), int(tx_unrelated_replacee["fee"]*COIN)) tx_v3_child_3 = self.wallet.create_self_transfer_multi( - utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat_child2*5, version=3 + utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat*2, version=3 ) node.sendrawtransaction(tx_v3_child_3["hex"]) self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_3["txid"]]) diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index e8a568f7ab..d46924f4ce 100755 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -6,6 +6,9 @@ from decimal import Decimal +from test_framework.mempool_util import ( + fill_mempool, +) from test_framework.p2p import P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( @@ -13,7 +16,6 @@ from test_framework.util import ( assert_fee_amount, assert_greater_than, assert_raises_rpc_error, - fill_mempool, ) from test_framework.wallet import ( COIN, @@ -93,7 +95,7 @@ class MempoolLimitTest(BitcoinTestFramework): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) - fill_mempool(self, node, self.wallet) + fill_mempool(self, node) current_info = 
node.getmempoolinfo() mempoolmin_feerate = current_info["mempoolminfee"] @@ -183,7 +185,7 @@ class MempoolLimitTest(BitcoinTestFramework): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) - fill_mempool(self, node, self.wallet) + fill_mempool(self, node) current_info = node.getmempoolinfo() mempoolmin_feerate = current_info["mempoolminfee"] @@ -257,7 +259,7 @@ class MempoolLimitTest(BitcoinTestFramework): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) - fill_mempool(self, node, self.wallet) + fill_mempool(self, node) # Deliberately try to create a tx with a fee less than the minimum mempool fee to assert that it does not get added to the mempool self.log.info('Create a mempool tx that will not pass mempoolminfee') diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index dcb66b2ca1..e83c62915e 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -199,13 +199,13 @@ class MempoolPackagesTest(BitcoinTestFramework): assert set(mempool1).issubset(set(mempool0)) for tx in chain[:CUSTOM_ANCESTOR_LIMIT]: assert tx in mempool1 - # TODO: more detailed check of node1's mempool (fees etc.) 
- # check transaction unbroadcast info (should be false if in both mempools) - mempool = self.nodes[0].getrawmempool(True) - for tx in mempool: - assert_equal(mempool[tx]['unbroadcast'], False) - - # TODO: test ancestor size limits + entry0 = self.nodes[0].getmempoolentry(tx) + entry1 = self.nodes[1].getmempoolentry(tx) + assert not entry0['unbroadcast'] + assert not entry1['unbroadcast'] + assert_equal(entry1['fees']['base'], entry0['fees']['base']) + assert_equal(entry1['vsize'], entry0['vsize']) + assert_equal(entry1['depends'], entry0['depends']) # Now test descendant chain limits @@ -251,10 +251,14 @@ class MempoolPackagesTest(BitcoinTestFramework): assert tx in mempool1 for tx in chain[CUSTOM_DESCENDANT_LIMIT:]: assert tx not in mempool1 - # TODO: more detailed check of node1's mempool (fees etc.) - - # TODO: test descendant size limits - + for tx in mempool1: + entry0 = self.nodes[0].getmempoolentry(tx) + entry1 = self.nodes[1].getmempoolentry(tx) + assert not entry0['unbroadcast'] + assert not entry1['unbroadcast'] + assert_equal(entry1['fees']['base'], entry0['fees']['base']) + assert_equal(entry1['vsize'], entry0['vsize']) + assert_equal(entry1['depends'], entry0['depends']) # Test reorg handling # First, the basics: self.generate(self.nodes[0], 1) diff --git a/test/functional/p2p_1p1c_network.py b/test/functional/p2p_1p1c_network.py new file mode 100755 index 0000000000..ea59248506 --- /dev/null +++ b/test/functional/p2p_1p1c_network.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Test that 1p1c package submission allows a 1p1c package to propagate in a "network" of nodes. 
Send +various packages from different nodes on a network in which some nodes have already received some of +the transactions (and submitted them to mempool, kept them as orphans or rejected them as +too-low-feerate transactions). The packages should be received and accepted by all nodes. +""" + +from decimal import Decimal +from math import ceil + +from test_framework.mempool_util import ( + fill_mempool, +) +from test_framework.messages import ( + msg_tx, +) +from test_framework.p2p import ( + P2PInterface, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_greater_than, +) +from test_framework.wallet import ( + MiniWallet, + MiniWalletMode, +) + +# 1sat/vB feerate denominated in BTC/KvB +FEERATE_1SAT_VB = Decimal("0.00001000") + +class PackageRelayTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 4 + # hugely speeds up the test, as it involves multiple hops of tx relay. 
+ self.noban_tx_relay = True + self.extra_args = [[ + "-datacarriersize=100000", + "-maxmempool=5", + ]] * self.num_nodes + self.supports_cli = False + + def raise_network_minfee(self): + fill_mempool(self, self.nodes[0]) + + self.log.debug("Wait for the network to sync mempools") + self.sync_mempools() + + self.log.debug("Check that all nodes' mempool minimum feerates are above min relay feerate") + for node in self.nodes: + assert_equal(node.getmempoolinfo()['minrelaytxfee'], FEERATE_1SAT_VB) + assert_greater_than(node.getmempoolinfo()['mempoolminfee'], FEERATE_1SAT_VB) + + def create_basic_1p1c(self, wallet): + low_fee_parent = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB, confirmed_only=True) + high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB) + package_hex_basic = [low_fee_parent["hex"], high_fee_child["hex"]] + return package_hex_basic, low_fee_parent["tx"], high_fee_child["tx"] + + def create_package_2outs(self, wallet): + # First create a tester tx to see the vsize, and then adjust the fees + utxo_for_2outs = wallet.get_utxo(confirmed_only=True) + + low_fee_parent_2outs_tester = wallet.create_self_transfer_multi( + utxos_to_spend=[utxo_for_2outs], + num_outputs=2, + ) + + # Target 1sat/vB so the number of satoshis is equal to the vsize. + # Round up. The goal is to be between min relay feerate and mempool min feerate. 
+ fee_2outs = ceil(low_fee_parent_2outs_tester["tx"].get_vsize() / 2) + + low_fee_parent_2outs = wallet.create_self_transfer_multi( + utxos_to_spend=[utxo_for_2outs], + num_outputs=2, + fee_per_output=fee_2outs, + ) + + # Now create the child + high_fee_child_2outs = wallet.create_self_transfer_multi( + utxos_to_spend=low_fee_parent_2outs["new_utxos"][::-1], + fee_per_output=fee_2outs*100, + ) + return [low_fee_parent_2outs["hex"], high_fee_child_2outs["hex"]], low_fee_parent_2outs["tx"], high_fee_child_2outs["tx"] + + def create_package_2p1c(self, wallet): + parent1 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*10, confirmed_only=True) + parent2 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*20, confirmed_only=True) + child = wallet.create_self_transfer_multi( + utxos_to_spend=[parent1["new_utxo"], parent2["new_utxo"]], + fee_per_output=999*parent1["tx"].get_vsize(), + ) + return [parent1["hex"], parent2["hex"], child["hex"]], parent1["tx"], parent2["tx"], child["tx"] + + def create_packages(self): + # 1: Basic 1-parent-1-child package, parent 1sat/vB, child 999sat/vB + package_hex_1, parent_1, child_1 = self.create_basic_1p1c(self.wallet) + + # 2: same as 1, parent's txid is the same as its wtxid. + package_hex_2, parent_2, child_2 = self.create_basic_1p1c(self.wallet_nonsegwit) + + # 3: 2-parent-1-child package. Both parents are above mempool min feerate. No package submission happens. + # We require packages to be child-with-unconfirmed-parents and only allow 1-parent-1-child packages. + package_hex_3, parent_31, parent_32, child_3 = self.create_package_2p1c(self.wallet) + + # 4: parent + child package where the child spends 2 different outputs from the parent. 
    def run_test(self):
        """Exercise package relay across the node network.

        Builds several 1p1c/2p1c packages, pre-seeds different nodes with
        different package members (children as orphans on node1, low-fee
        parents on node3), submits the full packages on node0, and checks
        that all mempools converge.
        """
        self.wallet = MiniWallet(self.nodes[1])
        self.wallet_nonsegwit = MiniWallet(self.nodes[2], mode=MiniWalletMode.RAW_P2PK)
        self.generate(self.wallet_nonsegwit, 10)
        self.generate(self.wallet, 120)

        self.log.info("Fill mempools with large transactions to raise mempool minimum feerates")
        self.raise_network_minfee()

        # Create the transactions.
        # Re-scan after raise_network_minfee so the wallet sees its mempool UTXOs.
        self.wallet.rescan_utxos(include_mempool=True)
        packages_to_submit, transactions_to_presend = self.create_packages()

        self.peers = [self.nodes[i].add_p2p_connection(P2PInterface()) for i in range(self.num_nodes)]

        self.log.info("Pre-send some transactions to nodes")
        for (i, peer) in enumerate(self.peers):
            for tx in transactions_to_presend[i]:
                peer.send_and_ping(msg_tx(tx))
            # This disconnect removes any sent orphans from the orphanage (EraseForPeer) and times
            # out the in-flight requests. It is currently required for the test to pass right now,
            # because the node will not reconsider an orphan tx and will not (re)try requesting
            # orphan parents from multiple peers if the first one didn't respond.
            # TODO: remove this in the future if the node tries orphan resolution with multiple peers.
            peer.peer_disconnect()

        self.log.info("Submit full packages to node0")
        for package_hex in packages_to_submit:
            submitpackage_result = self.nodes[0].submitpackage(package_hex)
            assert_equal(submitpackage_result["package_msg"], "success")

        self.log.info("Wait for mempools to sync")
        self.sync_mempools(timeout=20)
def cleanup(func):
    """Decorator for subtests: always disconnect peers and re-sync wallet state afterwards.

    Runs the wrapped subtest, then (even on failure) disconnects all p2p
    connections, verifies the mempool min feerate is still elevated (a
    precondition every subtest relies on), rescans the wallet's UTXOs, and
    resets mocktime.
    """
    def wrapper(self, *args, **kwargs):
        try:
            func(self, *args, **kwargs)
        finally:
            self.nodes[0].disconnect_p2ps()
            # Do not clear the node's mempool, as each test requires mempool min feerate > min
            # relay feerate. However, do check that this is the case.
            assert self.nodes[0].getmempoolinfo()["mempoolminfee"] > self.nodes[0].getnetworkinfo()["relayfee"]
            # Ensure we do not try to spend the same UTXOs in subsequent tests, as they will look like RBF attempts.
            self.wallet.rescan_utxos(include_mempool=True)

            # Resets if mocktime was used
            self.nodes[0].setmocktime(0)
    return wrapper
    @cleanup
    def test_basic_parent_then_child(self, wallet):
        """Opportunistic 1p1c when the low-fee parent is relayed before the child.

        The parent alone is rejected as below mempool min feerate; the child
        then arrives as an orphan, the node re-requests the parent by txid,
        and both are accepted together as a package.
        """
        node = self.nodes[0]
        low_fee_parent = self.create_tx_below_mempoolminfee(wallet)
        high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=20*FEERATE_1SAT_VB)

        peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
        peer_ignored = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=2, connection_type="outbound-full-relay")

        # 1. Parent is relayed first. It is too low feerate.
        parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16)
        peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
        peer_sender.wait_for_getdata([parent_wtxid_int])
        peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
        assert low_fee_parent["txid"] not in node.getrawmempool()

        # Send again from peer_ignored, check that it is ignored
        peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
        assert "getdata" not in peer_ignored.last_message

        # 2. Child is relayed next. It is missing an input.
        high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
        peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)]))
        peer_sender.wait_for_getdata([high_child_wtxid_int])
        peer_sender.send_and_ping(msg_tx(high_fee_child["tx"]))

        # 3. Node requests the missing parent by txid.
        # It should do so even if it has previously rejected that parent for being too low feerate.
        parent_txid_int = int(low_fee_parent["txid"], 16)
        peer_sender.wait_for_getdata([parent_txid_int])

        # 4. Sender re-relays the parent. Parent+Child are evaluated as a package and accepted.
        peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))

        # 5. Both transactions should now be in mempool.
        node_mempool = node.getrawmempool()
        assert low_fee_parent["txid"] in node_mempool
        assert high_fee_child["txid"] in node_mempool
+ parent_txid_int = int(low_fee_parent["txid"], 16) + peer_sender.wait_for_getdata([parent_txid_int]) + + # 4. The low parent + low child are submitted as a package. They are not accepted due to low package feerate. + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + assert low_fee_parent["txid"] not in node.getrawmempool() + assert med_fee_child["txid"] not in node.getrawmempool() + + # If peer_ignored announces the low feerate child, it should be ignored + peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=med_child_wtxid_int)])) + assert "getdata" not in peer_ignored.last_message + # If either peer sends the parent again, package evaluation should not be attempted + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + peer_ignored.send_and_ping(msg_tx(low_fee_parent["tx"])) + + assert low_fee_parent["txid"] not in node.getrawmempool() + assert med_fee_child["txid"] not in node.getrawmempool() + + # 5. Send the high feerate (orphan) child + high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16) + peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)])) + peer_sender.wait_for_getdata([high_child_wtxid_int]) + peer_sender.send_and_ping(msg_tx(high_fee_child["tx"])) + + # 6. Node requests the orphan's parent, even though it has already been rejected, both by + # itself and with a child. This is necessary, otherwise high_fee_child can be censored. + parent_txid_int = int(low_fee_parent["txid"], 16) + peer_sender.wait_for_getdata([parent_txid_int]) + + # 7. The low feerate parent + high feerate child are submitted as a package. + peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + # 8. 
    @cleanup
    def test_orphan_consensus_failure(self):
        """A consensus-invalid orphan child must get its sender (not the parent relayer) disconnected."""
        self.log.info("Check opportunistic 1p1c logic with consensus-invalid orphan causes disconnect of the correct peer")
        node = self.nodes[0]
        low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet)
        coin = low_fee_parent["new_utxo"]
        address = node.get_deterministic_priv_key().address
        # Create raw transaction spending the parent, but with no signature (a consensus error).
        hex_orphan_no_sig = node.createrawtransaction([{"txid": coin["txid"], "vout": coin["vout"]}], {address : coin["value"] - Decimal("0.0001")})
        tx_orphan_bad_wit = tx_from_hex(hex_orphan_no_sig)
        # Garbage witness instead of an empty one, so it is "wrong" rather than "missing".
        tx_orphan_bad_wit.wit.vtxinwit.append(CTxInWitness())
        tx_orphan_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage']

        bad_orphan_sender = node.add_p2p_connection(P2PInterface())
        parent_sender = node.add_p2p_connection(P2PInterface())

        # 1. Child is received first. It is missing an input.
        child_wtxid_int = int(tx_orphan_bad_wit.getwtxid(), 16)
        bad_orphan_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=child_wtxid_int)]))
        bad_orphan_sender.wait_for_getdata([child_wtxid_int])
        bad_orphan_sender.send_and_ping(msg_tx(tx_orphan_bad_wit))

        # 2. Node requests the missing parent by txid.
        parent_txid_int = int(low_fee_parent["txid"], 16)
        bad_orphan_sender.wait_for_getdata([parent_txid_int])

        # 3. A different peer relays the parent. Parent+Child are evaluated as a package and rejected.
        parent_sender.send_message(msg_tx(low_fee_parent["tx"]))

        # 4. Transactions should not be in mempool.
        node_mempool = node.getrawmempool()
        assert low_fee_parent["txid"] not in node_mempool
        assert tx_orphan_bad_wit.rehash() not in node_mempool

        # 5. Peer that sent a consensus-invalid transaction should be disconnected.
        bad_orphan_sender.wait_for_disconnect()

        # The peer that didn't provide the orphan should not be disconnected.
        parent_sender.sync_with_ping()
    @cleanup
    def test_multiple_parents(self):
        """Node should not attempt 1p1c for a child with more than one missing low-fee parent."""
        self.log.info("Check that node does not request more than 1 previously-rejected low feerate parent")

        node = self.nodes[0]
        # Mocktime lets us fast-forward past the getdata wait below.
        node.setmocktime(int(time.time()))

        # 2-parent-1-child package where both parents are below mempool min feerate
        parent_low_1 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit)
        parent_low_2 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit)
        child_bumping = self.wallet_nonsegwit.create_self_transfer_multi(
            utxos_to_spend=[parent_low_1["new_utxo"], parent_low_2["new_utxo"]],
            fee_per_output=999*parent_low_1["tx"].get_vsize(),
        )

        peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")

        # 1. Send both parents. Each should be rejected for being too low feerate.
        # Send unsolicited so that we can later check that no "getdata" was ever received.
        peer_sender.send_and_ping(msg_tx(parent_low_1["tx"]))
        peer_sender.send_and_ping(msg_tx(parent_low_2["tx"]))

        # parent_low_1 and parent_low_2 are rejected for being low feerate.
        assert parent_low_1["txid"] not in node.getrawmempool()
        assert parent_low_2["txid"] not in node.getrawmempool()

        # 2. Send child.
        peer_sender.send_and_ping(msg_tx(child_bumping["tx"]))

        # 3. Node should not request any parents, as it should recognize that it will not accept
        # multi-parent-1-child packages.
        node.bumpmocktime(GETDATA_WAIT)
        peer_sender.sync_with_ping()
        assert "getdata" not in peer_sender.last_message
    def run_test(self):
        """Set up wallets, raise the mempool min feerate, then run each 1p1c subtest."""
        node = self.nodes[0]
        # To avoid creating transactions with the same txid (can happen if we set the same feerate
        # and reuse the same input as a previous transaction that wasn't successfully submitted),
        # we give each subtest a different nSequence for its transactions.
        self.sequence = MAX_BIP125_RBF_SEQUENCE

        self.wallet = MiniWallet(node)
        self.wallet_nonsegwit = MiniWallet(node, mode=MiniWalletMode.RAW_P2PK)
        self.generate(self.wallet_nonsegwit, 10)
        self.generate(self.wallet, 20)

        # Raise the mempool min feerate above the relay feerate (required by every subtest).
        fill_mempool(self, node)

        self.log.info("Check opportunistic 1p1c logic when parent (txid != wtxid) is received before child")
        self.test_basic_parent_then_child(self.wallet)

        self.log.info("Check opportunistic 1p1c logic when parent (txid == wtxid) is received before child")
        self.test_basic_parent_then_child(self.wallet_nonsegwit)

        self.log.info("Check opportunistic 1p1c logic when child is received before parent")
        self.test_basic_child_then_parent()

        self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid != wtxid)")
        self.test_low_and_high_child(self.wallet)

        self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid == wtxid)")
        self.test_low_and_high_child(self.wallet_nonsegwit)

        self.test_orphan_consensus_failure()
        self.test_parent_consensus_failure()
        self.test_multiple_parents()
        self.test_other_parent_in_mempool()
+ Returns a CTransaction object. + """ + tx_bad_wit = tx_from_hex(tx["hex"]) + tx_bad_wit.wit.vtxinwit = [CTxInWitness()] + # Add garbage data to witness 0. We cannot simply strip the witness, as the node would + # classify it as a transaction in which the witness was missing rather than wrong. + tx_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage'] + + assert_equal(tx["txid"], tx_bad_wit.rehash()) + assert tx["wtxid"] != tx_bad_wit.getwtxid() + + return tx_bad_wit + @cleanup def test_arrival_timing_orphan(self): self.log.info("Test missing parents that arrive during delay are not requested") @@ -284,8 +302,8 @@ class OrphanHandlingTest(BitcoinTestFramework): # doesn't give up on the orphan. Once all of the missing parents are received, it should be # submitted to mempool. peer.send_message(msg_notfound(vec=[CInv(MSG_WITNESS_TX, int(txid_conf_old, 16))])) + # Sync with ping to ensure orphans are reconsidered peer.send_and_ping(msg_tx(missing_tx["tx"])) - peer.sync_with_ping() assert_equal(node.getmempoolentry(orphan["txid"])["ancestorcount"], 3) @cleanup @@ -394,10 +412,161 @@ class OrphanHandlingTest(BitcoinTestFramework): peer2.assert_never_requested(child["tx"].getwtxid()) # The child should never be requested, even if announced again with potentially different witness. 
    @cleanup
    def test_same_txid_orphan(self):
        """A witness-malleated orphan must not block acceptance of the real same-txid child."""
        self.log.info("Check what happens when orphan with same txid is already in orphanage")
        node = self.nodes[0]

        tx_parent = self.wallet.create_self_transfer()

        # Create the real child
        tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"])

        # Create a fake version of the child (same txid, garbage witness)
        tx_orphan_bad_wit = self.create_malleated_version(tx_child)

        bad_peer = node.add_p2p_connection(P2PInterface())
        honest_peer = node.add_p2p_connection(P2PInterface())

        # 1. Fake orphan is received first. It is missing an input.
        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))

        # 2. Node requests the missing parent by txid.
        parent_txid_int = int(tx_parent["txid"], 16)
        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
        bad_peer.wait_for_getdata([parent_txid_int])

        # 3. Honest peer relays the real child, which is also missing parents and should be placed
        # in the orphanage.
        with node.assert_debug_log(["missingorspent", "stored orphan tx"]):
            honest_peer.send_and_ping(msg_tx(tx_child["tx"]))

        # Time out the previous request for the parent (node will not request the same transaction
        # from multiple nodes at the same time)
        node.bumpmocktime(GETDATA_TX_INTERVAL)

        # 4. The parent is requested. Honest peer sends it.
        honest_peer.wait_for_getdata([parent_txid_int])
        # Sync with ping to ensure orphans are reconsidered
        honest_peer.send_and_ping(msg_tx(tx_parent["tx"]))

        # 5. After parent is accepted, orphans should be reconsidered.
        # The real child should be accepted and the fake one rejected.
        node_mempool = node.getrawmempool()
        assert tx_parent["txid"] in node_mempool
        assert tx_child["txid"] in node_mempool
        assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"])
    @cleanup
    def test_orphan_txid_inv(self):
        """A txid inv matching an orphanage entry must still be tracked and requested."""
        self.log.info("Check node does not ignore announcement with same txid as tx in orphanage")
        node = self.nodes[0]

        tx_parent = self.wallet.create_self_transfer()

        # Create the real child and fake version (same txid, garbage witness)
        tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"])
        tx_orphan_bad_wit = self.create_malleated_version(tx_child)

        bad_peer = node.add_p2p_connection(PeerTxRelayer())
        # Must not send wtxidrelay because otherwise the inv(TX) will be ignored later
        honest_peer = node.add_p2p_connection(P2PInterface(wtxidrelay=False))

        # 1. Fake orphan is received first. It is missing an input.
        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))

        # 2. Node requests the missing parent by txid.
        parent_txid_int = int(tx_parent["txid"], 16)
        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
        bad_peer.wait_for_getdata([parent_txid_int])

        # 3. Honest peer announces the real child, by txid (this isn't common but the node should
        # still keep track of it).
        child_txid_int = int(tx_child["txid"], 16)
        honest_peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=child_txid_int)]))

        # 4. The child is requested. Honest peer sends it.
        node.bumpmocktime(TXREQUEST_TIME_SKIP)
        honest_peer.wait_for_getdata([child_txid_int])
        with node.assert_debug_log(["stored orphan tx"]):
            honest_peer.send_and_ping(msg_tx(tx_child["tx"]))

        # 5. After first parent request times out, the node sends another one for the missing parent
        # of the real orphan child.
        node.bumpmocktime(GETDATA_TX_INTERVAL)
        honest_peer.wait_for_getdata([parent_txid_int])
        honest_peer.send_and_ping(msg_tx(tx_parent["tx"]))

        # 6. After parent is accepted, orphans should be reconsidered.
        # The real child should be accepted and the fake one rejected. This may happen in either
        # order since the message-processing is randomized. If tx_orphan_bad_wit is validated first,
        # its consensus error leads to disconnection of bad_peer. If tx_child is validated first,
        # tx_orphan_bad_wit is rejected for txn-same-nonwitness-data-in-mempool (no punishment).
        node_mempool = node.getrawmempool()
        assert tx_parent["txid"] in node_mempool
        assert tx_child["txid"] in node_mempool
        assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"])
http://www.opensource.org/licenses/mit-license.php. + +""" Test node outbound peer eviction logic + +A subset of our outbound peers are subject to eviction logic if they cannot keep up +with our vision of the best chain. This criteria applies only to non-protected peers, +and can be triggered by either not learning about any blocks from an outbound peer after +a certain deadline, or by them not being able to catch up fast enough (under the same deadline). + +This tests the different eviction paths based on the peer's behavior and on whether they are protected +or not. +""" +import time + +from test_framework.messages import ( + from_hex, + msg_headers, + CBlockHeader, +) +from test_framework.p2p import P2PInterface +from test_framework.test_framework import BitcoinTestFramework + +# Timeouts (in seconds) +CHAIN_SYNC_TIMEOUT = 20 * 60 +HEADERS_RESPONSE_TIME = 2 * 60 + + +class P2POutEvict(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def test_outbound_eviction_unprotected(self): + # This tests the eviction logic for **unprotected** outbound peers (that is, PeerManagerImpl::ConsiderEviction) + node = self.nodes[0] + cur_mock_time = node.mocktime + + # Get our tip header and its parent + tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False)) + prev_header = from_hex(CBlockHeader(), node.getblockheader(f"{tip_header.hashPrevBlock:064x}", False)) + + self.log.info("Create an outbound connection and don't send any headers") + # Test disconnect due to no block being announced in 22+ minutes (headers are not even exchanged) + peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay") + # Wait for over 20 min to trigger the first eviction timeout. This sets the last call past 2 min in the future. 
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1) + node.setmocktime(cur_mock_time) + peer.sync_with_ping() + # Wait for over 2 more min to trigger the disconnection + peer.wait_for_getheaders(block_hash=tip_header.hashPrevBlock) + cur_mock_time += (HEADERS_RESPONSE_TIME + 1) + node.setmocktime(cur_mock_time) + self.log.info("Test that the peer gets evicted") + peer.wait_for_disconnect() + + self.log.info("Create an outbound connection and send header but never catch up") + # Mimic a node that just falls behind for long enough + # This should also apply for a node doing IBD that does not catch up in time + # Connect a peer and make it send us headers ending in our tip's parent + peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay") + peer.send_and_ping(msg_headers([prev_header])) + + # Trigger the timeouts + cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1) + node.setmocktime(cur_mock_time) + peer.sync_with_ping() + peer.wait_for_getheaders(block_hash=tip_header.hashPrevBlock) + cur_mock_time += (HEADERS_RESPONSE_TIME + 1) + node.setmocktime(cur_mock_time) + self.log.info("Test that the peer gets evicted") + peer.wait_for_disconnect() + + self.log.info("Create an outbound connection and keep lagging behind, but not too much") + # Test that if the peer never catches up with our current tip, but it does with the + # expected work that we set when setting the timer (that is, our tip at the time) + # we do not disconnect the peer + peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay") + + self.log.info("Mine a block so our peer starts lagging") + prev_prev_hash = tip_header.hashPrevBlock + best_block_hash = self.generateblock(node, output="raw(42)", transactions=[])["hash"] + peer.sync_with_ping() + + self.log.info("Keep catching up with the old tip and check that we are not evicted") + for i in range(10): + # Generate an additional block so the peers is 2 blocks behind + 
prev_header = from_hex(CBlockHeader(), node.getblockheader(best_block_hash, False))
+ best_block_hash = self.generateblock(node, output="raw(42)", transactions=[])["hash"]
+ peer.sync_with_ping()
+
+ # Advance time but not enough to evict the peer
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+
+ # Wait until we get our last call (by receiving a getheaders)
+ peer.wait_for_getheaders(block_hash=prev_prev_hash)
+
+ # Send a header with the previous tip (so we go back to 1 block behind)
+ peer.send_and_ping(msg_headers([prev_header]))
+ prev_prev_hash = tip_header.hash
+
+ self.log.info("Create an outbound connection and take some time to catch up, but do it in time")
+ # Check that if the peer manages to catch up within time, the timeouts are removed (and the peer is not disconnected)
+ # We are reusing the peer from the previous case which already sent us a valid (but old) block and whose timer is ticking
+
+ # Send an updated headers message matching our tip
+ peer.send_and_ping(msg_headers([from_hex(CBlockHeader(), node.getblockheader(best_block_hash, False))]))
+
+ # Wait for long enough for the timeouts to have triggered and check that we are still connected
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Test that the peer does not get evicted")
+ peer.sync_with_ping()
+
+ node.disconnect_p2ps()
+
+ def test_outbound_eviction_protected(self):
+ # This tests the eviction logic for **protected** outbound peers (that is, PeerManagerImpl::ConsiderEviction)
+ # Outbound connections are flagged as protected as long as they have sent us a connecting block with at least as
+ # much work as our current tip and we have enough empty protected_peers slots. 
+ node = self.nodes[0]
+ cur_mock_time = node.mocktime
+ tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False))
+
+ self.log.info("Create an outbound connection to a peer that shares our tip so it gets granted protection")
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay")
+ peer.send_and_ping(msg_headers([tip_header]))
+
+ self.log.info("Mine a new block and sync with our peer")
+ self.generateblock(node, output="raw(42)", transactions=[])
+ peer.sync_with_ping()
+
+ self.log.info("Let enough time pass for the timeouts to go off")
+ # Trigger the timeouts and check that we are still connected
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+ peer.wait_for_getheaders(block_hash=tip_header.hashPrevBlock)
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Test that the node does not get evicted")
+ peer.sync_with_ping()
+
+ node.disconnect_p2ps()
+
+ def test_outbound_eviction_mixed(self):
+ # This tests the outbound eviction logic for a mix of protected and unprotected peers. 
+ node = self.nodes[0] + cur_mock_time = node.mocktime + + self.log.info("Create a mix of protected and unprotected outbound connections to check against eviction") + + # Let's try this logic having multiple peers, some protected and some unprotected + # We protect up to 4 peers as long as they have provided a block with the same amount of work as our tip + self.log.info("The first 4 peers are protected by sending us a valid block with enough work") + tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False)) + headers_message = msg_headers([tip_header]) + protected_peers = [] + for i in range(4): + peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay") + peer.send_and_ping(headers_message) + protected_peers.append(peer) + + # We can create 4 additional outbound connections to peers that are unprotected. 2 of them will be well behaved, + # whereas the other 2 will misbehave (1 sending no headers, 1 sending old ones) + self.log.info("The remaining 4 peers will be mixed between honest (2) and misbehaving peers (2)") + prev_header = from_hex(CBlockHeader(), node.getblockheader(f"{tip_header.hashPrevBlock:064x}", False)) + headers_message = msg_headers([prev_header]) + honest_unprotected_peers = [] + for i in range(2): + peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=4+i, connection_type="outbound-full-relay") + peer.send_and_ping(headers_message) + honest_unprotected_peers.append(peer) + + misbehaving_unprotected_peers = [] + for i in range(2): + peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=6+i, connection_type="outbound-full-relay") + if i%2==0: + peer.send_and_ping(headers_message) + misbehaving_unprotected_peers.append(peer) + + self.log.info("Mine a new block and keep the unprotected honest peer on sync, all the rest off-sync") + # Mine a block so all peers become outdated + target_hash = prev_header.rehash() + tip_hash = self.generateblock(node, 
output="raw(42)", transactions=[])["hash"] + tip_header = from_hex(CBlockHeader(), node.getblockheader(tip_hash, False)) + tip_headers_message = msg_headers([tip_header]) + + # Let the timeouts hit and check back + cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1) + node.setmocktime(cur_mock_time) + for peer in protected_peers + misbehaving_unprotected_peers: + peer.sync_with_ping() + peer.wait_for_getheaders(block_hash=target_hash) + for peer in honest_unprotected_peers: + peer.send_and_ping(tip_headers_message) + peer.wait_for_getheaders(block_hash=target_hash) + + cur_mock_time += (HEADERS_RESPONSE_TIME + 1) + node.setmocktime(cur_mock_time) + self.log.info("Check how none of the honest nor protected peers was evicted but all the misbehaving unprotected were") + for peer in protected_peers + honest_unprotected_peers: + peer.sync_with_ping() + for peer in misbehaving_unprotected_peers: + peer.wait_for_disconnect() + + node.disconnect_p2ps() + + def test_outbound_eviction_blocks_relay_only(self): + # The logic for outbound eviction protection only applies to outbound-full-relay peers + # This tests that other types of peers (blocks-relay-only for instance) are not granted protection + node = self.nodes[0] + cur_mock_time = node.mocktime + tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False)) + + self.log.info("Create an blocks-only outbound connection to a peer that shares our tip. 
This would usually grant protection") + peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="block-relay-only") + peer.send_and_ping(msg_headers([tip_header])) + + self.log.info("Mine a new block and sync with our peer") + self.generateblock(node, output="raw(42)", transactions=[]) + peer.sync_with_ping() + + self.log.info("Let enough time pass for the timeouts to go off") + # Trigger the timeouts and check how the peer gets evicted, since protection is only given to outbound-full-relay peers + cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1) + node.setmocktime(cur_mock_time) + peer.sync_with_ping() + peer.wait_for_getheaders(block_hash=tip_header.hash) + cur_mock_time += (HEADERS_RESPONSE_TIME + 1) + node.setmocktime(cur_mock_time) + self.log.info("Test that the peer gets evicted") + peer.wait_for_disconnect() + + node.disconnect_p2ps() + + + def run_test(self): + self.nodes[0].setmocktime(int(time.time())) + self.test_outbound_eviction_unprotected() + self.test_outbound_eviction_protected() + self.test_outbound_eviction_mixed() + self.test_outbound_eviction_blocks_relay_only() + + +if __name__ == '__main__': + P2POutEvict().main() diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 213c748eda..45bbd7f1c3 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -1054,7 +1054,7 @@ class SegWitTest(BitcoinTestFramework): @subtest def test_max_witness_push_length(self): - """Test that witness stack can only allow up to 520 byte pushes.""" + """Test that witness stack can only allow up to MAX_SCRIPT_ELEMENT_SIZE byte pushes.""" block = self.build_next_block() diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 7a50f1e605..0af6b1d2c9 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -8,6 +8,9 @@ Test transaction download behavior from decimal import Decimal import time +from test_framework.mempool_util import ( + 
fill_mempool, +) from test_framework.messages import ( CInv, MSG_TX, @@ -24,7 +27,6 @@ from test_framework.p2p import ( from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - fill_mempool, ) from test_framework.wallet import MiniWallet @@ -248,7 +250,7 @@ class TxDownloadTest(BitcoinTestFramework): def test_rejects_filter_reset(self): self.log.info('Check that rejected tx is not requested again') node = self.nodes[0] - fill_mempool(self, node, self.wallet) + fill_mempool(self, node) self.wallet.rescan_utxos() mempoolminfee = node.getmempoolinfo()['mempoolminfee'] peer = node.add_p2p_connection(TestP2PConn()) diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index 37c42f2533..8ac0afdaaa 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -8,6 +8,9 @@ from decimal import Decimal import random from test_framework.blocktools import COINBASE_MATURITY +from test_framework.mempool_util import ( + fill_mempool, +) from test_framework.messages import ( MAX_BIP125_RBF_SEQUENCE, tx_from_hex, @@ -18,7 +21,6 @@ from test_framework.util import ( assert_equal, assert_fee_amount, assert_raises_rpc_error, - fill_mempool, ) from test_framework.wallet import ( DEFAULT_FEE, @@ -26,6 +28,9 @@ from test_framework.wallet import ( ) +MAX_PACKAGE_COUNT = 25 + + class RPCPackagesTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 @@ -344,6 +349,13 @@ class RPCPackagesTest(BitcoinTestFramework): assert_raises_rpc_error(-25, "package topology disallowed", node.submitpackage, chain_hex) assert_equal(legacy_pool, node.getrawmempool()) + assert_raises_rpc_error(-8, f"Array must contain between 2 and {MAX_PACKAGE_COUNT} transactions.", node.submitpackage, []) + assert_raises_rpc_error(-8, f"Array must contain between 2 and {MAX_PACKAGE_COUNT} transactions.", node.submitpackage, [chain_hex[0]] * 1) + assert_raises_rpc_error( + -8, f"Array must contain 
between 2 and {MAX_PACKAGE_COUNT} transactions.", + node.submitpackage, [chain_hex[0]] * (MAX_PACKAGE_COUNT + 1) + ) + # Create a transaction chain such as only the parent gets accepted (by making the child's # version non-standard). Make sure the parent does get broadcast. self.log.info("If a package is partially submitted, transactions included in mempool get broadcast") @@ -388,7 +400,7 @@ class RPCPackagesTest(BitcoinTestFramework): ]) self.wallet.rescan_utxos() - fill_mempool(self, node, self.wallet) + fill_mempool(self, node) minrelay = node.getmempoolinfo()["minrelaytxfee"] parent = self.wallet.create_self_transfer( diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index 12697e9d0c..3978c80dde 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -490,11 +490,11 @@ class RawTransactionsTest(BitcoinTestFramework): addr2Obj = self.nodes[2].getaddressinfo(addr2) # Tests for createmultisig and addmultisigaddress - assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"]) + assert_raises_rpc_error(-5, 'Pubkey "01020304" must have a length of either 33 or 65 bytes', self.nodes[0].createmultisig, 1, ["01020304"]) # createmultisig can only take public keys self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here - assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) + assert_raises_rpc_error(-5, f'Pubkey "{addr1}" must be a hex string', self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address'] diff --git a/test/functional/test-shell.md b/test/functional/test-shell.md index b89b40f13d..4cd62c4ef3 100644 --- a/test/functional/test-shell.md +++ 
b/test/functional/test-shell.md @@ -123,11 +123,11 @@ We can also log custom events to the logger. ``` **Note: Please also consider the functional test -[readme](../test/functional/README.md), which provides an overview of the +[readme](/test/functional/README.md), which provides an overview of the test-framework**. Modules such as -[key.py](../test/functional/test_framework/key.py), -[script.py](../test/functional/test_framework/script.py) and -[messages.py](../test/functional/test_framework/messages.py) are particularly +[key.py](/test/functional/test_framework/key.py), +[script.py](/test/functional/test_framework/script.py) and +[messages.py](/test/functional/test_framework/messages.py) are particularly useful in constructing objects which can be passed to the bitcoind nodes managed by a running `TestShell` object. diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py index 5b2e3289a9..bcb38b21cd 100644 --- a/test/functional/test_framework/address.py +++ b/test/functional/test_framework/address.py @@ -47,7 +47,7 @@ class AddressType(enum.Enum): b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' -def create_deterministic_address_bcrt1_p2tr_op_true(): +def create_deterministic_address_bcrt1_p2tr_op_true(explicit_internal_key=None): """ Generates a deterministic bech32m address (segwit v1 output) that can be spent with a witness stack of OP_TRUE and the control block @@ -55,9 +55,10 @@ def create_deterministic_address_bcrt1_p2tr_op_true(): Returns a tuple with the generated address and the internal key. 
""" - internal_key = (1).to_bytes(32, 'big') + internal_key = explicit_internal_key or (1).to_bytes(32, 'big') address = output_key_to_p2tr(taproot_construct(internal_key, [(None, CScript([OP_TRUE]))]).output_pubkey) - assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka') + if explicit_internal_key is None: + assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka') return (address, internal_key) diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index 03042877b2..7edf9f3679 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -160,6 +160,15 @@ class AuthServiceProxy(): raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) + # Check for no-content HTTP status code, which can be returned when an + # RPC client requests a JSON-RPC 2.0 "notification" with no response. + # Currently this is only possible if clients call the _request() method + # directly to send a raw request. 
+ if http_response.status == HTTPStatus.NO_CONTENT: + if len(http_response.read()) != 0: + raise JSONRPCException({'code': -342, 'message': 'Content received with NO CONTENT status code'}) + return None, http_response.status + content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException( diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index cfd923bab3..f0dc866f69 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -28,6 +28,7 @@ from .messages import ( ser_uint256, tx_from_hex, uint256_from_str, + WITNESS_SCALE_FACTOR, ) from .script import ( CScript, @@ -45,7 +46,6 @@ from .script_util import ( ) from .util import assert_equal -WITNESS_SCALE_FACTOR = 4 MAX_BLOCK_SIGOPS = 20000 MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR diff --git a/test/functional/test_framework/mempool_util.py b/test/functional/test_framework/mempool_util.py new file mode 100644 index 0000000000..148cc935ed --- /dev/null +++ b/test/functional/test_framework/mempool_util.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Helpful routines for mempool testing.""" +from decimal import Decimal + +from .blocktools import ( + COINBASE_MATURITY, +) +from .util import ( + assert_equal, + assert_greater_than, + create_lots_of_big_transactions, + gen_return_txouts, +) +from .wallet import ( + MiniWallet, +) + + +def fill_mempool(test_framework, node): + """Fill mempool until eviction. + + Allows for simpler testing of scenarios with floating mempoolminfee > minrelay + Requires -datacarriersize=100000 and + -maxmempool=5. 
+ It will not ensure mempools become synced as it + is based on a single node and assumes -minrelaytxfee + is 1 sat/vbyte. + To avoid unintentional tx dependencies, the mempool filling txs are created with a + tagged ephemeral miniwallet instance. + """ + test_framework.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises") + txouts = gen_return_txouts() + relayfee = node.getnetworkinfo()['relayfee'] + + assert_equal(relayfee, Decimal('0.00001000')) + + tx_batch_size = 1 + num_of_batches = 75 + # Generate UTXOs to flood the mempool + # 1 to create a tx initially that will be evicted from the mempool later + # 75 transactions each with a fee rate higher than the previous one + ephemeral_miniwallet = MiniWallet(node, tag_name="fill_mempool_ephemeral_wallet") + test_framework.generate(ephemeral_miniwallet, 1 + num_of_batches * tx_batch_size) + + # Mine enough blocks so that the UTXOs are allowed to be spent + test_framework.generate(node, COINBASE_MATURITY - 1) + + # Get all UTXOs up front to ensure none of the transactions spend from each other, as that may + # change their effective feerate and thus the order in which they are selected for eviction. + confirmed_utxos = [ephemeral_miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)] + assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1) + + test_framework.log.debug("Create a mempool tx that will be evicted") + tx_to_be_evicted_id = ephemeral_miniwallet.send_self_transfer( + from_node=node, utxo_to_spend=confirmed_utxos.pop(0), fee_rate=relayfee)["txid"] + + # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool + # The tx has an approx. vsize of 65k, i.e. 
multiplying the previous fee rate (in sats/kvB) + # by 130 should result in a fee that corresponds to 2x of that fee rate + base_fee = relayfee * 130 + + test_framework.log.debug("Fill up the mempool with txs with higher fee rate") + with node.assert_debug_log(["rolling minimum fee bumped"]): + for batch_of_txid in range(num_of_batches): + fee = (batch_of_txid + 1) * base_fee + utxos = confirmed_utxos[:tx_batch_size] + create_lots_of_big_transactions(ephemeral_miniwallet, node, fee, tx_batch_size, txouts, utxos) + del confirmed_utxos[:tx_batch_size] + + test_framework.log.debug("The tx should be evicted by now") + # The number of transactions created should be greater than the ones present in the mempool + assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool())) + # Initial tx created should not be present in the mempool anymore as it had a lower fee rate + assert tx_to_be_evicted_id not in node.getrawmempool() + + test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee") + assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) + assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 67e0be5280..d228bd8991 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -490,7 +490,7 @@ class TestNode(): self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log)) @contextlib.contextmanager - def wait_for_debug_log(self, expected_msgs, timeout=60): + def busy_wait_for_debug_log(self, expected_msgs, timeout=60): """ Block until we see a particular debug log message fragment or until we exceed the timeout. 
Return: diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 0de09b6440..c5b69a3954 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -496,65 +496,6 @@ def check_node_connections(*, node, num_in, num_out): assert_equal(info["connections_in"], num_in) assert_equal(info["connections_out"], num_out) -def fill_mempool(test_framework, node, miniwallet): - """Fill mempool until eviction. - - Allows for simpler testing of scenarios with floating mempoolminfee > minrelay - Requires -datacarriersize=100000 and - -maxmempool=5. - It will not ensure mempools become synced as it - is based on a single node and assumes -minrelaytxfee - is 1 sat/vbyte. - To avoid unintentional tx dependencies, it is recommended to use separate miniwallets for - mempool filling vs transactions in tests. - """ - test_framework.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises") - txouts = gen_return_txouts() - relayfee = node.getnetworkinfo()['relayfee'] - - assert_equal(relayfee, Decimal('0.00001000')) - - tx_batch_size = 1 - num_of_batches = 75 - # Generate UTXOs to flood the mempool - # 1 to create a tx initially that will be evicted from the mempool later - # 75 transactions each with a fee rate higher than the previous one - test_framework.generate(miniwallet, 1 + (num_of_batches * tx_batch_size)) - - # Mine COINBASE_MATURITY - 1 blocks so that the UTXOs are allowed to be spent - test_framework.generate(node, 100 - 1) - - # Get all UTXOs up front to ensure none of the transactions spend from each other, as that may - # change their effective feerate and thus the order in which they are selected for eviction. 
- confirmed_utxos = [miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)] - assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1) - - test_framework.log.debug("Create a mempool tx that will be evicted") - tx_to_be_evicted_id = miniwallet.send_self_transfer(from_node=node, utxo_to_spend=confirmed_utxos[0], fee_rate=relayfee)["txid"] - del confirmed_utxos[0] - - # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool - # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB) - # by 130 should result in a fee that corresponds to 2x of that fee rate - base_fee = relayfee * 130 - - test_framework.log.debug("Fill up the mempool with txs with higher fee rate") - with node.assert_debug_log(["rolling minimum fee bumped"]): - for batch_of_txid in range(num_of_batches): - fee = (batch_of_txid + 1) * base_fee - utxos = confirmed_utxos[:tx_batch_size] - create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts, utxos) - del confirmed_utxos[:tx_batch_size] - - test_framework.log.debug("The tx should be evicted by now") - # The number of transactions created should be greater than the ones present in the mempool - assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool())) - # Initial tx created should not be present in the mempool anymore as it had a lower fee rate - assert tx_to_be_evicted_id not in node.getrawmempool() - - test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee") - assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) - assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) # Transaction/Block functions ############################# diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index 470ed08ed4..4433cbcc55 100644 --- a/test/functional/test_framework/wallet.py +++ 
b/test/functional/test_framework/wallet.py @@ -32,6 +32,7 @@ from test_framework.messages import ( CTxIn, CTxInWitness, CTxOut, + hash256, ) from test_framework.script import ( CScript, @@ -65,7 +66,10 @@ class MiniWalletMode(Enum): However, if the transactions need to be modified by the user (e.g. prepending scriptSig for testing opcodes that are activated by a soft-fork), or the txs should contain an actual signature, the raw modes RAW_OP_TRUE and RAW_P2PK - can be useful. Summary of modes: + can be useful. In order to avoid mixing of UTXOs between different MiniWallet + instances, a tag name can be passed to the default mode, to create different + output scripts. Note that the UTXOs from the pre-generated test chain can + only be spent if no tag is passed. Summary of modes: | output | | tx is | can modify | needs mode | description | address | standard | scriptSig | signing @@ -80,22 +84,25 @@ class MiniWalletMode(Enum): class MiniWallet: - def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE): + def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE, tag_name=None): self._test_node = test_node self._utxos = [] self._mode = mode assert isinstance(mode, MiniWalletMode) if mode == MiniWalletMode.RAW_OP_TRUE: + assert tag_name is None self._scriptPubKey = bytes(CScript([OP_TRUE])) elif mode == MiniWalletMode.RAW_P2PK: # use simple deterministic private key (k=1) + assert tag_name is None self._priv_key = ECKey() self._priv_key.set((1).to_bytes(32, 'big'), True) pub_key = self._priv_key.get_pubkey() self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes()) elif mode == MiniWalletMode.ADDRESS_OP_TRUE: - self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true() + internal_key = None if tag_name is None else hash256(tag_name.encode()) + self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true(internal_key) self._scriptPubKey = address_to_scriptpubkey(self._address) # When the 
pre-mined test framework chain is used, it contains coinbase diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 32b55813a8..690ab64c83 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -26,10 +26,16 @@ import sys import tempfile import re import logging -import unittest os.environ["REQUIRE_WALLET_TYPE_SET"] = "1" +# Minimum amount of space to run the tests. +MIN_FREE_SPACE = 1.1 * 1024 * 1024 * 1024 +# Additional space to run an extra job. +ADDITIONAL_SPACE_PER_JOB = 100 * 1024 * 1024 +# Minimum amount of space required for --nocleanup +MIN_NO_CLEANUP_SPACE = 12 * 1024 * 1024 * 1024 + # Formatting. Default colors to empty strings. DEFAULT, BOLD, GREEN, RED = ("", ""), ("", ""), ("", ""), ("", "") try: @@ -70,23 +76,7 @@ if platform.system() != 'Windows' or sys.getwindowsversion() >= (10, 0, 14393): TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 -# List of framework modules containing unit tests. Should be kept in sync with -# the output of `git grep unittest.TestCase ./test/functional/test_framework` -TEST_FRAMEWORK_MODULES = [ - "address", - "crypto.bip324_cipher", - "blocktools", - "crypto.chacha20", - "crypto.ellswift", - "key", - "messages", - "crypto.muhash", - "crypto.poly1305", - "crypto.ripemd160", - "script", - "segwit_addr", - "wallet_util", -] +TEST_FRAMEWORK_UNIT_TESTS = 'feature_framework_unit_tests.py' EXTENDED_SCRIPTS = [ # These tests are not run by default. 
@@ -200,6 +190,8 @@ BASE_SCRIPTS = [ 'wallet_txn_clone.py --segwit', 'rpc_getchaintips.py', 'rpc_misc.py', + 'p2p_1p1c_network.py', + 'p2p_opportunistic_1p1c.py', 'interface_rest.py', 'mempool_spend_coinbase.py', 'wallet_avoid_mixing_output_types.py --descriptors', @@ -255,6 +247,7 @@ BASE_SCRIPTS = [ 'wallet_keypool.py --descriptors', 'wallet_descriptor.py --descriptors', 'p2p_nobloomfilter_messages.py', + TEST_FRAMEWORK_UNIT_TESTS, 'p2p_filter.py', 'rpc_setban.py --v1transport', 'rpc_setban.py --v2transport', @@ -292,6 +285,7 @@ BASE_SCRIPTS = [ 'p2p_leak_tx.py --v1transport', 'p2p_leak_tx.py --v2transport', 'p2p_eviction.py', + 'p2p_outbound_eviction.py', 'p2p_ibd_stalling.py --v1transport', 'p2p_ibd_stalling.py --v2transport', 'p2p_net_deadlock.py --v1transport', @@ -440,7 +434,8 @@ def main(): parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs") parser.add_argument('--failfast', '-F', action='store_true', help='stop execution after the first test failure') parser.add_argument('--filter', help='filter scripts to run by regular expression') - parser.add_argument('--skipunit', '-u', action='store_true', help='skip unit tests for the test framework') + parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error") args, unknown_args = parser.parse_known_args() @@ -535,6 +530,13 @@ def main(): subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h']) sys.exit(0) + # Warn if there is not enough space on tmpdir to run the tests with --nocleanup + if args.nocleanup: + if shutil.disk_usage(tmpdir).free < MIN_NO_CLEANUP_SPACE: + print(f"{BOLD[1]}WARNING!{BOLD[0]} There may be insufficient free space in {tmpdir} to run the functional test suite with --nocleanup. 
" + f"A minimum of {MIN_NO_CLEANUP_SPACE // (1024 * 1024 * 1024)} GB of free space is required.") + passon_args.append("--nocleanup") + check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci) check_script_prefixes() @@ -552,10 +554,9 @@ def main(): combined_logs_len=args.combinedlogslen, failfast=args.failfast, use_term_control=args.ansi, - skipunit=args.skipunit, ) -def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control, skipunit=False): +def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control): args = args or [] # Warn if bitcoind is already running @@ -572,21 +573,17 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage= if os.path.isdir(cache_dir): print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir)) + # Warn if there is not enough space on the testing dir + min_space = MIN_FREE_SPACE + (jobs - 1) * ADDITIONAL_SPACE_PER_JOB + if shutil.disk_usage(tmpdir).free < min_space: + print(f"{BOLD[1]}WARNING!{BOLD[0]} There may be insufficient free space in {tmpdir} to run the Bitcoin functional test suite. " + f"Running the test suite with fewer than {min_space // (1024 * 1024)} MB of free space might cause tests to fail.") tests_dir = src_dir + '/test/functional/' # This allows `test_runner.py` to work from an out-of-source build directory using a symlink, # a hard link or a copy on any platform. See https://github.com/bitcoin/bitcoin/pull/27561. 
sys.path.append(tests_dir) - if not skipunit: - print("Running Unit Tests for Test Framework Modules") - test_framework_tests = unittest.TestSuite() - for module in TEST_FRAMEWORK_MODULES: - test_framework_tests.addTest(unittest.TestLoader().loadTestsFromName("test_framework.{}".format(module))) - result = unittest.TextTestRunner(verbosity=1, failfast=True).run(test_framework_tests) - if not result.wasSuccessful(): - sys.exit("Early exiting after failure in TestFramework unit tests") - flags = ['--cachedir={}'.format(cache_dir)] + args if enable_coverage: @@ -650,6 +647,11 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage= logging.debug("Early exiting after test failure") break + if "[Errno 28] No space left on device" in stdout: + sys.exit(f"Early exiting after test failure due to insufficient free space in {tmpdir}\n" + f"Test execution data left in {tmpdir}.\n" + f"Additional storage is needed to execute testing.") + print_results(test_results, max_len_name, (int(time.time() - start_time))) if coverage: diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index 56228d2bad..1b2b8ec1f3 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -461,10 +461,13 @@ class WalletTest(BitcoinTestFramework): assert_raises_rpc_error(-5, "Invalid Bitcoin address or script", self.nodes[0].importaddress, "invalid") # This will raise an exception for attempting to import a pubkey that isn't in hex - assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex") + assert_raises_rpc_error(-5, 'Pubkey "not hex" must be a hex string', self.nodes[0].importpubkey, "not hex") - # This will raise an exception for importing an invalid pubkey - assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f") + # This will raise exceptions for importing pubkeys with invalid length / invalid coordinates + 
too_short_pubkey = "5361746f736869204e616b616d6f746f" + assert_raises_rpc_error(-5, f'Pubkey "{too_short_pubkey}" must have a length of either 33 or 65 bytes', self.nodes[0].importpubkey, too_short_pubkey) + not_on_curve_pubkey = bytes([4] + [0]*64).hex() # pubkey with coordinates (0,0) is not on curve + assert_raises_rpc_error(-5, f'Pubkey "{not_on_curve_pubkey}" must be cryptographically valid', self.nodes[0].importpubkey, not_on_curve_pubkey) # Bech32m addresses cannot be imported into a legacy wallet assert_raises_rpc_error(-5, "Bech32m addresses cannot be imported into legacy wallets", self.nodes[0].importaddress, "bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6") diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py index ff4648e638..71c883f166 100755 --- a/test/functional/wallet_fundrawtransaction.py +++ b/test/functional/wallet_fundrawtransaction.py @@ -1054,8 +1054,8 @@ class RawTransactionsTest(BitcoinTestFramework): assert_raises_rpc_error(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"]), wallet.fundrawtransaction, raw_tx) # Error conditions - assert_raises_rpc_error(-5, "'not a pubkey' is not hex", wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["not a pubkey"]}) - assert_raises_rpc_error(-5, "'01234567890a0b0c0d0e0f' is not a valid public key", wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["01234567890a0b0c0d0e0f"]}) + assert_raises_rpc_error(-5, 'Pubkey "not a pubkey" must be a hex string', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["not a pubkey"]}) + assert_raises_rpc_error(-5, 'Pubkey "01234567890a0b0c0d0e0f" must have a length of either 33 or 65 bytes', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["01234567890a0b0c0d0e0f"]}) assert_raises_rpc_error(-5, "'not a script' is not hex", wallet.fundrawtransaction, raw_tx, solving_data={"scripts":["not a script"]}) 
assert_raises_rpc_error(-8, "Unable to parse descriptor 'not a descriptor'", wallet.fundrawtransaction, raw_tx, solving_data={"descriptors":["not a descriptor"]}) assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"]}]) diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py index 558d63e85c..a635175e7c 100755 --- a/test/fuzz/test_runner.py +++ b/test/fuzz/test_runner.py @@ -11,6 +11,7 @@ import argparse import configparser import logging import os +import platform import random import subprocess import sys @@ -18,7 +19,7 @@ import sys def get_fuzz_env(*, target, source_dir): symbolizer = os.environ.get('LLVM_SYMBOLIZER_PATH', "/usr/bin/llvm-symbolizer") - return { + fuzz_env = { 'FUZZ': target, 'UBSAN_OPTIONS': f'suppressions={source_dir}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1', @@ -27,6 +28,10 @@ def get_fuzz_env(*, target, source_dir): 'ASAN_SYMBOLIZER_PATH':symbolizer, 'MSAN_SYMBOLIZER_PATH':symbolizer, } + if platform.system() == "Windows": + # On Windows, `env` option must include valid `SystemRoot`. 
+ fuzz_env = {**fuzz_env, 'SystemRoot': os.environ.get('SystemRoot')} + return fuzz_env def main(): diff --git a/test/lint/README.md b/test/lint/README.md index 9d167bac72..13c2099808 100644 --- a/test/lint/README.md +++ b/test/lint/README.md @@ -30,13 +30,13 @@ Then you can use: | Lint test | Dependency | |-----------|:----------:| -| [`lint-python.py`](lint/lint-python.py) | [flake8](https://gitlab.com/pycqa/flake8) -| [`lint-python.py`](lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF) -| [`lint-python.py`](lint/lint-python.py) | [mypy](https://github.com/python/mypy) -| [`lint-python.py`](lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq) -| [`lint-python-dead-code.py`](lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture) -| [`lint-shell.py`](lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck) -| [`lint-spelling.py`](lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell) +| [`lint-python.py`](/test/lint/lint-python.py) | [flake8](https://github.com/PyCQA/flake8) +| [`lint-python.py`](/test/lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF) +| [`lint-python.py`](/test/lint/lint-python.py) | [mypy](https://github.com/python/mypy) +| [`lint-python.py`](/test/lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq) +| [`lint-python-dead-code.py`](/test/lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture) +| [`lint-shell.py`](/test/lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck) +| [`lint-spelling.py`](/test/lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell) In use versions and install instructions are available in the [CI setup](../../ci/lint/04_install.sh). 
diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs index e22e047e4b..d5dd98effe 100644 --- a/test/lint/test_runner/src/main.rs +++ b/test/lint/test_runner/src/main.rs @@ -137,9 +137,9 @@ fn lint_trailing_whitespace() -> LintResult { if trailing_space { Err(r#" ^^^ -Trailing whitespace is problematic, because git may warn about it, or editors may remove it by -default, forcing developers in the future to either undo the changes manually or spend time on -review. +Trailing whitespace (including Windows line endings [CR LF]) is problematic, because git may warn +about it, or editors may remove it by default, forcing developers in the future to either undo the +changes manually or spend time on review. Thus, it is best to remove the trailing space now. @@ -178,7 +178,6 @@ Please add any false positives, such as subtrees, or externally sourced files to fn lint_includes_build_config() -> LintResult { let config_path = "./src/config/bitcoin-config.h.in"; - let include_directive = "#include <config/bitcoin-config.h>"; if !Path::new(config_path).is_file() { assert!(Command::new("./autogen.sh") .status() @@ -235,7 +234,11 @@ fn lint_includes_build_config() -> LintResult { } else { "--files-with-matches" }, - include_directive, + if mode { + "^#include <config/bitcoin-config.h> // IWYU pragma: keep$" + } else { + "#include <config/bitcoin-config.h>" // Catch redundant includes with and without the IWYU pragma + }, "--", ]) .args(defines_files.lines()) @@ -256,6 +259,11 @@ even though bitcoin-config.h indicates that a faster feature is available and sh If you are unsure which symbol is used, you can find it with this command: git grep --perl-regexp '{}' -- file_name + +Make sure to include it with the IWYU pragma. Otherwise, IWYU may falsely instruct to remove the +include again. + +#include <config/bitcoin-config.h> // IWYU pragma: keep "#, defines_regex )); |