aboutsummaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rwxr-xr-xtest/functional/feature_abortnode.py2
-rwxr-xr-xtest/functional/feature_addrman.py7
-rwxr-xr-xtest/functional/feature_asmap.py15
-rwxr-xr-xtest/functional/feature_assumeutxo.py163
-rwxr-xr-xtest/functional/feature_assumevalid.py2
-rwxr-xr-xtest/functional/feature_bip68_sequence.py6
-rwxr-xr-xtest/functional/feature_block.py4
-rwxr-xr-xtest/functional/feature_cltv.py3
-rwxr-xr-xtest/functional/feature_csv_activation.py3
-rwxr-xr-xtest/functional/feature_dersig.py3
-rwxr-xr-xtest/functional/feature_fee_estimation.py9
-rwxr-xr-xtest/functional/feature_framework_unit_tests.py50
-rwxr-xr-xtest/functional/feature_index_prune.py4
-rwxr-xr-xtest/functional/feature_maxtipage.py4
-rwxr-xr-xtest/functional/feature_maxuploadtarget.py5
-rwxr-xr-xtest/functional/feature_proxy.py88
-rwxr-xr-xtest/functional/feature_reindex_readonly.py9
-rwxr-xr-xtest/functional/feature_taproot.py2
-rwxr-xr-xtest/functional/feature_utxo_set_hash.py4
-rwxr-xr-xtest/functional/feature_versionbits_warning.py8
-rwxr-xr-xtest/functional/interface_rest.py6
-rwxr-xr-xtest/functional/interface_zmq.py38
-rwxr-xr-xtest/functional/mempool_accept.py35
-rwxr-xr-xtest/functional/mempool_accept_v3.py604
-rwxr-xr-xtest/functional/mempool_limit.py54
-rwxr-xr-xtest/functional/mempool_package_limits.py2
-rwxr-xr-xtest/functional/mempool_packages.py3
-rwxr-xr-xtest/functional/mempool_sigoplimit.py9
-rwxr-xr-xtest/functional/mining_basic.py2
-rwxr-xr-xtest/functional/mocks/signer.py31
-rwxr-xr-xtest/functional/p2p_1p1c_network.py165
-rwxr-xr-xtest/functional/p2p_addrv2_relay.py11
-rwxr-xr-xtest/functional/p2p_block_sync.py2
-rwxr-xr-xtest/functional/p2p_compactblocks.py4
-rwxr-xr-xtest/functional/p2p_compactblocks_hb.py13
-rwxr-xr-xtest/functional/p2p_disconnect_ban.py8
-rwxr-xr-xtest/functional/p2p_feefilter.py6
-rwxr-xr-xtest/functional/p2p_filter.py3
-rwxr-xr-xtest/functional/p2p_handshake.py93
-rwxr-xr-xtest/functional/p2p_i2p_ports.py18
-rwxr-xr-xtest/functional/p2p_ibd_stalling.py3
-rwxr-xr-xtest/functional/p2p_initial_headers_sync.py15
-rwxr-xr-xtest/functional/p2p_invalid_block.py3
-rwxr-xr-xtest/functional/p2p_invalid_messages.py45
-rwxr-xr-xtest/functional/p2p_mutated_blocks.py116
-rwxr-xr-xtest/functional/p2p_node_network_limited.py79
-rwxr-xr-xtest/functional/p2p_opportunistic_1p1c.py414
-rwxr-xr-xtest/functional/p2p_permissions.py7
-rwxr-xr-xtest/functional/p2p_segwit.py15
-rwxr-xr-xtest/functional/p2p_sendheaders.py21
-rwxr-xr-xtest/functional/p2p_timeouts.py29
-rwxr-xr-xtest/functional/p2p_tx_download.py43
-rwxr-xr-xtest/functional/p2p_v2_earlykeyresponse.py2
-rwxr-xr-xtest/functional/p2p_v2_transport.py6
-rwxr-xr-xtest/functional/rpc_net.py260
-rwxr-xr-xtest/functional/rpc_packages.py90
-rwxr-xr-xtest/functional/rpc_psbt.py16
-rwxr-xr-xtest/functional/rpc_rawtransaction.py5
-rwxr-xr-xtest/functional/rpc_setban.py10
-rwxr-xr-xtest/functional/rpc_signrawtransactionwithkey.py2
-rwxr-xr-xtest/functional/rpc_uptime.py2
-rw-r--r--test/functional/test-shell.md8
-rw-r--r--test/functional/test_framework/blocktools.py2
-rw-r--r--test/functional/test_framework/crypto/bip324_cipher.py8
-rwxr-xr-xtest/functional/test_framework/messages.py1
-rw-r--r--test/functional/test_framework/netutil.py11
-rwxr-xr-xtest/functional/test_framework/p2p.py44
-rw-r--r--test/functional/test_framework/script.py20
-rwxr-xr-xtest/functional/test_framework/test_framework.py21
-rwxr-xr-xtest/functional/test_framework/test_node.py22
-rw-r--r--test/functional/test_framework/util.py76
-rw-r--r--test/functional/test_framework/wallet.py20
-rwxr-xr-xtest/functional/test_framework/wallet_util.py58
-rwxr-xr-xtest/functional/test_runner.py83
-rwxr-xr-xtest/functional/wallet_abandonconflict.py9
-rwxr-xr-xtest/functional/wallet_address_types.py5
-rwxr-xr-xtest/functional/wallet_assumeutxo.py2
-rwxr-xr-xtest/functional/wallet_avoid_mixing_output_types.py4
-rwxr-xr-xtest/functional/wallet_avoidreuse.py5
-rwxr-xr-xtest/functional/wallet_backup.py13
-rwxr-xr-xtest/functional/wallet_backwards_compatibility.py19
-rwxr-xr-xtest/functional/wallet_balance.py5
-rwxr-xr-xtest/functional/wallet_basic.py6
-rwxr-xr-xtest/functional/wallet_bumpfee.py3
-rwxr-xr-xtest/functional/wallet_conflicts.py301
-rwxr-xr-xtest/functional/wallet_createwalletdescriptor.py123
-rwxr-xr-xtest/functional/wallet_fundrawtransaction.py5
-rwxr-xr-xtest/functional/wallet_gethdkeys.py185
-rwxr-xr-xtest/functional/wallet_groups.py8
-rwxr-xr-xtest/functional/wallet_hd.py3
-rwxr-xr-xtest/functional/wallet_import_rescan.py4
-rwxr-xr-xtest/functional/wallet_importdescriptors.py7
-rwxr-xr-xtest/functional/wallet_importprunedfunds.py2
-rwxr-xr-xtest/functional/wallet_keypool.py16
-rwxr-xr-xtest/functional/wallet_keypool_topup.py24
-rwxr-xr-xtest/functional/wallet_listreceivedby.py2
-rwxr-xr-xtest/functional/wallet_listsinceblock.py2
-rwxr-xr-xtest/functional/wallet_listtransactions.py6
-rwxr-xr-xtest/functional/wallet_migration.py11
-rwxr-xr-xtest/functional/wallet_reorgsrestore.py1
-rwxr-xr-xtest/functional/wallet_send.py33
-rwxr-xr-xtest/functional/wallet_signer.py12
-rwxr-xr-xtest/functional/wallet_signrawtransactionwithwallet.py2
-rwxr-xr-xtest/fuzz/test_runner.py46
-rw-r--r--test/lint/README.md28
-rwxr-xr-xtest/lint/commit-script-check.sh5
-rwxr-xr-xtest/lint/lint-git-commit-check.py23
-rwxr-xr-xtest/lint/lint-include-guards.py8
-rwxr-xr-xtest/lint/lint-includes.py9
-rwxr-xr-xtest/lint/lint-spelling.py5
-rwxr-xr-xtest/lint/lint-whitespace.py136
-rw-r--r--test/lint/lint_ignore_dirs.py5
-rw-r--r--test/lint/test_runner/src/main.rs221
-rw-r--r--test/sanitizer_suppressions/ubsan4
114 files changed, 3595 insertions, 768 deletions
diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py
index 740d3b7f0e..01ba2834c4 100755
--- a/test/functional/feature_abortnode.py
+++ b/test/functional/feature_abortnode.py
@@ -36,7 +36,7 @@ class AbortNodeTest(BitcoinTestFramework):
# Check that node0 aborted
self.log.info("Waiting for crash")
- self.nodes[0].wait_until_stopped(timeout=5, expect_error=True, expected_stderr="Error: A fatal internal error occurred, see debug.log for details")
+ self.nodes[0].wait_until_stopped(timeout=5, expect_error=True, expected_stderr="Error: A fatal internal error occurred, see debug.log for details: Failed to disconnect block.")
self.log.info("Node crashed - now verifying restart fails")
self.nodes[0].assert_start_raises_init_error()
diff --git a/test/functional/feature_addrman.py b/test/functional/feature_addrman.py
index a7ce864fde..95d33d62ea 100755
--- a/test/functional/feature_addrman.py
+++ b/test/functional/feature_addrman.py
@@ -156,12 +156,7 @@ class AddrmanTest(BitcoinTestFramework):
)
self.log.info("Check that missing addrman is recreated")
- self.stop_node(0)
- os.remove(peers_dat)
- with self.nodes[0].assert_debug_log([
- f'Creating peers.dat because the file was not found ("{peers_dat}")',
- ]):
- self.start_node(0)
+ self.restart_node(0, clear_addrman=True)
assert_equal(self.nodes[0].getnodeaddresses(), [])
diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py
index ae483fe449..024a8fa18c 100755
--- a/test/functional/feature_asmap.py
+++ b/test/functional/feature_asmap.py
@@ -39,11 +39,12 @@ def expected_messages(filename):
class AsmapTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
- self.extra_args = [["-checkaddrman=1"]] # Do addrman checks on all operations.
+ # Do addrman checks on all operations and use deterministic addrman
+ self.extra_args = [["-checkaddrman=1", "-test=addrman"]]
def fill_addrman(self, node_id):
- """Add 1 tried address to the addrman, followed by 1 new address."""
- for addr, tried in [[0, True], [1, False]]:
+ """Add 2 tried addresses to the addrman, followed by 2 new addresses."""
+ for addr, tried in [[0, True], [1, True], [2, False], [3, False]]:
self.nodes[node_id].addpeeraddress(address=f"101.{addr}.0.0", tried=tried, port=8333)
def test_without_asmap_arg(self):
@@ -84,12 +85,12 @@ class AsmapTest(BitcoinTestFramework):
self.log.info("Test bitcoind -asmap restart with addrman containing new and tried entries")
self.stop_node(0)
shutil.copyfile(self.asmap_raw, self.default_asmap)
- self.start_node(0, ["-asmap", "-checkaddrman=1"])
+ self.start_node(0, ["-asmap", "-checkaddrman=1", "-test=addrman"])
self.fill_addrman(node_id=0)
- self.restart_node(0, ["-asmap", "-checkaddrman=1"])
+ self.restart_node(0, ["-asmap", "-checkaddrman=1", "-test=addrman"])
with self.node.assert_debug_log(
expected_msgs=[
- "CheckAddrman: new 1, tried 1, total 2 started",
+ "CheckAddrman: new 2, tried 2, total 4 started",
"CheckAddrman: completed",
]
):
@@ -114,7 +115,7 @@ class AsmapTest(BitcoinTestFramework):
def test_asmap_health_check(self):
self.log.info('Test bitcoind -asmap logs ASMap Health Check with basic stats')
shutil.copyfile(self.asmap_raw, self.default_asmap)
- msg = "ASMap Health Check: 2 clearnet peers are mapped to 1 ASNs with 0 peers being unmapped"
+ msg = "ASMap Health Check: 4 clearnet peers are mapped to 3 ASNs with 0 peers being unmapped"
with self.node.assert_debug_log(expected_msgs=[msg]):
self.start_node(0, extra_args=['-asmap'])
os.remove(self.default_asmap)
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
index 528680f2ca..58a5442f4e 100755
--- a/test/functional/feature_assumeutxo.py
+++ b/test/functional/feature_assumeutxo.py
@@ -11,13 +11,8 @@ The assumeutxo value generated and used here is committed to in
## Possible test improvements
-- TODO: test what happens with -reindex and -reindex-chainstate before the
- snapshot is validated, and make sure it's deleted successfully.
-
Interesting test cases could be loading an assumeutxo snapshot file with:
-- TODO: Valid hash but invalid snapshot file (bad coin height or
- bad other serialization)
- TODO: Valid snapshot file, but referencing a snapshot block that turns out to be
invalid, or has an invalid parent
- TODO: Valid snapshot file and snapshot block, but the block is not on the
@@ -34,6 +29,7 @@ Interesting starting states could be loading a snapshot when the current chain t
"""
from shutil import rmtree
+from dataclasses import dataclass
from test_framework.messages import tx_from_hex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -60,7 +56,7 @@ class AssumeutxoTest(BitcoinTestFramework):
self.extra_args = [
[],
["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"],
- ["-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"],
+ ["-persistmempool=0","-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"],
]
def setup_network(self):
@@ -100,18 +96,29 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info(" - snapshot file with alternated UTXO data")
cases = [
- [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b"], # wrong outpoint hash
- [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad"], # wrong outpoint index
- [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8"], # wrong coin code VARINT((coinbase ? 1 : 0) | (height << 1))
- [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5"], # another wrong coin code
+ # (content, offset, wrong_hash, custom_message)
+ [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b", None], # wrong outpoint hash
+ [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad", None], # wrong outpoint index
+ [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8", None], # wrong coin code VARINT
+ [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5", None], # another wrong coin code
+ [b"\x84\x58", 36, None, "[snapshot] bad snapshot data after deserializing 0 coins"], # wrong coin case with height 364 and coinbase 0
+ [b"\xCA\xD2\x8F\x5A", 41, None, "[snapshot] bad snapshot data after deserializing 0 coins - bad tx out value"], # Amount exceeds MAX_MONEY
]
- for content, offset, wrong_hash in cases:
+ for content, offset, wrong_hash, custom_message in cases:
with open(bad_snapshot_path, "wb") as f:
f.write(valid_snapshot_contents[:(32 + 8 + offset)])
f.write(content)
f.write(valid_snapshot_contents[(32 + 8 + offset + len(content)):])
- expected_error(log_msg=f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}")
+
+ log_msg = custom_message if custom_message is not None else f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}"
+ expected_error(log_msg=log_msg)
+
+ def test_headers_not_synced(self, valid_snapshot_path):
+ for node in self.nodes[1:]:
+ assert_raises_rpc_error(-32603, "The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) must appear in the headers chain. Make sure all headers are syncing, and call this RPC again.",
+ node.loadtxoutset,
+ valid_snapshot_path)
def test_invalid_chainstate_scenarios(self):
self.log.info("Test different scenarios of invalid snapshot chainstate in datadir")
@@ -127,7 +134,7 @@ class AssumeutxoTest(BitcoinTestFramework):
with self.nodes[0].assert_debug_log([log_msg]):
self.nodes[0].assert_start_raises_init_error(expected_msg=error_msg)
- expected_error_msg = f"Error: A fatal internal error occurred, see debug.log for details"
+ expected_error_msg = f"Error: A fatal internal error occurred, see debug.log for details: Assumeutxo data not found for the given blockhash '7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a'."
error_details = f"Assumeutxo data not found for the given blockhash"
expected_error(log_msg=error_details, error_msg=expected_error_msg)
@@ -135,6 +142,25 @@ class AssumeutxoTest(BitcoinTestFramework):
rmtree(chainstate_snapshot_path)
self.start_node(0)
+ def test_invalid_mempool_state(self, dump_output_path):
+ self.log.info("Test bitcoind should fail when mempool not empty.")
+ node=self.nodes[2]
+ tx = MiniWallet(node).send_self_transfer(from_node=node)
+
+ assert tx['txid'] in node.getrawmempool()
+
+ # Attempt to load the snapshot on Node 2 and expect it to fail
+ with node.assert_debug_log(expected_msgs=["[snapshot] can't activate a snapshot when mempool not empty"]):
+ assert_raises_rpc_error(-32603, "Unable to load UTXO snapshot", node.loadtxoutset, dump_output_path)
+
+ self.restart_node(2, extra_args=self.extra_args[2])
+
+ def test_invalid_file_path(self):
+ self.log.info("Test bitcoind should fail when file path is invalid.")
+ node = self.nodes[0]
+ path = node.datadir_path / node.chain / "invalid" / "path"
+ assert_raises_rpc_error(-8, "Couldn't open file {} for reading.".format(path), node.loadtxoutset, path)
+
def run_test(self):
"""
Bring up two (disconnected) nodes, mine some new blocks on the first,
@@ -153,26 +179,28 @@ class AssumeutxoTest(BitcoinTestFramework):
for n in self.nodes:
n.setmocktime(n.getblockheader(n.getbestblockhash())['time'])
- self.sync_blocks()
-
# Generate a series of blocks that `n0` will have in the snapshot,
- # but that n1 doesn't yet see. In order for the snapshot to activate,
- # though, we have to ferry over the new headers to n1 so that it
- # isn't waiting forever to see the header of the snapshot's base block
- # while disconnected from n0.
+ # but that n1 and n2 don't yet see.
+ assert n0.getblockcount() == START_HEIGHT
+ blocks = {START_HEIGHT: Block(n0.getbestblockhash(), 1, START_HEIGHT + 1)}
for i in range(100):
+ block_tx = 1
if i % 3 == 0:
self.mini_wallet.send_self_transfer(from_node=n0)
+ block_tx += 1
self.generate(n0, nblocks=1, sync_fun=self.no_op)
- newblock = n0.getblock(n0.getbestblockhash(), 0)
+ height = n0.getblockcount()
+ hash = n0.getbestblockhash()
+ blocks[height] = Block(hash, block_tx, blocks[height-1].chain_tx + block_tx)
+ if i == 4:
+ # Create a stale block that forks off the main chain before the snapshot.
+ temp_invalid = n0.getbestblockhash()
+ n0.invalidateblock(temp_invalid)
+ stale_hash = self.generateblock(n0, output="raw(aaaa)", transactions=[], sync_fun=self.no_op)["hash"]
+ n0.invalidateblock(stale_hash)
+ n0.reconsiderblock(temp_invalid)
+ stale_block = n0.getblock(stale_hash, 0)
- # make n1 aware of the new header, but don't give it the block.
- n1.submitheader(newblock)
- n2.submitheader(newblock)
-
- # Ensure everyone is seeing the same headers.
- for n in self.nodes:
- assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT)
self.log.info("-- Testing assumeutxo + some indexes + pruning")
@@ -182,10 +210,27 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}")
dump_output = n0.dumptxoutset('utxos.dat')
+ self.log.info("Test loading snapshot when headers are not synced")
+ self.test_headers_not_synced(dump_output['path'])
+
+ # In order for the snapshot to activate, we have to ferry over the new
+ # headers to n1 and n2 so that they see the header of the snapshot's
+ # base block while disconnected from n0.
+ for i in range(1, 300):
+ block = n0.getblock(n0.getblockhash(i), 0)
+ # make n1 and n2 aware of the new header, but don't give them the
+ # block.
+ n1.submitheader(block)
+ n2.submitheader(block)
+
+ # Ensure everyone is seeing the same headers.
+ for n in self.nodes:
+ assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT)
+
assert_equal(
dump_output['txoutset_hash'],
"a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27")
- assert_equal(dump_output["nchaintx"], 334)
+ assert_equal(dump_output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx)
assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
# Mine more blocks on top of the snapshot that n1 hasn't yet seen. This
@@ -197,14 +242,39 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+ self.test_invalid_mempool_state(dump_output['path'])
self.test_invalid_snapshot_scenarios(dump_output['path'])
self.test_invalid_chainstate_scenarios()
+ self.test_invalid_file_path()
self.log.info(f"Loading snapshot into second node from {dump_output['path']}")
loaded = n1.loadtxoutset(dump_output['path'])
assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+ def check_tx_counts(final: bool) -> None:
+ """Check nTx and nChainTx intermediate values right after loading
+ the snapshot, and final values after the snapshot is validated."""
+ for height, block in blocks.items():
+ tx = n1.getblockheader(block.hash)["nTx"]
+ chain_tx = n1.getchaintxstats(nblocks=1, blockhash=block.hash)["txcount"]
+
+ # Intermediate nTx of the starting block should be set, but nTx of
+ # later blocks should be 0 before they are downloaded.
+ if final or height == START_HEIGHT:
+ assert_equal(tx, block.tx)
+ else:
+ assert_equal(tx, 0)
+
+ # Intermediate nChainTx of the starting block and snapshot block
+ # should be set, but others should be 0 until they are downloaded.
+ if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT):
+ assert_equal(chain_tx, block.chain_tx)
+ else:
+ assert_equal(chain_tx, 0)
+
+ check_tx_counts(final=False)
+
normal, snapshot = n1.getchainstates()["chainstates"]
assert_equal(normal['blocks'], START_HEIGHT)
assert_equal(normal.get('snapshot_blockhash'), None)
@@ -215,6 +285,15 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
+ self.log.info("Submit a stale block that forked off the chain before the snapshot")
+ # Normally a block like this would not be downloaded, but if it is
+ # submitted early before the background chain catches up to the fork
+ # point, it winds up in m_blocks_unlinked and triggers a corner case
+ # that previously crashed CheckBlockIndex.
+ n1.submitblock(stale_block)
+ n1.getchaintips()
+ n1.getblock(stale_hash)
+
self.log.info("Submit a spending transaction for a snapshot chainstate coin to the mempool")
# spend the coinbase output of the first block that is not available on node1
spend_coin_blockhash = n1.getblockhash(START_HEIGHT + 1)
@@ -252,6 +331,16 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info("Restarted node before snapshot validation completed, reloading...")
self.restart_node(1, extra_args=self.extra_args[1])
+
+ # Send snapshot block to n1 out of order. This makes the test less
+ # realistic because normally the snapshot block is one of the last
+ # blocks downloaded, but its useful to test because it triggers more
+ # corner cases in ReceivedBlockTransactions() and CheckBlockIndex()
+ # setting and testing nChainTx values, and it exposed previous bugs.
+ snapshot_hash = n0.getblockhash(SNAPSHOT_BASE_HEIGHT)
+ snapshot_block = n0.getblock(snapshot_hash, 0)
+ n1.submitblock(snapshot_block)
+
self.connect_nodes(0, 1)
self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})")
@@ -268,6 +357,8 @@ class AssumeutxoTest(BitcoinTestFramework):
}
self.wait_until(lambda: n1.getindexinfo() == completed_idx_state)
+ self.log.info("Re-check nTx and nChainTx values")
+ check_tx_counts(final=True)
for i in (0, 1):
n = self.nodes[i]
@@ -295,6 +386,17 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+ for reindex_arg in ['-reindex=1', '-reindex-chainstate=1']:
+ self.log.info(f"Check that restarting with {reindex_arg} will delete the snapshot chainstate")
+ self.restart_node(2, extra_args=[reindex_arg, *self.extra_args[2]])
+ assert_equal(1, len(n2.getchainstates()["chainstates"]))
+ for i in range(1, 300):
+ block = n0.getblock(n0.getblockhash(i), 0)
+ n2.submitheader(block)
+ loaded = n2.loadtxoutset(dump_output['path'])
+ assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
+ assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+
normal, snapshot = n2.getchainstates()['chainstates']
assert_equal(normal['blocks'], START_HEIGHT)
assert_equal(normal.get('snapshot_blockhash'), None)
@@ -342,6 +444,11 @@ class AssumeutxoTest(BitcoinTestFramework):
self.connect_nodes(0, 2)
self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)
+@dataclass
+class Block:
+ hash: str
+ tx: int
+ chain_tx: int
if __name__ == '__main__':
AssumeutxoTest().main()
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
index 613d2eab14..982fa79915 100755
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -159,7 +159,7 @@ class AssumeValidTest(BitcoinTestFramework):
for i in range(2202):
p2p1.send_message(msg_block(self.blocks[i]))
# Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
- p2p1.sync_with_ping(960)
+ p2p1.sync_with_ping(timeout=960)
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
p2p2 = self.nodes[2].add_p2p_connection(BaseNode())
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index 894afffc79..8768d4040d 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -408,10 +408,8 @@ class BIP68Test(BitcoinTestFramework):
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
mini_wallet = MiniWallet(self.nodes[1])
- mini_wallet.rescan_utxos()
- tx = mini_wallet.create_self_transfer()["tx"]
- tx.nVersion = 2
- mini_wallet.sendrawtransaction(from_node=self.nodes[1], tx_hex=tx.serialize().hex())
+ mini_wallet.send_self_transfer(from_node=self.nodes[1], version=2)
+
if __name__ == '__main__':
BIP68Test().main()
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 58ef1e761d..8a95975184 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -1263,6 +1263,10 @@ class FullBlockTest(BitcoinTestFramework):
b89a = self.update_block("89a", [tx])
self.send_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
+ # Don't use v2transport for the large reorg, which is too slow with the unoptimized python ChaCha20 implementation
+ if self.options.v2transport:
+ self.nodes[0].disconnect_p2ps()
+ self.helper_peer = self.nodes[0].add_p2p_connection(P2PDataStore(), supports_v2_p2p=False)
self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")
self.move_tip(88)
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 8c45fb5a4d..fb3f662271 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -83,9 +83,10 @@ CLTV_HEIGHT = 111
class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [[
f'-testactivationheight=cltv@{CLTV_HEIGHT}',
- '-whitelist=noban@127.0.0.1',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
]]
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index 92e4187f3c..bc1f9e8f2f 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -95,8 +95,9 @@ class BIP68_112_113Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [[
- '-whitelist=noban@127.0.0.1',
f'-testactivationheight=csv@{CSV_ACTIVATION_HEIGHT}',
'-par=1', # Use only one script thread to get the exact reject reason for testing
]]
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index 44c12b2a59..035e7151ca 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -47,9 +47,10 @@ DERSIG_HEIGHT = 102
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [[
f'-testactivationheight=dersig@{DERSIG_HEIGHT}',
- '-whitelist=noban@127.0.0.1',
'-par=1', # Use only one script thread to get the exact log msg for testing
]]
self.setup_clean_chain = True
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 4f56d585d3..ffc87f8b8b 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -132,11 +132,12 @@ def make_tx(wallet, utxo, feerate):
class EstimateFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
- # Force fSendTrickle to true (via whitelist.noban)
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [
- ["-whitelist=noban@127.0.0.1"],
- ["-whitelist=noban@127.0.0.1", "-blockmaxweight=68000"],
- ["-whitelist=noban@127.0.0.1", "-blockmaxweight=32000"],
+ [],
+ ["-blockmaxweight=68000"],
+ ["-blockmaxweight=32000"],
]
def setup_network(self):
diff --git a/test/functional/feature_framework_unit_tests.py b/test/functional/feature_framework_unit_tests.py
new file mode 100755
index 0000000000..c9754e083c
--- /dev/null
+++ b/test/functional/feature_framework_unit_tests.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017-2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Framework unit tests
+
+Unit tests for the test framework.
+"""
+
+import sys
+import unittest
+
+from test_framework.test_framework import TEST_EXIT_PASSED, TEST_EXIT_FAILED
+
+# List of framework modules containing unit tests. Should be kept in sync with
+# the output of `git grep unittest.TestCase ./test/functional/test_framework`
+TEST_FRAMEWORK_MODULES = [
+ "address",
+ "crypto.bip324_cipher",
+ "blocktools",
+ "crypto.chacha20",
+ "crypto.ellswift",
+ "key",
+ "messages",
+ "crypto.muhash",
+ "crypto.poly1305",
+ "crypto.ripemd160",
+ "script",
+ "segwit_addr",
+ "wallet_util",
+]
+
+
+def run_unit_tests():
+ test_framework_tests = unittest.TestSuite()
+ for module in TEST_FRAMEWORK_MODULES:
+ test_framework_tests.addTest(
+ unittest.TestLoader().loadTestsFromName(f"test_framework.{module}")
+ )
+ result = unittest.TextTestRunner(stream=sys.stdout, verbosity=1, failfast=True).run(
+ test_framework_tests
+ )
+ if not result.wasSuccessful():
+ sys.exit(TEST_EXIT_FAILED)
+ sys.exit(TEST_EXIT_PASSED)
+
+
+if __name__ == "__main__":
+ run_unit_tests()
+
diff --git a/test/functional/feature_index_prune.py b/test/functional/feature_index_prune.py
index d6e802b399..66c0a4f615 100755
--- a/test/functional/feature_index_prune.py
+++ b/test/functional/feature_index_prune.py
@@ -31,7 +31,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
expected_stats = {
'coinstatsindex': {'synced': True, 'best_block_height': height}
}
- self.wait_until(lambda: self.nodes[1].getindexinfo() == expected_stats)
+ self.wait_until(lambda: self.nodes[1].getindexinfo() == expected_stats, timeout=150)
expected = {**expected_filter, **expected_stats}
self.wait_until(lambda: self.nodes[2].getindexinfo() == expected)
@@ -128,7 +128,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
self.log.info("make sure we get an init error when starting the nodes again with the indices")
filter_msg = "Error: basic block filter index best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"
stats_msg = "Error: coinstatsindex best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"
- end_msg = f"{os.linesep}Error: Failed to start indexes, shutting down.."
+ end_msg = f"{os.linesep}Error: A fatal internal error occurred, see debug.log for details: Failed to start indexes, shutting down.."
for i, msg in enumerate([filter_msg, stats_msg, filter_msg]):
self.nodes[i].assert_start_raises_init_error(extra_args=self.extra_args[i], expected_msg=msg+end_msg)
diff --git a/test/functional/feature_maxtipage.py b/test/functional/feature_maxtipage.py
index 51f37ef1e0..a1774a5395 100755
--- a/test/functional/feature_maxtipage.py
+++ b/test/functional/feature_maxtipage.py
@@ -43,6 +43,10 @@ class MaxTipAgeTest(BitcoinTestFramework):
self.generate(node_miner, 1)
assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], False)
+ # reset time to system time so we don't have a time offset with the ibd node the next
+ # time we connect to it, ensuring TimeOffsets::WarnIfOutOfSync() doesn't output to stderr
+ node_miner.setmocktime(0)
+
def run_test(self):
self.log.info("Test IBD with maximum tip age of 24 hours (default).")
self.test_maxtipage(DEFAULT_MAX_TIP_AGE, set_parameter=False)
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 814eb21e6f..39cff7b738 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -81,7 +81,8 @@ class MaxUploadTest(BitcoinTestFramework):
p2p_conns = []
for _ in range(3):
- p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
+ # Don't use v2transport in this test (too slow with the unoptimized python ChaCha20 implementation)
+ p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn(), supports_v2_p2p=False))
# Now mine a big block
mine_large_block(self, self.wallet, self.nodes[0])
@@ -173,7 +174,7 @@ class MaxUploadTest(BitcoinTestFramework):
self.assert_uploadtarget_state(target_reached=False, serve_historical_blocks=False)
# Reconnect to self.nodes[0]
- peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer = self.nodes[0].add_p2p_connection(TestP2PConn(), supports_v2_p2p=False)
# Sending mempool message shouldn't disconnect peer, as total limit isn't reached yet
peer.send_and_ping(msg_mempool())
diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py
index 662007d65e..7a6f639021 100755
--- a/test/functional/feature_proxy.py
+++ b/test/functional/feature_proxy.py
@@ -17,6 +17,7 @@ Test plan:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
+ - proxy over unix domain sockets
- Create various proxies (as threads)
- Create nodes that connect to them
@@ -39,7 +40,9 @@ addnode connect to a CJDNS address
- Test passing unknown -onlynet
"""
+import os
import socket
+import tempfile
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
@@ -47,7 +50,7 @@ from test_framework.util import (
assert_equal,
p2p_port,
)
-from test_framework.netutil import test_ipv6_local
+from test_framework.netutil import test_ipv6_local, test_unix_socket
# Networks returned by RPC getpeerinfo.
NET_UNROUTABLE = "not_publicly_routable"
@@ -60,14 +63,17 @@ NET_CJDNS = "cjdns"
# Networks returned by RPC getnetworkinfo, defined in src/rpc/net.cpp::GetNetworksInfo()
NETWORKS = frozenset({NET_IPV4, NET_IPV6, NET_ONION, NET_I2P, NET_CJDNS})
+# Use the shortest temp path possible since UNIX sockets may have as little as 92-char limit
+socket_path = tempfile.NamedTemporaryFile().name
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
- self.num_nodes = 5
+ self.num_nodes = 7
self.setup_clean_chain = True
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
+ self.have_unix_sockets = test_unix_socket()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
@@ -89,6 +95,15 @@ class ProxyTest(BitcoinTestFramework):
else:
self.log.warning("Testing without local IPv6 support")
+ if self.have_unix_sockets:
+ self.conf4 = Socks5Configuration()
+ self.conf4.af = socket.AF_UNIX
+ self.conf4.addr = socket_path
+ self.conf4.unauth = True
+ self.conf4.auth = True
+ else:
+ self.log.warning("Testing without local unix domain sockets support")
+
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
@@ -96,6 +111,9 @@ class ProxyTest(BitcoinTestFramework):
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
+ if self.have_unix_sockets:
+ self.serv4 = Socks5Server(self.conf4)
+ self.serv4.start()
# We will not try to connect to this.
self.i2p_sam = ('127.0.0.1', 7656)
@@ -109,10 +127,15 @@ class ProxyTest(BitcoinTestFramework):
['-listen', f'-proxy={self.conf2.addr[0]}:{self.conf2.addr[1]}','-proxyrandomize=1'],
[],
['-listen', f'-proxy={self.conf1.addr[0]}:{self.conf1.addr[1]}','-proxyrandomize=1',
- '-cjdnsreachable']
+ '-cjdnsreachable'],
+ [],
+ []
]
if self.have_ipv6:
args[3] = ['-listen', f'-proxy=[{self.conf3.addr[0]}]:{self.conf3.addr[1]}','-proxyrandomize=0', '-noonion']
+ if self.have_unix_sockets:
+ args[5] = ['-listen', f'-proxy=unix:{socket_path}']
+ args[6] = ['-listen', f'-onion=unix:{socket_path}']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
@@ -124,7 +147,7 @@ class ProxyTest(BitcoinTestFramework):
def node_test(self, node, *, proxies, auth, test_onion, test_cjdns):
rv = []
addr = "15.61.23.23:1234"
- self.log.debug(f"Test: outgoing IPv4 connection through node for address {addr}")
+ self.log.debug(f"Test: outgoing IPv4 connection through node {node.index} for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[0].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -140,7 +163,7 @@ class ProxyTest(BitcoinTestFramework):
if self.have_ipv6:
addr = "[1233:3432:2434:2343:3234:2345:6546:4534]:5443"
- self.log.debug(f"Test: outgoing IPv6 connection through node for address {addr}")
+ self.log.debug(f"Test: outgoing IPv6 connection through node {node.index} for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[1].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -156,7 +179,7 @@ class ProxyTest(BitcoinTestFramework):
if test_onion:
addr = "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:8333"
- self.log.debug(f"Test: outgoing onion connection through node for address {addr}")
+ self.log.debug(f"Test: outgoing onion connection through node {node.index} for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[2].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -171,7 +194,7 @@ class ProxyTest(BitcoinTestFramework):
if test_cjdns:
addr = "[fc00:1:2:3:4:5:6:7]:8888"
- self.log.debug(f"Test: outgoing CJDNS connection through node for address {addr}")
+ self.log.debug(f"Test: outgoing CJDNS connection through node {node.index} for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[1].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -185,7 +208,7 @@ class ProxyTest(BitcoinTestFramework):
self.network_test(node, addr, network=NET_CJDNS)
addr = "node.noumenon:8333"
- self.log.debug(f"Test: outgoing DNS name connection through node for address {addr}")
+ self.log.debug(f"Test: outgoing DNS name connection through node {node.index} for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[3].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -230,6 +253,12 @@ class ProxyTest(BitcoinTestFramework):
proxies=[self.serv1, self.serv1, self.serv1, self.serv1],
auth=False, test_onion=True, test_cjdns=True)
+ if self.have_unix_sockets:
+ self.node_test(self.nodes[5],
+ proxies=[self.serv4, self.serv4, self.serv4, self.serv4],
+ auth=True, test_onion=True, test_cjdns=False)
+
+
def networks_dict(d):
r = {}
for x in d['networks']:
@@ -315,6 +344,37 @@ class ProxyTest(BitcoinTestFramework):
assert_equal(n4['i2p']['reachable'], False)
assert_equal(n4['cjdns']['reachable'], True)
+ if self.have_unix_sockets:
+ n5 = networks_dict(nodes_network_info[5])
+ assert_equal(NETWORKS, n5.keys())
+ for net in NETWORKS:
+ if net == NET_I2P:
+ expected_proxy = ''
+ expected_randomize = False
+ else:
+ expected_proxy = 'unix:' + self.conf4.addr # no port number
+ expected_randomize = True
+ assert_equal(n5[net]['proxy'], expected_proxy)
+ assert_equal(n5[net]['proxy_randomize_credentials'], expected_randomize)
+ assert_equal(n5['onion']['reachable'], True)
+ assert_equal(n5['i2p']['reachable'], False)
+ assert_equal(n5['cjdns']['reachable'], False)
+
+ n6 = networks_dict(nodes_network_info[6])
+ assert_equal(NETWORKS, n6.keys())
+ for net in NETWORKS:
+ if net != NET_ONION:
+ expected_proxy = ''
+ expected_randomize = False
+ else:
+ expected_proxy = 'unix:' + self.conf4.addr # no port number
+ expected_randomize = True
+ assert_equal(n6[net]['proxy'], expected_proxy)
+ assert_equal(n6[net]['proxy_randomize_credentials'], expected_randomize)
+ assert_equal(n6['onion']['reachable'], True)
+ assert_equal(n6['i2p']['reachable'], False)
+ assert_equal(n6['cjdns']['reachable'], False)
+
self.stop_node(1)
self.log.info("Test passing invalid -proxy hostname raises expected init error")
@@ -383,6 +443,18 @@ class ProxyTest(BitcoinTestFramework):
msg = "Error: Unknown network specified in -onlynet: 'abc'"
self.nodes[1].assert_start_raises_init_error(expected_msg=msg)
+ self.log.info("Test passing too-long unix path to -proxy raises init error")
+ self.nodes[1].extra_args = [f"-proxy=unix:{'x' * 1000}"]
+ if self.have_unix_sockets:
+ msg = f"Error: Invalid -proxy address or hostname: 'unix:{'x' * 1000}'"
+ else:
+ # If unix sockets are not supported, the file path is incorrectly interpreted as host:port
+ msg = f"Error: Invalid port specified in -proxy: 'unix:{'x' * 1000}'"
+ self.nodes[1].assert_start_raises_init_error(expected_msg=msg)
+
+ # Cleanup socket path we established outside the individual test directory.
+ if self.have_unix_sockets:
+ os.unlink(socket_path)
if __name__ == '__main__':
ProxyTest().main()
diff --git a/test/functional/feature_reindex_readonly.py b/test/functional/feature_reindex_readonly.py
index dd99c3c4fa..25cff87a3b 100755
--- a/test/functional/feature_reindex_readonly.py
+++ b/test/functional/feature_reindex_readonly.py
@@ -24,6 +24,7 @@ class BlockstoreReindexTest(BitcoinTestFramework):
opreturn = "6a"
nulldata = fastprune_blockfile_size * "ff"
self.generateblock(self.nodes[0], output=f"raw({opreturn}{nulldata})", transactions=[])
+ block_count = self.nodes[0].getblockcount()
self.stop_node(0)
assert (self.nodes[0].chain_path / "blocks" / "blk00000.dat").exists()
@@ -73,10 +74,10 @@ class BlockstoreReindexTest(BitcoinTestFramework):
pass
if undo_immutable:
- self.log.info("Attempt to restart and reindex the node with the unwritable block file")
- with self.nodes[0].assert_debug_log(expected_msgs=['FlushStateToDisk', 'failed to open file'], unexpected_msgs=[]):
- self.nodes[0].assert_start_raises_init_error(extra_args=['-reindex', '-fastprune'],
- expected_msg="Error: A fatal internal error occurred, see debug.log for details")
+ self.log.debug("Attempt to restart and reindex the node with the unwritable block file")
+ with self.nodes[0].wait_for_debug_log([b"Reindexing finished"]):
+ self.start_node(0, extra_args=['-reindex', '-fastprune'])
+ assert block_count == self.nodes[0].getblockcount()
undo_immutable()
filename.chmod(0o777)
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py
index e85541d0ec..e7d65b4539 100755
--- a/test/functional/feature_taproot.py
+++ b/test/functional/feature_taproot.py
@@ -10,7 +10,6 @@ from test_framework.blocktools import (
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
- WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
@@ -20,6 +19,7 @@ from test_framework.messages import (
CTxOut,
SEQUENCE_FINAL,
tx_from_hex,
+ WITNESS_SCALE_FACTOR,
)
from test_framework.script import (
ANNEX_TAG,
diff --git a/test/functional/feature_utxo_set_hash.py b/test/functional/feature_utxo_set_hash.py
index be154b411f..0bdcc6d83d 100755
--- a/test/functional/feature_utxo_set_hash.py
+++ b/test/functional/feature_utxo_set_hash.py
@@ -4,8 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test UTXO set hash value calculation in gettxoutsetinfo."""
-import struct
-
from test_framework.messages import (
CBlock,
COutPoint,
@@ -58,7 +56,7 @@ class UTXOSetHashTest(BitcoinTestFramework):
continue
data = COutPoint(int(tx.rehash(), 16), n).serialize()
- data += struct.pack("<i", height * 2 + coinbase)
+ data += (height * 2 + coinbase).to_bytes(4, "little")
data += tx_out.serialize()
muhash.insert(data)
diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py
index 073d3de812..2c330eb681 100755
--- a/test/functional/feature_versionbits_warning.py
+++ b/test/functional/feature_versionbits_warning.py
@@ -73,8 +73,8 @@ class VersionBitsWarningTest(BitcoinTestFramework):
self.generatetoaddress(node, VB_PERIOD - VB_THRESHOLD + 1, node_deterministic_address)
# Check that we're not getting any versionbit-related errors in get*info()
- assert not VB_PATTERN.match(node.getmininginfo()["warnings"])
- assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"])
+ assert not VB_PATTERN.match(",".join(node.getmininginfo()["warnings"]))
+ assert not VB_PATTERN.match(",".join(node.getnetworkinfo()["warnings"]))
# Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
self.send_blocks_with_version(peer, VB_THRESHOLD, VB_UNKNOWN_VERSION)
@@ -94,8 +94,8 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# Generating one more block will be enough to generate an error.
self.generatetoaddress(node, 1, node_deterministic_address)
# Check that get*info() shows the versionbits unknown rules warning
- assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
- assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
+ assert WARN_UNKNOWN_RULES_ACTIVE in ",".join(node.getmininginfo()["warnings"])
+ assert WARN_UNKNOWN_RULES_ACTIVE in ",".join(node.getnetworkinfo()["warnings"])
# Check that the alert file shows the versionbits unknown rules warning
self.wait_until(lambda: self.versionbits_in_alert_file())
diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py
index b81eae2506..ae8d6b226d 100755
--- a/test/functional/interface_rest.py
+++ b/test/functional/interface_rest.py
@@ -53,8 +53,7 @@ class RESTTest (BitcoinTestFramework):
self.num_nodes = 2
self.extra_args = [["-rest", "-blockfilterindex=1"], []]
# whitelist peers to speed up tx relay / mempool sync
- for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1")
+ self.noban_tx_relay = True
self.supports_cli = False
def test_rest_request(
@@ -337,6 +336,9 @@ class RESTTest (BitcoinTestFramework):
assert_greater_than(json_obj['bytes'], 300)
mempool_info = self.nodes[0].getmempoolinfo()
+ # pop unstable unbroadcastcount before check
+ for obj in [json_obj, mempool_info]:
+ obj.pop("unbroadcastcount")
assert_equal(json_obj, mempool_info)
# Check that there are our submitted transactions in the TX memory pool
diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py
index 2358dd4387..9f6f8919de 100755
--- a/test/functional/interface_zmq.py
+++ b/test/functional/interface_zmq.py
@@ -3,8 +3,11 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
+import os
import struct
+import tempfile
from time import sleep
+from io import BytesIO
from test_framework.address import (
ADDRESS_BCRT1_P2WSH_OP_TRUE,
@@ -17,6 +20,7 @@ from test_framework.blocktools import (
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import (
+ CBlock,
hash256,
tx_from_hex,
)
@@ -28,7 +32,7 @@ from test_framework.util import (
from test_framework.wallet import (
MiniWallet,
)
-from test_framework.netutil import test_ipv6_local
+from test_framework.netutil import test_ipv6_local, test_unix_socket
# Test may be skipped and not have zmq installed
@@ -104,9 +108,8 @@ class ZMQTestSetupBlock:
class ZMQTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
- # This test isn't testing txn relay/timing, so set whitelist on the
- # peers for instant txn relay. This speeds up the test run time 2-3x.
- self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.zmq_port_base = p2p_port(self.num_nodes + 1)
def skip_test_if_missing_module(self):
@@ -118,6 +121,10 @@ class ZMQTest (BitcoinTestFramework):
self.ctx = zmq.Context()
try:
self.test_basic()
+ if test_unix_socket():
+ self.test_basic(unix=True)
+ else:
+ self.log.info("Skipping ipc test, because UNIX sockets are not supported.")
self.test_sequence()
self.test_mempool_sync()
self.test_reorg()
@@ -138,8 +145,7 @@ class ZMQTest (BitcoinTestFramework):
socket.setsockopt(zmq.IPV6, 1)
subscribers.append(ZMQSubscriber(socket, topic.encode()))
- self.restart_node(0, [f"-zmqpub{topic}={address}" for topic, address in services] +
- self.extra_args[0])
+ self.restart_node(0, [f"-zmqpub{topic}={address.replace('ipc://', 'unix:')}" for topic, address in services])
for i, sub in enumerate(subscribers):
sub.socket.connect(services[i][1])
@@ -176,12 +182,19 @@ class ZMQTest (BitcoinTestFramework):
return subscribers
- def test_basic(self):
+ def test_basic(self, unix = False):
+ self.log.info(f"Running basic test with {'ipc' if unix else 'tcp'} protocol")
# Invalid zmq arguments don't take down the node, see #17185.
self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
address = f"tcp://127.0.0.1:{self.zmq_port_base}"
+
+ if unix:
+ # Use the shortest temp path possible since paths may have as little as 92-char limit
+ socket_path = tempfile.NamedTemporaryFile().name
+ address = f"ipc://{socket_path}"
+
subs = self.setup_zmq_test([(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]])
hashblock = subs[0]
@@ -203,8 +216,13 @@ class ZMQTest (BitcoinTestFramework):
assert_equal(tx.hash, txid.hex())
# Should receive the generated raw block.
- block = rawblock.receive()
- assert_equal(genhashes[x], hash256_reversed(block[:80]).hex())
+ hex = rawblock.receive()
+ block = CBlock()
+ block.deserialize(BytesIO(hex))
+ assert block.is_valid()
+ assert_equal(block.vtx[0].hash, tx.hash)
+ assert_equal(len(block.vtx), 1)
+ assert_equal(genhashes[x], hash256_reversed(hex[:80]).hex())
# Should receive the generated block hash.
hash = hashblock.receive().hex()
@@ -242,6 +260,8 @@ class ZMQTest (BitcoinTestFramework):
])
assert_equal(self.nodes[1].getzmqnotifications(), [])
+ if unix:
+ os.unlink(socket_path)
def test_reorg(self):
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 8f3aec96a7..b00be5f4f0 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -28,6 +28,8 @@ from test_framework.script import (
OP_HASH160,
OP_RETURN,
OP_TRUE,
+ SIGHASH_ALL,
+ sign_input_legacy,
)
from test_framework.script_util import (
DUMMY_MIN_OP_RETURN_SCRIPT,
@@ -90,9 +92,23 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
txid_in_block = self.wallet.sendrawtransaction(from_node=node, tx_hex=raw_tx_in_block)
self.generate(node, 1)
self.mempool_size = 0
+ # Also check feerate. 1BTC/kvB fails
+ assert_raises_rpc_error(-8, "Fee rates larger than or equal to 1BTC/kvB are not accepted", lambda: self.check_mempool_result(
+ result_expected=None,
+ rawtxs=[raw_tx_in_block],
+ maxfeerate=1,
+ ))
+ # Check negative feerate
+ assert_raises_rpc_error(-3, "Amount out of range", lambda: self.check_mempool_result(
+ result_expected=None,
+ rawtxs=[raw_tx_in_block],
+ maxfeerate=-0.01,
+ ))
+ # ... 0.99 passes
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}],
rawtxs=[raw_tx_in_block],
+ maxfeerate=0.99,
)
self.log.info('A transaction not in the mempool')
@@ -372,5 +388,24 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
maxfeerate=0,
)
+ self.log.info('Spending a confirmed bare multisig is okay')
+ address = self.wallet.get_address()
+ tx = tx_from_hex(raw_tx_reference)
+ privkey, pubkey = generate_keypair()
+ tx.vout[0].scriptPubKey = keys_to_multisig_script([pubkey] * 3, k=1) # Some bare multisig script (1-of-3)
+ tx.rehash()
+ self.generateblock(node, address, [tx.serialize().hex()])
+ tx_spend = CTransaction()
+ tx_spend.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx_spend.vout.append(CTxOut(tx.vout[0].nValue - int(fee*COIN), script_to_p2wsh_script(CScript([OP_TRUE]))))
+ tx_spend.rehash()
+ sign_input_legacy(tx_spend, 0, tx.vout[0].scriptPubKey, privkey, sighash_type=SIGHASH_ALL)
+ tx_spend.vin[0].scriptSig = bytes(CScript([OP_0])) + tx_spend.vin[0].scriptSig
+ self.check_mempool_result(
+ result_expected=[{'txid': tx_spend.rehash(), 'allowed': True, 'vsize': tx_spend.get_vsize(), 'fees': { 'base': Decimal('0.00000700')}}],
+ rawtxs=[tx_spend.serialize().hex()],
+ maxfeerate=0,
+ )
+
if __name__ == '__main__':
MempoolAcceptanceTest().main()
diff --git a/test/functional/mempool_accept_v3.py b/test/functional/mempool_accept_v3.py
new file mode 100755
index 0000000000..8285b82c19
--- /dev/null
+++ b/test/functional/mempool_accept_v3.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+from decimal import Decimal
+
+from test_framework.messages import (
+ MAX_BIP125_RBF_SEQUENCE,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_greater_than,
+ assert_greater_than_or_equal,
+ assert_raises_rpc_error,
+)
+from test_framework.wallet import (
+ COIN,
+ DEFAULT_FEE,
+ MiniWallet,
+)
+
+MAX_REPLACEMENT_CANDIDATES = 100
+
+def cleanup(extra_args=None):
+ def decorator(func):
+ def wrapper(self):
+ try:
+ if extra_args is not None:
+ self.restart_node(0, extra_args=extra_args)
+ func(self)
+ finally:
+ # Clear mempool again after test
+ self.generate(self.nodes[0], 1)
+ if extra_args is not None:
+ self.restart_node(0)
+ return wrapper
+ return decorator
+
+class MempoolAcceptV3(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.extra_args = [["-acceptnonstdtxn=1"]]
+ self.setup_clean_chain = True
+
+ def check_mempool(self, txids):
+ """Assert exact contents of the node's mempool (by txid)."""
+ mempool_contents = self.nodes[0].getrawmempool()
+ assert_equal(len(txids), len(mempool_contents))
+ assert all([txid in txids for txid in mempool_contents])
+
+ @cleanup(extra_args=["-datacarriersize=1000", "-acceptnonstdtxn=1"])
+ def test_v3_acceptance(self):
+ node = self.nodes[0]
+ self.log.info("Test a child of a v3 transaction cannot be more than 1000vB")
+ tx_v3_parent_normal = self.wallet.send_self_transfer(from_node=node, version=3)
+ self.check_mempool([tx_v3_parent_normal["txid"]])
+ tx_v3_child_heavy = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_v3_parent_normal["new_utxo"],
+ target_weight=4004,
+ version=3
+ )
+ assert_greater_than_or_equal(tx_v3_child_heavy["tx"].get_vsize(), 1000)
+ expected_error_child_heavy = f"v3-rule-violation, v3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big"
+ assert_raises_rpc_error(-26, expected_error_child_heavy, node.sendrawtransaction, tx_v3_child_heavy["hex"])
+ self.check_mempool([tx_v3_parent_normal["txid"]])
+ # tx has no descendants
+ assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 1)
+
+ self.log.info("Test that, during replacements, only the new transaction counts for v3 descendant limit")
+ tx_v3_child_almost_heavy = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE,
+ utxo_to_spend=tx_v3_parent_normal["new_utxo"],
+ target_weight=3987,
+ version=3
+ )
+ assert_greater_than_or_equal(1000, tx_v3_child_almost_heavy["tx"].get_vsize())
+ self.check_mempool([tx_v3_parent_normal["txid"], tx_v3_child_almost_heavy["txid"]])
+ assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2)
+ tx_v3_child_almost_heavy_rbf = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE * 2,
+ utxo_to_spend=tx_v3_parent_normal["new_utxo"],
+ target_weight=3500,
+ version=3
+ )
+ assert_greater_than_or_equal(tx_v3_child_almost_heavy["tx"].get_vsize() + tx_v3_child_almost_heavy_rbf["tx"].get_vsize(), 1000)
+ self.check_mempool([tx_v3_parent_normal["txid"], tx_v3_child_almost_heavy_rbf["txid"]])
+ assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2)
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_v3_replacement(self):
+ node = self.nodes[0]
+ self.log.info("Test v3 transactions may be replaced by v3 transactions")
+ utxo_v3_bip125 = self.wallet.get_utxo()
+ tx_v3_bip125 = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE,
+ utxo_to_spend=utxo_v3_bip125,
+ sequence=MAX_BIP125_RBF_SEQUENCE,
+ version=3
+ )
+ self.check_mempool([tx_v3_bip125["txid"]])
+
+ tx_v3_bip125_rbf = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE * 2,
+ utxo_to_spend=utxo_v3_bip125,
+ version=3
+ )
+ self.check_mempool([tx_v3_bip125_rbf["txid"]])
+
+ self.log.info("Test v3 transactions may be replaced by V2 transactions")
+ tx_v3_bip125_rbf_v2 = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE * 3,
+ utxo_to_spend=utxo_v3_bip125,
+ version=2
+ )
+ self.check_mempool([tx_v3_bip125_rbf_v2["txid"]])
+
+ self.log.info("Test that replacements cannot cause violation of inherited v3")
+ utxo_v3_parent = self.wallet.get_utxo()
+ tx_v3_parent = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE,
+ utxo_to_spend=utxo_v3_parent,
+ version=3
+ )
+ tx_v3_child = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE,
+ utxo_to_spend=tx_v3_parent["new_utxo"],
+ version=3
+ )
+ self.check_mempool([tx_v3_bip125_rbf_v2["txid"], tx_v3_parent["txid"], tx_v3_child["txid"]])
+
+ tx_v3_child_rbf_v2 = self.wallet.create_self_transfer(
+ fee_rate=DEFAULT_FEE * 2,
+ utxo_to_spend=tx_v3_parent["new_utxo"],
+ version=2
+ )
+ expected_error_v2_v3 = f"v3-rule-violation, non-v3 tx {tx_v3_child_rbf_v2['txid']} (wtxid={tx_v3_child_rbf_v2['wtxid']}) cannot spend from v3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})"
+ assert_raises_rpc_error(-26, expected_error_v2_v3, node.sendrawtransaction, tx_v3_child_rbf_v2["hex"])
+ self.check_mempool([tx_v3_bip125_rbf_v2["txid"], tx_v3_parent["txid"], tx_v3_child["txid"]])
+
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_v3_bip125(self):
+ node = self.nodes[0]
+ self.log.info("Test v3 transactions that don't signal BIP125 are replaceable")
+ assert_equal(node.getmempoolinfo()["fullrbf"], False)
+ utxo_v3_no_bip125 = self.wallet.get_utxo()
+ tx_v3_no_bip125 = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE,
+ utxo_to_spend=utxo_v3_no_bip125,
+ sequence=MAX_BIP125_RBF_SEQUENCE + 1,
+ version=3
+ )
+
+ self.check_mempool([tx_v3_no_bip125["txid"]])
+ assert not node.getmempoolentry(tx_v3_no_bip125["txid"])["bip125-replaceable"]
+ tx_v3_no_bip125_rbf = self.wallet.send_self_transfer(
+ from_node=node,
+ fee_rate=DEFAULT_FEE * 2,
+ utxo_to_spend=utxo_v3_no_bip125,
+ version=3
+ )
+ self.check_mempool([tx_v3_no_bip125_rbf["txid"]])
+
+ @cleanup(extra_args=["-datacarriersize=40000", "-acceptnonstdtxn=1"])
+ def test_v3_reorg(self):
+ node = self.nodes[0]
+ self.log.info("Test that, during a reorg, v3 rules are not enforced")
+ tx_v2_block = self.wallet.send_self_transfer(from_node=node, version=2)
+ tx_v3_block = self.wallet.send_self_transfer(from_node=node, version=3)
+ tx_v3_block2 = self.wallet.send_self_transfer(from_node=node, version=3)
+ self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"]])
+
+ block = self.generate(node, 1)
+ self.check_mempool([])
+ tx_v2_from_v3 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block["new_utxo"], version=2)
+ tx_v3_from_v2 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v2_block["new_utxo"], version=3)
+ tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_weight=5000, version=3)
+ assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], 1000)
+ self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
+ node.invalidateblock(block[0])
+ self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
+ # This is needed because generate() will create the exact same block again.
+ node.reconsiderblock(block[0])
+
+
+ @cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000", "-acceptnonstdtxn=1"])
+ def test_nondefault_package_limits(self):
+ """
+ Max standard tx size + v3 rules imply the ancestor/descendant rules (at their default
+ values), but those checks must not be skipped. Ensure both sets of checks are done by
+ changing the ancestor/descendant limit configurations.
+ """
+ node = self.nodes[0]
+ self.log.info("Test that a decreased limitdescendantsize also applies to v3 child")
+ tx_v3_parent_large1 = self.wallet.send_self_transfer(from_node=node, target_weight=99900, version=3)
+ tx_v3_child_large1 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent_large1["new_utxo"], version=3)
+ # Child is within v3 limits, but parent's descendant limit is exceeded
+ assert_greater_than(1000, tx_v3_child_large1["tx"].get_vsize())
+ assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds descendant size limit for tx {tx_v3_parent_large1['txid']}", node.sendrawtransaction, tx_v3_child_large1["hex"])
+ self.check_mempool([tx_v3_parent_large1["txid"]])
+ assert_equal(node.getmempoolentry(tx_v3_parent_large1["txid"])["descendantcount"], 1)
+ self.generate(node, 1)
+
+ self.log.info("Test that a decreased limitancestorsize also applies to v3 parent")
+ self.restart_node(0, extra_args=["-limitancestorsize=10", "-datacarriersize=40000", "-acceptnonstdtxn=1"])
+ tx_v3_parent_large2 = self.wallet.send_self_transfer(from_node=node, target_weight=99900, version=3)
+ tx_v3_child_large2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent_large2["new_utxo"], version=3)
+ # Child is within v3 limits
+ assert_greater_than_or_equal(1000, tx_v3_child_large2["tx"].get_vsize())
+ assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds ancestor size limit", node.sendrawtransaction, tx_v3_child_large2["hex"])
+ self.check_mempool([tx_v3_parent_large2["txid"]])
+
+ @cleanup(extra_args=["-datacarriersize=1000", "-acceptnonstdtxn=1"])
+ def test_v3_ancestors_package(self):
+ self.log.info("Test that v3 ancestor limits are checked within the package")
+ node = self.nodes[0]
+ tx_v3_parent_normal = self.wallet.create_self_transfer(
+ fee_rate=0,
+ target_weight=4004,
+ version=3
+ )
+ tx_v3_parent_2_normal = self.wallet.create_self_transfer(
+ fee_rate=0,
+ target_weight=4004,
+ version=3
+ )
+ tx_v3_child_multiparent = self.wallet.create_self_transfer_multi(
+ utxos_to_spend=[tx_v3_parent_normal["new_utxo"], tx_v3_parent_2_normal["new_utxo"]],
+ fee_per_output=10000,
+ version=3
+ )
+ tx_v3_child_heavy = self.wallet.create_self_transfer_multi(
+ utxos_to_spend=[tx_v3_parent_normal["new_utxo"]],
+ target_weight=4004,
+ fee_per_output=10000,
+ version=3
+ )
+
+ self.check_mempool([])
+ result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_parent_2_normal["hex"], tx_v3_child_multiparent["hex"]])
+ assert_equal(result['package_msg'], f"v3-violation, tx {tx_v3_child_multiparent['txid']} (wtxid={tx_v3_child_multiparent['wtxid']}) would have too many ancestors")
+ self.check_mempool([])
+
+ self.check_mempool([])
+ result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_child_heavy["hex"]])
+ # tx_v3_child_heavy is heavy based on weight, not sigops.
+ assert_equal(result['package_msg'], f"v3-violation, v3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big: {tx_v3_child_heavy['tx'].get_vsize()} > 1000 virtual bytes")
+ self.check_mempool([])
+
+ tx_v3_parent = self.wallet.create_self_transfer(version=3)
+ tx_v3_child = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxo"], version=3)
+ tx_v3_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_child["new_utxo"], version=3)
+ result = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child["hex"], tx_v3_grandchild["hex"]])
+ assert all([txresult["package-error"] == f"v3-violation, tx {tx_v3_grandchild['txid']} (wtxid={tx_v3_grandchild['wtxid']}) would have too many ancestors" for txresult in result])
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_v3_ancestors_package_and_mempool(self):
+ """
+ A v3 transaction in a package cannot have 2 v3 parents.
+ Test that if we have a transaction graph A -> B -> C, where A, B, C are
+ all v3 transactions, that we cannot use submitpackage to get the
+ transactions all into the mempool.
+
+ Verify, in particular, that if A is already in the mempool, then
+ submitpackage(B, C) will fail.
+ """
+ node = self.nodes[0]
+ self.log.info("Test that v3 ancestor limits include transactions within the package and all in-mempool ancestors")
+ # This is our transaction "A":
+ tx_in_mempool = self.wallet.send_self_transfer(from_node=node, version=3)
+
+ # Verify that A is in the mempool
+ self.check_mempool([tx_in_mempool["txid"]])
+
+ # tx_0fee_parent is our transaction "B"; just create it.
+ tx_0fee_parent = self.wallet.create_self_transfer(utxo_to_spend=tx_in_mempool["new_utxo"], fee=0, fee_rate=0, version=3)
+
+ # tx_child_violator is our transaction "C"; create it:
+ tx_child_violator = self.wallet.create_self_transfer_multi(utxos_to_spend=[tx_0fee_parent["new_utxo"]], version=3)
+
+ # submitpackage(B, C) should fail
+ result = node.submitpackage([tx_0fee_parent["hex"], tx_child_violator["hex"]])
+ assert_equal(result['package_msg'], f"v3-violation, tx {tx_child_violator['txid']} (wtxid={tx_child_violator['wtxid']}) would have too many ancestors")
+ self.check_mempool([tx_in_mempool["txid"]])
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_sibling_eviction_package(self):
+ """
+ When a transaction has a mempool sibling, it may be eligible for sibling eviction.
+ However, this option is only available in single transaction acceptance. It doesn't work in
+ a multi-testmempoolaccept (where RBF is disabled) or when doing package CPFP.
+ """
+ self.log.info("Test v3 sibling eviction in submitpackage and multi-testmempoolaccept")
+ node = self.nodes[0]
+ # Add a parent + child to mempool
+ tx_mempool_parent = self.wallet.send_self_transfer_multi(
+ from_node=node,
+ utxos_to_spend=[self.wallet.get_utxo()],
+ num_outputs=2,
+ version=3
+ )
+ tx_mempool_sibling = self.wallet.send_self_transfer(
+ from_node=node,
+ utxo_to_spend=tx_mempool_parent["new_utxos"][0],
+ version=3
+ )
+ self.check_mempool([tx_mempool_parent["txid"], tx_mempool_sibling["txid"]])
+
+ tx_sibling_1 = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_mempool_parent["new_utxos"][1],
+ version=3,
+ fee_rate=DEFAULT_FEE*100,
+ )
+ tx_has_mempool_uncle = self.wallet.create_self_transfer(utxo_to_spend=tx_sibling_1["new_utxo"], version=3)
+
+ tx_sibling_2 = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_mempool_parent["new_utxos"][0],
+ version=3,
+ fee_rate=DEFAULT_FEE*200,
+ )
+
+ tx_sibling_3 = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_mempool_parent["new_utxos"][1],
+ version=3,
+ fee_rate=0,
+ )
+ tx_bumps_parent_with_sibling = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_sibling_3["new_utxo"],
+ version=3,
+ fee_rate=DEFAULT_FEE*300,
+ )
+
+ # Fails with another non-related transaction via testmempoolaccept
+ tx_unrelated = self.wallet.create_self_transfer(version=3)
+ result_test_unrelated = node.testmempoolaccept([tx_sibling_1["hex"], tx_unrelated["hex"]])
+ assert_equal(result_test_unrelated[0]["reject-reason"], "v3-rule-violation")
+
+ # Fails in a package via testmempoolaccept
+ result_test_1p1c = node.testmempoolaccept([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]])
+ assert_equal(result_test_1p1c[0]["reject-reason"], "v3-rule-violation")
+
+ # Allowed when tx is submitted in a package and evaluated individually.
+ # Note that the child failed since it would be the 3rd generation.
+ result_package_indiv = node.submitpackage([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]])
+ self.check_mempool([tx_mempool_parent["txid"], tx_sibling_1["txid"]])
+ expected_error_gen3 = f"v3-rule-violation, tx {tx_has_mempool_uncle['txid']} (wtxid={tx_has_mempool_uncle['wtxid']}) would have too many ancestors"
+
+ assert_equal(result_package_indiv["tx-results"][tx_has_mempool_uncle['wtxid']]['error'], expected_error_gen3)
+
+ # Allowed when tx is submitted in a package with in-mempool parent (which is deduplicated).
+ node.submitpackage([tx_mempool_parent["hex"], tx_sibling_2["hex"]])
+ self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]])
+
+ # Child cannot pay for sibling eviction for parent, as it violates v3 topology limits
+ result_package_cpfp = node.submitpackage([tx_sibling_3["hex"], tx_bumps_parent_with_sibling["hex"]])
+ self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]])
+ expected_error_cpfp = f"v3-rule-violation, tx {tx_mempool_parent['txid']} (wtxid={tx_mempool_parent['wtxid']}) would exceed descendant count limit"
+
+ assert_equal(result_package_cpfp["tx-results"][tx_sibling_3['wtxid']]['error'], expected_error_cpfp)
+
+
+ @cleanup(extra_args=["-datacarriersize=1000", "-acceptnonstdtxn=1"])
+ def test_v3_package_inheritance(self):
+ self.log.info("Test that v3 inheritance is checked within package")
+ node = self.nodes[0]
+ tx_v3_parent = self.wallet.create_self_transfer(
+ fee_rate=0,
+ target_weight=4004,
+ version=3
+ )
+ tx_v2_child = self.wallet.create_self_transfer_multi(
+ utxos_to_spend=[tx_v3_parent["new_utxo"]],
+ fee_per_output=10000,
+ version=2
+ )
+ self.check_mempool([])
+ result = node.submitpackage([tx_v3_parent["hex"], tx_v2_child["hex"]])
+ assert_equal(result['package_msg'], f"v3-violation, non-v3 tx {tx_v2_child['txid']} (wtxid={tx_v2_child['wtxid']}) cannot spend from v3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})")
+ self.check_mempool([])
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_v3_in_testmempoolaccept(self):
+ node = self.nodes[0]
+
+ self.log.info("Test that v3 inheritance is accurately assessed in testmempoolaccept")
+ tx_v2 = self.wallet.create_self_transfer(version=2)
+ tx_v2_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=2)
+ tx_v3_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=3)
+ tx_v3 = self.wallet.create_self_transfer(version=3)
+ tx_v2_from_v3 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3["new_utxo"], version=2)
+ tx_v3_from_v3 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3["new_utxo"], version=3)
+
+ # testmempoolaccept paths don't require child-with-parents topology. Ensure that topology
+ # assumptions aren't made in inheritance checks.
+ test_accept_v2_and_v3 = node.testmempoolaccept([tx_v2["hex"], tx_v3["hex"]])
+ assert all([result["allowed"] for result in test_accept_v2_and_v3])
+
+ test_accept_v3_from_v2 = node.testmempoolaccept([tx_v2["hex"], tx_v3_from_v2["hex"]])
+ expected_error_v3_from_v2 = f"v3-violation, v3 tx {tx_v3_from_v2['txid']} (wtxid={tx_v3_from_v2['wtxid']}) cannot spend from non-v3 tx {tx_v2['txid']} (wtxid={tx_v2['wtxid']})"
+ assert all([result["package-error"] == expected_error_v3_from_v2 for result in test_accept_v3_from_v2])
+
+ test_accept_v2_from_v3 = node.testmempoolaccept([tx_v3["hex"], tx_v2_from_v3["hex"]])
+ expected_error_v2_from_v3 = f"v3-violation, non-v3 tx {tx_v2_from_v3['txid']} (wtxid={tx_v2_from_v3['wtxid']}) cannot spend from v3 tx {tx_v3['txid']} (wtxid={tx_v3['wtxid']})"
+ assert all([result["package-error"] == expected_error_v2_from_v3 for result in test_accept_v2_from_v3])
+
+ test_accept_pairs = node.testmempoolaccept([tx_v2["hex"], tx_v3["hex"], tx_v2_from_v2["hex"], tx_v3_from_v3["hex"]])
+ assert all([result["allowed"] for result in test_accept_pairs])
+
+ self.log.info("Test that descendant violations are caught in testmempoolaccept")
+ tx_v3_independent = self.wallet.create_self_transfer(version=3)
+ tx_v3_parent = self.wallet.create_self_transfer_multi(num_outputs=2, version=3)
+ tx_v3_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][0], version=3)
+ tx_v3_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][1], version=3)
+ test_accept_2children = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"]])
+ expected_error_2children = f"v3-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit"
+ assert all([result["package-error"] == expected_error_2children for result in test_accept_2children])
+
+ # Extra v3 transaction does not get incorrectly marked as extra descendant
+ test_accept_1child_with_exra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_independent["hex"]])
+ assert all([result["allowed"] for result in test_accept_1child_with_exra])
+
+ # Extra v3 transaction does not make us ignore the extra descendant
+ test_accept_2children_with_exra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"], tx_v3_independent["hex"]])
+ expected_error_extra = f"v3-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit"
+ assert all([result["package-error"] == expected_error_extra for result in test_accept_2children_with_exra])
+ # Same result if the parent is already in mempool
+ node.sendrawtransaction(tx_v3_parent["hex"])
+ test_accept_2children_with_in_mempool_parent = node.testmempoolaccept([tx_v3_child_1["hex"], tx_v3_child_2["hex"]])
+ assert all([result["package-error"] == expected_error_extra for result in test_accept_2children_with_in_mempool_parent])
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_reorg_2child_rbf(self):
+ node = self.nodes[0]
+ self.log.info("Test that children of a v3 transaction can be replaced individually, even if there are multiple due to reorg")
+
+ ancestor_tx = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3)
+ self.check_mempool([ancestor_tx["txid"]])
+
+ block = self.generate(node, 1)[0]
+ self.check_mempool([])
+
+ child_1 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][0])
+ child_2 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][1])
+ self.check_mempool([child_1["txid"], child_2["txid"]])
+
+ self.generate(node, 1)
+ self.check_mempool([])
+
+ # Create a reorg, causing ancestor_tx to exceed the 1-child limit
+ node.invalidateblock(block)
+ self.check_mempool([ancestor_tx["txid"], child_1["txid"], child_2["txid"]])
+ assert_equal(node.getmempoolentry(ancestor_tx["txid"])["descendantcount"], 3)
+
+ # Create a replacement of child_1. It does not conflict with child_2.
+ child_1_conflict = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][0], fee_rate=Decimal("0.01"))
+
+ # Ensure child_1 and child_1_conflict are different transactions
+ assert (child_1_conflict["txid"] != child_1["txid"])
+ self.check_mempool([ancestor_tx["txid"], child_1_conflict["txid"], child_2["txid"]])
+ assert_equal(node.getmempoolentry(ancestor_tx["txid"])["descendantcount"], 3)
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_v3_sibling_eviction(self):
+ self.log.info("Test sibling eviction for v3")
+ node = self.nodes[0]
+ tx_v3_parent = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3)
+ # This is the sibling to replace
+ tx_v3_child_1 = self.wallet.send_self_transfer(
+ from_node=node, utxo_to_spend=tx_v3_parent["new_utxos"][0], fee_rate=DEFAULT_FEE * 2, version=3
+ )
+ assert tx_v3_child_1["txid"] in node.getrawmempool()
+
+ self.log.info("Test tx must be higher feerate than sibling to evict it")
+ tx_v3_child_2_rule6 = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=DEFAULT_FEE, version=3
+ )
+ rule6_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule6['txid']}; new feerate"
+ assert_raises_rpc_error(-26, rule6_str, node.sendrawtransaction, tx_v3_child_2_rule6["hex"])
+ self.check_mempool([tx_v3_parent['txid'], tx_v3_child_1['txid']])
+
+ self.log.info("Test tx must meet absolute fee rules to evict sibling")
+ tx_v3_child_2_rule4 = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=2 * DEFAULT_FEE + Decimal("0.00000001"), version=3
+ )
+ rule4_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule4['txid']}, not enough additional fees to relay"
+ assert_raises_rpc_error(-26, rule4_str, node.sendrawtransaction, tx_v3_child_2_rule4["hex"])
+ self.check_mempool([tx_v3_parent['txid'], tx_v3_child_1['txid']])
+
+ self.log.info("Test tx cannot cause more than 100 evictions including RBF and sibling eviction")
+ # First add 4 groups of 25 transactions.
+ utxos_for_conflict = []
+ txids_v2_100 = []
+ for _ in range(4):
+ confirmed_utxo = self.wallet.get_utxo(confirmed_only=True)
+ utxos_for_conflict.append(confirmed_utxo)
+ # 25 is within descendant limits
+ chain_length = int(MAX_REPLACEMENT_CANDIDATES / 4)
+ chain = self.wallet.create_self_transfer_chain(chain_length=chain_length, utxo_to_spend=confirmed_utxo)
+ for item in chain:
+ txids_v2_100.append(item["txid"])
+ node.sendrawtransaction(item["hex"])
+ self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_1["txid"]])
+
+ # Replacing 100 transactions is fine
+ tx_v3_replacement_only = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000)
+ # Override maxfeerate - it costs a lot to replace these 100 transactions.
+ assert node.testmempoolaccept([tx_v3_replacement_only["hex"]], maxfeerate=0)[0]["allowed"]
+ # Adding another one exceeds the limit.
+ utxos_for_conflict.append(tx_v3_parent["new_utxos"][1])
+ tx_v3_child_2_rule5 = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000, version=3)
+ rule5_str = f"too many potential replacements (including sibling eviction), rejecting replacement {tx_v3_child_2_rule5['txid']}; too many potential replacements (101 > 100)"
+ assert_raises_rpc_error(-26, rule5_str, node.sendrawtransaction, tx_v3_child_2_rule5["hex"])
+ self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_1["txid"]])
+
+ self.log.info("Test sibling eviction is successful if it meets all RBF rules")
+ tx_v3_child_2 = self.wallet.create_self_transfer(
+ utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=DEFAULT_FEE*10, version=3
+ )
+ node.sendrawtransaction(tx_v3_child_2["hex"])
+ self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_2["txid"]])
+
+ self.log.info("Test that it's possible to do a sibling eviction and RBF at the same time")
+ utxo_unrelated_conflict = self.wallet.get_utxo(confirmed_only=True)
+ tx_unrelated_replacee = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=utxo_unrelated_conflict)
+ assert tx_unrelated_replacee["txid"] in node.getrawmempool()
+
+ fee_to_beat = max(int(tx_v3_child_2["fee"] * COIN), int(tx_unrelated_replacee["fee"]*COIN))
+
+ tx_v3_child_3 = self.wallet.create_self_transfer_multi(
+ utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat*2, version=3
+ )
+ node.sendrawtransaction(tx_v3_child_3["hex"])
+ self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_3["txid"]])
+
+ @cleanup(extra_args=["-acceptnonstdtxn=1"])
+ def test_reorg_sibling_eviction_1p2c(self):
+ node = self.nodes[0]
+ self.log.info("Test that sibling eviction is not allowed when multiple siblings exist")
+
+ tx_with_multi_children = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=3, version=3, confirmed_only=True)
+ self.check_mempool([tx_with_multi_children["txid"]])
+
+ block_to_disconnect = self.generate(node, 1)[0]
+ self.check_mempool([])
+
+ tx_with_sibling1 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=tx_with_multi_children["new_utxos"][0])
+ tx_with_sibling2 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=tx_with_multi_children["new_utxos"][1])
+ self.check_mempool([tx_with_sibling1["txid"], tx_with_sibling2["txid"]])
+
+ # Create a reorg, bringing tx_with_multi_children back into the mempool with a descendant count of 3.
+ node.invalidateblock(block_to_disconnect)
+ self.check_mempool([tx_with_multi_children["txid"], tx_with_sibling1["txid"], tx_with_sibling2["txid"]])
+ assert_equal(node.getmempoolentry(tx_with_multi_children["txid"])["descendantcount"], 3)
+
+ # Sibling eviction is not allowed because there are two siblings
+ tx_with_sibling3 = self.wallet.create_self_transfer(
+ version=3,
+ utxo_to_spend=tx_with_multi_children["new_utxos"][2],
+ fee_rate=DEFAULT_FEE*50
+ )
+ expected_error_2siblings = f"v3-rule-violation, tx {tx_with_multi_children['txid']} (wtxid={tx_with_multi_children['wtxid']}) would exceed descendant count limit"
+ assert_raises_rpc_error(-26, expected_error_2siblings, node.sendrawtransaction, tx_with_sibling3["hex"])
+
+ # However, an RBF (with conflicting inputs) is possible even if the resulting cluster size exceeds 2
+ tx_with_sibling3_rbf = self.wallet.send_self_transfer(
+ from_node=node,
+ version=3,
+ utxo_to_spend=tx_with_multi_children["new_utxos"][0],
+ fee_rate=DEFAULT_FEE*50
+ )
+ self.check_mempool([tx_with_multi_children["txid"], tx_with_sibling3_rbf["txid"], tx_with_sibling2["txid"]])
+
+
+ def run_test(self):
+ self.log.info("Generate blocks to create UTXOs")
+ node = self.nodes[0]
+ self.wallet = MiniWallet(node)
+ self.generate(self.wallet, 120)
+ self.test_v3_acceptance()
+ self.test_v3_replacement()
+ self.test_v3_bip125()
+ self.test_v3_reorg()
+ self.test_nondefault_package_limits()
+ self.test_v3_ancestors_package()
+ self.test_v3_ancestors_package_and_mempool()
+ self.test_sibling_eviction_package()
+ self.test_v3_package_inheritance()
+ self.test_v3_in_testmempoolaccept()
+ self.test_reorg_2child_rbf()
+ self.test_v3_sibling_eviction()
+ self.test_reorg_sibling_eviction_1p2c()
+
+
+if __name__ == "__main__":
+ MempoolAcceptV3().main()
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
index 6215610c31..e8a568f7ab 100755
--- a/test/functional/mempool_limit.py
+++ b/test/functional/mempool_limit.py
@@ -6,7 +6,6 @@
from decimal import Decimal
-from test_framework.blocktools import COINBASE_MATURITY
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -14,8 +13,7 @@ from test_framework.util import (
assert_fee_amount,
assert_greater_than,
assert_raises_rpc_error,
- create_lots_of_big_transactions,
- gen_return_txouts,
+ fill_mempool,
)
from test_framework.wallet import (
COIN,
@@ -34,50 +32,6 @@ class MempoolLimitTest(BitcoinTestFramework):
]]
self.supports_cli = False
- def fill_mempool(self):
- """Fill mempool until eviction."""
- self.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises")
- txouts = gen_return_txouts()
- node = self.nodes[0]
- miniwallet = self.wallet
- relayfee = node.getnetworkinfo()['relayfee']
-
- tx_batch_size = 1
- num_of_batches = 75
- # Generate UTXOs to flood the mempool
- # 1 to create a tx initially that will be evicted from the mempool later
- # 75 transactions each with a fee rate higher than the previous one
- # And 1 more to verify that this tx does not get added to the mempool with a fee rate less than the mempoolminfee
- # And 2 more for the package cpfp test
- self.generate(miniwallet, 1 + (num_of_batches * tx_batch_size))
-
- # Mine 99 blocks so that the UTXOs are allowed to be spent
- self.generate(node, COINBASE_MATURITY - 1)
-
- self.log.debug("Create a mempool tx that will be evicted")
- tx_to_be_evicted_id = miniwallet.send_self_transfer(from_node=node, fee_rate=relayfee)["txid"]
-
- # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool
- # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB)
- # by 130 should result in a fee that corresponds to 2x of that fee rate
- base_fee = relayfee * 130
-
- self.log.debug("Fill up the mempool with txs with higher fee rate")
- with node.assert_debug_log(["rolling minimum fee bumped"]):
- for batch_of_txid in range(num_of_batches):
- fee = (batch_of_txid + 1) * base_fee
- create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts)
-
- self.log.debug("The tx should be evicted by now")
- # The number of transactions created should be greater than the ones present in the mempool
- assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool()))
- # Initial tx created should not be present in the mempool anymore as it had a lower fee rate
- assert tx_to_be_evicted_id not in node.getrawmempool()
-
- self.log.debug("Check that mempoolminfee is larger than minrelaytxfee")
- assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
- assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
-
def test_rbf_carveout_disallowed(self):
node = self.nodes[0]
@@ -139,7 +93,7 @@ class MempoolLimitTest(BitcoinTestFramework):
assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
- self.fill_mempool()
+ fill_mempool(self, node, self.wallet)
current_info = node.getmempoolinfo()
mempoolmin_feerate = current_info["mempoolminfee"]
@@ -229,7 +183,7 @@ class MempoolLimitTest(BitcoinTestFramework):
assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
- self.fill_mempool()
+ fill_mempool(self, node, self.wallet)
current_info = node.getmempoolinfo()
mempoolmin_feerate = current_info["mempoolminfee"]
@@ -303,7 +257,7 @@ class MempoolLimitTest(BitcoinTestFramework):
assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
- self.fill_mempool()
+ fill_mempool(self, node, self.wallet)
# Deliberately try to create a tx with a fee less than the minimum mempool fee to assert that it does not get added to the mempool
self.log.info('Create a mempool tx that will not pass mempoolminfee')
diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py
index 81451bf2a5..2a64597511 100755
--- a/test/functional/mempool_package_limits.py
+++ b/test/functional/mempool_package_limits.py
@@ -29,7 +29,7 @@ def check_package_limits(func):
testres_error_expected = node.testmempoolaccept(rawtxs=package_hex)
assert_equal(len(testres_error_expected), len(package_hex))
for txres in testres_error_expected:
- assert_equal(txres["package-error"], "package-mempool-limits")
+ assert "package-mempool-limits" in txres["package-error"]
# Clear mempool and check that the package passes now
self.generate(node, 1)
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 95f7939412..dcb66b2ca1 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -27,10 +27,11 @@ assert CUSTOM_DESCENDANT_LIMIT >= CUSTOM_ANCESTOR_LIMIT
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [
[
"-maxorphantx=1000",
- "-whitelist=noban@127.0.0.1", # immediate tx relay
],
[
"-maxorphantx=1000",
diff --git a/test/functional/mempool_sigoplimit.py b/test/functional/mempool_sigoplimit.py
index 2e7850fb40..d3fb5f9119 100755
--- a/test/functional/mempool_sigoplimit.py
+++ b/test/functional/mempool_sigoplimit.py
@@ -39,7 +39,7 @@ from test_framework.wallet import MiniWallet
from test_framework.wallet_util import generate_keypair
DEFAULT_BYTES_PER_SIGOP = 20 # default setting
-
+MAX_PUBKEYS_PER_MULTISIG = 20
class BytesPerSigOpTest(BitcoinTestFramework):
def set_test_params(self):
@@ -159,13 +159,14 @@ class BytesPerSigOpTest(BitcoinTestFramework):
# Separately, the parent tx is ok
parent_individual_testres = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex()])[0]
assert parent_individual_testres["allowed"]
- # Multisig is counted as MAX_PUBKEYS_PER_MULTISIG = 20 sigops
- assert_equal(parent_individual_testres["vsize"], 5000 * 20)
+ max_multisig_vsize = MAX_PUBKEYS_PER_MULTISIG * 5000
+ assert_equal(parent_individual_testres["vsize"], max_multisig_vsize)
# But together, it's exceeding limits in the *package* context. If sigops adjusted vsize wasn't being checked
# here, it would get further in validation and give too-long-mempool-chain error instead.
packet_test = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex(), tx_child.serialize().hex()])
- assert_equal([x["package-error"] for x in packet_test], ["package-mempool-limits", "package-mempool-limits"])
+ expected_package_error = f"package-mempool-limits, package size {2*max_multisig_vsize} exceeds ancestor size limit [limit: 101000]"
+ assert_equal([x["package-error"] for x in packet_test], [expected_package_error] * 2)
# When we actually try to submit, the parent makes it into the mempool, but the child would exceed ancestor vsize limits
res = self.nodes[0].submitpackage([tx_parent.serialize().hex(), tx_child.serialize().hex()])
diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py
index da796d3f70..5f2dde8eac 100755
--- a/test/functional/mining_basic.py
+++ b/test/functional/mining_basic.py
@@ -308,7 +308,7 @@ class MiningTest(BitcoinTestFramework):
# Should ask for the block from a p2p node, if they announce the header as well:
peer = node.add_p2p_connection(P2PDataStore())
- peer.wait_for_getheaders(timeout=5) # Drop the first getheaders
+ peer.wait_for_getheaders(timeout=5, block_hash=block.hashPrevBlock)
peer.send_blocks_and_test(blocks=[block], node=node)
# Must be active now:
assert chain_tip(block.hash, status='active', branchlen=0) in node.getchaintips()
diff --git a/test/functional/mocks/signer.py b/test/functional/mocks/signer.py
index 5f4fad6380..23d163aac3 100755
--- a/test/functional/mocks/signer.py
+++ b/test/functional/mocks/signer.py
@@ -25,35 +25,36 @@ def getdescriptors(args):
sys.stdout.write(json.dumps({
"receive": [
- "pkh([00000001/44'/1'/" + args.account + "']" + xpub + "/0/*)#vt6w3l3j",
- "sh(wpkh([00000001/49'/1'/" + args.account + "']" + xpub + "/0/*))#r0grqw5x",
- "wpkh([00000001/84'/1'/" + args.account + "']" + xpub + "/0/*)#x30uthjs",
- "tr([00000001/86'/1'/" + args.account + "']" + xpub + "/0/*)#sng9rd4t"
+ "pkh([00000001/44h/1h/" + args.account + "']" + xpub + "/0/*)#aqllu46s",
+ "sh(wpkh([00000001/49h/1h/" + args.account + "']" + xpub + "/0/*))#5dh56mgg",
+ "wpkh([00000001/84h/1h/" + args.account + "']" + xpub + "/0/*)#h62dxaej",
+ "tr([00000001/86h/1h/" + args.account + "']" + xpub + "/0/*)#pcd5w87f"
],
"internal": [
- "pkh([00000001/44'/1'/" + args.account + "']" + xpub + "/1/*)#all0v2p2",
- "sh(wpkh([00000001/49'/1'/" + args.account + "']" + xpub + "/1/*))#kwx4c3pe",
- "wpkh([00000001/84'/1'/" + args.account + "']" + xpub + "/1/*)#h92akzzg",
- "tr([00000001/86'/1'/" + args.account + "']" + xpub + "/1/*)#p8dy7c9n"
+ "pkh([00000001/44h/1h/" + args.account + "']" + xpub + "/1/*)#v567pq2g",
+ "sh(wpkh([00000001/49h/1h/" + args.account + "']" + xpub + "/1/*))#pvezzyah",
+ "wpkh([00000001/84h/1h/" + args.account + "']" + xpub + "/1/*)#xw0vmgf2",
+ "tr([00000001/86h/1h/" + args.account + "']" + xpub + "/1/*)#svg4njw3"
]
}))
def displayaddress(args):
- # Several descriptor formats are acceptable, so allowing for potential
- # changes to InferDescriptor:
if args.fingerprint != "00000001":
return sys.stdout.write(json.dumps({"error": "Unexpected fingerprint", "fingerprint": args.fingerprint}))
- expected_desc = [
- "wpkh([00000001/84'/1'/0'/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#0yneg42r",
- "tr([00000001/86'/1'/0'/0/0]c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#4vdj9jqk",
- ]
+ expected_desc = {
+ "wpkh([00000001/84h/1h/0h/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#3te6hhy7": "bcrt1qm90ugl4d48jv8n6e5t9ln6t9zlpm5th68x4f8g",
+ "sh(wpkh([00000001/49h/1h/0h/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7))#kz9y5w82": "2N2gQKzjUe47gM8p1JZxaAkTcoHPXV6YyVp",
+ "pkh([00000001/44h/1h/0h/0/0]02c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#q3pqd8wh": "n1LKejAadN6hg2FrBXoU1KrwX4uK16mco9",
+ "tr([00000001/86h/1h/0h/0/0]c97dc3f4420402e01a113984311bf4a1b8de376cac0bdcfaf1b3ac81f13433c7)#puqqa90m": "tb1phw4cgpt6cd30kz9k4wkpwm872cdvhss29jga2xpmftelhqll62mscq0k4g",
+ "wpkh([00000001/84h/1h/0h/0/1]03a20a46308be0b8ded6dff0a22b10b4245c587ccf23f3b4a303885be3a524f172)#aqpjv5xr": "wrong_address",
+ }
if args.desc not in expected_desc:
return sys.stdout.write(json.dumps({"error": "Unexpected descriptor", "desc": args.desc}))
- return sys.stdout.write(json.dumps({"address": "bcrt1qm90ugl4d48jv8n6e5t9ln6t9zlpm5th68x4f8g"}))
+ return sys.stdout.write(json.dumps({"address": expected_desc[args.desc]}))
def signtx(args):
if args.fingerprint != "00000001":
diff --git a/test/functional/p2p_1p1c_network.py b/test/functional/p2p_1p1c_network.py
new file mode 100755
index 0000000000..e88c826962
--- /dev/null
+++ b/test/functional/p2p_1p1c_network.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test that 1p1c package submission allows a 1p1c package to propagate in a "network" of nodes. Send
+various packages from different nodes on a network in which some nodes have already received some of
+the transactions (and submitted them to mempool, kept them as orphans or rejected them as
+too-low-feerate transactions). The packages should be received and accepted by all nodes.
+"""
+
+from decimal import Decimal
+from math import ceil
+
+from test_framework.messages import (
+ msg_tx,
+)
+from test_framework.p2p import (
+ P2PInterface,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_greater_than,
+ fill_mempool,
+)
+from test_framework.wallet import (
+ MiniWallet,
+ MiniWalletMode,
+)
+
+# 1sat/vB feerate denominated in BTC/KvB
+FEERATE_1SAT_VB = Decimal("0.00001000")
+
+class PackageRelayTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 4
+ # hugely speeds up the test, as it involves multiple hops of tx relay.
+ self.noban_tx_relay = True
+ self.extra_args = [[
+ "-datacarriersize=100000",
+ "-maxmempool=5",
+ ]] * self.num_nodes
+ self.supports_cli = False
+
+ def raise_network_minfee(self):
+ filler_wallet = MiniWallet(self.nodes[0])
+ fill_mempool(self, self.nodes[0], filler_wallet)
+
+ self.log.debug("Wait for the network to sync mempools")
+ self.sync_mempools()
+
+ self.log.debug("Check that all nodes' mempool minimum feerates are above min relay feerate")
+ for node in self.nodes:
+ assert_equal(node.getmempoolinfo()['minrelaytxfee'], FEERATE_1SAT_VB)
+ assert_greater_than(node.getmempoolinfo()['mempoolminfee'], FEERATE_1SAT_VB)
+
+ def create_basic_1p1c(self, wallet):
+ low_fee_parent = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB, confirmed_only=True)
+ high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB)
+ package_hex_basic = [low_fee_parent["hex"], high_fee_child["hex"]]
+ return package_hex_basic, low_fee_parent["tx"], high_fee_child["tx"]
+
+ def create_package_2outs(self, wallet):
+ # First create a tester tx to see the vsize, and then adjust the fees
+ utxo_for_2outs = wallet.get_utxo(confirmed_only=True)
+
+ low_fee_parent_2outs_tester = wallet.create_self_transfer_multi(
+ utxos_to_spend=[utxo_for_2outs],
+ num_outputs=2,
+ )
+
+ # Target 1sat/vB so the number of satoshis is equal to the vsize.
+ # Round up. The goal is to be between min relay feerate and mempool min feerate.
+ fee_2outs = ceil(low_fee_parent_2outs_tester["tx"].get_vsize() / 2)
+
+ low_fee_parent_2outs = wallet.create_self_transfer_multi(
+ utxos_to_spend=[utxo_for_2outs],
+ num_outputs=2,
+ fee_per_output=fee_2outs,
+ )
+
+ # Now create the child
+ high_fee_child_2outs = wallet.create_self_transfer_multi(
+ utxos_to_spend=low_fee_parent_2outs["new_utxos"][::-1],
+ fee_per_output=fee_2outs*100,
+ )
+ return [low_fee_parent_2outs["hex"], high_fee_child_2outs["hex"]], low_fee_parent_2outs["tx"], high_fee_child_2outs["tx"]
+
+ def create_package_2p1c(self, wallet):
+ parent1 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*10, confirmed_only=True)
+ parent2 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*20, confirmed_only=True)
+ child = wallet.create_self_transfer_multi(
+ utxos_to_spend=[parent1["new_utxo"], parent2["new_utxo"]],
+ fee_per_output=999*parent1["tx"].get_vsize(),
+ )
+ return [parent1["hex"], parent2["hex"], child["hex"]], parent1["tx"], parent2["tx"], child["tx"]
+
+ def create_packages(self):
+ # 1: Basic 1-parent-1-child package, parent 1sat/vB, child 999sat/vB
+ package_hex_1, parent_1, child_1 = self.create_basic_1p1c(self.wallet)
+
+ # 2: same as 1, parent's txid is the same as its wtxid.
+ package_hex_2, parent_2, child_2 = self.create_basic_1p1c(self.wallet_nonsegwit)
+
+ # 3: 2-parent-1-child package. Both parents are above mempool min feerate. No package submission happens.
+ # We require packages to be child-with-unconfirmed-parents and only allow 1-parent-1-child packages.
+ package_hex_3, parent_31, parent_32, child_3 = self.create_package_2p1c(self.wallet)
+
+ # 4: parent + child package where the child spends 2 different outputs from the parent.
+ package_hex_4, parent_4, child_4 = self.create_package_2outs(self.wallet)
+
+ # Assemble return results
+ packages_to_submit = [package_hex_1, package_hex_2, package_hex_3, package_hex_4]
+ # node0: sender (submits the full packages via submitpackage)
+ # node1: pre-received the children (kept as orphans)
+ # node2: pre-received only parent_31; node3: pre-received the parents (too low fee)
+ # All of node1-node3 receive parent_31 ahead of time.
+ txns_to_send = [
+ [],
+ [child_1, child_2, parent_31, child_3, child_4],
+ [parent_31],
+ [parent_1, parent_2, parent_31, parent_4]
+ ]
+
+ return packages_to_submit, txns_to_send
+
+ def run_test(self):
+ self.wallet = MiniWallet(self.nodes[1])
+ self.wallet_nonsegwit = MiniWallet(self.nodes[2], mode=MiniWalletMode.RAW_P2PK)
+ self.generate(self.wallet_nonsegwit, 10)
+ self.generate(self.wallet, 120)
+
+ self.log.info("Fill mempools with large transactions to raise mempool minimum feerates")
+ self.raise_network_minfee()
+
+ # Create the transactions.
+ self.wallet.rescan_utxos(include_mempool=True)
+ packages_to_submit, transactions_to_presend = self.create_packages()
+
+ self.peers = [self.nodes[i].add_p2p_connection(P2PInterface()) for i in range(self.num_nodes)]
+
+ self.log.info("Pre-send some transactions to nodes")
+ for (i, peer) in enumerate(self.peers):
+ for tx in transactions_to_presend[i]:
+ peer.send_and_ping(msg_tx(tx))
+ # This disconnect removes any sent orphans from the orphanage (EraseForPeer) and times
+ # out the in-flight requests. It is currently required for the test to pass,
+ # because the node will not reconsider an orphan tx and will not (re)try requesting
+ # orphan parents from multiple peers if the first one didn't respond.
+ # TODO: remove this in the future if the node tries orphan resolution with multiple peers.
+ peer.peer_disconnect()
+
+ self.log.info("Submit full packages to node0")
+ for package_hex in packages_to_submit:
+ submitpackage_result = self.nodes[0].submitpackage(package_hex)
+ assert_equal(submitpackage_result["package_msg"], "success")
+
+ self.log.info("Wait for mempools to sync")
+ self.sync_mempools(timeout=20)
+
+
+if __name__ == '__main__':
+ PackageRelayTest().main()
diff --git a/test/functional/p2p_addrv2_relay.py b/test/functional/p2p_addrv2_relay.py
index f9a8c44be2..ea114e7d70 100755
--- a/test/functional/p2p_addrv2_relay.py
+++ b/test/functional/p2p_addrv2_relay.py
@@ -11,6 +11,7 @@ import time
from test_framework.messages import (
CAddress,
msg_addrv2,
+ msg_sendaddrv2,
)
from test_framework.p2p import (
P2PInterface,
@@ -75,6 +76,12 @@ class AddrTest(BitcoinTestFramework):
self.extra_args = [["-whitelist=addr@127.0.0.1"]]
def run_test(self):
+ self.log.info('Check disconnection when sending sendaddrv2 after verack')
+ conn = self.nodes[0].add_p2p_connection(P2PInterface())
+ with self.nodes[0].assert_debug_log(['sendaddrv2 received after verack from peer=0; disconnecting']):
+ conn.send_message(msg_sendaddrv2())
+ conn.wait_for_disconnect()
+
self.log.info('Create connection that sends addrv2 messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = msg_addrv2()
@@ -89,8 +96,8 @@ class AddrTest(BitcoinTestFramework):
msg.addrs = ADDRS
msg_size = calc_addrv2_msg_size(ADDRS)
with self.nodes[0].assert_debug_log([
- f'received: addrv2 ({msg_size} bytes) peer=0',
- f'sending addrv2 ({msg_size} bytes) peer=1',
+ f'received: addrv2 ({msg_size} bytes) peer=1',
+ f'sending addrv2 ({msg_size} bytes) peer=2',
]):
addr_source.send_and_ping(msg)
self.nodes[0].setmocktime(int(time.time()) + 30 * 60)
diff --git a/test/functional/p2p_block_sync.py b/test/functional/p2p_block_sync.py
index d821edc1b1..6c7f08364e 100755
--- a/test/functional/p2p_block_sync.py
+++ b/test/functional/p2p_block_sync.py
@@ -22,7 +22,7 @@ class BlockSyncTest(BitcoinTestFramework):
# node0 -> node1 -> node2
# So node1 has both an inbound and outbound peer.
# In our test, we will mine a block on node0, and ensure that it makes
- # to to both node1 and node2.
+ # to both node1 and node2.
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index d6c06fdeed..9e314db110 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -139,7 +139,7 @@ class TestP2PConn(P2PInterface):
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
- self.wait_for_disconnect(timeout)
+ self.wait_for_disconnect(timeout=timeout)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
@@ -387,7 +387,7 @@ class CompactBlocksTest(BitcoinTestFramework):
if announce == "inv":
test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)]))
- test_node.wait_until(lambda: "getheaders" in test_node.last_message, timeout=30)
+ test_node.wait_for_getheaders(timeout=30)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
diff --git a/test/functional/p2p_compactblocks_hb.py b/test/functional/p2p_compactblocks_hb.py
index c985a1f98d..023b33ff6d 100755
--- a/test/functional/p2p_compactblocks_hb.py
+++ b/test/functional/p2p_compactblocks_hb.py
@@ -32,10 +32,15 @@ class CompactBlocksConnectionTest(BitcoinTestFramework):
self.connect_nodes(peer, 0)
self.generate(self.nodes[0], 1)
self.disconnect_nodes(peer, 0)
- status_to = [self.peer_info(1, i)['bip152_hb_to'] for i in range(2, 6)]
- status_from = [self.peer_info(i, 1)['bip152_hb_from'] for i in range(2, 6)]
- assert_equal(status_to, status_from)
- return status_to
+
+ def status_to():
+ return [self.peer_info(1, i)['bip152_hb_to'] for i in range(2, 6)]
+
+ def status_from():
+ return [self.peer_info(i, 1)['bip152_hb_from'] for i in range(2, 6)]
+
+ self.wait_until(lambda: status_to() == status_from())
+ return status_to()
def run_test(self):
self.log.info("Testing reserved high-bandwidth mode slot for outbound peer...")
diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py
index c389ff732f..678b006886 100755
--- a/test/functional/p2p_disconnect_ban.py
+++ b/test/functional/p2p_disconnect_ban.py
@@ -77,6 +77,7 @@ class DisconnectBanTest(BitcoinTestFramework):
self.nodes[1].setmocktime(old_time)
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
+ self.nodes[1].setban("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", "add")
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
@@ -85,13 +86,13 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("setban: test banning with absolute timestamp")
self.nodes[1].setban("192.168.0.2", "add", old_time + 120, True)
- # Move time forward by 3 seconds so the third ban has expired
+ # Move time forward by 3 seconds so the fourth ban has expired
self.nodes[1].setmocktime(old_time + 3)
- assert_equal(len(self.nodes[1].listbanned()), 4)
+ assert_equal(len(self.nodes[1].listbanned()), 5)
self.log.info("Test ban_duration and time_remaining")
for ban in self.nodes[1].listbanned():
- if ban["address"] in ["127.0.0.0/32", "127.0.0.0/24"]:
+ if ban["address"] in ["127.0.0.0/32", "127.0.0.0/24", "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion"]:
assert_equal(ban["ban_duration"], 86400)
assert_equal(ban["time_remaining"], 86397)
elif ban["address"] == "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19":
@@ -108,6 +109,7 @@ class DisconnectBanTest(BitcoinTestFramework):
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("192.168.0.2/32", listAfterShutdown[2]['address'])
assert_equal("/19" in listAfterShutdown[3]['address'], True)
+ assert_equal("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", listAfterShutdown[4]['address'])
# Clear ban lists
self.nodes[1].clearbanned()
diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py
index 6b03cdf877..bcba534f9a 100755
--- a/test/functional/p2p_feefilter.py
+++ b/test/functional/p2p_feefilter.py
@@ -46,16 +46,16 @@ class TestP2PConn(P2PInterface):
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
- # grant noban permission to all peers to speed up tx relay / mempool sync
self.extra_args = [[
"-minrelaytxfee=0.00000100",
- "-mintxfee=0.00000100",
- "-whitelist=noban@127.0.0.1",
+ "-mintxfee=0.00000100"
]] * self.num_nodes
def run_test(self):
diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py
index 62d55cc101..7c8ed58e51 100755
--- a/test/functional/p2p_filter.py
+++ b/test/functional/p2p_filter.py
@@ -94,9 +94,10 @@ class P2PBloomFilter(P2PInterface):
class FilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [[
'-peerbloomfilters',
- '-whitelist=noban@127.0.0.1', # immediate tx relay
]]
def generatetoscriptpubkey(self, scriptpubkey):
diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py
new file mode 100755
index 0000000000..dd19fe9333
--- /dev/null
+++ b/test/functional/p2p_handshake.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test P2P behaviour during the handshake phase (VERSION, VERACK messages).
+"""
+import itertools
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.messages import (
+ NODE_NETWORK,
+ NODE_NETWORK_LIMITED,
+ NODE_NONE,
+ NODE_P2P_V2,
+ NODE_WITNESS,
+)
+from test_framework.p2p import P2PInterface
+
+
+# Desirable service flags for outbound non-pruned and pruned peers. Note that
+# the desirable service flags for pruned peers are dynamic and only apply if
+# 1. the peer's service flag NODE_NETWORK_LIMITED is set *and*
+# 2. the local chain is close to the tip (<24h)
+DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS
+DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS
+
+
+class P2PHandshakeTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def add_outbound_connection(self, node, connection_type, services, wait_for_disconnect):
+ peer = node.add_outbound_p2p_connection(
+ P2PInterface(), p2p_idx=0, wait_for_disconnect=wait_for_disconnect,
+ connection_type=connection_type, services=services,
+ supports_v2_p2p=self.options.v2transport, advertise_v2_p2p=self.options.v2transport)
+ if not wait_for_disconnect:
+ # check that connection is alive past the version handshake and disconnect manually
+ peer.sync_with_ping()
+ peer.peer_disconnect()
+ peer.wait_for_disconnect()
+ self.wait_until(lambda: len(node.getpeerinfo()) == 0)
+
+ def test_desirable_service_flags(self, node, service_flag_tests, desirable_service_flags, expect_disconnect):
+ """Check that connecting to a peer either fails or succeeds depending on its offered
+ service flags in the VERSION message. The test is exercised for all relevant
+ outbound connection types where the desirable service flags check is done."""
+ CONNECTION_TYPES = ["outbound-full-relay", "block-relay-only", "addr-fetch"]
+ for conn_type, services in itertools.product(CONNECTION_TYPES, service_flag_tests):
+ if self.options.v2transport:
+ services |= NODE_P2P_V2
+ expected_result = "disconnect" if expect_disconnect else "connect"
+ self.log.info(f' - services 0x{services:08x}, type "{conn_type}" [{expected_result}]')
+ if expect_disconnect:
+ assert (services & desirable_service_flags) != desirable_service_flags
+ expected_debug_log = f'does not offer the expected services ' \
+ f'({services:08x} offered, {desirable_service_flags:08x} expected)'
+ with node.assert_debug_log([expected_debug_log]):
+ self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=True)
+ else:
+ assert (services & desirable_service_flags) == desirable_service_flags
+ self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=False)
+
+ def generate_at_mocktime(self, time):
+ self.nodes[0].setmocktime(time)
+ self.generate(self.nodes[0], 1)
+ self.nodes[0].setmocktime(0)
+
+ def run_test(self):
+ node = self.nodes[0]
+ self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)")
+ self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS],
+ DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True)
+ self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS],
+ DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False)
+
+ self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)")
+ self.generate_at_mocktime(int(time.time()) - 25 * 3600) # tip outside the 24h window, should fail
+ self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS],
+ DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True)
+ self.generate_at_mocktime(int(time.time()) - 23 * 3600) # tip inside the 24h window, should succeed
+ self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS],
+ DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False)
+
+ self.log.info("Check that feeler connections get disconnected immediately")
+ with node.assert_debug_log([f"feeler connection completed"]):
+ self.add_outbound_connection(node, "feeler", NODE_NONE, wait_for_disconnect=True)
+
+
+if __name__ == '__main__':
+ P2PHandshakeTest().main()
diff --git a/test/functional/p2p_i2p_ports.py b/test/functional/p2p_i2p_ports.py
index 13188b9305..20dcb50a57 100755
--- a/test/functional/p2p_i2p_ports.py
+++ b/test/functional/p2p_i2p_ports.py
@@ -6,36 +6,28 @@
Test ports handling for I2P hosts
"""
-import re
from test_framework.test_framework import BitcoinTestFramework
+PROXY = "127.0.0.1:60000"
class I2PPorts(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
# The test assumes that an I2P SAM proxy is not listening here.
- self.extra_args = [["-i2psam=127.0.0.1:60000"]]
+ self.extra_args = [[f"-i2psam={PROXY}"]]
def run_test(self):
node = self.nodes[0]
self.log.info("Ensure we don't try to connect if port!=0")
addr = "zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:8333"
- raised = False
- try:
- with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}"]):
- node.addnode(node=addr, command="onetry")
- except AssertionError as e:
- raised = True
- if not re.search(r"Expected messages .* does not partially match log", str(e)):
- raise AssertionError(f"Assertion raised as expected, but with an unexpected message: {str(e)}")
- if not raised:
- raise AssertionError("Assertion should have been raised")
+ with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}, connection refused due to arbitrary port 8333"]):
+ node.addnode(node=addr, command="onetry")
self.log.info("Ensure we try to connect if port=0 and get an error due to missing I2P proxy")
addr = "h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:0"
- with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}"]):
+ with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}: Cannot connect to {PROXY}"]):
node.addnode(node=addr, command="onetry")
diff --git a/test/functional/p2p_ibd_stalling.py b/test/functional/p2p_ibd_stalling.py
index 0eb37fa92f..830f374d63 100755
--- a/test/functional/p2p_ibd_stalling.py
+++ b/test/functional/p2p_ibd_stalling.py
@@ -80,7 +80,8 @@ class P2PIBDStallingTest(BitcoinTestFramework):
# Need to wait until 1023 blocks are received - the magic total bytes number is a workaround in lack of an rpc
# returning the number of downloaded (but not connected) blocks.
- self.wait_until(lambda: self.total_bytes_recv_for_blocks() == 172761)
+ bytes_recv = 172761 if not self.options.v2transport else 169692
+ self.wait_until(lambda: self.total_bytes_recv_for_blocks() == bytes_recv)
self.all_sync_send_with_ping(peers)
# If there was a peer marked for stalling, it would get disconnected
diff --git a/test/functional/p2p_initial_headers_sync.py b/test/functional/p2p_initial_headers_sync.py
index e67c384da7..bc6e0fb355 100755
--- a/test/functional/p2p_initial_headers_sync.py
+++ b/test/functional/p2p_initial_headers_sync.py
@@ -38,9 +38,10 @@ class HeadersSyncTest(BitcoinTestFramework):
def run_test(self):
self.log.info("Adding a peer to node0")
peer1 = self.nodes[0].add_p2p_connection(P2PInterface())
+ best_block_hash = int(self.nodes[0].getbestblockhash(), 16)
# Wait for peer1 to receive a getheaders
- peer1.wait_for_getheaders()
+ peer1.wait_for_getheaders(block_hash=best_block_hash)
# An empty reply will clear the outstanding getheaders request,
# allowing additional getheaders requests to be sent to this peer in
# the future.
@@ -60,17 +61,12 @@ class HeadersSyncTest(BitcoinTestFramework):
assert "getheaders" not in peer2.last_message
assert "getheaders" not in peer3.last_message
- with p2p_lock:
- peer1.last_message.pop("getheaders", None)
-
self.log.info("Have all peers announce a new block")
self.announce_random_block(all_peers)
self.log.info("Check that peer1 receives a getheaders in response")
- peer1.wait_for_getheaders()
+ peer1.wait_for_getheaders(block_hash=best_block_hash)
peer1.send_message(msg_headers()) # Send empty response, see above
- with p2p_lock:
- peer1.last_message.pop("getheaders", None)
self.log.info("Check that exactly 1 of {peer2, peer3} received a getheaders in response")
count = 0
@@ -80,7 +76,6 @@ class HeadersSyncTest(BitcoinTestFramework):
if "getheaders" in p.last_message:
count += 1
peer_receiving_getheaders = p
- p.last_message.pop("getheaders", None)
p.send_message(msg_headers()) # Send empty response, see above
assert_equal(count, 1)
@@ -89,14 +84,14 @@ class HeadersSyncTest(BitcoinTestFramework):
self.announce_random_block(all_peers)
self.log.info("Check that peer1 receives a getheaders in response")
- peer1.wait_for_getheaders()
+ peer1.wait_for_getheaders(block_hash=best_block_hash)
self.log.info("Check that the remaining peer received a getheaders as well")
expected_peer = peer2
if peer2 == peer_receiving_getheaders:
expected_peer = peer3
- expected_peer.wait_for_getheaders()
+ expected_peer.wait_for_getheaders(block_hash=best_block_hash)
self.log.info("Success!")
diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py
index 806fd9c6cb..8ec62ae5ee 100755
--- a/test/functional/p2p_invalid_block.py
+++ b/test/functional/p2p_invalid_block.py
@@ -32,7 +32,8 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [["-whitelist=noban@127.0.0.1"]]
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
def run_test(self):
# Add p2p connection to node0
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 4916d36ab7..40a69936bc 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -109,6 +109,9 @@ class InvalidMessagesTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
def test_magic_bytes(self):
+ # Skip with v2, magic bytes are v1-specific
+ if self.options.v2transport:
+ return
self.log.info("Test message with invalid magic bytes disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['Header error: Wrong MessageStart ffffffff received']):
@@ -120,6 +123,9 @@ class InvalidMessagesTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
def test_checksum(self):
+ # Skip with v2, the checksum is v1-specific
+ if self.options.v2transport:
+ return
self.log.info("Test message with invalid checksum logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['Header error: Wrong checksum (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
@@ -137,7 +143,11 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_size(self):
self.log.info("Test message with oversized payload disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- with self.nodes[0].assert_debug_log(['Header error: Size too large (badmsg, 4000001 bytes)']):
+ error_msg = (
+ ['V2 transport error: packet too large (4000014 bytes)'] if self.options.v2transport
+ else ['Header error: Size too large (badmsg, 4000001 bytes)']
+ )
+ with self.nodes[0].assert_debug_log(error_msg):
msg = msg_unrecognized(str_data="d" * (VALID_DATA_LIMIT + 1))
msg = conn.build_message(msg)
conn.send_raw_message(msg)
@@ -147,15 +157,26 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_msgtype(self):
self.log.info("Test message with invalid message type logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- with self.nodes[0].assert_debug_log(['Header error: Invalid message type']):
+ if self.options.v2transport:
+ msgtype = 99 # not defined
msg = msg_unrecognized(str_data="d")
- msg = conn.build_message(msg)
- # Modify msgtype
- msg = msg[:7] + b'\x00' + msg[7 + 1:]
- conn.send_raw_message(msg)
- conn.sync_with_ping(timeout=1)
- # Check that traffic is accounted for (24 bytes header + 2 bytes payload)
- assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
+ contents = msgtype.to_bytes(1, 'big') + msg.serialize()
+ tmsg = conn.v2_state.v2_enc_packet(contents, ignore=False)
+ with self.nodes[0].assert_debug_log(['V2 transport error: invalid message type']):
+ conn.send_raw_message(tmsg)
+ conn.sync_with_ping(timeout=1)
+ # Check that traffic is accounted for (20 bytes plus 3 bytes contents)
+ assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 23)
+ else:
+ with self.nodes[0].assert_debug_log(['Header error: Invalid message type']):
+ msg = msg_unrecognized(str_data="d")
+ msg = conn.build_message(msg)
+ # Modify msgtype
+ msg = msg[:7] + b'\x00' + msg[7 + 1:]
+ conn.send_raw_message(msg)
+ conn.sync_with_ping(timeout=1)
+ # Check that traffic is accounted for (24 bytes header + 2 bytes payload)
+ assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
self.nodes[0].disconnect_p2ps()
def test_addrv2(self, label, required_log_messages, raw_addrv2):
@@ -306,8 +327,10 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_resource_exhaustion(self):
self.log.info("Test node stays up despite many large junk messages")
- conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- conn2 = self.nodes[0].add_p2p_connection(P2PDataStore())
+ # Don't use v2 here - the non-optimised encryption would take too long to encrypt
+ # the large messages
+ conn = self.nodes[0].add_p2p_connection(P2PDataStore(), supports_v2_p2p=False)
+ conn2 = self.nodes[0].add_p2p_connection(P2PDataStore(), supports_v2_p2p=False)
msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT)
assert len(msg_at_size.serialize()) == MAX_PROTOCOL_MESSAGE_LENGTH
diff --git a/test/functional/p2p_mutated_blocks.py b/test/functional/p2p_mutated_blocks.py
new file mode 100755
index 0000000000..737edaf5bf
--- /dev/null
+++ b/test/functional/p2p_mutated_blocks.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+# Copyright (c) The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+"""
+Test that an attacker can't degrade compact block relay by sending unsolicited
+mutated blocks to clear in-flight blocktxn requests from other honest peers.
+"""
+
+from test_framework.p2p import P2PInterface
+from test_framework.messages import (
+ BlockTransactions,
+ msg_cmpctblock,
+ msg_block,
+ msg_blocktxn,
+ HeaderAndShortIDs,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.blocktools import (
+ COINBASE_MATURITY,
+ create_block,
+ add_witness_commitment,
+ NORMAL_GBT_REQUEST_PARAMS,
+)
+from test_framework.util import assert_equal
+from test_framework.wallet import MiniWallet
+import copy
+
+class MutatedBlocksTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.extra_args = [
+ [
+ "-testactivationheight=segwit@1", # causes unconnected headers/blocks to not have segwit considered deployed
+ ],
+ ]
+
+ def run_test(self):
+ self.wallet = MiniWallet(self.nodes[0])
+ self.generate(self.wallet, COINBASE_MATURITY)
+
+ honest_relayer = self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay")
+ attacker = self.nodes[0].add_p2p_connection(P2PInterface())
+
+ # Create new block with two transactions (coinbase + 1 self-transfer).
+ # The self-transfer transaction is needed to trigger a compact block
+ # `getblocktxn` roundtrip.
+ tx = self.wallet.create_self_transfer()["tx"]
+ block = create_block(tmpl=self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[tx])
+ add_witness_commitment(block)
+ block.solve()
+
+ # Create mutated version of the block by changing the transaction
+ # version on the self-transfer.
+ mutated_block = copy.deepcopy(block)
+ mutated_block.vtx[1].nVersion = 4
+
+ # Announce the new block via a compact block through the honest relayer
+ cmpctblock = HeaderAndShortIDs()
+ cmpctblock.initialize_from_block(block, use_witness=True)
+ honest_relayer.send_message(msg_cmpctblock(cmpctblock.to_p2p()))
+
+ # Wait for a `getblocktxn` that attempts to fetch the self-transfer
+ def self_transfer_requested():
+ if not honest_relayer.last_message.get('getblocktxn'):
+ return False
+
+ get_block_txn = honest_relayer.last_message['getblocktxn']
+ return get_block_txn.block_txn_request.blockhash == block.sha256 and \
+ get_block_txn.block_txn_request.indexes == [1]
+ honest_relayer.wait_until(self_transfer_requested, timeout=5)
+
+ # Block at height 101 should be the only one in flight from peer 0
+ peer_info_prior_to_attack = self.nodes[0].getpeerinfo()
+ assert_equal(peer_info_prior_to_attack[0]['id'], 0)
+ assert_equal([101], peer_info_prior_to_attack[0]["inflight"])
+
+ # Attempt to clear the honest relayer's download request by sending the
+ # mutated block (as the attacker).
+ with self.nodes[0].assert_debug_log(expected_msgs=["Block mutated: bad-txnmrklroot, hashMerkleRoot mismatch"]):
+ attacker.send_message(msg_block(mutated_block))
+ # Attacker should get disconnected for sending a mutated block
+ attacker.wait_for_disconnect(timeout=5)
+
+ # Block at height 101 should *still* be the only block in-flight from
+ # peer 0
+ peer_info_after_attack = self.nodes[0].getpeerinfo()
+ assert_equal(peer_info_after_attack[0]['id'], 0)
+ assert_equal([101], peer_info_after_attack[0]["inflight"])
+
+ # The honest relayer should be able to complete relaying the block by
+ # sending the blocktxn that was requested.
+ block_txn = msg_blocktxn()
+ block_txn.block_transactions = BlockTransactions(blockhash=block.sha256, transactions=[tx])
+ honest_relayer.send_and_ping(block_txn)
+ assert_equal(self.nodes[0].getbestblockhash(), block.hash)
+
+ # Check that unexpected-witness mutation check doesn't trigger on a header that doesn't connect to anything
+ assert_equal(len(self.nodes[0].getpeerinfo()), 1)
+ attacker = self.nodes[0].add_p2p_connection(P2PInterface())
+ block_missing_prev = copy.deepcopy(block)
+ block_missing_prev.hashPrevBlock = 123
+ block_missing_prev.solve()
+
+ # Attacker gets a DoS score of 10, not immediately disconnected, so we do it 10 times to get to 100
+ for _ in range(10):
+ assert_equal(len(self.nodes[0].getpeerinfo()), 2)
+ with self.nodes[0].assert_debug_log(expected_msgs=["AcceptBlock FAILED (prev-blk-not-found)"]):
+ attacker.send_message(msg_block(block_missing_prev))
+ attacker.wait_for_disconnect(timeout=5)
+
+
+if __name__ == '__main__':
+ MutatedBlocksTest().main()
diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py
index 89c35e943b..8b63d8ee26 100755
--- a/test/functional/p2p_node_network_limited.py
+++ b/test/functional/p2p_node_network_limited.py
@@ -15,14 +15,17 @@ from test_framework.messages import (
NODE_P2P_V2,
NODE_WITNESS,
msg_getdata,
- msg_verack,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
+ assert_raises_rpc_error,
+ try_rpc
)
+# Minimum blocks required to signal NODE_NETWORK_LIMITED #
+NODE_NETWORK_LIMITED_MIN_BLOCKS = 288
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
@@ -43,7 +46,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
- self.extra_args = [['-prune=550', '-addrmantest'], [], []]
+ self.extra_args = [['-prune=550'], [], []]
def disconnect_all(self):
self.disconnect_nodes(0, 1)
@@ -54,6 +57,64 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
+ def test_avoid_requesting_historical_blocks(self):
+ self.log.info("Test full node does not request blocks beyond the limited peer threshold")
+ pruned_node = self.nodes[0]
+ miner = self.nodes[1]
+ full_node = self.nodes[2]
+
+ # Connect and generate block to ensure IBD=false
+ self.connect_nodes(1, 0)
+ self.connect_nodes(1, 2)
+ self.generate(miner, 1)
+
+ # Verify peers are out of IBD
+ for node in self.nodes:
+ assert not node.getblockchaininfo()['initialblockdownload']
+
+ # Isolate full_node (the node will remain out of IBD)
+ full_node.setnetworkactive(False)
+ self.wait_until(lambda: len(full_node.getpeerinfo()) == 0)
+
+ # Mine blocks and sync the pruned node. Surpass the NODE_NETWORK_LIMITED threshold.
+ # Blocks deeper than the threshold are considered "historical blocks"
+ num_historial_blocks = 12
+ self.generate(miner, NODE_NETWORK_LIMITED_MIN_BLOCKS + num_historial_blocks, sync_fun=self.no_op)
+ self.sync_blocks([miner, pruned_node])
+
+ # Connect full_node to prune_node and check peers don't disconnect right away.
+ # (they will disconnect if full_node, which is chain-wise behind, requests blocks
+ # older than NODE_NETWORK_LIMITED_MIN_BLOCKS)
+ start_height_full_node = full_node.getblockcount()
+ full_node.setnetworkactive(True)
+ self.connect_nodes(2, 0)
+ assert_equal(len(full_node.getpeerinfo()), 1)
+
+ # Wait until the full_node is headers-wise synced
+ best_block_hash = pruned_node.getbestblockhash()
+ default_value = {'status': ''} # No status
+ self.wait_until(lambda: next(filter(lambda x: x['hash'] == best_block_hash, full_node.getchaintips()), default_value)['status'] == "headers-only")
+
+ # Now, since the node aims to download a window of 1024 blocks,
+ # ensure it requests the blocks below the threshold only (with a
+ # 2-block buffer). And also, ensure it does not request any
+ # historical block.
+ tip_height = pruned_node.getblockcount()
+ limit_buffer = 2
+ # Prevent races by waiting for the tip to arrive first
+ self.wait_until(lambda: not try_rpc(-1, "Block not found", full_node.getblock, pruned_node.getbestblockhash()))
+ for height in range(start_height_full_node + 1, tip_height + 1):
+ if height <= tip_height - (NODE_NETWORK_LIMITED_MIN_BLOCKS - limit_buffer):
+ assert_raises_rpc_error(-1, "Block not found on disk", full_node.getblock, pruned_node.getblockhash(height))
+ else:
+ full_node.getblock(pruned_node.getblockhash(height)) # just assert it does not throw an exception
+
+ # Lastly, ensure the full_node is not synced and verify it can get synced by
+ # establishing a connection with another full node capable of providing them.
+ assert_equal(full_node.getblockcount(), start_height_full_node)
+ self.connect_nodes(2, 1)
+ self.sync_blocks([miner, full_node])
+
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
@@ -77,17 +138,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
- node.wait_for_disconnect(5)
-
- self.log.info("Check local address relay, do a fresh connection.")
- self.nodes[0].disconnect_p2ps()
- node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
- node1.send_message(msg_verack())
-
- node1.wait_for_addr()
- #must relay address with NODE_NETWORK_LIMITED
- assert_equal(node1.firstAddrnServices, expected_services)
-
+ node.wait_for_disconnect(timeout=5)
self.nodes[0].disconnect_p2ps()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
@@ -118,5 +169,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
self.sync_blocks([self.nodes[0], self.nodes[1]])
+ self.test_avoid_requesting_historical_blocks()
+
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
diff --git a/test/functional/p2p_opportunistic_1p1c.py b/test/functional/p2p_opportunistic_1p1c.py
new file mode 100755
index 0000000000..e07acd5481
--- /dev/null
+++ b/test/functional/p2p_opportunistic_1p1c.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test opportunistic 1p1c package submission logic.
+"""
+
+from decimal import Decimal
+import time
+from test_framework.messages import (
+ CInv,
+ CTxInWitness,
+ MAX_BIP125_RBF_SEQUENCE,
+ MSG_WTX,
+ msg_inv,
+ msg_tx,
+ tx_from_hex,
+)
+from test_framework.p2p import (
+ P2PInterface,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_greater_than,
+ fill_mempool,
+)
+from test_framework.wallet import (
+ MiniWallet,
+ MiniWalletMode,
+)
+
+# 1sat/vB feerate denominated in BTC/KvB
+FEERATE_1SAT_VB = Decimal("0.00001000")
+# Number of seconds to wait to ensure no getdata is received
+GETDATA_WAIT = 60
+
+def cleanup(func):
+ def wrapper(self, *args, **kwargs):
+ try:
+ func(self, *args, **kwargs)
+ finally:
+ self.nodes[0].disconnect_p2ps()
+ # Do not clear the node's mempool, as each test requires mempool min feerate > min
+ # relay feerate. However, do check that this is the case.
+ assert self.nodes[0].getmempoolinfo()["mempoolminfee"] > self.nodes[0].getnetworkinfo()["relayfee"]
+ # Ensure we do not try to spend the same UTXOs in subsequent tests, as they will look like RBF attempts.
+ self.wallet.rescan_utxos(include_mempool=True)
+
+ # Resets if mocktime was used
+ self.nodes[0].setmocktime(0)
+ return wrapper
+
+class PackageRelayTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.extra_args = [[
+ "-datacarriersize=100000",
+ "-maxmempool=5",
+ ]]
+ self.supports_cli = False
+
+ def create_tx_below_mempoolminfee(self, wallet):
+ """Create a 1-input 1sat/vB transaction using a confirmed UTXO. Decrement and use
+ self.sequence so that subsequent calls to this function result in unique transactions."""
+
+ self.sequence -= 1
+ assert_greater_than(self.nodes[0].getmempoolinfo()["mempoolminfee"], FEERATE_1SAT_VB)
+
+ return wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB, sequence=self.sequence, confirmed_only=True)
+
+ @cleanup
+ def test_basic_child_then_parent(self):
+ node = self.nodes[0]
+ self.log.info("Check that opportunistic 1p1c logic works when child is received before parent")
+
+ low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet)
+ high_fee_child = self.wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=20*FEERATE_1SAT_VB)
+
+ peer_sender = node.add_p2p_connection(P2PInterface())
+
+ # 1. Child is received first (perhaps the low feerate parent didn't meet feefilter or the requests were sent to different nodes). It is missing an input.
+ high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([high_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 2. Node requests the missing parent by txid.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 3. Sender relays the parent. Parent+Child are evaluated as a package and accepted.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ # 4. Both transactions should now be in mempool.
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+
+ node.disconnect_p2ps()
+
+ @cleanup
+ def test_basic_parent_then_child(self, wallet):
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(wallet)
+ high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=20*FEERATE_1SAT_VB)
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+ peer_ignored = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=2, connection_type="outbound-full-relay")
+
+ # 1. Parent is relayed first. It is too low feerate.
+ parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ peer_sender.wait_for_getdata([parent_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+
+ # Send again from peer_ignored, check that it is ignored
+ peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ assert "getdata" not in peer_ignored.last_message
+
+ # 2. Child is relayed next. It is missing an input.
+ high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([high_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 3. Node requests the missing parent by txid.
+ # It should do so even if it has previously rejected that parent for being too low feerate.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 4. Sender re-relays the parent. Parent+Child are evaluated as a package and accepted.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ # 5. Both transactions should now be in mempool.
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+
+ @cleanup
+ def test_low_and_high_child(self, wallet):
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(wallet)
+ # This feerate is above mempoolminfee, but not enough to also bump the low feerate parent.
+ feerate_just_above = node.getmempoolinfo()["mempoolminfee"]
+ med_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=feerate_just_above)
+ high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB)
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+ peer_ignored = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=2, connection_type="outbound-full-relay")
+
+ self.log.info("Check that tx caches low fee parent + low fee child package rejections")
+
+ # 1. Send parent, rejected for being low feerate.
+ parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ peer_sender.wait_for_getdata([parent_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+
+ # Send again from peer_ignored, check that it is ignored
+ peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ assert "getdata" not in peer_ignored.last_message
+
+ # 2. Send an (orphan) child that has a higher feerate, but not enough to bump the parent.
+ med_child_wtxid_int = int(med_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=med_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([med_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(med_fee_child["tx"]))
+
+ # 3. Node requests the orphan's missing parent.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 4. The low parent + low child are submitted as a package. They are not accepted due to low package feerate.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+ assert med_fee_child["txid"] not in node.getrawmempool()
+
+ # If peer_ignored announces the low feerate child, it should be ignored
+ peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=med_child_wtxid_int)]))
+ assert "getdata" not in peer_ignored.last_message
+ # If either peer sends the parent again, package evaluation should not be attempted
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+ peer_ignored.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+ assert med_fee_child["txid"] not in node.getrawmempool()
+
+ # 5. Send the high feerate (orphan) child
+ high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([high_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 6. Node requests the orphan's parent, even though it has already been rejected, both by
+ # itself and with a child. This is necessary, otherwise high_fee_child can be censored.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 7. The low feerate parent + high feerate child are submitted as a package.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ # 8. Both transactions should now be in mempool
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+ assert med_fee_child["txid"] not in node_mempool
+
+ @cleanup
+ def test_orphan_consensus_failure(self):
+ self.log.info("Check opportunistic 1p1c logic with consensus-invalid orphan causes disconnect of the correct peer")
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet)
+ coin = low_fee_parent["new_utxo"]
+ address = node.get_deterministic_priv_key().address
+ # Create raw transaction spending the parent, but with no signature (a consensus error).
+ hex_orphan_no_sig = node.createrawtransaction([{"txid": coin["txid"], "vout": coin["vout"]}], {address : coin["value"] - Decimal("0.0001")})
+ tx_orphan_bad_wit = tx_from_hex(hex_orphan_no_sig)
+ tx_orphan_bad_wit.wit.vtxinwit.append(CTxInWitness())
+ tx_orphan_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage']
+
+ bad_orphan_sender = node.add_p2p_connection(P2PInterface())
+ parent_sender = node.add_p2p_connection(P2PInterface())
+
+ # 1. Child is received first. It is missing an input.
+ child_wtxid_int = int(tx_orphan_bad_wit.getwtxid(), 16)
+ bad_orphan_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=child_wtxid_int)]))
+ bad_orphan_sender.wait_for_getdata([child_wtxid_int])
+ bad_orphan_sender.send_and_ping(msg_tx(tx_orphan_bad_wit))
+
+ # 2. Node requests the missing parent by txid.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ bad_orphan_sender.wait_for_getdata([parent_txid_int])
+
+ # 3. A different peer relays the parent. Parent+Child are evaluated as a package and rejected.
+ parent_sender.send_message(msg_tx(low_fee_parent["tx"]))
+
+ # 4. Transactions should not be in mempool.
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] not in node_mempool
+ assert tx_orphan_bad_wit.rehash() not in node_mempool
+
+ # 5. Peer that sent a consensus-invalid transaction should be disconnected.
+ bad_orphan_sender.wait_for_disconnect()
+
+ # The peer that didn't provide the orphan should not be disconnected.
+ parent_sender.sync_with_ping()
+
+ @cleanup
+ def test_parent_consensus_failure(self):
+ self.log.info("Check opportunistic 1p1c logic with consensus-invalid parent causes disconnect of the correct peer")
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet)
+ high_fee_child = self.wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB)
+
+ # Create invalid version of parent with a bad signature.
+ tx_parent_bad_wit = tx_from_hex(low_fee_parent["hex"])
+ tx_parent_bad_wit.wit.vtxinwit.append(CTxInWitness())
+ tx_parent_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage']
+
+ package_sender = node.add_p2p_connection(P2PInterface())
+ fake_parent_sender = node.add_p2p_connection(P2PInterface())
+
+ # 1. Child is received first. It is missing an input.
+ child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ package_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=child_wtxid_int)]))
+ package_sender.wait_for_getdata([child_wtxid_int])
+ package_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 2. Node requests the missing parent by txid.
+ parent_txid_int = int(tx_parent_bad_wit.rehash(), 16)
+ package_sender.wait_for_getdata([parent_txid_int])
+
+ # 3. A different peer relays the parent. The parent is first evaluated by itself and
+ # rejected for being too low feerate. Then it is evaluated as a package and, after passing
+ # feerate checks, rejected for having a bad signature (consensus error).
+ fake_parent_sender.send_message(msg_tx(tx_parent_bad_wit))
+
+ # 4. Transactions should not be in mempool.
+ node_mempool = node.getrawmempool()
+ assert tx_parent_bad_wit.rehash() not in node_mempool
+ assert high_fee_child["txid"] not in node_mempool
+
+ # 5. Peer sent a consensus-invalid transaction.
+ fake_parent_sender.wait_for_disconnect()
+
+ self.log.info("Check that fake parent does not cause orphan to be deleted and real package can still be submitted")
+ # 6. The child-sending peer should not have been punished and the orphan should remain in orphanage.
+ # It can send the "real" parent transaction, and the package is accepted.
+ parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16)
+ package_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ package_sender.wait_for_getdata([parent_wtxid_int])
+ package_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+
+ @cleanup
+ def test_multiple_parents(self):
+ self.log.info("Check that node does not request more than 1 previously-rejected low feerate parent")
+
+ node = self.nodes[0]
+ node.setmocktime(int(time.time()))
+
+ # 2-parent-1-child package where both parents are below mempool min feerate
+ parent_low_1 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit)
+ parent_low_2 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit)
+ child_bumping = self.wallet_nonsegwit.create_self_transfer_multi(
+ utxos_to_spend=[parent_low_1["new_utxo"], parent_low_2["new_utxo"]],
+ fee_per_output=999*parent_low_1["tx"].get_vsize(),
+ )
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+
+ # 1. Send both parents. Each should be rejected for being too low feerate.
+ # Send unsolicited so that we can later check that no "getdata" was ever received.
+ peer_sender.send_and_ping(msg_tx(parent_low_1["tx"]))
+ peer_sender.send_and_ping(msg_tx(parent_low_2["tx"]))
+
+ # parent_low_1 and parent_low_2 are rejected for being low feerate.
+ assert parent_low_1["txid"] not in node.getrawmempool()
+ assert parent_low_2["txid"] not in node.getrawmempool()
+
+ # 2. Send child.
+ peer_sender.send_and_ping(msg_tx(child_bumping["tx"]))
+
+ # 3. Node should not request any parents, as it should recognize that it will not accept
+ # multi-parent-1-child packages.
+ node.bumpmocktime(GETDATA_WAIT)
+ peer_sender.sync_with_ping()
+ assert "getdata" not in peer_sender.last_message
+
+ @cleanup
+ def test_other_parent_in_mempool(self):
+ self.log.info("Check opportunistic 1p1c fails if child already has another parent in mempool")
+ node = self.nodes[0]
+
+ # This parent needs CPFP
+ parent_low = self.create_tx_below_mempoolminfee(self.wallet)
+ # This parent does not need CPFP and can be submitted alone ahead of time
+ parent_high = self.wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*10, confirmed_only=True)
+ child = self.wallet.create_self_transfer_multi(
+ utxos_to_spend=[parent_high["new_utxo"], parent_low["new_utxo"]],
+ fee_per_output=999*parent_low["tx"].get_vsize(),
+ )
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+
+ # 1. Send first parent which will be accepted.
+ peer_sender.send_and_ping(msg_tx(parent_high["tx"]))
+ assert parent_high["txid"] in node.getrawmempool()
+
+ # 2. Send child.
+ peer_sender.send_and_ping(msg_tx(child["tx"]))
+
+ # 3. Node requests parent_low. However, 1p1c fails because package-not-child-with-unconfirmed-parents
+ parent_low_txid_int = int(parent_low["txid"], 16)
+ peer_sender.wait_for_getdata([parent_low_txid_int])
+ peer_sender.send_and_ping(msg_tx(parent_low["tx"]))
+
+ node_mempool = node.getrawmempool()
+ assert parent_high["txid"] in node_mempool
+ assert parent_low["txid"] not in node_mempool
+ assert child["txid"] not in node_mempool
+
+ # Same error if submitted through submitpackage without parent_high
+ package_hex_missing_parent = [parent_low["hex"], child["hex"]]
+ result_missing_parent = node.submitpackage(package_hex_missing_parent)
+ assert_equal(result_missing_parent["package_msg"], "package-not-child-with-unconfirmed-parents")
+
+ def run_test(self):
+ node = self.nodes[0]
+ # To avoid creating transactions with the same txid (can happen if we set the same feerate
+ # and reuse the same input as a previous transaction that wasn't successfully submitted),
+ # we give each subtest a different nSequence for its transactions.
+ self.sequence = MAX_BIP125_RBF_SEQUENCE
+
+ self.wallet = MiniWallet(node)
+ self.wallet_nonsegwit = MiniWallet(node, mode=MiniWalletMode.RAW_P2PK)
+ self.generate(self.wallet_nonsegwit, 10)
+ self.generate(self.wallet, 20)
+
+ filler_wallet = MiniWallet(node)
+ fill_mempool(self, node, filler_wallet)
+
+ self.log.info("Check opportunistic 1p1c logic when parent (txid != wtxid) is received before child")
+ self.test_basic_parent_then_child(self.wallet)
+
+ self.log.info("Check opportunistic 1p1c logic when parent (txid == wtxid) is received before child")
+ self.test_basic_parent_then_child(self.wallet_nonsegwit)
+
+ self.log.info("Check opportunistic 1p1c logic when child is received before parent")
+ self.test_basic_child_then_parent()
+
+ self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid != wtxid)")
+ self.test_low_and_high_child(self.wallet)
+
+ self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid == wtxid)")
+ self.test_low_and_high_child(self.wallet_nonsegwit)
+
+ self.test_orphan_consensus_failure()
+ self.test_parent_consensus_failure()
+ self.test_multiple_parents()
+ self.test_other_parent_in_mempool()
+
+
+if __name__ == '__main__':
+ PackageRelayTest().main()
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
index 6153e4a156..80a27943fd 100755
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -83,7 +83,14 @@ class P2PPermissionsTests(BitcoinTestFramework):
["-whitelist=all@127.0.0.1"],
["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download", "addr"])
+ for flag, permissions in [(["-whitelist=noban,out@127.0.0.1"], ["noban", "download"]), (["-whitelist=noban@127.0.0.1"], [])]:
+ self.restart_node(0, flag)
+ self.connect_nodes(0, 1)
+ peerinfo = self.nodes[0].getpeerinfo()[0]
+ assert_equal(peerinfo['permissions'], permissions)
+
self.stop_node(1)
+ self.nodes[1].assert_start_raises_init_error(["-whitelist=in,out@127.0.0.1"], "Only direction was set, no permissions", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["-whitelist=oopsie@127.0.0.1"], "Invalid P2P permission", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index d316c4b602..45bbd7f1c3 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -191,31 +191,32 @@ class TestP2PConn(P2PInterface):
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with p2p_lock:
self.last_message.pop("getdata", None)
- self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)]))
- self.wait_for_getheaders()
+ self.wait_for_getheaders(block_hash=block.hashPrevBlock, timeout=timeout)
self.send_message(msg)
- self.wait_for_getdata([block.sha256])
+ self.wait_for_getdata([block.sha256], timeout=timeout)
def request_block(self, blockhash, inv_type, timeout=60):
with p2p_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
- self.wait_for_block(blockhash, timeout)
+ self.wait_for_block(blockhash, timeout=timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
- ["-acceptnonstdtxn=1", f"-testactivationheight=segwit@{SEGWIT_HEIGHT}", "-whitelist=noban@127.0.0.1", "-par=1"],
+ ["-acceptnonstdtxn=1", f"-testactivationheight=segwit@{SEGWIT_HEIGHT}", "-par=1"],
["-acceptnonstdtxn=0", f"-testactivationheight=segwit@{SEGWIT_HEIGHT}"],
]
self.supports_cli = False
@@ -1053,7 +1054,7 @@ class SegWitTest(BitcoinTestFramework):
@subtest
def test_max_witness_push_length(self):
- """Test that witness stack can only allow up to 520 byte pushes."""
+ """Test that witness stack can only allow up to MAX_SCRIPT_ELEMENT_SIZE byte pushes."""
block = self.build_next_block()
@@ -2054,7 +2055,7 @@ class SegWitTest(BitcoinTestFramework):
test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False)
# Expect a request for parent (tx) by txid despite use of WTX peer
- self.wtx_node.wait_for_getdata([tx.sha256], 60)
+ self.wtx_node.wait_for_getdata([tx.sha256], timeout=60)
with p2p_lock:
lgd = self.wtx_node.lastgetdata[:]
assert_equal(lgd, [CInv(MSG_WITNESS_TX, tx.sha256)])
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
index 508d6fe403..27a3aa8fb9 100755
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -311,6 +311,7 @@ class SendHeadersTest(BitcoinTestFramework):
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
+ expected_hash = tip
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
@@ -334,7 +335,10 @@ class SendHeadersTest(BitcoinTestFramework):
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
- test_node.wait_for_getheaders()
+ if i == 0:
+ test_node.wait_for_getheaders(block_hash=expected_hash)
+ else:
+ assert "getheaders" not in test_node.last_message
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
@@ -521,6 +525,7 @@ class SendHeadersTest(BitcoinTestFramework):
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
+ expected_hash = tip
for i in range(10):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
@@ -533,15 +538,14 @@ class SendHeadersTest(BitcoinTestFramework):
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
- with p2p_lock:
- test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
- test_node.wait_for_getheaders()
+ test_node.wait_for_getheaders(block_hash=expected_hash)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
+ expected_hash = blocks[1].sha256
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
@@ -556,13 +560,12 @@ class SendHeadersTest(BitcoinTestFramework):
for i in range(1, MAX_NUM_UNCONNECTING_HEADERS_MSGS):
# Send a header that doesn't connect, check that we get a getheaders.
- with p2p_lock:
- test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
- test_node.wait_for_getheaders()
+ test_node.wait_for_getheaders(block_hash=expected_hash)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
+ expected_hash = blocks[0].sha256
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
@@ -571,10 +574,8 @@ class SendHeadersTest(BitcoinTestFramework):
# before we get disconnected. Should be 5*MAX_NUM_UNCONNECTING_HEADERS_MSGS
for i in range(5 * MAX_NUM_UNCONNECTING_HEADERS_MSGS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
- with p2p_lock:
- test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
- test_node.wait_for_getheaders()
+ test_node.wait_for_getheaders(block_hash=expected_hash)
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py
index b4fa5099d8..80d7b6e9ae 100755
--- a/test/functional/p2p_timeouts.py
+++ b/test/functional/p2p_timeouts.py
@@ -69,11 +69,8 @@ class TimeoutsTest(BitcoinTestFramework):
with self.nodes[0].assert_debug_log(['Unsupported message "ping" prior to verack from peer=0']):
no_verack_node.send_message(msg_ping())
- # With v2, non-version messages before the handshake would be interpreted as part of the key exchange.
- # Therefore, don't execute this part of the test if v2transport is chosen.
- if not self.options.v2transport:
- with self.nodes[0].assert_debug_log(['non-version message before version handshake. Message "ping" from peer=1']):
- no_version_node.send_message(msg_ping())
+ with self.nodes[0].assert_debug_log(['non-version message before version handshake. Message "ping" from peer=1']):
+ no_version_node.send_message(msg_ping())
self.mock_forward(1)
assert "version" in no_verack_node.last_message
@@ -83,14 +80,20 @@ class TimeoutsTest(BitcoinTestFramework):
assert no_send_node.is_connected
no_verack_node.send_message(msg_ping())
- if not self.options.v2transport:
- no_version_node.send_message(msg_ping())
-
- expected_timeout_logs = [
- "version handshake timeout peer=0",
- f"socket no message in first 3 seconds, {'0' if self.options.v2transport else '1'} 0 peer=1",
- "socket no message in first 3 seconds, 0 0 peer=2",
- ]
+ no_version_node.send_message(msg_ping())
+
+ if self.options.v2transport:
+ expected_timeout_logs = [
+ "version handshake timeout peer=0",
+ "version handshake timeout peer=1",
+ "version handshake timeout peer=2",
+ ]
+ else:
+ expected_timeout_logs = [
+ "version handshake timeout peer=0",
+ "socket no message in first 3 seconds, 1 0 peer=1",
+ "socket no message in first 3 seconds, 0 0 peer=2",
+ ]
with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs):
self.mock_forward(2)
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 0e463c5072..7a50f1e605 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -5,6 +5,7 @@
"""
Test transaction download behavior
"""
+from decimal import Decimal
import time
from test_framework.messages import (
@@ -14,6 +15,7 @@ from test_framework.messages import (
MSG_WTX,
msg_inv,
msg_notfound,
+ msg_tx,
)
from test_framework.p2p import (
P2PInterface,
@@ -22,6 +24,7 @@ from test_framework.p2p import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
+ fill_mempool,
)
from test_framework.wallet import MiniWallet
@@ -54,6 +57,7 @@ MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RE
class TxDownloadTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
+ self.extra_args= [['-datacarriersize=100000', '-maxmempool=5', '-persistmempool=0']] * self.num_nodes
def test_tx_requests(self):
self.log.info("Test that we request transactions from all our peers, eventually")
@@ -241,6 +245,29 @@ class TxDownloadTest(BitcoinTestFramework):
self.log.info('Check that spurious notfound is ignored')
self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
+ def test_rejects_filter_reset(self):
+ self.log.info('Check that rejected tx is not requested again')
+ node = self.nodes[0]
+ fill_mempool(self, node, self.wallet)
+ self.wallet.rescan_utxos()
+ mempoolminfee = node.getmempoolinfo()['mempoolminfee']
+ peer = node.add_p2p_connection(TestP2PConn())
+ low_fee_tx = self.wallet.create_self_transfer(fee_rate=Decimal("0.9")*mempoolminfee)
+ assert_equal(node.testmempoolaccept([low_fee_tx['hex']])[0]["reject-reason"], "mempool min fee not met")
+ peer.send_and_ping(msg_tx(low_fee_tx['tx']))
+ peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(low_fee_tx['wtxid'], 16))]))
+ node.setmocktime(int(time.time()))
+ node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT)
+ peer.sync_with_ping()
+ assert_equal(peer.tx_getdata_count, 0)
+
+ self.log.info('Check that rejection filter is cleared after new block comes in')
+ self.generate(self.wallet, 1, sync_fun=self.no_op)
+ peer.sync_with_ping()
+ peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(low_fee_tx['wtxid'], 16))]))
+ node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT)
+ peer.wait_for_getdata([int(low_fee_tx['wtxid'], 16)])
+
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
@@ -257,16 +284,22 @@ class TxDownloadTest(BitcoinTestFramework):
# Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when
# the next trickle relay event happens.
- for test in [self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]:
+ for test, with_inbounds in [
+ (self.test_in_flight_max, True),
+ (self.test_inv_block, True),
+ (self.test_tx_requests, True),
+ (self.test_rejects_filter_reset, False),
+ ]:
self.stop_nodes()
self.start_nodes()
self.connect_nodes(1, 0)
# Setup the p2p connections
self.peers = []
- for node in self.nodes:
- for _ in range(NUM_INBOUND):
- self.peers.append(node.add_p2p_connection(TestP2PConn()))
- self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND))
+ if with_inbounds:
+ for node in self.nodes:
+ for _ in range(NUM_INBOUND):
+ self.peers.append(node.add_p2p_connection(TestP2PConn()))
+ self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND))
test()
diff --git a/test/functional/p2p_v2_earlykeyresponse.py b/test/functional/p2p_v2_earlykeyresponse.py
index 1f570e6010..32d2e1148a 100755
--- a/test/functional/p2p_v2_earlykeyresponse.py
+++ b/test/functional/p2p_v2_earlykeyresponse.py
@@ -75,7 +75,7 @@ class P2PEarlyKey(BitcoinTestFramework):
self.log.info('Sending first 4 bytes of ellswift which match network magic')
self.log.info('If a response is received, assertion failure would happen in our custom data_received() function')
# send happens in `initiate_v2_handshake()` in `connection_made()`
- peer1 = node0.add_p2p_connection(PeerEarlyKey(), wait_for_verack=False, send_version=False, supports_v2_p2p=True)
+ peer1 = node0.add_p2p_connection(PeerEarlyKey(), wait_for_verack=False, send_version=False, supports_v2_p2p=True, wait_for_v2_handshake=False)
self.wait_until(lambda: peer1.connection_opened)
self.log.info('Sending remaining ellswift and garbage which are different from V1_PREFIX. Since a response is')
self.log.info('expected now, our custom data_received() function wouldn\'t result in assertion failure')
diff --git a/test/functional/p2p_v2_transport.py b/test/functional/p2p_v2_transport.py
index ec43fc5a97..fe2449124d 100755
--- a/test/functional/p2p_v2_transport.py
+++ b/test/functional/p2p_v2_transport.py
@@ -12,6 +12,7 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
p2p_port,
+ assert_raises_rpc_error
)
@@ -59,6 +60,11 @@ class V2TransportTest(BitcoinTestFramework):
# V1 nodes can sync with each other
assert_equal(self.nodes[2].getblockcount(), 0)
assert_equal(self.nodes[3].getblockcount(), 0)
+
+ # addnode rpc error when v2transport requested but not enabled
+ ip_port = "127.0.0.1:{}".format(p2p_port(3))
+ assert_raises_rpc_error(-8, "Error: v2transport requested but not enabled (see -v2transport)", self.nodes[2].addnode, node=ip_port, command='add', v2transport=True)
+
with self.nodes[2].assert_debug_log(expected_msgs=[],
unexpected_msgs=[sending_handshake, downgrading_to_v1]):
self.connect_nodes(2, 3, peer_advertises_v2=False)
diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py
index afb75ab208..2701d2471d 100755
--- a/test/functional/rpc_net.py
+++ b/test/functional/rpc_net.py
@@ -13,7 +13,6 @@ import platform
import time
import test_framework.messages
-from test_framework.netutil import ADDRMAN_NEW_BUCKET_COUNT, ADDRMAN_TRIED_BUCKET_COUNT, ADDRMAN_BUCKET_SIZE
from test_framework.p2p import (
P2PInterface,
P2P_SERVICES,
@@ -42,6 +41,24 @@ def assert_net_servicesnames(servicesflag, servicenames):
assert servicesflag_generated == servicesflag
+def seed_addrman(node):
+ """ Populate the addrman with addresses from different networks.
+ Here 2 ipv4, 2 ipv6, 1 cjdns, 2 onion and 1 i2p addresses are added.
+ """
+ # These addresses currently don't collide with a deterministic addrman.
+ # If the addrman positioning/bucketing is changed, these might collide
+ # and adding them fails.
+ success = { "success": True }
+ assert_equal(node.addpeeraddress(address="1.2.3.4", tried=True, port=8333), success)
+ assert_equal(node.addpeeraddress(address="2.0.0.0", port=8333), success)
+ assert_equal(node.addpeeraddress(address="1233:3432:2434:2343:3234:2345:6546:4534", tried=True, port=8333), success)
+ assert_equal(node.addpeeraddress(address="2803:0:1234:abcd::1", port=45324), success)
+ assert_equal(node.addpeeraddress(address="fc00:1:2:3:4:5:6:7", port=8333), success)
+ assert_equal(node.addpeeraddress(address="pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", tried=True, port=8333), success)
+ assert_equal(node.addpeeraddress(address="nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", port=45324, tried=True), success)
+ assert_equal(node.addpeeraddress(address="c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", port=8333), success)
+
+
class NetTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
@@ -113,10 +130,15 @@ class NetTest(BitcoinTestFramework):
self.nodes[0].setmocktime(no_version_peer_conntime)
with self.nodes[0].wait_for_new_peer():
no_version_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
+ if self.options.v2transport:
+ self.wait_until(lambda: self.nodes[0].getpeerinfo()[no_version_peer_id]["transport_protocol_type"] == "v2")
self.nodes[0].setmocktime(0)
peer_info = self.nodes[0].getpeerinfo()[no_version_peer_id]
peer_info.pop("addr")
peer_info.pop("addrbind")
+ # The next two fields will vary for v2 connections because we send a rng-based number of decoy messages
+ peer_info.pop("bytesrecv")
+ peer_info.pop("bytessent")
assert_equal(
peer_info,
{
@@ -125,9 +147,7 @@ class NetTest(BitcoinTestFramework):
"addr_relay_enabled": False,
"bip152_hb_from": False,
"bip152_hb_to": False,
- "bytesrecv": 0,
"bytesrecv_per_msg": {},
- "bytessent": 0,
"bytessent_per_msg": {},
"connection_type": "inbound",
"conntime": no_version_peer_conntime,
@@ -136,8 +156,8 @@ class NetTest(BitcoinTestFramework):
"inflight": [],
"last_block": 0,
"last_transaction": 0,
- "lastrecv": 0,
- "lastsend": 0,
+ "lastrecv": 0 if not self.options.v2transport else no_version_peer_conntime,
+ "lastsend": 0 if not self.options.v2transport else no_version_peer_conntime,
"minfeefilter": Decimal("0E-8"),
"network": "not_publicly_routable",
"permissions": [],
@@ -145,13 +165,13 @@ class NetTest(BitcoinTestFramework):
"relaytxes": False,
"services": "0000000000000000",
"servicesnames": [],
- "session_id": "",
+ "session_id": "" if not self.options.v2transport else no_version_peer.v2_state.peer['session_id'].hex(),
"startingheight": -1,
"subver": "",
"synced_blocks": -1,
"synced_headers": -1,
"timeoffset": 0,
- "transport_protocol_type": "v1" if not self.options.v2transport else "detecting",
+ "transport_protocol_type": "v1" if not self.options.v2transport else "v2",
"version": 0,
},
)
@@ -302,22 +322,16 @@ class NetTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, "Network not recognized: Foo", self.nodes[0].getnodeaddresses, 1, "Foo")
def test_addpeeraddress(self):
- """RPC addpeeraddress sets the source address equal to the destination address.
- If an address with the same /16 as an existing new entry is passed, it will be
- placed in the same new bucket and have a 1/64 chance of the bucket positions
- colliding (depending on the value of nKey in the addrman), in which case the
- new address won't be added. The probability of collision can be reduced to
- 1/2^16 = 1/65536 by using an address from a different /16. We avoid this here
- by first testing adding a tried table entry before testing adding a new table one.
- """
self.log.info("Test addpeeraddress")
- self.restart_node(1, ["-checkaddrman=1"])
+ # The node has an existing, non-deterministic addrman from a previous test.
+ # Clear it to have a deterministic addrman.
+ self.restart_node(1, ["-checkaddrman=1", "-test=addrman"], clear_addrman=True)
node = self.nodes[1]
- self.log.debug("Test that addpeerinfo is a hidden RPC")
+ self.log.debug("Test that addpeeraddress is a hidden RPC")
# It is hidden from general help, but its detailed help may be called directly.
- assert "addpeerinfo" not in node.help()
- assert "addpeerinfo" in node.help("addpeerinfo")
+ assert "addpeeraddress" not in node.help()
+ assert "unknown command: addpeeraddress" not in node.help("addpeeraddress")
self.log.debug("Test that adding an empty address fails")
assert_equal(node.addpeeraddress(address="", port=8333), {"success": False})
@@ -330,26 +344,50 @@ class NetTest(BitcoinTestFramework):
assert_raises_rpc_error(-1, "JSON integer out of range", self.nodes[0].addpeeraddress, address="1.2.3.4", port=-1)
assert_raises_rpc_error(-1, "JSON integer out of range", self.nodes[0].addpeeraddress, address="1.2.3.4", port=65536)
+ self.log.debug("Test that adding a valid address to the new table succeeds")
+ assert_equal(node.addpeeraddress(address="1.0.0.0", tried=False, port=8333), {"success": True})
+ addrman = node.getrawaddrman()
+ assert_equal(len(addrman["tried"]), 0)
+ new_table = list(addrman["new"].values())
+ assert_equal(len(new_table), 1)
+ assert_equal(new_table[0]["address"], "1.0.0.0")
+ assert_equal(new_table[0]["port"], 8333)
+
+ self.log.debug("Test that adding an already-present new address to the new and tried tables fails")
+ for value in [True, False]:
+ assert_equal(node.addpeeraddress(address="1.0.0.0", tried=value, port=8333), {"success": False, "error": "failed-adding-to-new"})
+ assert_equal(len(node.getnodeaddresses(count=0)), 1)
+
self.log.debug("Test that adding a valid address to the tried table succeeds")
- self.addr_time = int(time.time())
- node.setmocktime(self.addr_time)
assert_equal(node.addpeeraddress(address="1.2.3.4", tried=True, port=8333), {"success": True})
- with node.assert_debug_log(expected_msgs=["CheckAddrman: new 0, tried 1, total 1 started"]):
- addrs = node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks
- assert_equal(len(addrs), 1)
- assert_equal(addrs[0]["address"], "1.2.3.4")
- assert_equal(addrs[0]["port"], 8333)
+ addrman = node.getrawaddrman()
+ assert_equal(len(addrman["new"]), 1)
+ tried_table = list(addrman["tried"].values())
+ assert_equal(len(tried_table), 1)
+ assert_equal(tried_table[0]["address"], "1.2.3.4")
+ assert_equal(tried_table[0]["port"], 8333)
+ node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks
self.log.debug("Test that adding an already-present tried address to the new and tried tables fails")
for value in [True, False]:
- assert_equal(node.addpeeraddress(address="1.2.3.4", tried=value, port=8333), {"success": False})
- assert_equal(len(node.getnodeaddresses(count=0)), 1)
-
- self.log.debug("Test that adding a second address, this time to the new table, succeeds")
+ assert_equal(node.addpeeraddress(address="1.2.3.4", tried=value, port=8333), {"success": False, "error": "failed-adding-to-new"})
+ assert_equal(len(node.getnodeaddresses(count=0)), 2)
+
+ self.log.debug("Test that adding an address, which collides with the address in tried table, fails")
+ colliding_address = "1.2.5.45" # grinded address that produces a tried-table collision
+ assert_equal(node.addpeeraddress(address=colliding_address, tried=True, port=8333), {"success": False, "error": "failed-adding-to-tried"})
+ # When adding an address to the tried table, it's first added to the new table.
+ # As we fail to move it to the tried table, it remains in the new table.
+ addrman_info = node.getaddrmaninfo()
+ assert_equal(addrman_info["all_networks"]["tried"], 1)
+ assert_equal(addrman_info["all_networks"]["new"], 2)
+
+ self.log.debug("Test that adding another address to the new table succeeds")
assert_equal(node.addpeeraddress(address="2.0.0.0", port=8333), {"success": True})
- with node.assert_debug_log(expected_msgs=["CheckAddrman: new 1, tried 1, total 2 started"]):
- addrs = node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks
- assert_equal(len(addrs), 2)
+ addrman_info = node.getaddrmaninfo()
+ assert_equal(addrman_info["all_networks"]["tried"], 1)
+ assert_equal(addrman_info["all_networks"]["new"], 3)
+ node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks
def test_sendmsgtopeer(self):
node = self.nodes[0]
@@ -387,30 +425,38 @@ class NetTest(BitcoinTestFramework):
def test_getaddrmaninfo(self):
self.log.info("Test getaddrmaninfo")
+ self.restart_node(1, extra_args=["-cjdnsreachable", "-test=addrman"], clear_addrman=True)
node = self.nodes[1]
+ seed_addrman(node)
+
+ expected_network_count = {
+ 'all_networks': {'new': 4, 'tried': 4, 'total': 8},
+ 'ipv4': {'new': 1, 'tried': 1, 'total': 2},
+ 'ipv6': {'new': 1, 'tried': 1, 'total': 2},
+ 'onion': {'new': 0, 'tried': 2, 'total': 2},
+ 'i2p': {'new': 1, 'tried': 0, 'total': 1},
+ 'cjdns': {'new': 1, 'tried': 0, 'total': 1},
+ }
- # current count of ipv4 addresses in addrman is {'new':1, 'tried':1}
- self.log.info("Test that count of addresses in addrman match expected values")
+ self.log.debug("Test that count of addresses in addrman match expected values")
res = node.getaddrmaninfo()
- assert_equal(res["ipv4"]["new"], 1)
- assert_equal(res["ipv4"]["tried"], 1)
- assert_equal(res["ipv4"]["total"], 2)
- assert_equal(res["all_networks"]["new"], 1)
- assert_equal(res["all_networks"]["tried"], 1)
- assert_equal(res["all_networks"]["total"], 2)
- for net in ["ipv6", "onion", "i2p", "cjdns"]:
- assert_equal(res[net]["new"], 0)
- assert_equal(res[net]["tried"], 0)
- assert_equal(res[net]["total"], 0)
+ for network, count in expected_network_count.items():
+ assert_equal(res[network]['new'], count['new'])
+ assert_equal(res[network]['tried'], count['tried'])
+ assert_equal(res[network]['total'], count['total'])
def test_getrawaddrman(self):
self.log.info("Test getrawaddrman")
+ self.restart_node(1, extra_args=["-cjdnsreachable", "-test=addrman"], clear_addrman=True)
node = self.nodes[1]
+ self.addr_time = int(time.time())
+ node.setmocktime(self.addr_time)
+ seed_addrman(node)
self.log.debug("Test that getrawaddrman is a hidden RPC")
# It is hidden from general help, but its detailed help may be called directly.
assert "getrawaddrman" not in node.help()
- assert "getrawaddrman" in node.help("getrawaddrman")
+ assert "unknown command: getrawaddrman" not in node.help("getrawaddrman")
def check_addr_information(result, expected):
"""Utility to compare a getrawaddrman result entry with an expected entry"""
@@ -427,88 +473,96 @@ class NetTest(BitcoinTestFramework):
getrawaddrman = node.getrawaddrman()
getaddrmaninfo = node.getaddrmaninfo()
for (table_name, table_info) in expected.items():
- assert_equal(len(getrawaddrman[table_name]), len(table_info["entries"]))
+ assert_equal(len(getrawaddrman[table_name]), len(table_info))
assert_equal(len(getrawaddrman[table_name]), getaddrmaninfo["all_networks"][table_name])
for bucket_position in getrawaddrman[table_name].keys():
- bucket = int(bucket_position.split("/")[0])
- position = int(bucket_position.split("/")[1])
-
- # bucket and position only be sanity checked here as the
- # test-addrman isn't deterministic
- assert 0 <= int(bucket) < table_info["bucket_count"]
- assert 0 <= int(position) < ADDRMAN_BUCKET_SIZE
-
entry = getrawaddrman[table_name][bucket_position]
- expected_entry = list(filter(lambda e: e["address"] == entry["address"], table_info["entries"]))[0]
+ expected_entry = list(filter(lambda e: e["address"] == entry["address"], table_info))[0]
+ assert bucket_position == expected_entry["bucket_position"]
check_addr_information(entry, expected_entry)
- # we expect one addrman new and tried table entry, which were added in a previous test
+ # we expect 4 new and 4 tried table entries in the addrman which were added using seed_addrman()
expected = {
- "new": {
- "bucket_count": ADDRMAN_NEW_BUCKET_COUNT,
- "entries": [
+ "new": [
{
+ "bucket_position": "82/8",
"address": "2.0.0.0",
"port": 8333,
"services": 9,
"network": "ipv4",
"source": "2.0.0.0",
"source_network": "ipv4",
+ },
+ {
+ "bucket_position": "336/24",
+ "address": "fc00:1:2:3:4:5:6:7",
+ "port": 8333,
+ "services": 9,
+ "network": "cjdns",
+ "source": "fc00:1:2:3:4:5:6:7",
+ "source_network": "cjdns",
+ },
+ {
+ "bucket_position": "963/46",
+ "address": "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p",
+ "port": 8333,
+ "services": 9,
+ "network": "i2p",
+ "source": "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p",
+ "source_network": "i2p",
+ },
+ {
+ "bucket_position": "613/6",
+ "address": "2803:0:1234:abcd::1",
+ "services": 9,
+ "network": "ipv6",
+ "source": "2803:0:1234:abcd::1",
+ "source_network": "ipv6",
+ "port": 45324,
}
- ]
- },
- "tried": {
- "bucket_count": ADDRMAN_TRIED_BUCKET_COUNT,
- "entries": [
+ ],
+ "tried": [
{
+ "bucket_position": "6/33",
"address": "1.2.3.4",
"port": 8333,
"services": 9,
"network": "ipv4",
"source": "1.2.3.4",
"source_network": "ipv4",
+ },
+ {
+ "bucket_position": "197/34",
+ "address": "1233:3432:2434:2343:3234:2345:6546:4534",
+ "port": 8333,
+ "services": 9,
+ "network": "ipv6",
+ "source": "1233:3432:2434:2343:3234:2345:6546:4534",
+ "source_network": "ipv6",
+ },
+ {
+ "bucket_position": "72/61",
+ "address": "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion",
+ "port": 8333,
+ "services": 9,
+ "network": "onion",
+ "source": "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion",
+ "source_network": "onion"
+ },
+ {
+ "bucket_position": "139/46",
+ "address": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion",
+ "services": 9,
+ "network": "onion",
+ "source": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion",
+ "source_network": "onion",
+ "port": 45324,
}
- ]
- }
+ ]
}
- self.log.debug("Test that the getrawaddrman contains information about the addresses added in a previous test")
- check_getrawaddrman_entries(expected)
-
- self.log.debug("Add one new address to each addrman table")
- expected["new"]["entries"].append({
- "address": "2803:0:1234:abcd::1",
- "services": 9,
- "network": "ipv6",
- "source": "2803:0:1234:abcd::1",
- "source_network": "ipv6",
- "port": -1, # set once addpeeraddress is successful
- })
- expected["tried"]["entries"].append({
- "address": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion",
- "services": 9,
- "network": "onion",
- "source": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion",
- "source_network": "onion",
- "port": -1, # set once addpeeraddress is successful
- })
-
- port = 0
- for (table_name, table_info) in expected.items():
- # There's a slight chance that the to-be-added address collides with an already
- # present table entry. To avoid this, we increment the port until an address has been
- # added. Incrementing the port changes the position in the new table bucket (bucket
- # stays the same) and changes both the bucket and the position in the tried table.
- while True:
- if node.addpeeraddress(address=table_info["entries"][1]["address"], port=port, tried=table_name == "tried")["success"]:
- table_info["entries"][1]["port"] = port
- self.log.debug(f"Added {table_info['entries'][1]['address']} to {table_name} table")
- break
- else:
- port += 1
-
- self.log.debug("Test that the newly added addresses appear in getrawaddrman")
+ self.log.debug("Test that getrawaddrman contains information about newly added addresses in each addrman table")
check_getrawaddrman_entries(expected)
diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py
index 664f2df3f1..37c42f2533 100755
--- a/test/functional/rpc_packages.py
+++ b/test/functional/rpc_packages.py
@@ -18,6 +18,7 @@ from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
+ fill_mempool,
)
from test_framework.wallet import (
DEFAULT_FEE,
@@ -29,7 +30,8 @@ class RPCPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [["-whitelist=noban@127.0.0.1"]] # noban speeds up tx relay
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
def assert_testres_equal(self, package_hex, testres_expected):
"""Shuffle package_hex and assert that the testmempoolaccept result matches testres_expected. This should only
@@ -81,6 +83,8 @@ class RPCPackagesTest(BitcoinTestFramework):
self.test_conflicting()
self.test_rbf()
self.test_submitpackage()
+ self.test_maxfeerate_submitpackage()
+ self.test_maxburn_submitpackage()
def test_independent(self, coin):
self.log.info("Test multiple independent transactions in a package")
@@ -356,5 +360,89 @@ class RPCPackagesTest(BitcoinTestFramework):
assert_equal(res["tx-results"][sec_wtxid]["error"], "version")
peer.wait_for_broadcast([first_wtxid])
+ def test_maxfeerate_submitpackage(self):
+ node = self.nodes[0]
+ # clear mempool
+ deterministic_address = node.get_deterministic_priv_key().address
+ self.generatetoaddress(node, 1, deterministic_address)
+
+ self.log.info("Submitpackage maxfeerate arg testing")
+ chained_txns = self.wallet.create_self_transfer_chain(chain_length=2)
+ minrate_btc_kvb = min([chained_txn["fee"] / chained_txn["tx"].get_vsize() * 1000 for chained_txn in chained_txns])
+ chain_hex = [t["hex"] for t in chained_txns]
+ pkg_result = node.submitpackage(chain_hex, maxfeerate=minrate_btc_kvb - Decimal("0.00000001"))
+
+ # First tx failed in single transaction evaluation, so package message is generic
+ assert_equal(pkg_result["package_msg"], "transaction failed")
+ assert_equal(pkg_result["tx-results"][chained_txns[0]["wtxid"]]["error"], "max feerate exceeded")
+ assert_equal(pkg_result["tx-results"][chained_txns[1]["wtxid"]]["error"], "bad-txns-inputs-missingorspent")
+ assert_equal(node.getrawmempool(), [])
+
+ # Make chain of two transactions where parent doesn't make minfee threshold
+ # but child is too high fee
+ # Lower mempool limit to make it easier to fill_mempool
+ self.restart_node(0, extra_args=[
+ "-datacarriersize=100000",
+ "-maxmempool=5",
+ "-persistmempool=0",
+ ])
+ self.wallet.rescan_utxos()
+
+ fill_mempool(self, node, self.wallet)
+
+ minrelay = node.getmempoolinfo()["minrelaytxfee"]
+ parent = self.wallet.create_self_transfer(
+ fee_rate=minrelay,
+ confirmed_only=True,
+ )
+
+ child = self.wallet.create_self_transfer(
+ fee_rate=DEFAULT_FEE,
+ utxo_to_spend=parent["new_utxo"],
+ )
+
+ pkg_result = node.submitpackage([parent["hex"], child["hex"]], maxfeerate=DEFAULT_FEE - Decimal("0.00000001"))
+
+ # Child is connected even though parent is invalid and still reports fee exceeded
+ # this implies sub-package evaluation of both entries together.
+ assert_equal(pkg_result["package_msg"], "transaction failed")
+ assert "mempool min fee not met" in pkg_result["tx-results"][parent["wtxid"]]["error"]
+ assert_equal(pkg_result["tx-results"][child["wtxid"]]["error"], "max feerate exceeded")
+ assert parent["txid"] not in node.getrawmempool()
+ assert child["txid"] not in node.getrawmempool()
+
+ # Reset maxmempool, datacarriersize, reset dynamic mempool minimum feerate, and empty mempool.
+ self.restart_node(0)
+ self.wallet.rescan_utxos()
+
+ assert_equal(node.getrawmempool(), [])
+
+ def test_maxburn_submitpackage(self):
+ node = self.nodes[0]
+
+ assert_equal(node.getrawmempool(), [])
+
+ self.log.info("Submitpackage maxburnamount arg testing")
+ chained_txns_burn = self.wallet.create_self_transfer_chain(
+ chain_length=2,
+ utxo_to_spend=self.wallet.get_utxo(confirmed_only=True),
+ )
+ chained_burn_hex = [t["hex"] for t in chained_txns_burn]
+
+ tx = tx_from_hex(chained_burn_hex[1])
+ tx.vout[-1].scriptPubKey = b'a' * 10001 # scriptPubKey bigger than 10k IsUnspendable
+ chained_burn_hex = [chained_burn_hex[0], tx.serialize().hex()]
+ # burn test is run before any package evaluation; nothing makes it in and we get broader exception
+ assert_raises_rpc_error(-25, "Unspendable output exceeds maximum configured by user", node.submitpackage, chained_burn_hex, 0, chained_txns_burn[1]["new_utxo"]["value"] - Decimal("0.00000001"))
+ assert_equal(node.getrawmempool(), [])
+
+ minrate_btc_kvb_burn = min([chained_txn_burn["fee"] / chained_txn_burn["tx"].get_vsize() * 1000 for chained_txn_burn in chained_txns_burn])
+
+ # Relax the restrictions for both and send it; parent gets through as own subpackage
+ pkg_result = node.submitpackage(chained_burn_hex, maxfeerate=minrate_btc_kvb_burn, maxburnamount=chained_txns_burn[1]["new_utxo"]["value"])
+ assert "error" not in pkg_result["tx-results"][chained_txns_burn[0]["wtxid"]]
+ assert_equal(pkg_result["tx-results"][tx.getwtxid()]["error"], "scriptpubkey")
+ assert_equal(node.getrawmempool(), [chained_txns_burn[0]["txid"]])
+
if __name__ == "__main__":
RPCPackagesTest().main()
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 1fd938d18a..6ee7e56886 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -16,8 +16,6 @@ from test_framework.messages import (
CTxIn,
CTxOut,
MAX_BIP125_RBF_SEQUENCE,
- WITNESS_SCALE_FACTOR,
- ser_compact_size,
)
from test_framework.psbt import (
PSBT,
@@ -42,6 +40,7 @@ from test_framework.util import (
find_vout_for_address,
)
from test_framework.wallet_util import (
+ calculate_input_weight,
generate_keypair,
get_generate_key,
)
@@ -752,12 +751,9 @@ class PSBTTest(BitcoinTestFramework):
input_idx = i
break
psbt_in = dec["inputs"][input_idx]
- # Calculate the input weight
- # (prevout + sequence + length of scriptSig + scriptsig + 1 byte buffer) * WITNESS_SCALE_FACTOR + num scriptWitness stack items + (length of stack item + stack item) * N stack items + 1 byte buffer
- len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0
- len_scriptsig += len(ser_compact_size(len_scriptsig)) + 1
- len_scriptwitness = (sum([(len(x) // 2) + len(ser_compact_size(len(x) // 2)) for x in psbt_in["final_scriptwitness"]]) + len(psbt_in["final_scriptwitness"]) + 1) if "final_scriptwitness" in psbt_in else 0
- input_weight = ((40 + len_scriptsig) * WITNESS_SCALE_FACTOR) + len_scriptwitness
+ scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else ""
+ witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None
+ input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex)
low_input_weight = input_weight // 2
high_input_weight = input_weight * 2
@@ -881,7 +877,7 @@ class PSBTTest(BitcoinTestFramework):
assert_equal(comb_psbt, psbt)
self.log.info("Test walletprocesspsbt raises if an invalid sighashtype is passed")
- assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[0].walletprocesspsbt, psbt, sighashtype="all")
+ assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].walletprocesspsbt, psbt, sighashtype="all")
self.log.info("Test decoding PSBT with per-input preimage types")
# note that the decodepsbt RPC doesn't check whether preimages and hashes match
@@ -987,7 +983,7 @@ class PSBTTest(BitcoinTestFramework):
self.nodes[2].sendrawtransaction(processed_psbt['hex'])
self.log.info("Test descriptorprocesspsbt raises if an invalid sighashtype is passed")
- assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[2].descriptorprocesspsbt, psbt, [descriptor], sighashtype="all")
+ assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[2].descriptorprocesspsbt, psbt, [descriptor], sighashtype="all")
if __name__ == '__main__':
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index e5d7cea135..3978c80dde 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -73,9 +73,8 @@ class RawTransactionsTest(BitcoinTestFramework):
["-txindex"],
["-fastprune", "-prune=1"],
]
- # whitelist all peers to speed up tx relay / mempool sync
- for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1")
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.supports_cli = False
def setup_network(self):
diff --git a/test/functional/rpc_setban.py b/test/functional/rpc_setban.py
index bc426d7371..ba86b278bd 100755
--- a/test/functional/rpc_setban.py
+++ b/test/functional/rpc_setban.py
@@ -64,20 +64,10 @@ class SetBanTests(BitcoinTestFramework):
assert self.is_banned(node, tor_addr)
assert not self.is_banned(node, ip_addr)
- self.log.info("Test the ban list is preserved through restart")
-
- self.restart_node(1)
- assert self.is_banned(node, tor_addr)
- assert not self.is_banned(node, ip_addr)
-
node.setban(tor_addr, "remove")
assert not self.is_banned(self.nodes[1], tor_addr)
assert not self.is_banned(node, ip_addr)
- self.restart_node(1)
- assert not self.is_banned(node, tor_addr)
- assert not self.is_banned(node, ip_addr)
-
self.log.info("Test -bantime")
self.restart_node(1, ["-bantime=1234"])
self.nodes[1].setban("127.0.0.1", "add")
diff --git a/test/functional/rpc_signrawtransactionwithkey.py b/test/functional/rpc_signrawtransactionwithkey.py
index 0913f5057e..268584331e 100755
--- a/test/functional/rpc_signrawtransactionwithkey.py
+++ b/test/functional/rpc_signrawtransactionwithkey.py
@@ -124,7 +124,7 @@ class SignRawTransactionWithKeyTest(BitcoinTestFramework):
self.log.info("Test signing transaction with invalid sighashtype")
tx = self.nodes[0].createrawtransaction(INPUTS, OUTPUTS)
privkeys = [self.nodes[0].get_deterministic_priv_key().key]
- assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[0].signrawtransactionwithkey, tx, privkeys, sighashtype="all")
+ assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].signrawtransactionwithkey, tx, privkeys, sighashtype="all")
def run_test(self):
self.successful_signing_test()
diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py
index cb99e483ec..f8df59d02a 100755
--- a/test/functional/rpc_uptime.py
+++ b/test/functional/rpc_uptime.py
@@ -23,7 +23,7 @@ class UptimeTest(BitcoinTestFramework):
self._test_uptime()
def _test_negative_time(self):
- assert_raises_rpc_error(-8, "Mocktime cannot be negative: -1.", self.nodes[0].setmocktime, -1)
+ assert_raises_rpc_error(-8, "Mocktime must be in the range [0, 9223372036], not -1.", self.nodes[0].setmocktime, -1)
def _test_uptime(self):
wait_time = 10
diff --git a/test/functional/test-shell.md b/test/functional/test-shell.md
index b89b40f13d..4cd62c4ef3 100644
--- a/test/functional/test-shell.md
+++ b/test/functional/test-shell.md
@@ -123,11 +123,11 @@ We can also log custom events to the logger.
```
**Note: Please also consider the functional test
-[readme](../test/functional/README.md), which provides an overview of the
+[readme](/test/functional/README.md), which provides an overview of the
test-framework**. Modules such as
-[key.py](../test/functional/test_framework/key.py),
-[script.py](../test/functional/test_framework/script.py) and
-[messages.py](../test/functional/test_framework/messages.py) are particularly
+[key.py](/test/functional/test_framework/key.py),
+[script.py](/test/functional/test_framework/script.py) and
+[messages.py](/test/functional/test_framework/messages.py) are particularly
useful in constructing objects which can be passed to the bitcoind nodes managed
by a running `TestShell` object.
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index cfd923bab3..f0dc866f69 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -28,6 +28,7 @@ from .messages import (
ser_uint256,
tx_from_hex,
uint256_from_str,
+ WITNESS_SCALE_FACTOR,
)
from .script import (
CScript,
@@ -45,7 +46,6 @@ from .script_util import (
)
from .util import assert_equal
-WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR
diff --git a/test/functional/test_framework/crypto/bip324_cipher.py b/test/functional/test_framework/crypto/bip324_cipher.py
index 56190647f2..c9f0fa0151 100644
--- a/test/functional/test_framework/crypto/bip324_cipher.py
+++ b/test/functional/test_framework/crypto/bip324_cipher.py
@@ -25,6 +25,8 @@ def pad16(x):
def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext):
"""Encrypt a plaintext using ChaCha20Poly1305."""
+ if plaintext is None:
+ return None
ret = bytearray()
msg_len = len(plaintext)
for i in range((msg_len + 63) // 64):
@@ -42,7 +44,7 @@ def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext):
def aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext):
"""Decrypt a ChaCha20Poly1305 ciphertext."""
- if len(ciphertext) < 16:
+ if ciphertext is None or len(ciphertext) < 16:
return None
msg_len = len(ciphertext) - 16
poly1305 = Poly1305(chacha20_block(key, nonce, 0)[:32])
@@ -191,11 +193,11 @@ class TestFrameworkAEAD(unittest.TestCase):
dec_aead = FSChaCha20Poly1305(key)
for _ in range(msg_idx):
- enc_aead.encrypt(b"", b"")
+ enc_aead.encrypt(b"", None)
ciphertext = enc_aead.encrypt(aad, plain)
self.assertEqual(hex_cipher, ciphertext.hex())
for _ in range(msg_idx):
- dec_aead.decrypt(b"", bytes(16))
+ dec_aead.decrypt(b"", None)
plaintext = dec_aead.decrypt(aad, ciphertext)
self.assertEqual(plain, plaintext)
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 1780678de1..4e496a9275 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -46,6 +46,7 @@ MAX_PROTOCOL_MESSAGE_LENGTH = 4000000 # Maximum length of incoming protocol mes
MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result
MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message
+NODE_NONE = 0
NODE_NETWORK = (1 << 0)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py
index 838f40fcaa..08d41fe97f 100644
--- a/test/functional/test_framework/netutil.py
+++ b/test/functional/test_framework/netutil.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
-Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
+Roughly based on https://web.archive.org/web/20190424172231/http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
@@ -158,3 +158,12 @@ def test_ipv6_local():
except socket.error:
have_ipv6 = False
return have_ipv6
+
+def test_unix_socket():
+ '''Return True if UNIX sockets are available on this platform.'''
+ try:
+ socket.AF_UNIX
+ except AttributeError:
+ return False
+ else:
+ return True
diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index dc04696114..00bd1e4017 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -585,22 +585,22 @@ class P2PInterface(P2PConnection):
wait_until_helper_internal(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
- def wait_for_connect(self, timeout=60):
+ def wait_for_connect(self, *, timeout=60):
test_function = lambda: self.is_connected
self.wait_until(test_function, timeout=timeout, check_connected=False)
- def wait_for_disconnect(self, timeout=60):
+ def wait_for_disconnect(self, *, timeout=60):
test_function = lambda: not self.is_connected
self.wait_until(test_function, timeout=timeout, check_connected=False)
- def wait_for_reconnect(self, timeout=60):
+ def wait_for_reconnect(self, *, timeout=60):
def test_function():
return self.is_connected and self.last_message.get('version') and not self.supports_v2_p2p
self.wait_until(test_function, timeout=timeout, check_connected=False)
# Message receiving helper methods
- def wait_for_tx(self, txid, timeout=60):
+ def wait_for_tx(self, txid, *, timeout=60):
def test_function():
if not self.last_message.get('tx'):
return False
@@ -608,13 +608,13 @@ class P2PInterface(P2PConnection):
self.wait_until(test_function, timeout=timeout)
- def wait_for_block(self, blockhash, timeout=60):
+ def wait_for_block(self, blockhash, *, timeout=60):
def test_function():
return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
self.wait_until(test_function, timeout=timeout)
- def wait_for_header(self, blockhash, timeout=60):
+ def wait_for_header(self, blockhash, *, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
@@ -623,7 +623,7 @@ class P2PInterface(P2PConnection):
self.wait_until(test_function, timeout=timeout)
- def wait_for_merkleblock(self, blockhash, timeout=60):
+ def wait_for_merkleblock(self, blockhash, *, timeout=60):
def test_function():
last_filtered_block = self.last_message.get('merkleblock')
if not last_filtered_block:
@@ -632,7 +632,7 @@ class P2PInterface(P2PConnection):
self.wait_until(test_function, timeout=timeout)
- def wait_for_getdata(self, hash_list, timeout=60):
+ def wait_for_getdata(self, hash_list, *, timeout=60):
"""Waits for a getdata message.
The object hashes in the inventory vector must match the provided hash_list."""
@@ -644,19 +644,21 @@ class P2PInterface(P2PConnection):
self.wait_until(test_function, timeout=timeout)
- def wait_for_getheaders(self, timeout=60):
- """Waits for a getheaders message.
+ def wait_for_getheaders(self, block_hash=None, *, timeout=60):
+ """Waits for a getheaders message containing a specific block hash.
- Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
- value must be explicitly cleared before calling this method, or this will return
- immediately with success. TODO: change this method to take a hash value and only
- return true if the correct block header has been requested."""
+ If no block hash is provided, checks whether any getheaders message has been received by the node."""
def test_function():
- return self.last_message.get("getheaders")
+ last_getheaders = self.last_message.pop("getheaders", None)
+ if block_hash is None:
+ return last_getheaders
+ if last_getheaders is None:
+ return False
+ return block_hash == last_getheaders.locator.vHave[0]
self.wait_until(test_function, timeout=timeout)
- def wait_for_inv(self, expected_inv, timeout=60):
+ def wait_for_inv(self, expected_inv, *, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
@@ -668,7 +670,7 @@ class P2PInterface(P2PConnection):
self.wait_until(test_function, timeout=timeout)
- def wait_for_verack(self, timeout=60):
+ def wait_for_verack(self, *, timeout=60):
def test_function():
return "verack" in self.last_message
@@ -681,11 +683,11 @@ class P2PInterface(P2PConnection):
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
- def send_and_ping(self, message, timeout=60):
+ def send_and_ping(self, message, *, timeout=60):
self.send_message(message)
self.sync_with_ping(timeout=timeout)
- def sync_with_ping(self, timeout=60):
+ def sync_with_ping(self, *, timeout=60):
"""Ensure ProcessMessages and SendMessages is called on this connection"""
# Sending two pings back-to-back, requires that the node calls
# `ProcessMessage` twice, and thus ensures `SendMessages` must have
@@ -726,7 +728,7 @@ class NetworkThread(threading.Thread):
"""Start the network thread."""
self.network_event_loop.run_forever()
- def close(self, timeout=10):
+ def close(self, *, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until_helper_internal(lambda: not self.network_event_loop.is_running(), timeout=timeout)
@@ -933,7 +935,7 @@ class P2PTxInvStore(P2PInterface):
with p2p_lock:
return list(self.tx_invs_received.keys())
- def wait_for_broadcast(self, txns, timeout=60):
+ def wait_for_broadcast(self, txns, *, timeout=60):
"""Waits for the txns (list of txids) to complete initial broadcast.
The mempool should mark unbroadcast=False for these transactions.
"""
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
index 78d8580794..7b19d31e17 100644
--- a/test/functional/test_framework/script.py
+++ b/test/functional/test_framework/script.py
@@ -483,7 +483,7 @@ class CScript(bytes):
i = 0
while i < len(self):
sop_idx = i
- opcode = self[i]
+ opcode = CScriptOp(self[i])
i += 1
if opcode > OP_PUSHDATA4:
@@ -590,7 +590,7 @@ class CScript(bytes):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
- n += opcode.decode_op_n()
+ n += lastOpcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
@@ -747,7 +747,7 @@ def SegwitV0SignatureMsg(script, txTo, inIdx, hashtype, amount):
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
- ss += struct.pack("<i", txTo.nLockTime)
+ ss += txTo.nLockTime.to_bytes(4, "little")
ss += struct.pack("<I", hashtype)
return ss
@@ -782,6 +782,20 @@ class TestFrameworkScript(unittest.TestCase):
for value in values:
self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)
+ def test_legacy_sigopcount(self):
+ # test repeated single sig ops
+ for n_ops in range(1, 100, 10):
+ for singlesig_op in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
+ singlesigs_script = CScript([singlesig_op]*n_ops)
+ self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=False), n_ops)
+ self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=True), n_ops)
+ # test multisig op (including accurate counting, i.e. BIP16)
+ for n in range(1, 16+1):
+ for multisig_op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
+ multisig_script = CScript([CScriptOp.encode_op_n(n), multisig_op])
+ self.assertEqual(multisig_script.GetSigOpCount(fAccurate=False), 20)
+ self.assertEqual(multisig_script.GetSigOpCount(fAccurate=True), n)
+
def BIP341_sha_prevouts(txTo):
return sha256(b"".join(i.prevout.serialize() for i in txTo.vin))
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index d8ae20981d..a2f767cc98 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -96,6 +96,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain: str = 'regtest'
self.setup_clean_chain: bool = False
+ self.noban_tx_relay: bool = False
self.nodes: list[TestNode] = []
self.extra_args = None
self.network_thread = None
@@ -163,7 +164,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
- parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
+ parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs (must not exist)")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
@@ -191,6 +192,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
parser.add_argument("--timeout-factor", dest="timeout_factor", type=float, help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts")
parser.add_argument("--v2transport", dest="v2transport", default=False, action="store_true",
help="use BIP324 v2 connections between all nodes by default")
+ parser.add_argument("--v1transport", dest="v1transport", default=False, action="store_true",
+ help="Explicitly use v1 transport (can be used to overwrite global --v2transport option)")
self.add_options(parser)
# Running TestShell in a Jupyter notebook causes an additional -f argument
@@ -206,6 +209,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
+ if self.options.v1transport:
+ self.options.v2transport=False
if "descriptors" not in self.options:
# Wallet is not required by the test at all and the value of self.options.descriptors won't matter.
@@ -494,6 +499,10 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
+ # Whitelist peers to speed up tx relay / mempool sync. Don't use it if testing tx relay or timing.
+ if self.noban_tx_relay:
+ for i in range(len(extra_args)):
+ extra_args[i] = extra_args[i] + ["-whitelist=noban,in,out@127.0.0.1"]
if versions is None:
versions = [None] * num_nodes
if binary is None:
@@ -577,10 +586,16 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
# Wait for nodes to stop
node.wait_until_stopped()
- def restart_node(self, i, extra_args=None):
+ def restart_node(self, i, extra_args=None, clear_addrman=False):
"""Stop and start a test node"""
self.stop_node(i)
- self.start_node(i, extra_args)
+ if clear_addrman:
+ peers_dat = self.nodes[i].chain_path / "peers.dat"
+ os.remove(peers_dat)
+ with self.nodes[i].assert_debug_log(expected_msgs=[f'Creating peers.dat because the file was not found ("{peers_dat}")']):
+ self.start_node(i, extra_args)
+ else:
+ self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 838dcba141..67e0be5280 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -136,9 +136,7 @@ class TestNode():
self.args.append("-v2transport=1")
else:
self.args.append("-v2transport=0")
- else:
- # v2transport requested but not supported for node
- assert not v2transport
+ # if v2transport is requested via global flag but not supported for node version, ignore it
self.cli = TestNodeCLI(bitcoin_cli, self.datadir_path)
self.use_cli = use_cli
@@ -667,7 +665,7 @@ class TestNode():
assert_msg += "with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
- def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=False, **kwargs):
+ def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=None, wait_for_v2_handshake=True, **kwargs):
"""Add an inbound p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
@@ -684,6 +682,9 @@ class TestNode():
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
+ if supports_v2_p2p is None:
+ supports_v2_p2p = self.use_v2transport
+
p2p_conn.p2p_connected_to_node = True
if self.use_v2transport:
@@ -693,6 +694,8 @@ class TestNode():
self.p2ps.append(p2p_conn)
p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
+ if supports_v2_p2p and wait_for_v2_handshake:
+ p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake)
if send_version:
p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg)
if wait_for_verack:
@@ -721,7 +724,7 @@ class TestNode():
return p2p_conn
- def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=False, advertise_v2_p2p=False, **kwargs):
+ def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, wait_for_disconnect=False, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=None, advertise_v2_p2p=None, **kwargs):
"""Add an outbound p2p connection from node. Must be an
"outbound-full-relay", "block-relay-only", "addr-fetch" or "feeler" connection.
@@ -749,6 +752,11 @@ class TestNode():
self.addconnection('%s:%d' % (address, port), connection_type, advertise_v2_p2p)
p2p_conn.p2p_connected_to_node = False
+ if supports_v2_p2p is None:
+ supports_v2_p2p = self.use_v2transport
+ if advertise_v2_p2p is None:
+ advertise_v2_p2p = self.use_v2transport
+
if advertise_v2_p2p:
kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2
assert self.use_v2transport # only a v2 TestNode could make a v2 outbound connection
@@ -763,7 +771,7 @@ class TestNode():
if reconnect:
p2p_conn.wait_for_reconnect()
- if connection_type == "feeler":
+ if connection_type == "feeler" or wait_for_disconnect:
# feeler connections are closed as soon as the node receives a `version` message
p2p_conn.wait_until(lambda: p2p_conn.message_count["version"] == 1, check_connected=False)
p2p_conn.wait_until(lambda: not p2p_conn.is_connected, check_connected=False)
@@ -771,6 +779,8 @@ class TestNode():
p2p_conn.wait_for_connect()
self.p2ps.append(p2p_conn)
+ if supports_v2_p2p:
+ p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake)
p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg)
if wait_for_verack:
p2p_conn.wait_for_verack()
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index b4b05b1597..0de09b6440 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -52,7 +52,24 @@ def assert_fee_amount(fee, tx_size, feerate_BTC_kvB):
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
+def summarise_dict_differences(thing1, thing2):
+ if not isinstance(thing1, dict) or not isinstance(thing2, dict):
+ return thing1, thing2
+ d1, d2 = {}, {}
+ for k in sorted(thing1.keys()):
+ if k not in thing2:
+ d1[k] = thing1[k]
+ elif thing1[k] != thing2[k]:
+ d1[k], d2[k] = summarise_dict_differences(thing1[k], thing2[k])
+ for k in sorted(thing2.keys()):
+ if k not in thing1:
+ d2[k] = thing2[k]
+ return d1, d2
+
def assert_equal(thing1, thing2, *args):
+ if thing1 != thing2 and not args and isinstance(thing1, dict) and isinstance(thing2, dict):
+ d1,d2 = summarise_dict_differences(thing1, thing2)
+ raise AssertionError("not(%s == %s)\n in particular not(%s == %s)" % (thing1, thing2, d1, d2))
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
@@ -479,6 +496,65 @@ def check_node_connections(*, node, num_in, num_out):
assert_equal(info["connections_in"], num_in)
assert_equal(info["connections_out"], num_out)
+def fill_mempool(test_framework, node, miniwallet):
+ """Fill mempool until eviction.
+
+ Allows for simpler testing of scenarios with floating mempoolminfee > minrelay
+ Requires -datacarriersize=100000 and
+ -maxmempool=5.
+ It will not ensure mempools become synced as it
+ is based on a single node and assumes -minrelaytxfee
+ is 1 sat/vbyte.
+ To avoid unintentional tx dependencies, it is recommended to use separate miniwallets for
+ mempool filling vs transactions in tests.
+ """
+ test_framework.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises")
+ txouts = gen_return_txouts()
+ relayfee = node.getnetworkinfo()['relayfee']
+
+ assert_equal(relayfee, Decimal('0.00001000'))
+
+ tx_batch_size = 1
+ num_of_batches = 75
+ # Generate UTXOs to flood the mempool
+ # 1 to create a tx initially that will be evicted from the mempool later
+ # 75 transactions each with a fee rate higher than the previous one
+ test_framework.generate(miniwallet, 1 + (num_of_batches * tx_batch_size))
+
+ # Mine COINBASE_MATURITY - 1 blocks so that the UTXOs are allowed to be spent
+ test_framework.generate(node, 100 - 1)
+
+ # Get all UTXOs up front to ensure none of the transactions spend from each other, as that may
+ # change their effective feerate and thus the order in which they are selected for eviction.
+ confirmed_utxos = [miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)]
+ assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1)
+
+ test_framework.log.debug("Create a mempool tx that will be evicted")
+ tx_to_be_evicted_id = miniwallet.send_self_transfer(from_node=node, utxo_to_spend=confirmed_utxos[0], fee_rate=relayfee)["txid"]
+ del confirmed_utxos[0]
+
+ # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool
+ # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB)
+ # by 130 should result in a fee that corresponds to 2x of that fee rate
+ base_fee = relayfee * 130
+
+ test_framework.log.debug("Fill up the mempool with txs with higher fee rate")
+ with node.assert_debug_log(["rolling minimum fee bumped"]):
+ for batch_of_txid in range(num_of_batches):
+ fee = (batch_of_txid + 1) * base_fee
+ utxos = confirmed_utxos[:tx_batch_size]
+ create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts, utxos)
+ del confirmed_utxos[:tx_batch_size]
+
+ test_framework.log.debug("The tx should be evicted by now")
+ # The number of transactions created should be greater than the ones present in the mempool
+ assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool()))
+ # Initial tx created should not be present in the mempool anymore as it had a lower fee rate
+ assert tx_to_be_evicted_id not in node.getrawmempool()
+
+ test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee")
+ assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
+ assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
# Transaction/Block functions
#############################
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index 53c8e1b0cc..470ed08ed4 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -286,11 +286,12 @@ class MiniWallet:
utxos_to_spend: Optional[list[dict]] = None,
num_outputs=1,
amount_per_output=0,
+ version=2,
locktime=0,
sequence=0,
fee_per_output=1000,
target_weight=0,
- confirmed_only=False
+ confirmed_only=False,
):
"""
Create and return a transaction that spends the given UTXOs and creates a
@@ -313,6 +314,7 @@ class MiniWallet:
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=seq) for utxo_to_spend, seq in zip(utxos_to_spend, sequence)]
tx.vout = [CTxOut(amount_per_output, bytearray(self._scriptPubKey)) for _ in range(num_outputs)]
+ tx.nVersion = version
tx.nLockTime = locktime
self.sign_tx(tx)
@@ -337,14 +339,15 @@ class MiniWallet:
"tx": tx,
}
- def create_self_transfer(self, *,
+ def create_self_transfer(
+ self,
+ *,
fee_rate=Decimal("0.003"),
fee=Decimal("0"),
utxo_to_spend=None,
- locktime=0,
- sequence=0,
target_weight=0,
- confirmed_only=False
+ confirmed_only=False,
+ **kwargs,
):
"""Create and return a tx with the specified fee. If fee is 0, use fee_rate, where the resulting fee may be exact or at most one satoshi higher than needed."""
utxo_to_spend = utxo_to_spend or self.get_utxo(confirmed_only=confirmed_only)
@@ -360,7 +363,12 @@ class MiniWallet:
send_value = utxo_to_spend["value"] - (fee or (fee_rate * vsize / 1000))
# create tx
- tx = self.create_self_transfer_multi(utxos_to_spend=[utxo_to_spend], locktime=locktime, sequence=sequence, amount_per_output=int(COIN * send_value), target_weight=target_weight)
+ tx = self.create_self_transfer_multi(
+ utxos_to_spend=[utxo_to_spend],
+ amount_per_output=int(COIN * send_value),
+ target_weight=target_weight,
+ **kwargs,
+ )
if not target_weight:
assert_equal(tx["tx"].get_vsize(), vsize)
tx["new_utxo"] = tx.pop("new_utxos")[0]
diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py
index 44811918bf..2168e607b2 100755
--- a/test/functional/test_framework/wallet_util.py
+++ b/test/functional/test_framework/wallet_util.py
@@ -4,6 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful util functions for testing the wallet"""
from collections import namedtuple
+import unittest
from test_framework.address import (
byte_to_base58,
@@ -15,6 +16,11 @@ from test_framework.address import (
script_to_p2wsh,
)
from test_framework.key import ECKey
+from test_framework.messages import (
+ CTxIn,
+ CTxInWitness,
+ WITNESS_SCALE_FACTOR,
+)
from test_framework.script_util import (
key_to_p2pkh_script,
key_to_p2wpkh_script,
@@ -123,6 +129,19 @@ def generate_keypair(compressed=True, wif=False):
privkey = bytes_to_wif(privkey.get_bytes(), compressed)
return privkey, pubkey
+def calculate_input_weight(scriptsig_hex, witness_stack_hex=None):
+ """Given a scriptSig and a list of witness stack items for an input in hex format,
+ calculate the total input weight. If the input has no witness data,
+ `witness_stack_hex` can be set to None."""
+ tx_in = CTxIn(scriptSig=bytes.fromhex(scriptsig_hex))
+ witness_size = 0
+ if witness_stack_hex is not None:
+ tx_inwit = CTxInWitness()
+ for witness_item_hex in witness_stack_hex:
+ tx_inwit.scriptWitness.stack.append(bytes.fromhex(witness_item_hex))
+ witness_size = len(tx_inwit.serialize())
+ return len(tx_in.serialize()) * WITNESS_SCALE_FACTOR + witness_size
+
class WalletUnlock():
"""
A context manager for unlocking a wallet with a passphrase and automatically locking it afterward.
@@ -141,3 +160,42 @@ class WalletUnlock():
def __exit__(self, *args):
_ = args
self.wallet.walletlock()
+
+
+class TestFrameworkWalletUtil(unittest.TestCase):
+ def test_calculate_input_weight(self):
+ SKELETON_BYTES = 32 + 4 + 4 # prevout-txid, prevout-index, sequence
+ SMALL_LEN_BYTES = 1 # bytes needed for encoding scriptSig / witness item lengths < 253
+ LARGE_LEN_BYTES = 3 # bytes needed for encoding scriptSig / witness item lengths >= 253
+
+ # empty scriptSig, no witness
+ self.assertEqual(calculate_input_weight(""),
+ (SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR)
+ self.assertEqual(calculate_input_weight("", None),
+ (SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR)
+ # small scriptSig, no witness
+ scriptSig_small = "00"*252
+ self.assertEqual(calculate_input_weight(scriptSig_small, None),
+ (SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR)
+ # small scriptSig, empty witness stack
+ self.assertEqual(calculate_input_weight(scriptSig_small, []),
+ (SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR + SMALL_LEN_BYTES)
+ # large scriptSig, no witness
+ scriptSig_large = "00"*253
+ self.assertEqual(calculate_input_weight(scriptSig_large, None),
+ (SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR)
+ # large scriptSig, empty witness stack
+ self.assertEqual(calculate_input_weight(scriptSig_large, []),
+ (SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR + SMALL_LEN_BYTES)
+ # empty scriptSig, 5 small witness stack items
+ self.assertEqual(calculate_input_weight("", ["00", "11", "22", "33", "44"]),
+ ((SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 5 * SMALL_LEN_BYTES + 5)
+ # empty scriptSig, 253 small witness stack items
+ self.assertEqual(calculate_input_weight("", ["00"]*253),
+ ((SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + LARGE_LEN_BYTES + 253 * SMALL_LEN_BYTES + 253)
+ # small scriptSig, 3 large witness stack items
+ self.assertEqual(calculate_input_weight(scriptSig_small, ["00"*253]*3),
+ ((SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 3 * LARGE_LEN_BYTES + 3*253)
+ # large scriptSig, 3 large witness stack items
+ self.assertEqual(calculate_input_weight(scriptSig_large, ["00"*253]*3),
+ ((SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 3 * LARGE_LEN_BYTES + 3*253)
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index d037ccf6dd..2b0b24ec05 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -26,7 +26,6 @@ import sys
import tempfile
import re
import logging
-import unittest
os.environ["REQUIRE_WALLET_TYPE_SET"] = "1"
@@ -70,22 +69,7 @@ if platform.system() != 'Windows' or sys.getwindowsversion() >= (10, 0, 14393):
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
-# List of framework modules containing unit tests. Should be kept in sync with
-# the output of `git grep unittest.TestCase ./test/functional/test_framework`
-TEST_FRAMEWORK_MODULES = [
- "address",
- "crypto.bip324_cipher",
- "blocktools",
- "crypto.chacha20",
- "crypto.ellswift",
- "key",
- "messages",
- "crypto.muhash",
- "crypto.poly1305",
- "crypto.ripemd160",
- "script",
- "segwit_addr",
-]
+TEST_FRAMEWORK_UNIT_TESTS = 'feature_framework_unit_tests.py'
EXTENDED_SCRIPTS = [
# These tests are not run by default.
@@ -120,7 +104,7 @@ BASE_SCRIPTS = [
'wallet_backup.py --legacy-wallet',
'wallet_backup.py --descriptors',
'feature_segwit.py --legacy-wallet',
- 'feature_segwit.py --descriptors',
+ 'feature_segwit.py --descriptors --v1transport',
'feature_segwit.py --descriptors --v2transport',
'p2p_tx_download.py',
'wallet_avoidreuse.py --legacy-wallet',
@@ -156,7 +140,7 @@ BASE_SCRIPTS = [
# vv Tests less than 30s vv
'p2p_invalid_messages.py',
'rpc_createmultisig.py',
- 'p2p_timeouts.py',
+ 'p2p_timeouts.py --v1transport',
'p2p_timeouts.py --v2transport',
'wallet_dump.py --legacy-wallet',
'rpc_signer.py',
@@ -181,6 +165,8 @@ BASE_SCRIPTS = [
'wallet_keypool_topup.py --legacy-wallet',
'wallet_keypool_topup.py --descriptors',
'wallet_fast_rescan.py --descriptors',
+ 'wallet_gethdkeys.py --descriptors',
+ 'wallet_createwalletdescriptor.py --descriptors',
'interface_zmq.py',
'rpc_invalid_address_message.py',
'rpc_validateaddress.py',
@@ -197,11 +183,13 @@ BASE_SCRIPTS = [
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'rpc_misc.py',
+ 'p2p_1p1c_network.py',
+ 'p2p_opportunistic_1p1c.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'wallet_avoid_mixing_output_types.py --descriptors',
'mempool_reorg.py',
- 'p2p_block_sync.py',
+ 'p2p_block_sync.py --v1transport',
'p2p_block_sync.py --v2transport',
'wallet_createwallet.py --legacy-wallet',
'wallet_createwallet.py --usecli',
@@ -230,13 +218,13 @@ BASE_SCRIPTS = [
'wallet_transactiontime_rescan.py --descriptors',
'wallet_transactiontime_rescan.py --legacy-wallet',
'p2p_addrv2_relay.py',
- 'p2p_compactblocks_hb.py',
+ 'p2p_compactblocks_hb.py --v1transport',
'p2p_compactblocks_hb.py --v2transport',
- 'p2p_disconnect_ban.py',
+ 'p2p_disconnect_ban.py --v1transport',
'p2p_disconnect_ban.py --v2transport',
'feature_posix_fs_permissions.py',
'rpc_decodescript.py',
- 'rpc_blockchain.py',
+ 'rpc_blockchain.py --v1transport',
'rpc_blockchain.py --v2transport',
'rpc_deprecated.py',
'wallet_disable.py',
@@ -246,26 +234,28 @@ BASE_SCRIPTS = [
'p2p_getaddr_caching.py',
'p2p_getdata.py',
'p2p_addrfetch.py',
- 'rpc_net.py',
+ 'rpc_net.py --v1transport',
'rpc_net.py --v2transport',
'wallet_keypool.py --legacy-wallet',
'wallet_keypool.py --descriptors',
'wallet_descriptor.py --descriptors',
'p2p_nobloomfilter_messages.py',
+ TEST_FRAMEWORK_UNIT_TESTS,
'p2p_filter.py',
- 'rpc_setban.py',
+ 'rpc_setban.py --v1transport',
'rpc_setban.py --v2transport',
'p2p_blocksonly.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
- 'p2p_invalid_block.py',
+ 'p2p_invalid_block.py --v1transport',
'p2p_invalid_block.py --v2transport',
- 'p2p_invalid_tx.py',
+ 'p2p_invalid_tx.py --v1transport',
'p2p_invalid_tx.py --v2transport',
'p2p_v2_transport.py',
'p2p_v2_encrypted.py',
'p2p_v2_earlykeyresponse.py',
'example_test.py',
+ 'mempool_accept_v3.py',
'wallet_txn_doublespend.py --legacy-wallet',
'wallet_multisig_descriptor_psbt.py --descriptors',
'wallet_txn_doublespend.py --descriptors',
@@ -285,12 +275,12 @@ BASE_SCRIPTS = [
'rpc_preciousblock.py',
'wallet_importprunedfunds.py --legacy-wallet',
'wallet_importprunedfunds.py --descriptors',
- 'p2p_leak_tx.py',
+ 'p2p_leak_tx.py --v1transport',
'p2p_leak_tx.py --v2transport',
'p2p_eviction.py',
- 'p2p_ibd_stalling.py',
+ 'p2p_ibd_stalling.py --v1transport',
'p2p_ibd_stalling.py --v2transport',
- 'p2p_net_deadlock.py',
+ 'p2p_net_deadlock.py --v1transport',
'p2p_net_deadlock.py --v2transport',
'wallet_signmessagewithaddress.py',
'rpc_signmessagewithprivkey.py',
@@ -307,6 +297,7 @@ BASE_SCRIPTS = [
'wallet_crosschain.py',
'mining_basic.py',
'feature_signet.py',
+ 'p2p_mutated_blocks.py',
'wallet_implicitsegwit.py --legacy-wallet',
'rpc_named_arguments.py',
'feature_startupnotify.py',
@@ -379,7 +370,7 @@ BASE_SCRIPTS = [
'feature_coinstatsindex.py',
'wallet_orphanedreward.py',
'wallet_timelock.py',
- 'p2p_node_network_limited.py',
+ 'p2p_node_network_limited.py --v1transport',
'p2p_node_network_limited.py --v2transport',
'p2p_permissions.py',
'feature_blocksdir.py',
@@ -393,6 +384,8 @@ BASE_SCRIPTS = [
'rpc_getdescriptorinfo.py',
'rpc_mempool_info.py',
'rpc_help.py',
+ 'p2p_handshake.py',
+ 'p2p_handshake.py --v2transport',
'feature_dirsymlinks.py',
'feature_help.py',
'feature_shutdown.py',
@@ -434,6 +427,7 @@ def main():
parser.add_argument('--failfast', '-F', action='store_true', help='stop execution after the first test failure')
parser.add_argument('--filter', help='filter scripts to run by regular expression')
+
args, unknown_args = parser.parse_known_args()
if not args.ansi:
global DEFAULT, BOLD, GREEN, RED
@@ -469,7 +463,7 @@ def main():
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
- sys.exit(0)
+ sys.exit(1)
# Build list of tests
test_list = []
@@ -518,7 +512,7 @@ def main():
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
- sys.exit(0)
+ sys.exit(1)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
@@ -562,21 +556,12 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
- # Test Framework Tests
- print("Running Unit Tests for Test Framework Modules")
tests_dir = src_dir + '/test/functional/'
# This allows `test_runner.py` to work from an out-of-source build directory using a symlink,
# a hard link or a copy on any platform. See https://github.com/bitcoin/bitcoin/pull/27561.
sys.path.append(tests_dir)
- test_framework_tests = unittest.TestSuite()
- for module in TEST_FRAMEWORK_MODULES:
- test_framework_tests.addTest(unittest.TestLoader().loadTestsFromName("test_framework.{}".format(module)))
- result = unittest.TextTestRunner(verbosity=1, failfast=True).run(test_framework_tests)
- if not result.wasSuccessful():
- sys.exit("Early exiting after failure in TestFramework unit tests")
-
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
@@ -609,14 +594,12 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
all_passed = True
- i = 0
- while i < test_count:
+ while not job_queue.done():
if failfast and not all_passed:
break
for test_result, testdir, stdout, stderr, skip_reason in job_queue.get_next():
test_results.append(test_result)
- i += 1
- done_str = "{}/{} - {}{}{}".format(i, test_count, BOLD[1], test_result.name, BOLD[0])
+ done_str = f"{len(test_results)}/{test_count} - {BOLD[1]}{test_result.name}{BOLD[0]}"
if test_result.status == "Passed":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
elif test_result.status == "Skipped":
@@ -701,14 +684,15 @@ class TestHandler:
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
- self.num_running = 0
self.jobs = []
self.use_term_control = use_term_control
+ def done(self):
+ return not (self.jobs or self.test_list)
+
def get_next(self):
- while self.num_running < self.num_jobs and self.test_list:
+ while len(self.jobs) < self.num_jobs and self.test_list:
# Add tests
- self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
@@ -752,7 +736,6 @@ class TestHandler:
skip_reason = re.search(r"Test Skipped: (.*)", stdout).group(1)
else:
status = "Failed"
- self.num_running -= 1
self.jobs.remove(job)
if self.use_term_control:
clearline = '\r' + (' ' * dot_count) + '\r'
diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py
index 2691507773..dda48aae1b 100755
--- a/test/functional/wallet_abandonconflict.py
+++ b/test/functional/wallet_abandonconflict.py
@@ -28,8 +28,7 @@ class AbandonConflictTest(BitcoinTestFramework):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
# whitelist peers to speed up tx relay / mempool sync
- for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1")
+ self.noban_tx_relay = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -232,7 +231,11 @@ class AbandonConflictTest(BitcoinTestFramework):
balance = newbalance
# Invalidate the block with the double spend. B & C's 10 BTC outputs should no longer be available
- self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ blk = self.nodes[0].getbestblockhash()
+ # mine 10 blocks so that when the blk is invalidated, the transactions are not
+ # returned to the mempool
+ self.generate(self.nodes[1], 10)
+ self.nodes[0].invalidateblock(blk)
assert_equal(alice.gettransaction(txAB1)["confirmations"], 0)
newbalance = alice.getbalance()
assert_equal(newbalance, balance - Decimal("20"))
diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py
index be5b3ebadb..6b27b32dea 100755
--- a/test/functional/wallet_address_types.py
+++ b/test/functional/wallet_address_types.py
@@ -79,9 +79,8 @@ class AddressTypeTest(BitcoinTestFramework):
["-changetype=p2sh-segwit"],
[],
]
- # whitelist all peers to speed up tx relay / mempool sync
- for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1")
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.supports_cli = False
def skip_test_if_missing_module(self):
diff --git a/test/functional/wallet_assumeutxo.py b/test/functional/wallet_assumeutxo.py
index 3c1a997bd1..30396da015 100755
--- a/test/functional/wallet_assumeutxo.py
+++ b/test/functional/wallet_assumeutxo.py
@@ -62,8 +62,6 @@ class AssumeutxoTest(BitcoinTestFramework):
for n in self.nodes:
n.setmocktime(n.getblockheader(n.getbestblockhash())['time'])
- self.sync_blocks()
-
n0.createwallet('w')
w = n0.get_wallet_rpc("w")
diff --git a/test/functional/wallet_avoid_mixing_output_types.py b/test/functional/wallet_avoid_mixing_output_types.py
index 861765f452..66fbf780e5 100755
--- a/test/functional/wallet_avoid_mixing_output_types.py
+++ b/test/functional/wallet_avoid_mixing_output_types.py
@@ -112,15 +112,15 @@ class AddressInputTypeGrouping(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [
[
"-addresstype=bech32",
- "-whitelist=noban@127.0.0.1",
"-txindex",
],
[
"-addresstype=p2sh-segwit",
- "-whitelist=noban@127.0.0.1",
"-txindex",
],
]
diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py
index 9d3c55d6b6..4983bfda7f 100755
--- a/test/functional/wallet_avoidreuse.py
+++ b/test/functional/wallet_avoidreuse.py
@@ -69,9 +69,8 @@ class AvoidReuseTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
- # This test isn't testing txn relay/timing, so set whitelist on the
- # peers for instant txn relay. This speeds up the test run time 2-3x.
- self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py
index eb3e0ae728..d03b08bcc4 100755
--- a/test/functional/wallet_backup.py
+++ b/test/functional/wallet_backup.py
@@ -50,13 +50,14 @@ class WalletBackupTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
- # nodes 1, 2,3 are spenders, let's give them a keypool=100
- # whitelist all peers to speed up tx relay / mempool sync
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
+ # nodes 1, 2, 3 are spenders, let's give them a keypool=100
self.extra_args = [
- ["-whitelist=noban@127.0.0.1", "-keypool=100"],
- ["-whitelist=noban@127.0.0.1", "-keypool=100"],
- ["-whitelist=noban@127.0.0.1", "-keypool=100"],
- ["-whitelist=noban@127.0.0.1"],
+ ["-keypool=100"],
+ ["-keypool=100"],
+ ["-keypool=100"],
+ [],
]
self.rpc_timeout = 120
diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py
index 4d6e6024c5..ab008a40cd 100755
--- a/test/functional/wallet_backwards_compatibility.py
+++ b/test/functional/wallet_backwards_compatibility.py
@@ -355,6 +355,25 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
down_wallet_name = f"re_down_{node.version}"
down_backup_path = os.path.join(self.options.tmpdir, f"{down_wallet_name}.dat")
wallet.backupwallet(down_backup_path)
+
+ # Check that taproot descriptors can be added to 0.21 wallets
+ # This must be done after the backup is created so that 0.21 can still load
+ # the backup
+ if self.options.descriptors and self.major_version_equals(node, 21):
+ assert_raises_rpc_error(-12, "No bech32m addresses available", wallet.getnewaddress, address_type="bech32m")
+ xpubs = wallet.gethdkeys(active_only=True)
+ assert_equal(len(xpubs), 1)
+ assert_equal(len(xpubs[0]["descriptors"]), 6)
+ wallet.createwalletdescriptor("bech32m")
+ xpubs = wallet.gethdkeys(active_only=True)
+ assert_equal(len(xpubs), 1)
+ assert_equal(len(xpubs[0]["descriptors"]), 8)
+ tr_descs = [desc["desc"] for desc in xpubs[0]["descriptors"] if desc["desc"].startswith("tr(")]
+ assert_equal(len(tr_descs), 2)
+ for desc in tr_descs:
+ assert info["hdmasterfingerprint"] in desc
+ wallet.getnewaddress(address_type="bech32m")
+
wallet.unloadwallet()
# Check that no automatic upgrade broke the downgrading the wallet
diff --git a/test/functional/wallet_balance.py b/test/functional/wallet_balance.py
index af9270a321..c322ae52c1 100755
--- a/test/functional/wallet_balance.py
+++ b/test/functional/wallet_balance.py
@@ -53,15 +53,14 @@ class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [
# Limit mempool descendants as a hack to have wallet txs rejected from the mempool.
# Set walletrejectlongchains=0 so the wallet still creates the transactions.
['-limitdescendantcount=3', '-walletrejectlongchains=0'],
[],
]
- # whitelist peers to speed up tx relay / mempool sync
- for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1")
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index 3daec4dbd1..1b2b8ec1f3 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -32,8 +32,10 @@ class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [[
- "-dustrelayfee=0", "-walletrejectlongchains=0", "-whitelist=noban@127.0.0.1"
+ "-dustrelayfee=0", "-walletrejectlongchains=0"
]] * self.num_nodes
self.setup_clean_chain = True
self.supports_cli = False
@@ -682,7 +684,7 @@ class WalletTest(BitcoinTestFramework):
"category": baz["category"],
"vout": baz["vout"]}
expected_fields = frozenset({'amount', 'bip125-replaceable', 'confirmations', 'details', 'fee',
- 'hex', 'lastprocessedblock', 'time', 'timereceived', 'trusted', 'txid', 'wtxid', 'walletconflicts'})
+ 'hex', 'lastprocessedblock', 'time', 'timereceived', 'trusted', 'txid', 'wtxid', 'walletconflicts', 'mempoolconflicts'})
verbose_field = "decoded"
expected_verbose_fields = expected_fields | {verbose_field}
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index fea933a93b..5b7db55f45 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -55,11 +55,12 @@ class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [[
"-walletrbf={}".format(i),
"-mintxfee=0.00002",
"-addresstype=bech32",
- "-whitelist=noban@127.0.0.1",
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
diff --git a/test/functional/wallet_conflicts.py b/test/functional/wallet_conflicts.py
index 802b718cd5..e5739a6a59 100755
--- a/test/functional/wallet_conflicts.py
+++ b/test/functional/wallet_conflicts.py
@@ -9,6 +9,7 @@ Test that wallet correctly tracks transactions that have been conflicted by bloc
from decimal import Decimal
+from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@@ -28,6 +29,20 @@ class TxConflicts(BitcoinTestFramework):
return next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(from_tx_id)["details"] if tx_out["amount"] == Decimal(f"{search_value}"))
def run_test(self):
+ """
+ The following tests check the behavior of the wallet when
+ transaction conflicts are created. These conflicts are created
+ using raw transaction RPCs that double-spend UTXOs and have more
+ fees, replacing the original transaction.
+ """
+
+ self.test_block_conflicts()
+ self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 7, self.nodes[2].getnewaddress())
+ self.test_mempool_conflict()
+ self.test_mempool_and_block_conflicts()
+ self.test_descendants_with_mempool_conflicts()
+
+ def test_block_conflicts(self):
self.log.info("Send tx from which to conflict outputs later")
txid_conflict_from_1 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txid_conflict_from_2 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
@@ -123,5 +138,291 @@ class TxConflicts(BitcoinTestFramework):
assert_equal(former_conflicted["confirmations"], 1)
assert_equal(former_conflicted["blockheight"], 217)
+ def test_mempool_conflict(self):
+ self.nodes[0].createwallet("alice")
+ alice = self.nodes[0].get_wallet_rpc("alice")
+
+ bob = self.nodes[1]
+
+ self.nodes[2].send(outputs=[{alice.getnewaddress() : 25} for _ in range(3)])
+ self.generate(self.nodes[2], 1)
+
+ self.log.info("Test a scenario where a transaction has a mempool conflict")
+
+ unspents = alice.listunspent()
+ assert_equal(len(unspents), 3)
+ assert all([tx["amount"] == 25 for tx in unspents])
+
+ # tx1 spends unspent[0] and unspent[1]
+ raw_tx = alice.createrawtransaction(inputs=[unspents[0], unspents[1]], outputs=[{bob.getnewaddress() : 49.9999}])
+ tx1 = alice.signrawtransactionwithwallet(raw_tx)['hex']
+
+ # tx2 spends unspent[1] and unspent[2], conflicts with tx1
+ raw_tx = alice.createrawtransaction(inputs=[unspents[1], unspents[2]], outputs=[{bob.getnewaddress() : 49.99}])
+ tx2 = alice.signrawtransactionwithwallet(raw_tx)['hex']
+
+ # tx3 spends unspent[2], conflicts with tx2
+ raw_tx = alice.createrawtransaction(inputs=[unspents[2]], outputs=[{bob.getnewaddress() : 24.9899}])
+ tx3 = alice.signrawtransactionwithwallet(raw_tx)['hex']
+
+ # broadcast tx1
+ tx1_txid = alice.sendrawtransaction(tx1)
+
+ assert_equal(alice.listunspent(), [unspents[2]])
+ assert_equal(alice.getbalance(), 25)
+
+ # broadcast tx2, replaces tx1 in mempool
+ tx2_txid = alice.sendrawtransaction(tx2)
+
+ # Check that unspent[0] is now available because the transaction spending it has been replaced in the mempool
+ assert_equal(alice.listunspent(), [unspents[0]])
+ assert_equal(alice.getbalance(), 25)
+
+ assert_equal(alice.gettransaction(tx1_txid)["mempoolconflicts"], [tx2_txid])
+
+ self.log.info("Test scenario where a mempool conflict is removed")
+
+ # broadcast tx3, replaces tx2 in mempool
+ # Now that tx1's conflict has been removed, tx1 is now
+ # not conflicted, and instead is inactive until it is
+ # rebroadcasted. Now unspent[0] is not available, because
+ # tx1 is no longer conflicted.
+ alice.sendrawtransaction(tx3)
+
+ assert_equal(alice.gettransaction(tx1_txid)["mempoolconflicts"], [])
+ assert tx1_txid not in self.nodes[0].getrawmempool()
+
+ # now all of alice's outputs should be considered spent
+ # unspent[0]: spent by inactive tx1
+ # unspent[1]: spent by inactive tx1
+ # unspent[2]: spent by active tx3
+ assert_equal(alice.listunspent(), [])
+ assert_equal(alice.getbalance(), 0)
+
+ # Clean up for next test
+ bob.sendall([self.nodes[2].getnewaddress()])
+ self.generate(self.nodes[2], 1)
+
+ alice.unloadwallet()
+
+ def test_mempool_and_block_conflicts(self):
+ self.nodes[0].createwallet("alice_2")
+ alice = self.nodes[0].get_wallet_rpc("alice_2")
+ bob = self.nodes[1]
+
+ self.nodes[2].send(outputs=[{alice.getnewaddress() : 25} for _ in range(3)])
+ self.generate(self.nodes[2], 1)
+
+ self.log.info("Test a scenario where a transaction has both a block conflict and a mempool conflict")
+ unspents = [{"txid" : element["txid"], "vout" : element["vout"]} for element in alice.listunspent()]
+
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0)
+
+ # alice and bob nodes are disconnected so that transactions can be
+ # created by alice, but broadcasted from bob so that alice's wallet
+ # doesn't know about them
+ self.disconnect_nodes(0, 1)
+
+ # Sends funds to bob
+ raw_tx = alice.createrawtransaction(inputs=[unspents[0]], outputs=[{bob.getnewaddress() : 24.99999}])
+ raw_tx1 = alice.signrawtransactionwithwallet(raw_tx)['hex']
+ tx1_txid = bob.sendrawtransaction(raw_tx1) # broadcast original tx spending unspents[0] only to bob
+
+ # create a conflict to previous tx (also spends unspents[0]), but don't broadcast, sends funds back to alice
+ raw_tx = alice.createrawtransaction(inputs=[unspents[0], unspents[2]], outputs=[{alice.getnewaddress() : 49.999}])
+ tx1_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex']
+
+ # Sends funds to bob
+ raw_tx = alice.createrawtransaction(inputs=[unspents[1]], outputs=[{bob.getnewaddress() : 24.9999}])
+ raw_tx2 = alice.signrawtransactionwithwallet(raw_tx)['hex']
+ tx2_txid = bob.sendrawtransaction(raw_tx2) # broadcast another original tx spending unspents[1] only to bob
+
+ # create a conflict to previous tx (also spends unspents[1]), but don't broadcast, sends funds to alice
+ raw_tx = alice.createrawtransaction(inputs=[unspents[1]], outputs=[{alice.getnewaddress() : 24.9999}])
+ tx2_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex']
+
+ bob_unspents = [{"txid" : element, "vout" : 0} for element in [tx1_txid, tx2_txid]]
+
+ # tx1 and tx2 are now in bob's mempool, and they are unconflicted, so bob has these funds
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("49.99989000"))
+
+ # spend both of bob's unspents, child tx of tx1 and tx2
+ raw_tx = bob.createrawtransaction(inputs=[bob_unspents[0], bob_unspents[1]], outputs=[{bob.getnewaddress() : 49.999}])
+ raw_tx3 = bob.signrawtransactionwithwallet(raw_tx)['hex']
+ tx3_txid = bob.sendrawtransaction(raw_tx3) # broadcast tx only to bob
+
+ # alice knows about 0 txs, bob knows about 3
+ assert_equal(len(alice.getrawmempool()), 0)
+ assert_equal(len(bob.getrawmempool()), 3)
+
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("49.99900000"))
+
+ # bob broadcasts tx_1 conflict
+ tx1_conflict_txid = bob.sendrawtransaction(tx1_conflict)
+ assert_equal(len(alice.getrawmempool()), 0)
+ assert_equal(len(bob.getrawmempool()), 2) # tx1_conflict kicks out both tx1, and its child tx3
+
+ assert tx2_txid in bob.getrawmempool()
+ assert tx1_conflict_txid in bob.getrawmempool()
+
+ assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], [tx1_conflict_txid])
+ assert_equal(bob.gettransaction(tx2_txid)["mempoolconflicts"], [])
+ assert_equal(bob.gettransaction(tx3_txid)["mempoolconflicts"], [tx1_conflict_txid])
+
+ # check that tx3 is now conflicted, so the output from tx2 can now be spent
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000"))
+
+ # we will be disconnecting this block in the future
+ alice.sendrawtransaction(tx2_conflict)
+ assert_equal(len(alice.getrawmempool()), 1) # currently alice's mempool is only aware of tx2_conflict
+        # 11 blocks are mined so that when they are invalidated, tx2
+        # does not get put back into the mempool
+ blk = self.generate(self.nodes[0], 11, sync_fun=self.no_op)[0]
+ assert_equal(len(alice.getrawmempool()), 0) # tx2_conflict is now mined
+
+ self.connect_nodes(0, 1)
+ self.sync_blocks()
+ assert_equal(alice.getbestblockhash(), bob.getbestblockhash())
+
+ # now that tx2 has a block conflict, tx1_conflict should be the only tx in bob's mempool
+ assert tx1_conflict_txid in bob.getrawmempool()
+ assert_equal(len(bob.getrawmempool()), 1)
+
+ # tx3 should now also be block-conflicted by tx2_conflict
+ assert_equal(bob.gettransaction(tx3_txid)["confirmations"], -11)
+ # bob has no pending funds, since tx1, tx2, and tx3 are all conflicted
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0)
+ bob.invalidateblock(blk) # remove tx2_conflict
+ # bob should still have no pending funds because tx1 and tx3 are still conflicted, and tx2 has not been re-broadcast
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0)
+ assert_equal(len(bob.getrawmempool()), 1)
+ # check that tx3 is no longer block-conflicted
+ assert_equal(bob.gettransaction(tx3_txid)["confirmations"], 0)
+
+ bob.sendrawtransaction(raw_tx2)
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000"))
+
+ # create a conflict to previous tx (also spends unspents[2]), but don't broadcast, sends funds back to alice
+ raw_tx = alice.createrawtransaction(inputs=[unspents[2]], outputs=[{alice.getnewaddress() : 24.99}])
+ tx1_conflict_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex']
+
+ bob.sendrawtransaction(tx1_conflict_conflict) # kick tx1_conflict out of the mempool
+        bob.sendrawtransaction(raw_tx1)  # re-broadcast tx1 because it is no longer conflicted
+
+ # Now bob has no pending funds because tx1 and tx2 are spent by tx3, which hasn't been re-broadcast yet
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0)
+
+ bob.sendrawtransaction(raw_tx3)
+ assert_equal(len(bob.getrawmempool()), 4) # The mempool contains: tx1, tx2, tx1_conflict_conflict, tx3
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("49.99900000"))
+
+ # Clean up for next test
+ bob.reconsiderblock(blk)
+ assert_equal(alice.getbestblockhash(), bob.getbestblockhash())
+ self.sync_mempools()
+ self.generate(self.nodes[2], 1)
+
+ alice.unloadwallet()
+
+ def test_descendants_with_mempool_conflicts(self):
+ self.nodes[0].createwallet("alice_3")
+ alice = self.nodes[0].get_wallet_rpc("alice_3")
+
+ self.nodes[2].send(outputs=[{alice.getnewaddress() : 25} for _ in range(2)])
+ self.generate(self.nodes[2], 1)
+
+ self.nodes[1].createwallet("bob_1")
+ bob = self.nodes[1].get_wallet_rpc("bob_1")
+
+ self.nodes[2].createwallet("carol")
+ carol = self.nodes[2].get_wallet_rpc("carol")
+
+ self.log.info("Test a scenario where a transaction's parent has a mempool conflict")
+
+ unspents = alice.listunspent()
+ assert_equal(len(unspents), 2)
+ assert all([tx["amount"] == 25 for tx in unspents])
+
+ assert_equal(alice.getrawmempool(), [])
+
+ # Alice spends first utxo to bob in tx1
+ raw_tx = alice.createrawtransaction(inputs=[unspents[0]], outputs=[{bob.getnewaddress() : 24.9999}])
+ tx1 = alice.signrawtransactionwithwallet(raw_tx)['hex']
+ tx1_txid = alice.sendrawtransaction(tx1)
+
+ self.sync_mempools()
+
+ assert_equal(alice.getbalance(), 25)
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000"))
+
+ assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], [])
+
+ raw_tx = bob.createrawtransaction(inputs=[bob.listunspent(minconf=0)[0]], outputs=[{carol.getnewaddress() : 24.999}])
+ # Bob creates a child to tx1
+ tx1_child = bob.signrawtransactionwithwallet(raw_tx)['hex']
+ tx1_child_txid = bob.sendrawtransaction(tx1_child)
+
+ self.sync_mempools()
+
+ # Currently neither tx1 nor tx1_child should have any conflicts
+ assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], [])
+ assert_equal(bob.gettransaction(tx1_child_txid)["mempoolconflicts"], [])
+ assert tx1_txid in bob.getrawmempool()
+ assert tx1_child_txid in bob.getrawmempool()
+ assert_equal(len(bob.getrawmempool()), 2)
+
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0)
+ assert_equal(carol.getbalances()["mine"]["untrusted_pending"], Decimal("24.99900000"))
+
+ # Alice spends first unspent again, conflicting with tx1
+ raw_tx = alice.createrawtransaction(inputs=[unspents[0], unspents[1]], outputs=[{carol.getnewaddress() : 49.99}])
+ tx1_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex']
+ tx1_conflict_txid = alice.sendrawtransaction(tx1_conflict)
+
+ self.sync_mempools()
+
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0)
+ assert_equal(carol.getbalances()["mine"]["untrusted_pending"], Decimal("49.99000000"))
+
+ assert tx1_txid not in bob.getrawmempool()
+ assert tx1_child_txid not in bob.getrawmempool()
+ assert tx1_conflict_txid in bob.getrawmempool()
+ assert_equal(len(bob.getrawmempool()), 1)
+
+ # Now both tx1 and tx1_child are conflicted by tx1_conflict
+ assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], [tx1_conflict_txid])
+ assert_equal(bob.gettransaction(tx1_child_txid)["mempoolconflicts"], [tx1_conflict_txid])
+
+ # Now create a conflict to tx1_conflict, so that it gets kicked out of the mempool
+ raw_tx = alice.createrawtransaction(inputs=[unspents[1]], outputs=[{carol.getnewaddress() : 24.9895}])
+ tx1_conflict_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex']
+ tx1_conflict_conflict_txid = alice.sendrawtransaction(tx1_conflict_conflict)
+
+ self.sync_mempools()
+
+        # Now that tx1_conflict has been removed, both tx1 and tx1_child are no longer conflicted
+ assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], [])
+ assert_equal(bob.gettransaction(tx1_child_txid)["mempoolconflicts"], [])
+
+        # Both tx1 and tx1_child are still not in the mempool because they have not been re-broadcast
+ assert tx1_txid not in bob.getrawmempool()
+ assert tx1_child_txid not in bob.getrawmempool()
+ assert tx1_conflict_txid not in bob.getrawmempool()
+ assert tx1_conflict_conflict_txid in bob.getrawmempool()
+ assert_equal(len(bob.getrawmempool()), 1)
+
+ assert_equal(alice.getbalance(), 0)
+ assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0)
+ assert_equal(carol.getbalances()["mine"]["untrusted_pending"], Decimal("24.98950000"))
+
+ # Both tx1 and tx1_child can now be re-broadcasted
+ bob.sendrawtransaction(tx1)
+ bob.sendrawtransaction(tx1_child)
+ assert_equal(len(bob.getrawmempool()), 3)
+
+ alice.unloadwallet()
+ bob.unloadwallet()
+ carol.unloadwallet()
+
if __name__ == '__main__':
TxConflicts().main()
diff --git a/test/functional/wallet_createwalletdescriptor.py b/test/functional/wallet_createwalletdescriptor.py
new file mode 100755
index 0000000000..18e1703da3
--- /dev/null
+++ b/test/functional/wallet_createwalletdescriptor.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python3
+# Copyright (c) 2023 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test wallet createwalletdescriptor RPC."""
+
+from test_framework.descriptors import descsum_create
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+)
+from test_framework.wallet_util import WalletUnlock
+
+
+class WalletCreateDescriptorTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ self.add_wallet_options(parser, descriptors=True, legacy=False)
+
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def run_test(self):
+ self.test_basic()
+ self.test_imported_other_keys()
+ self.test_encrypted()
+
+ def test_basic(self):
+ def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ self.nodes[0].createwallet("blank", blank=True)
+ wallet = self.nodes[0].get_wallet_rpc("blank")
+
+ xpub_info = def_wallet.gethdkeys(private=True)
+ xpub = xpub_info[0]["xpub"]
+ xprv = xpub_info[0]["xprv"]
+ expected_descs = []
+ for desc in def_wallet.listdescriptors()["descriptors"]:
+ if desc["desc"].startswith("wpkh("):
+ expected_descs.append(desc["desc"])
+
+ assert_raises_rpc_error(-5, "Unable to determine which HD key to use from active descriptors. Please specify with 'hdkey'", wallet.createwalletdescriptor, "bech32")
+ assert_raises_rpc_error(-5, f"Private key for {xpub} is not known", wallet.createwalletdescriptor, type="bech32", hdkey=xpub)
+
+ self.log.info("Test createwalletdescriptor after importing active descriptor to blank wallet")
+ # Import one active descriptor
+ assert_equal(wallet.importdescriptors([{"desc": descsum_create(f"pkh({xprv}/44h/2h/0h/0/0/*)"), "timestamp": "now", "active": True}])[0]["success"], True)
+ assert_equal(len(wallet.listdescriptors()["descriptors"]), 1)
+ assert_equal(len(wallet.gethdkeys()), 1)
+
+ new_descs = wallet.createwalletdescriptor("bech32")["descs"]
+ assert_equal(len(new_descs), 2)
+ assert_equal(len(wallet.gethdkeys()), 1)
+ assert_equal(new_descs, expected_descs)
+
+ self.log.info("Test descriptor creation options")
+ old_descs = set([(d["desc"], d["active"], d["internal"]) for d in wallet.listdescriptors(private=True)["descriptors"]])
+ wallet.createwalletdescriptor(type="bech32m", internal=False)
+ curr_descs = set([(d["desc"], d["active"], d["internal"]) for d in wallet.listdescriptors(private=True)["descriptors"]])
+ new_descs = list(curr_descs - old_descs)
+ assert_equal(len(new_descs), 1)
+ assert_equal(len(wallet.gethdkeys()), 1)
+ assert_equal(new_descs[0][0], descsum_create(f"tr({xprv}/86h/1h/0h/0/*)"))
+ assert_equal(new_descs[0][1], True)
+ assert_equal(new_descs[0][2], False)
+
+ old_descs = curr_descs
+ wallet.createwalletdescriptor(type="bech32m", internal=True)
+ curr_descs = set([(d["desc"], d["active"], d["internal"]) for d in wallet.listdescriptors(private=True)["descriptors"]])
+ new_descs = list(curr_descs - old_descs)
+ assert_equal(len(new_descs), 1)
+ assert_equal(len(wallet.gethdkeys()), 1)
+ assert_equal(new_descs[0][0], descsum_create(f"tr({xprv}/86h/1h/0h/1/*)"))
+ assert_equal(new_descs[0][1], True)
+ assert_equal(new_descs[0][2], True)
+
+ def test_imported_other_keys(self):
+ self.log.info("Test createwalletdescriptor with multiple keys in active descriptors")
+ def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ self.nodes[0].createwallet("multiple_keys")
+ wallet = self.nodes[0].get_wallet_rpc("multiple_keys")
+
+ wallet_xpub = wallet.gethdkeys()[0]["xpub"]
+
+ xpub_info = def_wallet.gethdkeys(private=True)
+ xpub = xpub_info[0]["xpub"]
+ xprv = xpub_info[0]["xprv"]
+
+ assert_equal(wallet.importdescriptors([{"desc": descsum_create(f"wpkh({xprv}/0/0/*)"), "timestamp": "now", "active": True}])[0]["success"], True)
+ assert_equal(len(wallet.gethdkeys()), 2)
+
+ assert_raises_rpc_error(-5, "Unable to determine which HD key to use from active descriptors. Please specify with 'hdkey'", wallet.createwalletdescriptor, "bech32")
+ assert_raises_rpc_error(-4, "Descriptor already exists", wallet.createwalletdescriptor, type="bech32m", hdkey=wallet_xpub)
+ assert_raises_rpc_error(-5, "Unable to parse HD key. Please provide a valid xpub", wallet.createwalletdescriptor, type="bech32m", hdkey=xprv)
+
+ # Able to replace tr() descriptor with other hd key
+ wallet.createwalletdescriptor(type="bech32m", hdkey=xpub)
+
+ def test_encrypted(self):
+ self.log.info("Test createwalletdescriptor with encrypted wallets")
+ def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ self.nodes[0].createwallet("encrypted", blank=True, passphrase="pass")
+ wallet = self.nodes[0].get_wallet_rpc("encrypted")
+
+ xpub_info = def_wallet.gethdkeys(private=True)
+ xprv = xpub_info[0]["xprv"]
+
+ with WalletUnlock(wallet, "pass"):
+ assert_equal(wallet.importdescriptors([{"desc": descsum_create(f"wpkh({xprv}/0/0/*)"), "timestamp": "now", "active": True}])[0]["success"], True)
+ assert_equal(len(wallet.gethdkeys()), 1)
+
+ assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wallet.createwalletdescriptor, type="bech32m")
+
+ with WalletUnlock(wallet, "pass"):
+ wallet.createwalletdescriptor(type="bech32m")
+
+
+
+if __name__ == '__main__':
+ WalletCreateDescriptorTest().main()
diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py
index 083da3d0b7..71c883f166 100755
--- a/test/functional/wallet_fundrawtransaction.py
+++ b/test/functional/wallet_fundrawtransaction.py
@@ -45,9 +45,8 @@ class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
- # This test isn't testing tx relay. Set whitelist on the peers for
- # instant tx relay.
- self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.rpc_timeout = 90 # to prevent timeouts in `test_transaction_too_large`
def skip_test_if_missing_module(self):
diff --git a/test/functional/wallet_gethdkeys.py b/test/functional/wallet_gethdkeys.py
new file mode 100755
index 0000000000..f09b8c875a
--- /dev/null
+++ b/test/functional/wallet_gethdkeys.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+# Copyright (c) 2023 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test wallet gethdkeys RPC."""
+
+from test_framework.descriptors import descsum_create
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+)
+from test_framework.wallet_util import WalletUnlock
+
+
+class WalletGetHDKeyTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ self.add_wallet_options(parser, descriptors=True, legacy=False)
+
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def run_test(self):
+ self.test_basic_gethdkeys()
+ self.test_ranged_imports()
+ self.test_lone_key_imports()
+ self.test_ranged_multisig()
+ self.test_mixed_multisig()
+
+ def test_basic_gethdkeys(self):
+ self.log.info("Test gethdkeys basics")
+ self.nodes[0].createwallet("basic")
+ wallet = self.nodes[0].get_wallet_rpc("basic")
+ xpub_info = wallet.gethdkeys()
+ assert_equal(len(xpub_info), 1)
+ assert_equal(xpub_info[0]["has_private"], True)
+
+ assert "xprv" not in xpub_info[0]
+ xpub = xpub_info[0]["xpub"]
+
+ xpub_info = wallet.gethdkeys(private=True)
+ xprv = xpub_info[0]["xprv"]
+ assert_equal(xpub_info[0]["xpub"], xpub)
+ assert_equal(xpub_info[0]["has_private"], True)
+
+ descs = wallet.listdescriptors(True)
+ for desc in descs["descriptors"]:
+ assert xprv in desc["desc"]
+
+ self.log.info("HD pubkey can be retrieved from encrypted wallets")
+ prev_xprv = xprv
+ wallet.encryptwallet("pass")
+ # HD key is rotated on encryption, there should now be 2 HD keys
+ assert_equal(len(wallet.gethdkeys()), 2)
+ # New key is active, should be able to get only that one and its descriptors
+ xpub_info = wallet.gethdkeys(active_only=True)
+ assert_equal(len(xpub_info), 1)
+ assert xpub_info[0]["xpub"] != xpub
+ assert "xprv" not in xpub_info[0]
+ assert_equal(xpub_info[0]["has_private"], True)
+
+ self.log.info("HD privkey can be retrieved from encrypted wallets")
+ assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first", wallet.gethdkeys, private=True)
+ with WalletUnlock(wallet, "pass"):
+ xpub_info = wallet.gethdkeys(active_only=True, private=True)[0]
+ assert xpub_info["xprv"] != xprv
+ for desc in wallet.listdescriptors(True)["descriptors"]:
+ if desc["active"]:
+ # After encrypting, HD key was rotated and should appear in all active descriptors
+ assert xpub_info["xprv"] in desc["desc"]
+ else:
+ # Inactive descriptors should have the previous HD key
+ assert prev_xprv in desc["desc"]
+
+ def test_ranged_imports(self):
+ self.log.info("Keys of imported ranged descriptors appear in gethdkeys")
+ def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ self.nodes[0].createwallet("imports")
+ wallet = self.nodes[0].get_wallet_rpc("imports")
+
+ xpub_info = wallet.gethdkeys()
+ assert_equal(len(xpub_info), 1)
+ active_xpub = xpub_info[0]["xpub"]
+
+ import_xpub = def_wallet.gethdkeys(active_only=True)[0]["xpub"]
+ desc_import = def_wallet.listdescriptors(True)["descriptors"]
+ for desc in desc_import:
+ desc["active"] = False
+ wallet.importdescriptors(desc_import)
+ assert_equal(wallet.gethdkeys(active_only=True), xpub_info)
+
+ xpub_info = wallet.gethdkeys()
+ assert_equal(len(xpub_info), 2)
+ for x in xpub_info:
+ if x["xpub"] == active_xpub:
+ for desc in x["descriptors"]:
+ assert_equal(desc["active"], True)
+ elif x["xpub"] == import_xpub:
+ for desc in x["descriptors"]:
+ assert_equal(desc["active"], False)
+ else:
+ assert False
+
+
+ def test_lone_key_imports(self):
+ self.log.info("Non-HD keys do not appear in gethdkeys")
+ self.nodes[0].createwallet("lonekey", blank=True)
+ wallet = self.nodes[0].get_wallet_rpc("lonekey")
+
+ assert_equal(wallet.gethdkeys(), [])
+ wallet.importdescriptors([{"desc": descsum_create("wpkh(cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh)"), "timestamp": "now"}])
+ assert_equal(wallet.gethdkeys(), [])
+
+ self.log.info("HD keys of non-ranged descriptors should appear in gethdkeys")
+ def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ xpub_info = def_wallet.gethdkeys(private=True)
+ xpub = xpub_info[0]["xpub"]
+ xprv = xpub_info[0]["xprv"]
+ prv_desc = descsum_create(f"wpkh({xprv})")
+ pub_desc = descsum_create(f"wpkh({xpub})")
+ assert_equal(wallet.importdescriptors([{"desc": prv_desc, "timestamp": "now"}])[0]["success"], True)
+ xpub_info = wallet.gethdkeys()
+ assert_equal(len(xpub_info), 1)
+ assert_equal(xpub_info[0]["xpub"], xpub)
+ assert_equal(len(xpub_info[0]["descriptors"]), 1)
+ assert_equal(xpub_info[0]["descriptors"][0]["desc"], pub_desc)
+ assert_equal(xpub_info[0]["descriptors"][0]["active"], False)
+
+ def test_ranged_multisig(self):
+ self.log.info("HD keys of a multisig appear in gethdkeys")
+ def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ self.nodes[0].createwallet("ranged_multisig")
+ wallet = self.nodes[0].get_wallet_rpc("ranged_multisig")
+
+ xpub1 = wallet.gethdkeys()[0]["xpub"]
+ xprv1 = wallet.gethdkeys(private=True)[0]["xprv"]
+ xpub2 = def_wallet.gethdkeys()[0]["xpub"]
+
+ prv_multi_desc = descsum_create(f"wsh(multi(2,{xprv1}/*,{xpub2}/*))")
+ pub_multi_desc = descsum_create(f"wsh(multi(2,{xpub1}/*,{xpub2}/*))")
+ assert_equal(wallet.importdescriptors([{"desc": prv_multi_desc, "timestamp": "now"}])[0]["success"], True)
+
+ xpub_info = wallet.gethdkeys()
+ assert_equal(len(xpub_info), 2)
+ for x in xpub_info:
+ if x["xpub"] == xpub1:
+ found_desc = next((d for d in xpub_info[0]["descriptors"] if d["desc"] == pub_multi_desc), None)
+ assert found_desc is not None
+ assert_equal(found_desc["active"], False)
+ elif x["xpub"] == xpub2:
+ assert_equal(len(x["descriptors"]), 1)
+ assert_equal(x["descriptors"][0]["desc"], pub_multi_desc)
+ assert_equal(x["descriptors"][0]["active"], False)
+ else:
+ assert False
+
+ def test_mixed_multisig(self):
+ self.log.info("Non-HD keys of a multisig do not appear in gethdkeys")
+ def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ self.nodes[0].createwallet("single_multisig")
+ wallet = self.nodes[0].get_wallet_rpc("single_multisig")
+
+ xpub = wallet.gethdkeys()[0]["xpub"]
+ xprv = wallet.gethdkeys(private=True)[0]["xprv"]
+ pub = def_wallet.getaddressinfo(def_wallet.getnewaddress())["pubkey"]
+
+ prv_multi_desc = descsum_create(f"wsh(multi(2,{xprv},{pub}))")
+ pub_multi_desc = descsum_create(f"wsh(multi(2,{xpub},{pub}))")
+ import_res = wallet.importdescriptors([{"desc": prv_multi_desc, "timestamp": "now"}])
+ assert_equal(import_res[0]["success"], True)
+
+ xpub_info = wallet.gethdkeys()
+ assert_equal(len(xpub_info), 1)
+ assert_equal(xpub_info[0]["xpub"], xpub)
+ found_desc = next((d for d in xpub_info[0]["descriptors"] if d["desc"] == pub_multi_desc), None)
+ assert found_desc is not None
+ assert_equal(found_desc["active"], False)
+
+
+if __name__ == '__main__':
+ WalletGetHDKeyTest().main()
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index bdb9081261..26477131cf 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -22,6 +22,8 @@ class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 5
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [
[],
[],
@@ -31,7 +33,6 @@ class WalletGroupTest(BitcoinTestFramework):
]
for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1") # whitelist peers to speed up tx relay / mempool sync
args.append(f"-paytxfee={20 * 1e3 / 1e8}") # apply feerate of 20 sats/vB across all nodes
self.rpc_timeout = 480
@@ -41,11 +42,6 @@ class WalletGroupTest(BitcoinTestFramework):
def run_test(self):
self.log.info("Setting up")
- # To take full use of immediate tx relay, all nodes need to be reachable
- # via inbound peers, i.e. connect first to last to close the circle
- # (the default test network topology looks like this:
- # node0 <-- node1 <-- node2 <-- node3 <-- node4 <-- node5)
- self.connect_nodes(0, self.num_nodes - 1)
# Mine some coins
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py
index 0f4b7cfcb1..52161043ea 100755
--- a/test/functional/wallet_hd.py
+++ b/test/functional/wallet_hd.py
@@ -23,8 +23,7 @@ class WalletHDTest(BitcoinTestFramework):
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
# whitelist peers to speed up tx relay / mempool sync
- for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1")
+ self.noban_tx_relay = True
self.supports_cli = False
diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py
index e647fb2d5c..2a9435b370 100755
--- a/test/functional/wallet_import_rescan.py
+++ b/test/functional/wallet_import_rescan.py
@@ -160,6 +160,8 @@ class ImportRescanTest(BitcoinTestFramework):
self.num_nodes = 2 + len(IMPORT_NODES)
self.supports_cli = False
self.rpc_timeout = 120
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -177,7 +179,7 @@ class ImportRescanTest(BitcoinTestFramework):
self.import_deterministic_coinbase_privkeys()
self.stop_nodes()
- self.start_nodes(extra_args=[["-whitelist=noban@127.0.0.1"]] * self.num_nodes)
+ self.start_nodes()
for i in range(1, self.num_nodes):
self.connect_nodes(i, 0)
diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py
index 1f1f92589c..f9d05a2fe4 100755
--- a/test/functional/wallet_importdescriptors.py
+++ b/test/functional/wallet_importdescriptors.py
@@ -36,12 +36,11 @@ class ImportDescriptorsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [["-addresstype=legacy"],
["-addresstype=bech32", "-keypool=5"]
]
- # whitelist peers to speed up tx relay / mempool sync
- for args in self.extra_args:
- args.append("-whitelist=noban@127.0.0.1")
self.setup_clean_chain = True
self.wallet_names = []
@@ -689,7 +688,7 @@ class ImportDescriptorsTest(BitcoinTestFramework):
encrypted_wallet.walletpassphrase("passphrase", 99999)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread:
- with self.nodes[0].assert_debug_log(expected_msgs=["Rescan started from block 0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206... (slow variant inspecting all blocks)"], timeout=5):
+ with self.nodes[0].assert_debug_log(expected_msgs=["Rescan started from block 0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206... (slow variant inspecting all blocks)"], timeout=10):
importing = thread.submit(encrypted_wallet.importdescriptors, requests=[descriptor])
# Set the passphrase timeout to 1 to test that the wallet remains unlocked during the rescan
diff --git a/test/functional/wallet_importprunedfunds.py b/test/functional/wallet_importprunedfunds.py
index 5fe7c4b591..b3ae22cc44 100755
--- a/test/functional/wallet_importprunedfunds.py
+++ b/test/functional/wallet_importprunedfunds.py
@@ -120,7 +120,7 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
assert_equal(address_info['ismine'], True)
# Remove transactions
- assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", w1.removeprunedfunds, txnid1)
+ assert_raises_rpc_error(-4, f'Transaction {txnid1} does not belong to this wallet', w1.removeprunedfunds, txnid1)
assert not [tx for tx in w1.listtransactions(include_watchonly=True) if tx['txid'] == txnid1]
wwatch.removeprunedfunds(txnid2)
diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py
index d2341fb12e..6ed8572347 100755
--- a/test/functional/wallet_keypool.py
+++ b/test/functional/wallet_keypool.py
@@ -103,11 +103,18 @@ class KeyPoolTest(BitcoinTestFramework):
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
- addr = set()
+ # remember keypool sizes
+ wi = nodes[0].getwalletinfo()
+ kp_size_before = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
# the next one should fail
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
+ # check that keypool sizes did not change
+ wi = nodes[0].getwalletinfo()
+ kp_size_after = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
+ assert_equal(kp_size_before, kp_size_after)
# drain the external keys
+ addr = set()
addr.add(nodes[0].getnewaddress(address_type="bech32"))
addr.add(nodes[0].getnewaddress(address_type="bech32"))
addr.add(nodes[0].getnewaddress(address_type="bech32"))
@@ -115,8 +122,15 @@ class KeyPoolTest(BitcoinTestFramework):
addr.add(nodes[0].getnewaddress(address_type="bech32"))
addr.add(nodes[0].getnewaddress(address_type="bech32"))
assert len(addr) == 6
+ # remember keypool sizes
+ wi = nodes[0].getwalletinfo()
+ kp_size_before = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
# the next one should fail
assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
+ # check that keypool sizes did not change
+ wi = nodes[0].getwalletinfo()
+ kp_size_after = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
+ assert_equal(kp_size_before, kp_size_after)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py
index 48180e8294..e1bd85d8a9 100755
--- a/test/functional/wallet_keypool_topup.py
+++ b/test/functional/wallet_keypool_topup.py
@@ -25,8 +25,10 @@ class KeypoolRestoreTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 4
- self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
+ self.num_nodes = 5
+ self.extra_args = [[]]
+ for _ in range(self.num_nodes - 1):
+ self.extra_args.append(['-keypool=100'])
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -40,12 +42,13 @@ class KeypoolRestoreTest(BitcoinTestFramework):
self.stop_node(1)
shutil.copyfile(wallet_path, wallet_backup_path)
self.start_node(1, self.extra_args[1])
- self.connect_nodes(0, 1)
- self.connect_nodes(0, 2)
- self.connect_nodes(0, 3)
-
- for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
+ for i in [1, 2, 3, 4]:
+ self.connect_nodes(0, i)
+ output_types = ["legacy", "p2sh-segwit", "bech32"]
+ if self.options.descriptors:
+ output_types.append("bech32m")
+ for i, output_type in enumerate(output_types):
self.log.info("Generate keys for wallet with address type: {}".format(output_type))
idx = i+1
for _ in range(90):
@@ -59,9 +62,10 @@ class KeypoolRestoreTest(BitcoinTestFramework):
assert not address_details["isscript"] and not address_details["iswitness"]
elif i == 1:
assert address_details["isscript"] and not address_details["iswitness"]
- else:
+ elif i == 2:
assert not address_details["isscript"] and address_details["iswitness"]
-
+ elif i == 3:
+ assert address_details["isscript"] and address_details["iswitness"]
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
@@ -87,6 +91,8 @@ class KeypoolRestoreTest(BitcoinTestFramework):
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49h/1h/0h/0/110")
elif output_type == 'bech32':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84h/1h/0h/0/110")
+ elif output_type == 'bech32m':
+ assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/86h/1h/0h/0/110")
else:
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'")
diff --git a/test/functional/wallet_listreceivedby.py b/test/functional/wallet_listreceivedby.py
index 8ec21484d1..d0f1336a5e 100755
--- a/test/functional/wallet_listreceivedby.py
+++ b/test/functional/wallet_listreceivedby.py
@@ -22,7 +22,7 @@ class ReceivedByTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# whitelist peers to speed up tx relay / mempool sync
- self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
+ self.noban_tx_relay = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py
index a19a3ac2cb..fd586d546e 100755
--- a/test/functional/wallet_listsinceblock.py
+++ b/test/functional/wallet_listsinceblock.py
@@ -26,7 +26,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
self.num_nodes = 4
self.setup_clean_chain = True
# whitelist peers to speed up tx relay / mempool sync
- self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
+ self.noban_tx_relay = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py
index 064ce12108..c820eaa6f6 100755
--- a/test/functional/wallet_listtransactions.py
+++ b/test/functional/wallet_listtransactions.py
@@ -26,9 +26,9 @@ class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
- # This test isn't testing txn relay/timing, so set whitelist on the
- # peers for instant txn relay. This speeds up the test run time 2-3x.
- self.extra_args = [["-whitelist=noban@127.0.0.1", "-walletrbf=0"]] * self.num_nodes
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
+ self.extra_args = [["-walletrbf=0"]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py
index f9919716be..890b6a5c1b 100755
--- a/test/functional/wallet_migration.py
+++ b/test/functional/wallet_migration.py
@@ -529,11 +529,20 @@ class WalletMigrationTest(BitcoinTestFramework):
self.log.info("Test migration of the wallet named as the empty string")
wallet = self.create_legacy_wallet("")
- self.migrate_wallet(wallet)
+ # Set time to verify backup existence later
+ curr_time = int(time.time())
+ wallet.setmocktime(curr_time)
+
+ res = self.migrate_wallet(wallet)
info = wallet.getwalletinfo()
assert_equal(info["descriptors"], True)
assert_equal(info["format"], "sqlite")
+ # Check backup existence and its non-empty wallet filename
+ backup_path = self.nodes[0].wallets_path / f'default_wallet_{curr_time}.legacy.bak'
+ assert backup_path.exists()
+ assert_equal(str(backup_path), res['backup_path'])
+
def test_direct_file(self):
self.log.info("Test migration of a wallet that is not in a wallet directory")
wallet = self.create_legacy_wallet("plainfile")
diff --git a/test/functional/wallet_reorgsrestore.py b/test/functional/wallet_reorgsrestore.py
index 86a2905c72..4271f3e481 100755
--- a/test/functional/wallet_reorgsrestore.py
+++ b/test/functional/wallet_reorgsrestore.py
@@ -45,6 +45,7 @@ class ReorgsRestoreTest(BitcoinTestFramework):
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
tx = self.nodes[0].gettransaction(txid)
self.generate(self.nodes[0], 4, sync_fun=self.no_op)
+ self.sync_blocks([self.nodes[0], self.nodes[2]])
tx_before_reorg = self.nodes[0].gettransaction(txid)
assert_equal(tx_before_reorg["confirmations"], 4)
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index 6ce2a56bfc..0a0a8dba0d 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -9,10 +9,6 @@ from itertools import product
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create
-from test_framework.messages import (
- ser_compact_size,
- WITNESS_SCALE_FACTOR,
-)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@@ -21,7 +17,10 @@ from test_framework.util import (
assert_raises_rpc_error,
count_bytes,
)
-from test_framework.wallet_util import generate_keypair
+from test_framework.wallet_util import (
+ calculate_input_weight,
+ generate_keypair,
+)
class WalletSendTest(BitcoinTestFramework):
@@ -30,10 +29,11 @@ class WalletSendTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
- # whitelist all peers to speed up tx relay / mempool sync
+ # whitelist peers to speed up tx relay / mempool sync
+ self.noban_tx_relay = True
self.extra_args = [
- ["-whitelist=127.0.0.1","-walletrbf=1"],
- ["-whitelist=127.0.0.1","-walletrbf=1"],
+ ["-walletrbf=1"],
+ ["-walletrbf=1"]
]
getcontext().prec = 8 # Satoshi precision for Decimal
@@ -542,12 +542,9 @@ class WalletSendTest(BitcoinTestFramework):
input_idx = i
break
psbt_in = dec["inputs"][input_idx]
- # Calculate the input weight
- # (prevout + sequence + length of scriptSig + scriptsig + 1 byte buffer) * WITNESS_SCALE_FACTOR + num scriptWitness stack items + (length of stack item + stack item) * N stack items + 1 byte buffer
- len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0
- len_scriptsig += len(ser_compact_size(len_scriptsig)) + 1
- len_scriptwitness = (sum([(len(x) // 2) + len(ser_compact_size(len(x) // 2)) for x in psbt_in["final_scriptwitness"]]) + len(psbt_in["final_scriptwitness"]) + 1) if "final_scriptwitness" in psbt_in else 0
- input_weight = ((40 + len_scriptsig) * WITNESS_SCALE_FACTOR) + len_scriptwitness
+ scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else ""
+ witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None
+ input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex)
# Input weight error conditions
assert_raises_rpc_error(
@@ -558,6 +555,7 @@ class WalletSendTest(BitcoinTestFramework):
options={"inputs": [ext_utxo], "input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]}
)
+ target_fee_rate_sat_vb = 10
# Funding should also work when input weights are provided
res = self.test_send(
from_wallet=ext_wallet,
@@ -567,14 +565,17 @@ class WalletSendTest(BitcoinTestFramework):
add_inputs=True,
psbt=True,
include_watching=True,
- fee_rate=10
+ fee_rate=target_fee_rate_sat_vb
)
signed = ext_wallet.walletprocesspsbt(res["psbt"])
signed = ext_fund.walletprocesspsbt(res["psbt"])
assert signed["complete"]
testres = self.nodes[0].testmempoolaccept([signed["hex"]])[0]
assert_equal(testres["allowed"], True)
- assert_fee_amount(testres["fees"]["base"], testres["vsize"], Decimal(0.0001))
+ actual_fee_rate_sat_vb = Decimal(testres["fees"]["base"]) * Decimal(1e8) / Decimal(testres["vsize"])
+ # Due to ECDSA signatures not always being the same length, the actual fee rate may be slightly different
+ # but rounded to nearest integer, it should be the same as the target fee rate
+ assert_equal(round(actual_fee_rate_sat_vb), target_fee_rate_sat_vb)
if __name__ == '__main__':
WalletSendTest().main()
diff --git a/test/functional/wallet_signer.py b/test/functional/wallet_signer.py
index 32a1887153..abfc3c1ba1 100755
--- a/test/functional/wallet_signer.py
+++ b/test/functional/wallet_signer.py
@@ -130,8 +130,9 @@ class WalletSignerTest(BitcoinTestFramework):
assert_equal(address_info['hdkeypath'], "m/86h/1h/0h/0/0")
self.log.info('Test walletdisplayaddress')
- result = hww.walletdisplayaddress(address1)
- assert_equal(result, {"address": address1})
+ for address in [address1, address2, address3]:
+ result = hww.walletdisplayaddress(address)
+ assert_equal(result, {"address": address})
# Handle error thrown by script
self.set_mock_result(self.nodes[1], "2")
@@ -140,6 +141,13 @@ class WalletSignerTest(BitcoinTestFramework):
)
self.clear_mock_result(self.nodes[1])
+ # Returned address MUST match:
+ address_fail = hww.getnewaddress(address_type="bech32")
+ assert_equal(address_fail, "bcrt1ql7zg7ukh3dwr25ex2zn9jse926f27xy2jz58tm")
+ assert_raises_rpc_error(-1, 'Signer echoed unexpected address wrong_address',
+ hww.walletdisplayaddress, address_fail
+ )
+
self.log.info('Prepare mock PSBT')
self.nodes[0].sendtoaddress(address4, 1)
self.generate(self.nodes[0], 1)
diff --git a/test/functional/wallet_signrawtransactionwithwallet.py b/test/functional/wallet_signrawtransactionwithwallet.py
index b0517f951d..612a2542e7 100755
--- a/test/functional/wallet_signrawtransactionwithwallet.py
+++ b/test/functional/wallet_signrawtransactionwithwallet.py
@@ -55,7 +55,7 @@ class SignRawTransactionWithWalletTest(BitcoinTestFramework):
def test_with_invalid_sighashtype(self):
self.log.info("Test signrawtransactionwithwallet raises if an invalid sighashtype is passed")
- assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[0].signrawtransactionwithwallet, hexstring=RAW_TX, sighashtype="all")
+ assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].signrawtransactionwithwallet, hexstring=RAW_TX, sighashtype="all")
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py
index 4e24c07699..a635175e7c 100755
--- a/test/fuzz/test_runner.py
+++ b/test/fuzz/test_runner.py
@@ -11,13 +11,15 @@ import argparse
import configparser
import logging
import os
+import platform
+import random
import subprocess
import sys
def get_fuzz_env(*, target, source_dir):
symbolizer = os.environ.get('LLVM_SYMBOLIZER_PATH', "/usr/bin/llvm-symbolizer")
- return {
+ fuzz_env = {
'FUZZ': target,
'UBSAN_OPTIONS':
f'suppressions={source_dir}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1',
@@ -26,6 +28,10 @@ def get_fuzz_env(*, target, source_dir):
'ASAN_SYMBOLIZER_PATH':symbolizer,
'MSAN_SYMBOLIZER_PATH':symbolizer,
}
+ if platform.system() == "Windows":
+ # On Windows, `env` option must include valid `SystemRoot`.
+ fuzz_env = {**fuzz_env, 'SystemRoot': os.environ.get('SystemRoot')}
+ return fuzz_env
def main():
@@ -103,8 +109,13 @@ def main():
logging.error("Must have fuzz executable built")
sys.exit(1)
+ fuzz_bin=os.getenv("BITCOINFUZZ", default=os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz'))
+
# Build list of tests
- test_list_all = parse_test_list(fuzz_bin=os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz'))
+ test_list_all = parse_test_list(
+ fuzz_bin=fuzz_bin,
+ source_dir=config['environment']['SRCDIR'],
+ )
if not test_list_all:
logging.error("No fuzz targets found")
@@ -147,7 +158,7 @@ def main():
try:
help_output = subprocess.run(
args=[
- os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz'),
+ fuzz_bin,
'-help=1',
],
env=get_fuzz_env(target=test_list_selection[0], source_dir=config['environment']['SRCDIR']),
@@ -169,7 +180,7 @@ def main():
return generate_corpus(
fuzz_pool=fuzz_pool,
src_dir=config['environment']['SRCDIR'],
- build_dir=config["environment"]["BUILDDIR"],
+ fuzz_bin=fuzz_bin,
corpus_dir=args.corpus_dir,
targets=test_list_selection,
)
@@ -180,7 +191,7 @@ def main():
corpus=args.corpus_dir,
test_list=test_list_selection,
src_dir=config['environment']['SRCDIR'],
- build_dir=config["environment"]["BUILDDIR"],
+ fuzz_bin=fuzz_bin,
merge_dirs=[Path(m_dir) for m_dir in args.m_dir],
)
return
@@ -190,7 +201,7 @@ def main():
corpus=args.corpus_dir,
test_list=test_list_selection,
src_dir=config['environment']['SRCDIR'],
- build_dir=config["environment"]["BUILDDIR"],
+ fuzz_bin=fuzz_bin,
using_libfuzzer=using_libfuzzer,
use_valgrind=args.valgrind,
empty_min_time=args.empty_min_time,
@@ -233,7 +244,7 @@ def transform_rpc_target(targets, src_dir):
return targets
-def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets):
+def generate_corpus(*, fuzz_pool, src_dir, fuzz_bin, corpus_dir, targets):
"""Generates new corpus.
Run {targets} without input, and outputs the generated corpus to
@@ -264,9 +275,13 @@ def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets):
for target, t_env in targets:
target_corpus_dir = corpus_dir / target
os.makedirs(target_corpus_dir, exist_ok=True)
+ use_value_profile = int(random.random() < .3)
command = [
- os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'),
- "-runs=100000",
+ fuzz_bin,
+ "-rss_limit_mb=8000",
+ "-max_total_time=6000",
+ "-reload=0",
+ f"-use_value_profile={use_value_profile}",
target_corpus_dir,
]
futures.append(fuzz_pool.submit(job, command, target, t_env))
@@ -275,12 +290,12 @@ def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets):
future.result()
-def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dirs):
+def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, fuzz_bin, merge_dirs):
logging.info(f"Merge the inputs from the passed dir into the corpus_dir. Passed dirs {merge_dirs}")
jobs = []
for t in test_list:
args = [
- os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'),
+ fuzz_bin,
'-rss_limit_mb=8000',
'-set_cover_merge=1',
# set_cover_merge is used instead of -merge=1 to reduce the overall
@@ -317,13 +332,13 @@ def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dirs
future.result()
-def run_once(*, fuzz_pool, corpus, test_list, src_dir, build_dir, using_libfuzzer, use_valgrind, empty_min_time):
+def run_once(*, fuzz_pool, corpus, test_list, src_dir, fuzz_bin, using_libfuzzer, use_valgrind, empty_min_time):
jobs = []
for t in test_list:
corpus_path = corpus / t
os.makedirs(corpus_path, exist_ok=True)
args = [
- os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'),
+ fuzz_bin,
]
empty_dir = not any(corpus_path.iterdir())
if using_libfuzzer:
@@ -378,11 +393,12 @@ def run_once(*, fuzz_pool, corpus, test_list, src_dir, build_dir, using_libfuzze
print(f"{t}{s}")
-def parse_test_list(*, fuzz_bin):
+def parse_test_list(*, fuzz_bin, source_dir):
test_list_all = subprocess.run(
fuzz_bin,
env={
- 'PRINT_ALL_FUZZ_TARGETS_AND_ABORT': ''
+ 'PRINT_ALL_FUZZ_TARGETS_AND_ABORT': '',
+ **get_fuzz_env(target="", source_dir=source_dir)
},
stdout=subprocess.PIPE,
text=True,
diff --git a/test/lint/README.md b/test/lint/README.md
index 1fba41d9ea..13c2099808 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -16,25 +16,29 @@ result is cached and it prevents issues when the image changes.
test runner
===========
-To run all the lint checks in the test runner outside the docker, use:
+To run all the lint checks in the test runner outside the docker you first need
+to install the rust toolchain using your package manager of choice or
+[rustup](https://www.rust-lang.org/tools/install).
+
+Then you can use:
```sh
-( cd ./test/lint/test_runner/ && cargo fmt && cargo clippy && cargo run )
+( cd ./test/lint/test_runner/ && cargo fmt && cargo clippy && RUST_BACKTRACE=1 cargo run )
```
#### Dependencies
| Lint test | Dependency |
|-----------|:----------:|
-| [`lint-python.py`](lint/lint-python.py) | [flake8](https://gitlab.com/pycqa/flake8)
-| [`lint-python.py`](lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF)
-| [`lint-python.py`](lint/lint-python.py) | [mypy](https://github.com/python/mypy)
-| [`lint-python.py`](lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq)
-| [`lint-python-dead-code.py`](lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture)
-| [`lint-shell.py`](lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck)
-| [`lint-spelling.py`](lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell)
+| [`lint-python.py`](/test/lint/lint-python.py) | [flake8](https://github.com/PyCQA/flake8)
+| [`lint-python.py`](/test/lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF)
+| [`lint-python.py`](/test/lint/lint-python.py) | [mypy](https://github.com/python/mypy)
+| [`lint-python.py`](/test/lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq)
+| [`lint-python-dead-code.py`](/test/lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture)
+| [`lint-shell.py`](/test/lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck)
+| [`lint-spelling.py`](/test/lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell)
-In use versions and install instructions are available in the [CI setup](../ci/lint/04_install.sh).
+In use versions and install instructions are available in the [CI setup](../../ci/lint/04_install.sh).
Please be aware that on Linux distributions all dependencies are usually available as packages, but could be outdated.
@@ -83,3 +87,7 @@ To do so, add the upstream repository as remote:
```
git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git
```
+
+lint_ignore_dirs.py
+===================
+Add list of common directories to ignore when running tests
diff --git a/test/lint/commit-script-check.sh b/test/lint/commit-script-check.sh
index 55c9528dea..fe845ed19e 100755
--- a/test/lint/commit-script-check.sh
+++ b/test/lint/commit-script-check.sh
@@ -22,6 +22,11 @@ if ! sed --help 2>&1 | grep -q 'GNU'; then
exit 1;
fi
+if ! grep --help 2>&1 | grep -q 'GNU'; then
+ echo "Error: the installed grep package is not compatible. Please make sure you have GNU grep installed in your system.";
+ exit 1;
+fi
+
RET=0
PREV_BRANCH=$(git name-rev --name-only HEAD)
PREV_HEAD=$(git rev-parse HEAD)
diff --git a/test/lint/lint-git-commit-check.py b/test/lint/lint-git-commit-check.py
index 5897a17e70..5dc30cc755 100755
--- a/test/lint/lint-git-commit-check.py
+++ b/test/lint/lint-git-commit-check.py
@@ -23,31 +23,18 @@ def parse_args():
""",
epilog=f"""
You can manually set the commit-range with the COMMIT_RANGE
- environment variable (e.g. "COMMIT_RANGE='47ba2c3...ee50c9e'
- {sys.argv[0]}"). Defaults to current merge base when neither
- prev-commits nor the environment variable is set.
+ environment variable (e.g. "COMMIT_RANGE='HEAD~n..HEAD'
+ {sys.argv[0]}") for the last 'n' commits.
""")
-
- parser.add_argument("--prev-commits", "-p", required=False, help="The previous n commits to check")
-
return parser.parse_args()
def main():
- args = parse_args()
+ parse_args()
exit_code = 0
- if not os.getenv("COMMIT_RANGE"):
- if args.prev_commits:
- commit_range = "HEAD~" + args.prev_commits + "...HEAD"
- else:
- # This assumes that the target branch of the pull request will be master.
- merge_base = check_output(["git", "merge-base", "HEAD", "master"], text=True, encoding="utf8").rstrip("\n")
- commit_range = merge_base + "..HEAD"
- else:
- commit_range = os.getenv("COMMIT_RANGE")
- if commit_range == "SKIP_EMPTY_NOT_A_PR":
- sys.exit(0)
+ assert os.getenv("COMMIT_RANGE") # E.g. COMMIT_RANGE='HEAD~n..HEAD'
+ commit_range = os.getenv("COMMIT_RANGE")
commit_hashes = check_output(["git", "log", commit_range, "--format=%H"], text=True, encoding="utf8").splitlines()
diff --git a/test/lint/lint-include-guards.py b/test/lint/lint-include-guards.py
index 291e528c1d..77af05c1c2 100755
--- a/test/lint/lint-include-guards.py
+++ b/test/lint/lint-include-guards.py
@@ -12,19 +12,17 @@ import re
import sys
from subprocess import check_output
+from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES
+
HEADER_ID_PREFIX = 'BITCOIN_'
HEADER_ID_SUFFIX = '_H'
EXCLUDE_FILES_WITH_PREFIX = ['contrib/devtools/bitcoin-tidy',
'src/crypto/ctaes',
- 'src/leveldb',
- 'src/crc32c',
- 'src/secp256k1',
- 'src/minisketch',
'src/tinyformat.h',
'src/bench/nanobench.h',
- 'src/test/fuzz/FuzzedDataProvider.h']
+ 'src/test/fuzz/FuzzedDataProvider.h'] + SHARED_EXCLUDED_SUBTREES
def _get_header_file_lst() -> list[str]:
diff --git a/test/lint/lint-includes.py b/test/lint/lint-includes.py
index 6386a92c0f..90884299d5 100755
--- a/test/lint/lint-includes.py
+++ b/test/lint/lint-includes.py
@@ -14,13 +14,11 @@ import sys
from subprocess import check_output, CalledProcessError
+from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES
+
EXCLUDED_DIRS = ["contrib/devtools/bitcoin-tidy/",
- "src/leveldb/",
- "src/crc32c/",
- "src/secp256k1/",
- "src/minisketch/",
- ]
+ ] + SHARED_EXCLUDED_SUBTREES
EXPECTED_BOOST_INCLUDES = ["boost/date_time/posix_time/posix_time.hpp",
"boost/multi_index/detail/hash_index_iterator.hpp",
@@ -32,7 +30,6 @@ EXPECTED_BOOST_INCLUDES = ["boost/date_time/posix_time/posix_time.hpp",
"boost/multi_index/tag.hpp",
"boost/multi_index_container.hpp",
"boost/operators.hpp",
- "boost/process.hpp",
"boost/signals2/connection.hpp",
"boost/signals2/optional_last_value.hpp",
"boost/signals2/signal.hpp",
diff --git a/test/lint/lint-spelling.py b/test/lint/lint-spelling.py
index ac0bddeaa6..3e578b218f 100755
--- a/test/lint/lint-spelling.py
+++ b/test/lint/lint-spelling.py
@@ -11,8 +11,11 @@ Note: Will exit successfully regardless of spelling errors.
from subprocess import check_output, STDOUT, CalledProcessError
+from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES
+
IGNORE_WORDS_FILE = 'test/lint/spelling.ignore-words.txt'
-FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/leveldb/", ":(exclude)src/crc32c/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)src/secp256k1/", ":(exclude)src/minisketch/", ":(exclude)contrib/guix/patches"]
+FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)contrib/guix/patches"]
+FILES_ARGS += [f":(exclude){dir}" for dir in SHARED_EXCLUDED_SUBTREES]
def check_codespell_install():
diff --git a/test/lint/lint-whitespace.py b/test/lint/lint-whitespace.py
deleted file mode 100755
index f5e4a776d0..0000000000
--- a/test/lint/lint-whitespace.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (c) 2017-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#
-# Check for new lines in diff that introduce trailing whitespace or
-# tab characters instead of spaces.
-
-# We can't run this check unless we know the commit range for the PR.
-
-import argparse
-import os
-import re
-import sys
-
-from subprocess import check_output
-
-EXCLUDED_DIRS = ["depends/patches/",
- "contrib/guix/patches/",
- "src/leveldb/",
- "src/crc32c/",
- "src/secp256k1/",
- "src/minisketch/",
- "doc/release-notes/",
- "src/qt/locale"]
-
-def parse_args():
- """Parse command line arguments."""
- parser = argparse.ArgumentParser(
- description="""
- Check for new lines in diff that introduce trailing whitespace
- or tab characters instead of spaces in unstaged changes, the
- previous n commits, or a commit-range.
- """,
- epilog=f"""
- You can manually set the commit-range with the COMMIT_RANGE
- environment variable (e.g. "COMMIT_RANGE='47ba2c3...ee50c9e'
- {sys.argv[0]}"). Defaults to current merge base when neither
- prev-commits nor the environment variable is set.
- """)
-
- parser.add_argument("--prev-commits", "-p", required=False, help="The previous n commits to check")
-
- return parser.parse_args()
-
-
-def report_diff(selection):
- filename = ""
- seen = False
- seenln = False
-
- print("The following changes were suspected:")
-
- for line in selection:
- if re.match(r"^diff", line):
- filename = line
- seen = False
- elif re.match(r"^@@", line):
- linenumber = line
- seenln = False
- else:
- if not seen:
- # The first time a file is seen with trailing whitespace or a tab character, we print the
- # filename (preceded by a newline).
- print("")
- print(filename)
- seen = True
- if not seenln:
- print(linenumber)
- seenln = True
- print(line)
-
-
-def get_diff(commit_range, check_only_code):
- exclude_args = [":(exclude)" + dir for dir in EXCLUDED_DIRS]
-
- if check_only_code:
- what_files = ["*.cpp", "*.h", "*.md", "*.py", "*.sh"]
- else:
- what_files = ["."]
-
- diff = check_output(["git", "diff", "-U0", commit_range, "--"] + what_files + exclude_args, text=True, encoding="utf8")
-
- return diff
-
-
-def main():
- args = parse_args()
-
- if not os.getenv("COMMIT_RANGE"):
- if args.prev_commits:
- commit_range = "HEAD~" + args.prev_commits + "...HEAD"
- else:
- # This assumes that the target branch of the pull request will be master.
- merge_base = check_output(["git", "merge-base", "HEAD", "master"], text=True, encoding="utf8").rstrip("\n")
- commit_range = merge_base + "..HEAD"
- else:
- commit_range = os.getenv("COMMIT_RANGE")
- if commit_range == "SKIP_EMPTY_NOT_A_PR":
- sys.exit(0)
-
- whitespace_selection = []
- tab_selection = []
-
- # Check if trailing whitespace was found in the diff.
- for line in get_diff(commit_range, check_only_code=False).splitlines():
- if re.match(r"^(diff --git|\@@|^\+.*\s+$)", line):
- whitespace_selection.append(line)
-
- whitespace_additions = [i for i in whitespace_selection if i.startswith("+")]
-
- # Check if tab characters were found in the diff.
- for line in get_diff(commit_range, check_only_code=True).splitlines():
- if re.match(r"^(diff --git|\@@|^\+.*\t)", line):
- tab_selection.append(line)
-
- tab_additions = [i for i in tab_selection if i.startswith("+")]
-
- ret = 0
-
- if len(whitespace_additions) > 0:
- print("This diff appears to have added new lines with trailing whitespace.")
- report_diff(whitespace_selection)
- ret = 1
-
- if len(tab_additions) > 0:
- print("This diff appears to have added new lines with tab characters instead of spaces.")
- report_diff(tab_selection)
- ret = 1
-
- sys.exit(ret)
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/lint/lint_ignore_dirs.py b/test/lint/lint_ignore_dirs.py
new file mode 100644
index 0000000000..af9ee7ef6b
--- /dev/null
+++ b/test/lint/lint_ignore_dirs.py
@@ -0,0 +1,5 @@
+SHARED_EXCLUDED_SUBTREES = ["src/leveldb/",
+ "src/crc32c/",
+ "src/secp256k1/",
+ "src/minisketch/",
+ ]
diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs
index 1dc79e97bd..d5dd98effe 100644
--- a/test/lint/test_runner/src/main.rs
+++ b/test/lint/test_runner/src/main.rs
@@ -4,7 +4,7 @@
use std::env;
use std::fs;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
use std::process::Command;
use std::process::ExitCode;
@@ -14,7 +14,9 @@ type LintFn = fn() -> LintResult;
/// Return the git command
fn git() -> Command {
- Command::new("git")
+ let mut git = Command::new("git");
+ git.arg("--no-pager");
+ git
}
/// Return stdout
@@ -34,17 +36,30 @@ fn get_git_root() -> PathBuf {
PathBuf::from(check_output(git().args(["rev-parse", "--show-toplevel"])).unwrap())
}
+/// Return all subtree paths
+fn get_subtrees() -> Vec<&'static str> {
+ vec![
+ "src/crc32c",
+ "src/crypto/ctaes",
+ "src/leveldb",
+ "src/minisketch",
+ "src/secp256k1",
+ ]
+}
+
+/// Return the pathspecs to exclude all subtrees
+fn get_pathspecs_exclude_subtrees() -> Vec<String> {
+ get_subtrees()
+ .iter()
+ .map(|s| format!(":(exclude){}", s))
+ .collect()
+}
+
fn lint_subtree() -> LintResult {
// This only checks that the trees are pure subtrees, it is not doing a full
// check with -r to not have to fetch all the remotes.
let mut good = true;
- for subtree in [
- "src/crypto/ctaes",
- "src/secp256k1",
- "src/minisketch",
- "src/leveldb",
- "src/crc32c",
- ] {
+ for subtree in get_subtrees() {
good &= Command::new("test/lint/git-subtree-check.sh")
.arg(subtree)
.status()
@@ -82,6 +97,189 @@ fs:: namespace, which has unsafe filesystem functions marked as deleted.
}
}
+/// Return the pathspecs for whitespace related excludes
+fn get_pathspecs_exclude_whitespace() -> Vec<String> {
+ let mut list = get_pathspecs_exclude_subtrees();
+ list.extend(
+ [
+ // Permanent excludes
+ "*.patch",
+ "src/qt/locale",
+ "contrib/windeploy/win-codesign.cert",
+ "doc/README_windows.txt",
+ // Temporary excludes, or existing violations
+ "doc/release-notes/release-notes-0.*",
+ "contrib/init/bitcoind.openrc",
+ "contrib/macdeploy/macdeployqtplus",
+ "src/crypto/sha256_sse4.cpp",
+ "src/qt/res/src/*.svg",
+ "test/functional/test_framework/crypto/ellswift_decode_test_vectors.csv",
+ "test/functional/test_framework/crypto/xswiftec_inv_test_vectors.csv",
+ "contrib/qos/tc.sh",
+ "contrib/verify-commits/gpg.sh",
+ "src/univalue/include/univalue_escapes.h",
+ "src/univalue/test/object.cpp",
+ "test/lint/git-subtree-check.sh",
+ ]
+ .iter()
+ .map(|s| format!(":(exclude){}", s)),
+ );
+ list
+}
+
+fn lint_trailing_whitespace() -> LintResult {
+ let trailing_space = git()
+ .args(["grep", "-I", "--line-number", "\\s$", "--"])
+ .args(get_pathspecs_exclude_whitespace())
+ .status()
+ .expect("command error")
+ .success();
+ if trailing_space {
+ Err(r#"
+^^^
+Trailing whitespace (including Windows line endings [CR LF]) is problematic, because git may warn
+about it, or editors may remove it by default, forcing developers in the future to either undo the
+changes manually or spend time on review.
+
+Thus, it is best to remove the trailing space now.
+
+Please add any false positives, such as subtrees, Windows-related files, patch files, or externally
+sourced files to the exclude list.
+ "#
+ .to_string())
+ } else {
+ Ok(())
+ }
+}
+
+fn lint_tabs_whitespace() -> LintResult {
+ let tabs = git()
+ .args(["grep", "-I", "--line-number", "--perl-regexp", "^\\t", "--"])
+ .args(["*.cpp", "*.h", "*.md", "*.py", "*.sh"])
+ .args(get_pathspecs_exclude_whitespace())
+ .status()
+ .expect("command error")
+ .success();
+ if tabs {
+ Err(r#"
+^^^
+Use of tabs in this codebase is problematic, because existing code uses spaces and tabs will cause
+display issues and conflict with editor settings.
+
+Please remove the tabs.
+
+Please add any false positives, such as subtrees, or externally sourced files to the exclude list.
+ "#
+ .to_string())
+ } else {
+ Ok(())
+ }
+}
+
+fn lint_includes_build_config() -> LintResult {
+ let config_path = "./src/config/bitcoin-config.h.in";
+ if !Path::new(config_path).is_file() {
+ assert!(Command::new("./autogen.sh")
+ .status()
+ .expect("command error")
+ .success());
+ }
+ let defines_regex = format!(
+ r"^\s*(?!//).*({})",
+ check_output(Command::new("grep").args(["undef ", "--", config_path]))
+ .expect("grep failed")
+ .lines()
+ .map(|line| {
+ line.split("undef ")
+ .nth(1)
+ .unwrap_or_else(|| panic!("Could not extract name in line: {line}"))
+ })
+ .collect::<Vec<_>>()
+ .join("|")
+ );
+ let print_affected_files = |mode: bool| {
+ // * mode==true: Print files which use the define, but lack the include
+ // * mode==false: Print files which lack the define, but use the include
+ let defines_files = check_output(
+ git()
+ .args([
+ "grep",
+ "--perl-regexp",
+ if mode {
+ "--files-with-matches"
+ } else {
+ "--files-without-match"
+ },
+ &defines_regex,
+ "--",
+ "*.cpp",
+ "*.h",
+ ])
+ .args(get_pathspecs_exclude_subtrees())
+ .args([
+ // These are exceptions which don't use bitcoin-config.h, rather the Makefile.am adds
+ // these cppflags manually.
+ ":(exclude)src/crypto/sha256_arm_shani.cpp",
+ ":(exclude)src/crypto/sha256_avx2.cpp",
+ ":(exclude)src/crypto/sha256_sse41.cpp",
+ ":(exclude)src/crypto/sha256_x86_shani.cpp",
+ ]),
+ )
+ .expect("grep failed");
+ git()
+ .args([
+ "grep",
+ if mode {
+ "--files-without-match"
+ } else {
+ "--files-with-matches"
+ },
+ if mode {
+ "^#include <config/bitcoin-config.h> // IWYU pragma: keep$"
+ } else {
+ "#include <config/bitcoin-config.h>" // Catch redundant includes with and without the IWYU pragma
+ },
+ "--",
+ ])
+ .args(defines_files.lines())
+ .status()
+ .expect("command error")
+ .success()
+ };
+ let missing = print_affected_files(true);
+ if missing {
+ return Err(format!(
+ r#"
+^^^
+One or more files use a symbol declared in the bitcoin-config.h header. However, they are not
+including the header. This is problematic, because the header may or may not be indirectly
+included. If the indirect include were to be intentionally or accidentally removed, the build could
+still succeed, but silently be buggy. For example, a slower fallback algorithm could be picked,
+even though bitcoin-config.h indicates that a faster feature is available and should be used.
+
+If you are unsure which symbol is used, you can find it with this command:
+git grep --perl-regexp '{}' -- file_name
+
+Make sure to include it with the IWYU pragma. Otherwise, IWYU may falsely instruct to remove the
+include again.
+
+#include <config/bitcoin-config.h> // IWYU pragma: keep
+ "#,
+ defines_regex
+ ));
+ }
+ let redundant = print_affected_files(false);
+ if redundant {
+ return Err(r#"
+^^^
+None of the files use a symbol declared in the bitcoin-config.h header. However, they are including
+the header. Consider removing the unused include.
+ "#
+ .to_string());
+ }
+ Ok(())
+}
+
fn lint_doc() -> LintResult {
if Command::new("test/lint/check-doc.py")
.status()
@@ -123,6 +321,9 @@ fn main() -> ExitCode {
let test_list: Vec<(&str, LintFn)> = vec![
("subtree check", lint_subtree),
("std::filesystem check", lint_std_filesystem),
+ ("trailing whitespace check", lint_trailing_whitespace),
+ ("no-tabs check", lint_tabs_whitespace),
+ ("build config includes check", lint_includes_build_config),
("-help=1 documentation check", lint_doc),
("lint-*.py scripts", lint_all),
];
@@ -134,7 +335,7 @@ fn main() -> ExitCode {
// chdir to root before each lint test
env::set_current_dir(&git_root).unwrap();
if let Err(err) = lint_fn() {
- println!("{err}\n^---- Failure generated from {lint_name}!");
+ println!("{err}\n^---- ⚠️ Failure generated from {lint_name}!");
test_failed = true;
}
}
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index dadbe8c4f6..482667a26a 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -51,16 +51,18 @@ unsigned-integer-overflow:CCoinsViewCache::Uncache
unsigned-integer-overflow:CompressAmount
unsigned-integer-overflow:DecompressAmount
unsigned-integer-overflow:crypto/
+unsigned-integer-overflow:getchaintxstats*
unsigned-integer-overflow:MurmurHash3
unsigned-integer-overflow:CBlockPolicyEstimator::processBlockTx
unsigned-integer-overflow:TxConfirmStats::EstimateMedianVal
unsigned-integer-overflow:prevector.h
-unsigned-integer-overflow:script/interpreter.cpp
+unsigned-integer-overflow:EvalScript
unsigned-integer-overflow:xoroshiro128plusplus.h
implicit-integer-sign-change:CBlockPolicyEstimator::processBlockTx
implicit-integer-sign-change:SetStdinEcho
implicit-integer-sign-change:compressor.h
implicit-integer-sign-change:crypto/
+implicit-integer-sign-change:getchaintxstats*
implicit-integer-sign-change:TxConfirmStats::removeTx
implicit-integer-sign-change:prevector.h
implicit-integer-sign-change:verify_flags