Diffstat (limited to 'test')
-rwxr-xr-xtest/functional/feature_assumeutxo.py30
-rwxr-xr-xtest/functional/feature_maxtipage.py4
-rwxr-xr-xtest/functional/feature_taproot.py2
-rwxr-xr-xtest/functional/feature_versionbits_warning.py8
-rwxr-xr-xtest/functional/mempool_accept.py21
-rwxr-xr-xtest/functional/mempool_limit.py10
-rwxr-xr-xtest/functional/p2p_1p1c_network.py166
-rwxr-xr-xtest/functional/p2p_opportunistic_1p1c.py415
-rwxr-xr-xtest/functional/p2p_outbound_eviction.py253
-rwxr-xr-xtest/functional/p2p_segwit.py2
-rwxr-xr-xtest/functional/p2p_tx_download.py6
-rwxr-xr-xtest/functional/rpc_packages.py16
-rwxr-xr-xtest/functional/rpc_rawtransaction.py4
-rw-r--r--test/functional/test-shell.md8
-rw-r--r--test/functional/test_framework/address.py7
-rw-r--r--test/functional/test_framework/blocktools.py2
-rw-r--r--test/functional/test_framework/mempool_util.py81
-rw-r--r--test/functional/test_framework/util.py59
-rw-r--r--test/functional/test_framework/wallet.py13
-rwxr-xr-xtest/functional/test_runner.py29
-rwxr-xr-xtest/functional/wallet_basic.py9
-rwxr-xr-xtest/functional/wallet_fundrawtransaction.py4
-rw-r--r--test/lint/README.md14
-rw-r--r--test/lint/test_runner/src/main.rs18
24 files changed, 1070 insertions, 111 deletions
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
index 19cbbcffdb..0d6c92c9fa 100755
--- a/test/functional/feature_assumeutxo.py
+++ b/test/functional/feature_assumeutxo.py
@@ -13,8 +13,6 @@ The assumeutxo value generated and used here is committed to in
Interesting test cases could be loading an assumeutxo snapshot file with:
-- TODO: Valid hash but invalid snapshot file (bad coin height or
- bad other serialization)
- TODO: Valid snapshot file, but referencing a snapshot block that turns out to be
invalid, or has an invalid parent
- TODO: Valid snapshot file and snapshot block, but the block is not on the
@@ -98,18 +96,23 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info(" - snapshot file with alternated UTXO data")
cases = [
- [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b"], # wrong outpoint hash
- [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad"], # wrong outpoint index
- [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8"], # wrong coin code VARINT((coinbase ? 1 : 0) | (height << 1))
- [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5"], # another wrong coin code
+ # (content, offset, wrong_hash, custom_message)
+ [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b", None], # wrong outpoint hash
+ [(1).to_bytes(4, "little"), 32, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad", None], # wrong outpoint index
+ [b"\x81", 36, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8", None], # wrong coin code VARINT
+ [b"\x80", 36, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5", None], # another wrong coin code
+ [b"\x84\x58", 36, None, "[snapshot] bad snapshot data after deserializing 0 coins"], # wrong coin case with height 364 and coinbase 0
+ [b"\xCA\xD2\x8F\x5A", 41, None, "[snapshot] bad snapshot data after deserializing 0 coins - bad tx out value"], # Amount exceeds MAX_MONEY
]
- for content, offset, wrong_hash in cases:
+ for content, offset, wrong_hash, custom_message in cases:
with open(bad_snapshot_path, "wb") as f:
f.write(valid_snapshot_contents[:(32 + 8 + offset)])
f.write(content)
f.write(valid_snapshot_contents[(32 + 8 + offset + len(content)):])
- expected_error(log_msg=f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}")
+
+ log_msg = custom_message if custom_message is not None else f"[snapshot] bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}"
+ expected_error(log_msg=log_msg)
def test_headers_not_synced(self, valid_snapshot_path):
for node in self.nodes[1:]:
@@ -152,6 +155,12 @@ class AssumeutxoTest(BitcoinTestFramework):
self.restart_node(2, extra_args=self.extra_args[2])
+ def test_invalid_file_path(self):
+ self.log.info("Test bitcoind should fail when file path is invalid.")
+ node = self.nodes[0]
+ path = node.datadir_path / node.chain / "invalid" / "path"
+ assert_raises_rpc_error(-8, "Couldn't open file {} for reading.".format(path), node.loadtxoutset, path)
+
def run_test(self):
"""
Bring up two (disconnected) nodes, mine some new blocks on the first,
@@ -236,6 +245,7 @@ class AssumeutxoTest(BitcoinTestFramework):
self.test_invalid_mempool_state(dump_output['path'])
self.test_invalid_snapshot_scenarios(dump_output['path'])
self.test_invalid_chainstate_scenarios()
+ self.test_invalid_file_path()
self.log.info(f"Loading snapshot into second node from {dump_output['path']}")
loaded = n1.loadtxoutset(dump_output['path'])
@@ -395,6 +405,10 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
assert_equal(snapshot['validated'], False)
+ self.log.info("Check that loading the snapshot again will fail because there is already an active snapshot.")
+ with n2.assert_debug_log(expected_msgs=["[snapshot] can't activate a snapshot-based chainstate more than once"]):
+ assert_raises_rpc_error(-32603, "Unable to load UTXO snapshot", n2.loadtxoutset, dump_output['path'])
+
self.connect_nodes(0, 2)
self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
self.sync_blocks()
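
The two new test cases that assert on custom log messages key off the coin code field, which the surrounding comments describe as VARINT((coinbase ? 1 : 0) | (height << 1)). As a hedged aside (the helper name is illustrative), decoding that field with the MSB base-128 VarInt scheme used by Bitcoin Core's serializer confirms that b"\x84\x58" corresponds to height 364 with coinbase 0, matching the comment in the new case:

def read_varint(data: bytes) -> int:
    # serialize.h-style VarInt: 7 data bits per byte, high bit marks a
    # continuation byte, and each continuation adds an implicit +1.
    n = 0
    for byte in data:
        n = (n << 7) | (byte & 0x7F)
        if byte & 0x80:
            n += 1
        else:
            return n
    raise ValueError("unterminated varint")

code = read_varint(b"\x84\x58")          # 728
height, coinbase = code >> 1, code & 1
assert (height, coinbase) == (364, 0)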
diff --git a/test/functional/feature_maxtipage.py b/test/functional/feature_maxtipage.py
index 51f37ef1e0..a1774a5395 100755
--- a/test/functional/feature_maxtipage.py
+++ b/test/functional/feature_maxtipage.py
@@ -43,6 +43,10 @@ class MaxTipAgeTest(BitcoinTestFramework):
self.generate(node_miner, 1)
assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], False)
+ # reset time to system time so we don't have a time offset with the ibd node the next
+ # time we connect to it, ensuring TimeOffsets::WarnIfOutOfSync() doesn't output to stderr
+ node_miner.setmocktime(0)
+
def run_test(self):
self.log.info("Test IBD with maximum tip age of 24 hours (default).")
self.test_maxtipage(DEFAULT_MAX_TIP_AGE, set_parameter=False)
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py
index e85541d0ec..e7d65b4539 100755
--- a/test/functional/feature_taproot.py
+++ b/test/functional/feature_taproot.py
@@ -10,7 +10,6 @@ from test_framework.blocktools import (
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
- WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
@@ -20,6 +19,7 @@ from test_framework.messages import (
CTxOut,
SEQUENCE_FINAL,
tx_from_hex,
+ WITNESS_SCALE_FACTOR,
)
from test_framework.script import (
ANNEX_TAG,
diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py
index 073d3de812..2c330eb681 100755
--- a/test/functional/feature_versionbits_warning.py
+++ b/test/functional/feature_versionbits_warning.py
@@ -73,8 +73,8 @@ class VersionBitsWarningTest(BitcoinTestFramework):
self.generatetoaddress(node, VB_PERIOD - VB_THRESHOLD + 1, node_deterministic_address)
# Check that we're not getting any versionbit-related errors in get*info()
- assert not VB_PATTERN.match(node.getmininginfo()["warnings"])
- assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"])
+ assert not VB_PATTERN.match(",".join(node.getmininginfo()["warnings"]))
+ assert not VB_PATTERN.match(",".join(node.getnetworkinfo()["warnings"]))
# Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
self.send_blocks_with_version(peer, VB_THRESHOLD, VB_UNKNOWN_VERSION)
@@ -94,8 +94,8 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# Generating one more block will be enough to generate an error.
self.generatetoaddress(node, 1, node_deterministic_address)
# Check that get*info() shows the versionbits unknown rules warning
- assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
- assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
+ assert WARN_UNKNOWN_RULES_ACTIVE in ",".join(node.getmininginfo()["warnings"])
+ assert WARN_UNKNOWN_RULES_ACTIVE in ",".join(node.getnetworkinfo()["warnings"])
# Check that the alert file shows the versionbits unknown rules warning
self.wait_until(lambda: self.versionbits_in_alert_file())
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 272e932fcc..b00be5f4f0 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -28,6 +28,8 @@ from test_framework.script import (
OP_HASH160,
OP_RETURN,
OP_TRUE,
+ SIGHASH_ALL,
+ sign_input_legacy,
)
from test_framework.script_util import (
DUMMY_MIN_OP_RETURN_SCRIPT,
@@ -386,5 +388,24 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
maxfeerate=0,
)
+ self.log.info('Spending a confirmed bare multisig is okay')
+ address = self.wallet.get_address()
+ tx = tx_from_hex(raw_tx_reference)
+ privkey, pubkey = generate_keypair()
+ tx.vout[0].scriptPubKey = keys_to_multisig_script([pubkey] * 3, k=1) # Some bare multisig script (1-of-3)
+ tx.rehash()
+ self.generateblock(node, address, [tx.serialize().hex()])
+ tx_spend = CTransaction()
+ tx_spend.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx_spend.vout.append(CTxOut(tx.vout[0].nValue - int(fee*COIN), script_to_p2wsh_script(CScript([OP_TRUE]))))
+ tx_spend.rehash()
+ sign_input_legacy(tx_spend, 0, tx.vout[0].scriptPubKey, privkey, sighash_type=SIGHASH_ALL)
+ tx_spend.vin[0].scriptSig = bytes(CScript([OP_0])) + tx_spend.vin[0].scriptSig
+ self.check_mempool_result(
+ result_expected=[{'txid': tx_spend.rehash(), 'allowed': True, 'vsize': tx_spend.get_vsize(), 'fees': { 'base': Decimal('0.00000700')}}],
+ rawtxs=[tx_spend.serialize().hex()],
+ maxfeerate=0,
+ )
+
if __name__ == '__main__':
MempoolAcceptanceTest().main()
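
A note on the scriptSig assembled in the new bare-multisig case above: OP_CHECKMULTISIG pops one stack element more than the signatures it verifies (the long-standing off-by-one), so a 1-of-3 bare multisig spend pushes a dummy element, here OP_0, before the signature. A minimal sketch of that layout using the repo's script helpers (the standalone function is illustrative, not part of the test):

from test_framework.script import CScript, OP_0

def bare_multisig_scriptsig(sig: bytes) -> CScript:
    # Stack handed to OP_CHECKMULTISIG: <dummy> <sig>; the dummy absorbs the
    # extra pop and the single signature satisfies the 1-of-3 condition.
    return CScript([OP_0, sig])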
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
index e8a568f7ab..d46924f4ce 100755
--- a/test/functional/mempool_limit.py
+++ b/test/functional/mempool_limit.py
@@ -6,6 +6,9 @@
from decimal import Decimal
+from test_framework.mempool_util import (
+ fill_mempool,
+)
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -13,7 +16,6 @@ from test_framework.util import (
assert_fee_amount,
assert_greater_than,
assert_raises_rpc_error,
- fill_mempool,
)
from test_framework.wallet import (
COIN,
@@ -93,7 +95,7 @@ class MempoolLimitTest(BitcoinTestFramework):
assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
- fill_mempool(self, node, self.wallet)
+ fill_mempool(self, node)
current_info = node.getmempoolinfo()
mempoolmin_feerate = current_info["mempoolminfee"]
@@ -183,7 +185,7 @@ class MempoolLimitTest(BitcoinTestFramework):
assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
- fill_mempool(self, node, self.wallet)
+ fill_mempool(self, node)
current_info = node.getmempoolinfo()
mempoolmin_feerate = current_info["mempoolminfee"]
@@ -257,7 +259,7 @@ class MempoolLimitTest(BitcoinTestFramework):
assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
- fill_mempool(self, node, self.wallet)
+ fill_mempool(self, node)
# Deliberately try to create a tx with a fee less than the minimum mempool fee to assert that it does not get added to the mempool
self.log.info('Create a mempool tx that will not pass mempoolminfee')
diff --git a/test/functional/p2p_1p1c_network.py b/test/functional/p2p_1p1c_network.py
new file mode 100755
index 0000000000..ea59248506
--- /dev/null
+++ b/test/functional/p2p_1p1c_network.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test that 1p1c package submission allows a 1-parent-1-child package to propagate across a "network" of
+nodes. Send various packages from different nodes on a network in which some nodes have already received
+some of the transactions (and submitted them to mempool, kept them as orphans or rejected them as
+too-low-feerate transactions). The packages should be received and accepted by all nodes.
+"""
+
+from decimal import Decimal
+from math import ceil
+
+from test_framework.mempool_util import (
+ fill_mempool,
+)
+from test_framework.messages import (
+ msg_tx,
+)
+from test_framework.p2p import (
+ P2PInterface,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_greater_than,
+)
+from test_framework.wallet import (
+ MiniWallet,
+ MiniWalletMode,
+)
+
+# 1sat/vB feerate denominated in BTC/KvB
+FEERATE_1SAT_VB = Decimal("0.00001000")
+
+class PackageRelayTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 4
+ # hugely speeds up the test, as it involves multiple hops of tx relay.
+ self.noban_tx_relay = True
+ self.extra_args = [[
+ "-datacarriersize=100000",
+ "-maxmempool=5",
+ ]] * self.num_nodes
+ self.supports_cli = False
+
+ def raise_network_minfee(self):
+ fill_mempool(self, self.nodes[0])
+
+ self.log.debug("Wait for the network to sync mempools")
+ self.sync_mempools()
+
+ self.log.debug("Check that all nodes' mempool minimum feerates are above min relay feerate")
+ for node in self.nodes:
+ assert_equal(node.getmempoolinfo()['minrelaytxfee'], FEERATE_1SAT_VB)
+ assert_greater_than(node.getmempoolinfo()['mempoolminfee'], FEERATE_1SAT_VB)
+
+ def create_basic_1p1c(self, wallet):
+ low_fee_parent = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB, confirmed_only=True)
+ high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB)
+ package_hex_basic = [low_fee_parent["hex"], high_fee_child["hex"]]
+ return package_hex_basic, low_fee_parent["tx"], high_fee_child["tx"]
+
+ def create_package_2outs(self, wallet):
+ # First create a tester tx to see the vsize, and then adjust the fees
+ utxo_for_2outs = wallet.get_utxo(confirmed_only=True)
+
+ low_fee_parent_2outs_tester = wallet.create_self_transfer_multi(
+ utxos_to_spend=[utxo_for_2outs],
+ num_outputs=2,
+ )
+
+ # Target 1sat/vB so the number of satoshis is equal to the vsize.
+ # Round up. The goal is to be between min relay feerate and mempool min feerate.
+ fee_2outs = ceil(low_fee_parent_2outs_tester["tx"].get_vsize() / 2)
+
+ low_fee_parent_2outs = wallet.create_self_transfer_multi(
+ utxos_to_spend=[utxo_for_2outs],
+ num_outputs=2,
+ fee_per_output=fee_2outs,
+ )
+
+ # Now create the child
+ high_fee_child_2outs = wallet.create_self_transfer_multi(
+ utxos_to_spend=low_fee_parent_2outs["new_utxos"][::-1],
+ fee_per_output=fee_2outs*100,
+ )
+ return [low_fee_parent_2outs["hex"], high_fee_child_2outs["hex"]], low_fee_parent_2outs["tx"], high_fee_child_2outs["tx"]
+
+ def create_package_2p1c(self, wallet):
+ parent1 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*10, confirmed_only=True)
+ parent2 = wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*20, confirmed_only=True)
+ child = wallet.create_self_transfer_multi(
+ utxos_to_spend=[parent1["new_utxo"], parent2["new_utxo"]],
+ fee_per_output=999*parent1["tx"].get_vsize(),
+ )
+ return [parent1["hex"], parent2["hex"], child["hex"]], parent1["tx"], parent2["tx"], child["tx"]
+
+ def create_packages(self):
+ # 1: Basic 1-parent-1-child package, parent 1sat/vB, child 999sat/vB
+ package_hex_1, parent_1, child_1 = self.create_basic_1p1c(self.wallet)
+
+ # 2: same as 1, parent's txid is the same as its wtxid.
+ package_hex_2, parent_2, child_2 = self.create_basic_1p1c(self.wallet_nonsegwit)
+
+ # 3: 2-parent-1-child package. Both parents are above mempool min feerate. No package submission happens.
+ # We require packages to be child-with-unconfirmed-parents and only allow 1-parent-1-child packages.
+ package_hex_3, parent_31, parent_32, child_3 = self.create_package_2p1c(self.wallet)
+
+ # 4: parent + child package where the child spends 2 different outputs from the parent.
+ package_hex_4, parent_4, child_4 = self.create_package_2outs(self.wallet)
+
+ # Assemble return results
+ packages_to_submit = [package_hex_1, package_hex_2, package_hex_3, package_hex_4]
+ # node0: sender
+ # node1: pre-received the children (orphan)
+ # node3: pre-received the parents (too low fee)
+ # All nodes receive parent_31 ahead of time.
+ txns_to_send = [
+ [],
+ [child_1, child_2, parent_31, child_3, child_4],
+ [parent_31],
+ [parent_1, parent_2, parent_31, parent_4]
+ ]
+
+ return packages_to_submit, txns_to_send
+
+ def run_test(self):
+ self.wallet = MiniWallet(self.nodes[1])
+ self.wallet_nonsegwit = MiniWallet(self.nodes[2], mode=MiniWalletMode.RAW_P2PK)
+ self.generate(self.wallet_nonsegwit, 10)
+ self.generate(self.wallet, 120)
+
+ self.log.info("Fill mempools with large transactions to raise mempool minimum feerates")
+ self.raise_network_minfee()
+
+ # Create the transactions.
+ self.wallet.rescan_utxos(include_mempool=True)
+ packages_to_submit, transactions_to_presend = self.create_packages()
+
+ self.peers = [self.nodes[i].add_p2p_connection(P2PInterface()) for i in range(self.num_nodes)]
+
+ self.log.info("Pre-send some transactions to nodes")
+ for (i, peer) in enumerate(self.peers):
+ for tx in transactions_to_presend[i]:
+ peer.send_and_ping(msg_tx(tx))
+ # This disconnect removes any sent orphans from the orphanage (EraseForPeer) and times
+            # out the in-flight requests. It is currently required for the test to pass,
+ # because the node will not reconsider an orphan tx and will not (re)try requesting
+ # orphan parents from multiple peers if the first one didn't respond.
+ # TODO: remove this in the future if the node tries orphan resolution with multiple peers.
+ peer.peer_disconnect()
+
+ self.log.info("Submit full packages to node0")
+ for package_hex in packages_to_submit:
+ submitpackage_result = self.nodes[0].submitpackage(package_hex)
+ assert_equal(submitpackage_result["package_msg"], "success")
+
+ self.log.info("Wait for mempools to sync")
+ self.sync_mempools(timeout=20)
+
+
+if __name__ == '__main__':
+ PackageRelayTest().main()
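
Two pieces of arithmetic in the test above are easy to sanity-check: FEERATE_1SAT_VB expresses 1 sat/vB in BTC/kvB, and create_package_2outs pays ceil(vsize / 2) satoshis per output across two outputs, which puts the parent at roughly 1 sat/vB. A short sketch of both, where the vsize value is only an assumed example:

from decimal import Decimal
from math import ceil

COIN = 100_000_000  # satoshis per BTC

# 1 sat/vB = 1000 sat/kvB = 0.00001000 BTC/kvB
assert Decimal(1000) / COIN == Decimal("0.00001000")

# fee_per_output = ceil(vsize / 2) with two outputs gives a total fee of
# roughly vsize satoshis, i.e. about 1 sat/vB (never below, at most 1 sat over).
vsize = 141  # assumed example vsize of the 2-output parent
fee_per_output = ceil(vsize / 2)
total_fee = 2 * fee_per_output
assert vsize <= total_fee <= vsize + 1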
diff --git a/test/functional/p2p_opportunistic_1p1c.py b/test/functional/p2p_opportunistic_1p1c.py
new file mode 100755
index 0000000000..aec6e95fbc
--- /dev/null
+++ b/test/functional/p2p_opportunistic_1p1c.py
@@ -0,0 +1,415 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test opportunistic 1p1c package submission logic.
+"""
+
+from decimal import Decimal
+import time
+from test_framework.mempool_util import (
+ fill_mempool,
+)
+from test_framework.messages import (
+ CInv,
+ CTxInWitness,
+ MAX_BIP125_RBF_SEQUENCE,
+ MSG_WTX,
+ msg_inv,
+ msg_tx,
+ tx_from_hex,
+)
+from test_framework.p2p import (
+ P2PInterface,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_greater_than,
+)
+from test_framework.wallet import (
+ MiniWallet,
+ MiniWalletMode,
+)
+
+# 1sat/vB feerate denominated in BTC/KvB
+FEERATE_1SAT_VB = Decimal("0.00001000")
+# Number of seconds to wait to ensure no getdata is received
+GETDATA_WAIT = 60
+
+def cleanup(func):
+ def wrapper(self, *args, **kwargs):
+ try:
+ func(self, *args, **kwargs)
+ finally:
+ self.nodes[0].disconnect_p2ps()
+ # Do not clear the node's mempool, as each test requires mempool min feerate > min
+ # relay feerate. However, do check that this is the case.
+ assert self.nodes[0].getmempoolinfo()["mempoolminfee"] > self.nodes[0].getnetworkinfo()["relayfee"]
+ # Ensure we do not try to spend the same UTXOs in subsequent tests, as they will look like RBF attempts.
+ self.wallet.rescan_utxos(include_mempool=True)
+
+ # Resets if mocktime was used
+ self.nodes[0].setmocktime(0)
+ return wrapper
+
+class PackageRelayTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.extra_args = [[
+ "-datacarriersize=100000",
+ "-maxmempool=5",
+ ]]
+ self.supports_cli = False
+
+ def create_tx_below_mempoolminfee(self, wallet):
+ """Create a 1-input 1sat/vB transaction using a confirmed UTXO. Decrement and use
+ self.sequence so that subsequent calls to this function result in unique transactions."""
+
+ self.sequence -= 1
+ assert_greater_than(self.nodes[0].getmempoolinfo()["mempoolminfee"], FEERATE_1SAT_VB)
+
+ return wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB, sequence=self.sequence, confirmed_only=True)
+
+ @cleanup
+ def test_basic_child_then_parent(self):
+ node = self.nodes[0]
+ self.log.info("Check that opportunistic 1p1c logic works when child is received before parent")
+
+ low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet)
+ high_fee_child = self.wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=20*FEERATE_1SAT_VB)
+
+ peer_sender = node.add_p2p_connection(P2PInterface())
+
+ # 1. Child is received first (perhaps the low feerate parent didn't meet feefilter or the requests were sent to different nodes). It is missing an input.
+ high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([high_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 2. Node requests the missing parent by txid.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 3. Sender relays the parent. Parent+Child are evaluated as a package and accepted.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ # 4. Both transactions should now be in mempool.
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+
+ node.disconnect_p2ps()
+
+ @cleanup
+ def test_basic_parent_then_child(self, wallet):
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(wallet)
+ high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=20*FEERATE_1SAT_VB)
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+ peer_ignored = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=2, connection_type="outbound-full-relay")
+
+ # 1. Parent is relayed first. It is too low feerate.
+ parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ peer_sender.wait_for_getdata([parent_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+
+ # Send again from peer_ignored, check that it is ignored
+ peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ assert "getdata" not in peer_ignored.last_message
+
+ # 2. Child is relayed next. It is missing an input.
+ high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([high_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 3. Node requests the missing parent by txid.
+ # It should do so even if it has previously rejected that parent for being too low feerate.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 4. Sender re-relays the parent. Parent+Child are evaluated as a package and accepted.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ # 5. Both transactions should now be in mempool.
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+
+ @cleanup
+ def test_low_and_high_child(self, wallet):
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(wallet)
+ # This feerate is above mempoolminfee, but not enough to also bump the low feerate parent.
+ feerate_just_above = node.getmempoolinfo()["mempoolminfee"]
+ med_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=feerate_just_above)
+ high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB)
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+ peer_ignored = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=2, connection_type="outbound-full-relay")
+
+ self.log.info("Check that tx caches low fee parent + low fee child package rejections")
+
+ # 1. Send parent, rejected for being low feerate.
+ parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ peer_sender.wait_for_getdata([parent_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+
+ # Send again from peer_ignored, check that it is ignored
+ peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ assert "getdata" not in peer_ignored.last_message
+
+ # 2. Send an (orphan) child that has a higher feerate, but not enough to bump the parent.
+ med_child_wtxid_int = int(med_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=med_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([med_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(med_fee_child["tx"]))
+
+ # 3. Node requests the orphan's missing parent.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 4. The low parent + low child are submitted as a package. They are not accepted due to low package feerate.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+ assert med_fee_child["txid"] not in node.getrawmempool()
+
+ # If peer_ignored announces the low feerate child, it should be ignored
+ peer_ignored.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=med_child_wtxid_int)]))
+ assert "getdata" not in peer_ignored.last_message
+ # If either peer sends the parent again, package evaluation should not be attempted
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+ peer_ignored.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ assert low_fee_parent["txid"] not in node.getrawmempool()
+ assert med_fee_child["txid"] not in node.getrawmempool()
+
+ # 5. Send the high feerate (orphan) child
+ high_child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ peer_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=high_child_wtxid_int)]))
+ peer_sender.wait_for_getdata([high_child_wtxid_int])
+ peer_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 6. Node requests the orphan's parent, even though it has already been rejected, both by
+ # itself and with a child. This is necessary, otherwise high_fee_child can be censored.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ peer_sender.wait_for_getdata([parent_txid_int])
+
+ # 7. The low feerate parent + high feerate child are submitted as a package.
+ peer_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ # 8. Both transactions should now be in mempool
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+ assert med_fee_child["txid"] not in node_mempool
+
+ @cleanup
+ def test_orphan_consensus_failure(self):
+ self.log.info("Check opportunistic 1p1c logic with consensus-invalid orphan causes disconnect of the correct peer")
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet)
+ coin = low_fee_parent["new_utxo"]
+ address = node.get_deterministic_priv_key().address
+ # Create raw transaction spending the parent, but with no signature (a consensus error).
+ hex_orphan_no_sig = node.createrawtransaction([{"txid": coin["txid"], "vout": coin["vout"]}], {address : coin["value"] - Decimal("0.0001")})
+ tx_orphan_bad_wit = tx_from_hex(hex_orphan_no_sig)
+ tx_orphan_bad_wit.wit.vtxinwit.append(CTxInWitness())
+ tx_orphan_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage']
+
+ bad_orphan_sender = node.add_p2p_connection(P2PInterface())
+ parent_sender = node.add_p2p_connection(P2PInterface())
+
+ # 1. Child is received first. It is missing an input.
+ child_wtxid_int = int(tx_orphan_bad_wit.getwtxid(), 16)
+ bad_orphan_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=child_wtxid_int)]))
+ bad_orphan_sender.wait_for_getdata([child_wtxid_int])
+ bad_orphan_sender.send_and_ping(msg_tx(tx_orphan_bad_wit))
+
+ # 2. Node requests the missing parent by txid.
+ parent_txid_int = int(low_fee_parent["txid"], 16)
+ bad_orphan_sender.wait_for_getdata([parent_txid_int])
+
+ # 3. A different peer relays the parent. Parent+Child are evaluated as a package and rejected.
+ parent_sender.send_message(msg_tx(low_fee_parent["tx"]))
+
+ # 4. Transactions should not be in mempool.
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] not in node_mempool
+ assert tx_orphan_bad_wit.rehash() not in node_mempool
+
+ # 5. Peer that sent a consensus-invalid transaction should be disconnected.
+ bad_orphan_sender.wait_for_disconnect()
+
+ # The peer that didn't provide the orphan should not be disconnected.
+ parent_sender.sync_with_ping()
+
+ @cleanup
+ def test_parent_consensus_failure(self):
+ self.log.info("Check opportunistic 1p1c logic with consensus-invalid parent causes disconnect of the correct peer")
+ node = self.nodes[0]
+ low_fee_parent = self.create_tx_below_mempoolminfee(self.wallet)
+ high_fee_child = self.wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*FEERATE_1SAT_VB)
+
+ # Create invalid version of parent with a bad signature.
+ tx_parent_bad_wit = tx_from_hex(low_fee_parent["hex"])
+ tx_parent_bad_wit.wit.vtxinwit.append(CTxInWitness())
+ tx_parent_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage']
+
+ package_sender = node.add_p2p_connection(P2PInterface())
+ fake_parent_sender = node.add_p2p_connection(P2PInterface())
+
+ # 1. Child is received first. It is missing an input.
+ child_wtxid_int = int(high_fee_child["tx"].getwtxid(), 16)
+ package_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=child_wtxid_int)]))
+ package_sender.wait_for_getdata([child_wtxid_int])
+ package_sender.send_and_ping(msg_tx(high_fee_child["tx"]))
+
+ # 2. Node requests the missing parent by txid.
+ parent_txid_int = int(tx_parent_bad_wit.rehash(), 16)
+ package_sender.wait_for_getdata([parent_txid_int])
+
+ # 3. A different node relays the parent. The parent is first evaluated by itself and
+ # rejected for being too low feerate. Then it is evaluated as a package and, after passing
+ # feerate checks, rejected for having a bad signature (consensus error).
+ fake_parent_sender.send_message(msg_tx(tx_parent_bad_wit))
+
+ # 4. Transactions should not be in mempool.
+ node_mempool = node.getrawmempool()
+ assert tx_parent_bad_wit.rehash() not in node_mempool
+ assert high_fee_child["txid"] not in node_mempool
+
+ # 5. Peer sent a consensus-invalid transaction.
+ fake_parent_sender.wait_for_disconnect()
+
+ self.log.info("Check that fake parent does not cause orphan to be deleted and real package can still be submitted")
+        # 6. The child-sending peer should not have been punished and the orphan should remain in the orphanage.
+ # It can send the "real" parent transaction, and the package is accepted.
+ parent_wtxid_int = int(low_fee_parent["tx"].getwtxid(), 16)
+ package_sender.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=parent_wtxid_int)]))
+ package_sender.wait_for_getdata([parent_wtxid_int])
+ package_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))
+
+ node_mempool = node.getrawmempool()
+ assert low_fee_parent["txid"] in node_mempool
+ assert high_fee_child["txid"] in node_mempool
+
+ @cleanup
+ def test_multiple_parents(self):
+ self.log.info("Check that node does not request more than 1 previously-rejected low feerate parent")
+
+ node = self.nodes[0]
+ node.setmocktime(int(time.time()))
+
+ # 2-parent-1-child package where both parents are below mempool min feerate
+ parent_low_1 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit)
+ parent_low_2 = self.create_tx_below_mempoolminfee(self.wallet_nonsegwit)
+ child_bumping = self.wallet_nonsegwit.create_self_transfer_multi(
+ utxos_to_spend=[parent_low_1["new_utxo"], parent_low_2["new_utxo"]],
+ fee_per_output=999*parent_low_1["tx"].get_vsize(),
+ )
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+
+ # 1. Send both parents. Each should be rejected for being too low feerate.
+ # Send unsolicited so that we can later check that no "getdata" was ever received.
+ peer_sender.send_and_ping(msg_tx(parent_low_1["tx"]))
+ peer_sender.send_and_ping(msg_tx(parent_low_2["tx"]))
+
+ # parent_low_1 and parent_low_2 are rejected for being low feerate.
+ assert parent_low_1["txid"] not in node.getrawmempool()
+ assert parent_low_2["txid"] not in node.getrawmempool()
+
+ # 2. Send child.
+ peer_sender.send_and_ping(msg_tx(child_bumping["tx"]))
+
+ # 3. Node should not request any parents, as it should recognize that it will not accept
+ # multi-parent-1-child packages.
+ node.bumpmocktime(GETDATA_WAIT)
+ peer_sender.sync_with_ping()
+ assert "getdata" not in peer_sender.last_message
+
+ @cleanup
+ def test_other_parent_in_mempool(self):
+ self.log.info("Check opportunistic 1p1c fails if child already has another parent in mempool")
+ node = self.nodes[0]
+
+ # This parent needs CPFP
+ parent_low = self.create_tx_below_mempoolminfee(self.wallet)
+ # This parent does not need CPFP and can be submitted alone ahead of time
+ parent_high = self.wallet.create_self_transfer(fee_rate=FEERATE_1SAT_VB*10, confirmed_only=True)
+ child = self.wallet.create_self_transfer_multi(
+ utxos_to_spend=[parent_high["new_utxo"], parent_low["new_utxo"]],
+ fee_per_output=999*parent_low["tx"].get_vsize(),
+ )
+
+ peer_sender = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=1, connection_type="outbound-full-relay")
+
+ # 1. Send first parent which will be accepted.
+ peer_sender.send_and_ping(msg_tx(parent_high["tx"]))
+ assert parent_high["txid"] in node.getrawmempool()
+
+ # 2. Send child.
+ peer_sender.send_and_ping(msg_tx(child["tx"]))
+
+ # 3. Node requests parent_low. However, 1p1c fails because package-not-child-with-unconfirmed-parents
+ parent_low_txid_int = int(parent_low["txid"], 16)
+ peer_sender.wait_for_getdata([parent_low_txid_int])
+ peer_sender.send_and_ping(msg_tx(parent_low["tx"]))
+
+ node_mempool = node.getrawmempool()
+ assert parent_high["txid"] in node_mempool
+ assert parent_low["txid"] not in node_mempool
+ assert child["txid"] not in node_mempool
+
+ # Same error if submitted through submitpackage without parent_high
+ package_hex_missing_parent = [parent_low["hex"], child["hex"]]
+ result_missing_parent = node.submitpackage(package_hex_missing_parent)
+ assert_equal(result_missing_parent["package_msg"], "package-not-child-with-unconfirmed-parents")
+
+ def run_test(self):
+ node = self.nodes[0]
+ # To avoid creating transactions with the same txid (can happen if we set the same feerate
+ # and reuse the same input as a previous transaction that wasn't successfully submitted),
+ # we give each subtest a different nSequence for its transactions.
+ self.sequence = MAX_BIP125_RBF_SEQUENCE
+
+ self.wallet = MiniWallet(node)
+ self.wallet_nonsegwit = MiniWallet(node, mode=MiniWalletMode.RAW_P2PK)
+ self.generate(self.wallet_nonsegwit, 10)
+ self.generate(self.wallet, 20)
+
+ fill_mempool(self, node)
+
+ self.log.info("Check opportunistic 1p1c logic when parent (txid != wtxid) is received before child")
+ self.test_basic_parent_then_child(self.wallet)
+
+ self.log.info("Check opportunistic 1p1c logic when parent (txid == wtxid) is received before child")
+ self.test_basic_parent_then_child(self.wallet_nonsegwit)
+
+ self.log.info("Check opportunistic 1p1c logic when child is received before parent")
+ self.test_basic_child_then_parent()
+
+ self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid != wtxid)")
+ self.test_low_and_high_child(self.wallet)
+
+ self.log.info("Check opportunistic 1p1c logic when 2 candidate children exist (parent txid == wtxid)")
+ self.test_low_and_high_child(self.wallet_nonsegwit)
+
+ self.test_orphan_consensus_failure()
+ self.test_parent_consensus_failure()
+ self.test_multiple_parents()
+ self.test_other_parent_in_mempool()
+
+
+if __name__ == '__main__':
+ PackageRelayTest().main()
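
The run_test comment above explains why each subtest uses a distinct nSequence: a transaction's txid commits to every input's nSequence, so otherwise-identical transactions built from the same UTXO at the same feerate would collide. A minimal sketch of that property using the framework's message classes (the outpoint and output values are arbitrary):

from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut

def txid_with_sequence(sequence):
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(1, 0), b"", sequence))
    tx.vout.append(CTxOut(100_000, bytes([0x51])))  # OP_TRUE output
    return tx.rehash()

# Changing only nSequence yields a different txid.
assert txid_with_sequence(0xfffffffd) != txid_with_sequence(0xfffffffc)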
diff --git a/test/functional/p2p_outbound_eviction.py b/test/functional/p2p_outbound_eviction.py
new file mode 100755
index 0000000000..8d89688999
--- /dev/null
+++ b/test/functional/p2p_outbound_eviction.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python3
+# Copyright (c) 2019-2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+""" Test node outbound peer eviction logic
+
+A subset of our outbound peers are subject to eviction logic if they cannot keep up
+with our view of the best chain. These criteria apply only to non-protected peers, and
+eviction can be triggered either by not learning about any blocks from an outbound peer
+within a certain deadline, or by the peer not being able to catch up fast enough (under the
+same deadline).
+
+This tests the different eviction paths based on the peers' behavior and on whether they are
+protected or not.
+"""
+import time
+
+from test_framework.messages import (
+ from_hex,
+ msg_headers,
+ CBlockHeader,
+)
+from test_framework.p2p import P2PInterface
+from test_framework.test_framework import BitcoinTestFramework
+
+# Timeouts (in seconds)
+CHAIN_SYNC_TIMEOUT = 20 * 60
+HEADERS_RESPONSE_TIME = 2 * 60
+
+
+class P2POutEvict(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def test_outbound_eviction_unprotected(self):
+ # This tests the eviction logic for **unprotected** outbound peers (that is, PeerManagerImpl::ConsiderEviction)
+ node = self.nodes[0]
+ cur_mock_time = node.mocktime
+
+ # Get our tip header and its parent
+ tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False))
+ prev_header = from_hex(CBlockHeader(), node.getblockheader(f"{tip_header.hashPrevBlock:064x}", False))
+
+ self.log.info("Create an outbound connection and don't send any headers")
+ # Test disconnect due to no block being announced in 22+ minutes (headers are not even exchanged)
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay")
+ # Wait for over 20 min to trigger the first eviction timeout. This sets the last call past 2 min in the future.
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+ # Wait for over 2 more min to trigger the disconnection
+ peer.wait_for_getheaders(block_hash=tip_header.hashPrevBlock)
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Test that the peer gets evicted")
+ peer.wait_for_disconnect()
+
+ self.log.info("Create an outbound connection and send header but never catch up")
+ # Mimic a node that just falls behind for long enough
+ # This should also apply for a node doing IBD that does not catch up in time
+ # Connect a peer and make it send us headers ending in our tip's parent
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay")
+ peer.send_and_ping(msg_headers([prev_header]))
+
+ # Trigger the timeouts
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+ peer.wait_for_getheaders(block_hash=tip_header.hashPrevBlock)
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Test that the peer gets evicted")
+ peer.wait_for_disconnect()
+
+ self.log.info("Create an outbound connection and keep lagging behind, but not too much")
+ # Test that if the peer never catches up with our current tip, but it does with the
+ # expected work that we set when setting the timer (that is, our tip at the time)
+ # we do not disconnect the peer
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay")
+
+ self.log.info("Mine a block so our peer starts lagging")
+ prev_prev_hash = tip_header.hashPrevBlock
+ best_block_hash = self.generateblock(node, output="raw(42)", transactions=[])["hash"]
+ peer.sync_with_ping()
+
+ self.log.info("Keep catching up with the old tip and check that we are not evicted")
+ for i in range(10):
+            # Generate an additional block so the peer is 2 blocks behind
+ prev_header = from_hex(CBlockHeader(), node.getblockheader(best_block_hash, False))
+ best_block_hash = self.generateblock(node, output="raw(42)", transactions=[])["hash"]
+ peer.sync_with_ping()
+
+ # Advance time but not enough to evict the peer
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+
+            # Wait until we get our last call (by receiving a getheaders)
+ peer.wait_for_getheaders(block_hash=prev_prev_hash)
+
+ # Send a header with the previous tip (so we go back to 1 block behind)
+ peer.send_and_ping(msg_headers([prev_header]))
+ prev_prev_hash = tip_header.hash
+
+ self.log.info("Create an outbound connection and take some time to catch up, but do it in time")
+ # Check that if the peer manages to catch up within time, the timeouts are removed (and the peer is not disconnected)
+ # We are reusing the peer from the previous case which already sent us a valid (but old) block and whose timer is ticking
+
+ # Send an updated headers message matching our tip
+ peer.send_and_ping(msg_headers([from_hex(CBlockHeader(), node.getblockheader(best_block_hash, False))]))
+
+ # Wait for long enough for the timeouts to have triggered and check that we are still connected
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Test that the peer does not get evicted")
+ peer.sync_with_ping()
+
+ node.disconnect_p2ps()
+
+ def test_outbound_eviction_protected(self):
+ # This tests the eviction logic for **protected** outbound peers (that is, PeerManagerImpl::ConsiderEviction)
+ # Outbound connections are flagged as protected as long as they have sent us a connecting block with at least as
+ # much work as our current tip and we have enough empty protected_peers slots.
+ node = self.nodes[0]
+ cur_mock_time = node.mocktime
+ tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False))
+
+ self.log.info("Create an outbound connection to a peer that shares our tip so it gets granted protection")
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="outbound-full-relay")
+ peer.send_and_ping(msg_headers([tip_header]))
+
+ self.log.info("Mine a new block and sync with our peer")
+ self.generateblock(node, output="raw(42)", transactions=[])
+ peer.sync_with_ping()
+
+ self.log.info("Let enough time pass for the timeouts to go off")
+        # Trigger the timeouts and check that we are still connected
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+ peer.wait_for_getheaders(block_hash=tip_header.hashPrevBlock)
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Test that the node does not get evicted")
+ peer.sync_with_ping()
+
+ node.disconnect_p2ps()
+
+ def test_outbound_eviction_mixed(self):
+ # This tests the outbound eviction logic for a mix of protected and unprotected peers.
+ node = self.nodes[0]
+ cur_mock_time = node.mocktime
+
+ self.log.info("Create a mix of protected and unprotected outbound connections to check against eviction")
+
+ # Let's try this logic having multiple peers, some protected and some unprotected
+ # We protect up to 4 peers as long as they have provided a block with the same amount of work as our tip
+ self.log.info("The first 4 peers are protected by sending us a valid block with enough work")
+ tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False))
+ headers_message = msg_headers([tip_header])
+ protected_peers = []
+ for i in range(4):
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay")
+ peer.send_and_ping(headers_message)
+ protected_peers.append(peer)
+
+ # We can create 4 additional outbound connections to peers that are unprotected. 2 of them will be well behaved,
+ # whereas the other 2 will misbehave (1 sending no headers, 1 sending old ones)
+ self.log.info("The remaining 4 peers will be mixed between honest (2) and misbehaving peers (2)")
+ prev_header = from_hex(CBlockHeader(), node.getblockheader(f"{tip_header.hashPrevBlock:064x}", False))
+ headers_message = msg_headers([prev_header])
+ honest_unprotected_peers = []
+ for i in range(2):
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=4+i, connection_type="outbound-full-relay")
+ peer.send_and_ping(headers_message)
+ honest_unprotected_peers.append(peer)
+
+ misbehaving_unprotected_peers = []
+ for i in range(2):
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=6+i, connection_type="outbound-full-relay")
+ if i%2==0:
+ peer.send_and_ping(headers_message)
+ misbehaving_unprotected_peers.append(peer)
+
+ self.log.info("Mine a new block and keep the unprotected honest peer on sync, all the rest off-sync")
+ # Mine a block so all peers become outdated
+ target_hash = prev_header.rehash()
+ tip_hash = self.generateblock(node, output="raw(42)", transactions=[])["hash"]
+ tip_header = from_hex(CBlockHeader(), node.getblockheader(tip_hash, False))
+ tip_headers_message = msg_headers([tip_header])
+
+ # Let the timeouts hit and check back
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ for peer in protected_peers + misbehaving_unprotected_peers:
+ peer.sync_with_ping()
+ peer.wait_for_getheaders(block_hash=target_hash)
+ for peer in honest_unprotected_peers:
+ peer.send_and_ping(tip_headers_message)
+ peer.wait_for_getheaders(block_hash=target_hash)
+
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Check how none of the honest nor protected peers was evicted but all the misbehaving unprotected were")
+ for peer in protected_peers + honest_unprotected_peers:
+ peer.sync_with_ping()
+ for peer in misbehaving_unprotected_peers:
+ peer.wait_for_disconnect()
+
+ node.disconnect_p2ps()
+
+ def test_outbound_eviction_blocks_relay_only(self):
+ # The logic for outbound eviction protection only applies to outbound-full-relay peers
+ # This tests that other types of peers (blocks-relay-only for instance) are not granted protection
+ node = self.nodes[0]
+ cur_mock_time = node.mocktime
+ tip_header = from_hex(CBlockHeader(), node.getblockheader(node.getbestblockhash(), False))
+
+ self.log.info("Create an blocks-only outbound connection to a peer that shares our tip. This would usually grant protection")
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="block-relay-only")
+ peer.send_and_ping(msg_headers([tip_header]))
+
+ self.log.info("Mine a new block and sync with our peer")
+ self.generateblock(node, output="raw(42)", transactions=[])
+ peer.sync_with_ping()
+
+ self.log.info("Let enough time pass for the timeouts to go off")
+        # Trigger the timeouts and check that the peer gets evicted, since protection is only given to outbound-full-relay peers
+ cur_mock_time += (CHAIN_SYNC_TIMEOUT + 1)
+ node.setmocktime(cur_mock_time)
+ peer.sync_with_ping()
+ peer.wait_for_getheaders(block_hash=tip_header.hash)
+ cur_mock_time += (HEADERS_RESPONSE_TIME + 1)
+ node.setmocktime(cur_mock_time)
+ self.log.info("Test that the peer gets evicted")
+ peer.wait_for_disconnect()
+
+ node.disconnect_p2ps()
+
+
+ def run_test(self):
+ self.nodes[0].setmocktime(int(time.time()))
+ self.test_outbound_eviction_unprotected()
+ self.test_outbound_eviction_protected()
+ self.test_outbound_eviction_mixed()
+ self.test_outbound_eviction_blocks_relay_only()
+
+
+if __name__ == '__main__':
+ P2POutEvict().main()
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 213c748eda..45bbd7f1c3 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -1054,7 +1054,7 @@ class SegWitTest(BitcoinTestFramework):
@subtest
def test_max_witness_push_length(self):
- """Test that witness stack can only allow up to 520 byte pushes."""
+ """Test that witness stack can only allow up to MAX_SCRIPT_ELEMENT_SIZE byte pushes."""
block = self.build_next_block()
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 7a50f1e605..0af6b1d2c9 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -8,6 +8,9 @@ Test transaction download behavior
from decimal import Decimal
import time
+from test_framework.mempool_util import (
+ fill_mempool,
+)
from test_framework.messages import (
CInv,
MSG_TX,
@@ -24,7 +27,6 @@ from test_framework.p2p import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- fill_mempool,
)
from test_framework.wallet import MiniWallet
@@ -248,7 +250,7 @@ class TxDownloadTest(BitcoinTestFramework):
def test_rejects_filter_reset(self):
self.log.info('Check that rejected tx is not requested again')
node = self.nodes[0]
- fill_mempool(self, node, self.wallet)
+ fill_mempool(self, node)
self.wallet.rescan_utxos()
mempoolminfee = node.getmempoolinfo()['mempoolminfee']
peer = node.add_p2p_connection(TestP2PConn())
diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py
index 37c42f2533..8ac0afdaaa 100755
--- a/test/functional/rpc_packages.py
+++ b/test/functional/rpc_packages.py
@@ -8,6 +8,9 @@ from decimal import Decimal
import random
from test_framework.blocktools import COINBASE_MATURITY
+from test_framework.mempool_util import (
+ fill_mempool,
+)
from test_framework.messages import (
MAX_BIP125_RBF_SEQUENCE,
tx_from_hex,
@@ -18,7 +21,6 @@ from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
- fill_mempool,
)
from test_framework.wallet import (
DEFAULT_FEE,
@@ -26,6 +28,9 @@ from test_framework.wallet import (
)
+MAX_PACKAGE_COUNT = 25
+
+
class RPCPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
@@ -344,6 +349,13 @@ class RPCPackagesTest(BitcoinTestFramework):
assert_raises_rpc_error(-25, "package topology disallowed", node.submitpackage, chain_hex)
assert_equal(legacy_pool, node.getrawmempool())
+ assert_raises_rpc_error(-8, f"Array must contain between 2 and {MAX_PACKAGE_COUNT} transactions.", node.submitpackage, [])
+ assert_raises_rpc_error(-8, f"Array must contain between 2 and {MAX_PACKAGE_COUNT} transactions.", node.submitpackage, [chain_hex[0]] * 1)
+ assert_raises_rpc_error(
+ -8, f"Array must contain between 2 and {MAX_PACKAGE_COUNT} transactions.",
+ node.submitpackage, [chain_hex[0]] * (MAX_PACKAGE_COUNT + 1)
+ )
+
# Create a transaction chain such as only the parent gets accepted (by making the child's
# version non-standard). Make sure the parent does get broadcast.
self.log.info("If a package is partially submitted, transactions included in mempool get broadcast")
@@ -388,7 +400,7 @@ class RPCPackagesTest(BitcoinTestFramework):
])
self.wallet.rescan_utxos()
- fill_mempool(self, node, self.wallet)
+ fill_mempool(self, node)
minrelay = node.getmempoolinfo()["minrelaytxfee"]
parent = self.wallet.create_self_transfer(
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index 12697e9d0c..3978c80dde 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -490,11 +490,11 @@ class RawTransactionsTest(BitcoinTestFramework):
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
- assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
+ assert_raises_rpc_error(-5, 'Pubkey "01020304" must have a length of either 33 or 65 bytes', self.nodes[0].createmultisig, 1, ["01020304"])
# createmultisig can only take public keys
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here
- assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
+ assert_raises_rpc_error(-5, f'Pubkey "{addr1}" must be a hex string', self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
diff --git a/test/functional/test-shell.md b/test/functional/test-shell.md
index b89b40f13d..4cd62c4ef3 100644
--- a/test/functional/test-shell.md
+++ b/test/functional/test-shell.md
@@ -123,11 +123,11 @@ We can also log custom events to the logger.
```
**Note: Please also consider the functional test
-[readme](../test/functional/README.md), which provides an overview of the
+[readme](/test/functional/README.md), which provides an overview of the
test-framework**. Modules such as
-[key.py](../test/functional/test_framework/key.py),
-[script.py](../test/functional/test_framework/script.py) and
-[messages.py](../test/functional/test_framework/messages.py) are particularly
+[key.py](/test/functional/test_framework/key.py),
+[script.py](/test/functional/test_framework/script.py) and
+[messages.py](/test/functional/test_framework/messages.py) are particularly
useful in constructing objects which can be passed to the bitcoind nodes managed
by a running `TestShell` object.
diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py
index 5b2e3289a9..bcb38b21cd 100644
--- a/test/functional/test_framework/address.py
+++ b/test/functional/test_framework/address.py
@@ -47,7 +47,7 @@ class AddressType(enum.Enum):
b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
-def create_deterministic_address_bcrt1_p2tr_op_true():
+def create_deterministic_address_bcrt1_p2tr_op_true(explicit_internal_key=None):
"""
Generates a deterministic bech32m address (segwit v1 output) that
can be spent with a witness stack of OP_TRUE and the control block
@@ -55,9 +55,10 @@ def create_deterministic_address_bcrt1_p2tr_op_true():
Returns a tuple with the generated address and the internal key.
"""
- internal_key = (1).to_bytes(32, 'big')
+ internal_key = explicit_internal_key or (1).to_bytes(32, 'big')
address = output_key_to_p2tr(taproot_construct(internal_key, [(None, CScript([OP_TRUE]))]).output_pubkey)
- assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka')
+ if explicit_internal_key is None:
+ assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka')
return (address, internal_key)
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index cfd923bab3..f0dc866f69 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -28,6 +28,7 @@ from .messages import (
ser_uint256,
tx_from_hex,
uint256_from_str,
+ WITNESS_SCALE_FACTOR,
)
from .script import (
CScript,
@@ -45,7 +46,6 @@ from .script_util import (
)
from .util import assert_equal
-WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR
diff --git a/test/functional/test_framework/mempool_util.py b/test/functional/test_framework/mempool_util.py
new file mode 100644
index 0000000000..148cc935ed
--- /dev/null
+++ b/test/functional/test_framework/mempool_util.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Helpful routines for mempool testing."""
+from decimal import Decimal
+
+from .blocktools import (
+ COINBASE_MATURITY,
+)
+from .util import (
+ assert_equal,
+ assert_greater_than,
+ create_lots_of_big_transactions,
+ gen_return_txouts,
+)
+from .wallet import (
+ MiniWallet,
+)
+
+
+def fill_mempool(test_framework, node):
+ """Fill mempool until eviction.
+
+ Allows for simpler testing of scenarios with floating mempoolminfee > minrelay
+ Requires -datacarriersize=100000 and
+ -maxmempool=5.
+ It will not ensure mempools become synced as it
+ is based on a single node and assumes -minrelaytxfee
+ is 1 sat/vbyte.
+ To avoid unintentional tx dependencies, the mempool filling txs are created with a
+ tagged ephemeral miniwallet instance.
+ """
+ test_framework.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises")
+ txouts = gen_return_txouts()
+ relayfee = node.getnetworkinfo()['relayfee']
+
+ assert_equal(relayfee, Decimal('0.00001000'))
+
+ tx_batch_size = 1
+ num_of_batches = 75
+ # Generate UTXOs to flood the mempool
+ # 1 to create a tx initially that will be evicted from the mempool later
+ # 75 transactions each with a fee rate higher than the previous one
+ ephemeral_miniwallet = MiniWallet(node, tag_name="fill_mempool_ephemeral_wallet")
+ test_framework.generate(ephemeral_miniwallet, 1 + num_of_batches * tx_batch_size)
+
+ # Mine enough blocks so that the UTXOs are allowed to be spent
+ test_framework.generate(node, COINBASE_MATURITY - 1)
+
+ # Get all UTXOs up front to ensure none of the transactions spend from each other, as that may
+ # change their effective feerate and thus the order in which they are selected for eviction.
+ confirmed_utxos = [ephemeral_miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)]
+ assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1)
+
+ test_framework.log.debug("Create a mempool tx that will be evicted")
+ tx_to_be_evicted_id = ephemeral_miniwallet.send_self_transfer(
+ from_node=node, utxo_to_spend=confirmed_utxos.pop(0), fee_rate=relayfee)["txid"]
+
+ # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool
+ # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB)
+ # by 130 should result in a fee that corresponds to 2x of that fee rate
+ base_fee = relayfee * 130
+
+ test_framework.log.debug("Fill up the mempool with txs with higher fee rate")
+ with node.assert_debug_log(["rolling minimum fee bumped"]):
+ for batch_of_txid in range(num_of_batches):
+ fee = (batch_of_txid + 1) * base_fee
+ utxos = confirmed_utxos[:tx_batch_size]
+ create_lots_of_big_transactions(ephemeral_miniwallet, node, fee, tx_batch_size, txouts, utxos)
+ del confirmed_utxos[:tx_batch_size]
+
+ test_framework.log.debug("The tx should be evicted by now")
+ # The number of transactions created should be greater than the ones present in the mempool
+ assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool()))
+ # Initial tx created should not be present in the mempool anymore as it had a lower fee rate
+ assert tx_to_be_evicted_id not in node.getrawmempool()
+
+ test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee")
+ assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
+ assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
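
A minimal sketch of how a functional test might call the relocated helper with its new signature (the test class name is hypothetical; the node options come from the docstring's requirements, and no MiniWallet needs to be passed in anymore):

    from test_framework.mempool_util import fill_mempool
    from test_framework.test_framework import BitcoinTestFramework


    class ExampleMempoolEvictionTest(BitcoinTestFramework):
        def set_test_params(self):
            self.num_nodes = 1
            # Required by fill_mempool, see its docstring.
            self.extra_args = [["-datacarriersize=100000", "-maxmempool=5"]]

        def run_test(self):
            node = self.nodes[0]
            fill_mempool(self, node)
            # After filling, the dynamic mempool minimum fee exceeds -minrelaytxfee.
            info = node.getmempoolinfo()
            assert info["mempoolminfee"] > info["minrelaytxfee"]
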
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 0de09b6440..c5b69a3954 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -496,65 +496,6 @@ def check_node_connections(*, node, num_in, num_out):
assert_equal(info["connections_in"], num_in)
assert_equal(info["connections_out"], num_out)
-def fill_mempool(test_framework, node, miniwallet):
- """Fill mempool until eviction.
-
- Allows for simpler testing of scenarios with floating mempoolminfee > minrelay
- Requires -datacarriersize=100000 and
- -maxmempool=5.
- It will not ensure mempools become synced as it
- is based on a single node and assumes -minrelaytxfee
- is 1 sat/vbyte.
- To avoid unintentional tx dependencies, it is recommended to use separate miniwallets for
- mempool filling vs transactions in tests.
- """
- test_framework.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises")
- txouts = gen_return_txouts()
- relayfee = node.getnetworkinfo()['relayfee']
-
- assert_equal(relayfee, Decimal('0.00001000'))
-
- tx_batch_size = 1
- num_of_batches = 75
- # Generate UTXOs to flood the mempool
- # 1 to create a tx initially that will be evicted from the mempool later
- # 75 transactions each with a fee rate higher than the previous one
- test_framework.generate(miniwallet, 1 + (num_of_batches * tx_batch_size))
-
- # Mine COINBASE_MATURITY - 1 blocks so that the UTXOs are allowed to be spent
- test_framework.generate(node, 100 - 1)
-
- # Get all UTXOs up front to ensure none of the transactions spend from each other, as that may
- # change their effective feerate and thus the order in which they are selected for eviction.
- confirmed_utxos = [miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)]
- assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1)
-
- test_framework.log.debug("Create a mempool tx that will be evicted")
- tx_to_be_evicted_id = miniwallet.send_self_transfer(from_node=node, utxo_to_spend=confirmed_utxos[0], fee_rate=relayfee)["txid"]
- del confirmed_utxos[0]
-
- # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool
- # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB)
- # by 130 should result in a fee that corresponds to 2x of that fee rate
- base_fee = relayfee * 130
-
- test_framework.log.debug("Fill up the mempool with txs with higher fee rate")
- with node.assert_debug_log(["rolling minimum fee bumped"]):
- for batch_of_txid in range(num_of_batches):
- fee = (batch_of_txid + 1) * base_fee
- utxos = confirmed_utxos[:tx_batch_size]
- create_lots_of_big_transactions(miniwallet, node, fee, tx_batch_size, txouts, utxos)
- del confirmed_utxos[:tx_batch_size]
-
- test_framework.log.debug("The tx should be evicted by now")
- # The number of transactions created should be greater than the ones present in the mempool
- assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool()))
- # Initial tx created should not be present in the mempool anymore as it had a lower fee rate
- assert tx_to_be_evicted_id not in node.getrawmempool()
-
- test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee")
- assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
- assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
# Transaction/Block functions
#############################
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index 470ed08ed4..4433cbcc55 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -32,6 +32,7 @@ from test_framework.messages import (
CTxIn,
CTxInWitness,
CTxOut,
+ hash256,
)
from test_framework.script import (
CScript,
@@ -65,7 +66,10 @@ class MiniWalletMode(Enum):
However, if the transactions need to be modified by the user (e.g. prepending
scriptSig for testing opcodes that are activated by a soft-fork), or the txs
should contain an actual signature, the raw modes RAW_OP_TRUE and RAW_P2PK
- can be useful. Summary of modes:
+ can be useful. To avoid mixing UTXOs between different MiniWallet
+ instances, a tag name can be passed in the default mode to create distinct
+ output scripts. Note that the UTXOs from the pre-generated test chain can
+ only be spent if no tag is passed. Summary of modes:
| output | | tx is | can modify | needs
mode | description | address | standard | scriptSig | signing
@@ -80,22 +84,25 @@ class MiniWalletMode(Enum):
class MiniWallet:
- def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE):
+ def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE, tag_name=None):
self._test_node = test_node
self._utxos = []
self._mode = mode
assert isinstance(mode, MiniWalletMode)
if mode == MiniWalletMode.RAW_OP_TRUE:
+ assert tag_name is None
self._scriptPubKey = bytes(CScript([OP_TRUE]))
elif mode == MiniWalletMode.RAW_P2PK:
# use simple deterministic private key (k=1)
+ assert tag_name is None
self._priv_key = ECKey()
self._priv_key.set((1).to_bytes(32, 'big'), True)
pub_key = self._priv_key.get_pubkey()
self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes())
elif mode == MiniWalletMode.ADDRESS_OP_TRUE:
- self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true()
+ internal_key = None if tag_name is None else hash256(tag_name.encode())
+ self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true(internal_key)
self._scriptPubKey = address_to_scriptpubkey(self._address)
# When the pre-mined test framework chain is used, it contains coinbase
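
A short usage sketch of the new tag_name parameter (assumes `node` is a running TestNode; the tag shown is the one the new fill_mempool helper uses):

    from test_framework.wallet import MiniWallet

    # Untagged wallet: default OP_TRUE taproot script, can also spend the
    # pre-generated test chain's outputs.
    default_wallet = MiniWallet(node)

    # Tagged wallet: the tag is hashed into a different internal key and thus a
    # different output script, so its UTXOs never mix with default_wallet's.
    tagged_wallet = MiniWallet(node, tag_name="fill_mempool_ephemeral_wallet")
    assert default_wallet._scriptPubKey != tagged_wallet._scriptPubKey
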
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index f6eccab2b3..690ab64c83 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -29,6 +29,13 @@ import logging
os.environ["REQUIRE_WALLET_TYPE_SET"] = "1"
+# Minimum amount of space to run the tests.
+MIN_FREE_SPACE = 1.1 * 1024 * 1024 * 1024
+# Additional space to run an extra job.
+ADDITIONAL_SPACE_PER_JOB = 100 * 1024 * 1024
+# Minimum amount of space required for --nocleanup
+MIN_NO_CLEANUP_SPACE = 12 * 1024 * 1024 * 1024
+
# Formatting. Default colors to empty strings.
DEFAULT, BOLD, GREEN, RED = ("", ""), ("", ""), ("", ""), ("", "")
try:
@@ -183,6 +190,8 @@ BASE_SCRIPTS = [
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'rpc_misc.py',
+ 'p2p_1p1c_network.py',
+ 'p2p_opportunistic_1p1c.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'wallet_avoid_mixing_output_types.py --descriptors',
@@ -276,6 +285,7 @@ BASE_SCRIPTS = [
'p2p_leak_tx.py --v1transport',
'p2p_leak_tx.py --v2transport',
'p2p_eviction.py',
+ 'p2p_outbound_eviction.py',
'p2p_ibd_stalling.py --v1transport',
'p2p_ibd_stalling.py --v2transport',
'p2p_net_deadlock.py --v1transport',
@@ -424,6 +434,8 @@ def main():
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', '-F', action='store_true', help='stop execution after the first test failure')
parser.add_argument('--filter', help='filter scripts to run by regular expression')
+ parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
+ help="Leave bitcoinds and test.* datadir on exit or error")
args, unknown_args = parser.parse_known_args()
@@ -518,6 +530,13 @@ def main():
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
+ # Warn if there is not enough space on tmpdir to run the tests with --nocleanup
+ if args.nocleanup:
+ if shutil.disk_usage(tmpdir).free < MIN_NO_CLEANUP_SPACE:
+ print(f"{BOLD[1]}WARNING!{BOLD[0]} There may be insufficient free space in {tmpdir} to run the functional test suite with --nocleanup. "
+ f"A minimum of {MIN_NO_CLEANUP_SPACE // (1024 * 1024 * 1024)} GB of free space is required.")
+ passon_args.append("--nocleanup")
+
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
@@ -554,6 +573,11 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
+ # Warn if there is not enough space on the testing dir
+ min_space = MIN_FREE_SPACE + (jobs - 1) * ADDITIONAL_SPACE_PER_JOB
+ if shutil.disk_usage(tmpdir).free < min_space:
+ print(f"{BOLD[1]}WARNING!{BOLD[0]} There may be insufficient free space in {tmpdir} to run the Bitcoin functional test suite. "
+ f"Running the test suite with fewer than {min_space // (1024 * 1024)} MB of free space might cause tests to fail.")
tests_dir = src_dir + '/test/functional/'
# This allows `test_runner.py` to work from an out-of-source build directory using a symlink,
@@ -623,6 +647,11 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
logging.debug("Early exiting after test failure")
break
+ if "[Errno 28] No space left on device" in stdout:
+ sys.exit(f"Early exiting after test failure due to insuffient free space in {tmpdir}\n"
+ f"Test execution data left in {tmpdir}.\n"
+ f"Additional storage is needed to execute testing.")
+
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
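
The constants added above feed a simple free-space heuristic. A sketch of the equivalent check (the helper name is illustrative; the real test_runner.py only prints warnings or exits on ENOSPC rather than refusing to start):

    import shutil

    MIN_FREE_SPACE = 1.1 * 1024 * 1024 * 1024       # base requirement, ~1.1 GB
    ADDITIONAL_SPACE_PER_JOB = 100 * 1024 * 1024    # ~100 MB per extra parallel job
    MIN_NO_CLEANUP_SPACE = 12 * 1024 * 1024 * 1024  # ~12 GB when datadirs are kept

    def likely_enough_space(tmpdir, jobs, nocleanup=False):
        """Return True if tmpdir probably has enough room for the test run."""
        needed = MIN_FREE_SPACE + (jobs - 1) * ADDITIONAL_SPACE_PER_JOB
        if nocleanup:
            needed = max(needed, MIN_NO_CLEANUP_SPACE)
        return shutil.disk_usage(tmpdir).free >= needed
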
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index 56228d2bad..1b2b8ec1f3 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -461,10 +461,13 @@ class WalletTest(BitcoinTestFramework):
assert_raises_rpc_error(-5, "Invalid Bitcoin address or script", self.nodes[0].importaddress, "invalid")
# This will raise an exception for attempting to import a pubkey that isn't in hex
- assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex")
+ assert_raises_rpc_error(-5, 'Pubkey "not hex" must be a hex string', self.nodes[0].importpubkey, "not hex")
- # This will raise an exception for importing an invalid pubkey
- assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f")
+ # This will raise exceptions for importing pubkeys with an invalid length / invalid coordinates
+ too_short_pubkey = "5361746f736869204e616b616d6f746f"
+ assert_raises_rpc_error(-5, f'Pubkey "{too_short_pubkey}" must have a length of either 33 or 65 bytes', self.nodes[0].importpubkey, too_short_pubkey)
+ not_on_curve_pubkey = bytes([4] + [0]*64).hex() # pubkey with coordinates (0,0) is not on curve
+ assert_raises_rpc_error(-5, f'Pubkey "{not_on_curve_pubkey}" must be cryptographically valid', self.nodes[0].importpubkey, not_on_curve_pubkey)
# Bech32m addresses cannot be imported into a legacy wallet
assert_raises_rpc_error(-5, "Bech32m addresses cannot be imported into legacy wallets", self.nodes[0].importaddress, "bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6")
diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py
index ff4648e638..71c883f166 100755
--- a/test/functional/wallet_fundrawtransaction.py
+++ b/test/functional/wallet_fundrawtransaction.py
@@ -1054,8 +1054,8 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_raises_rpc_error(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"]), wallet.fundrawtransaction, raw_tx)
# Error conditions
- assert_raises_rpc_error(-5, "'not a pubkey' is not hex", wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["not a pubkey"]})
- assert_raises_rpc_error(-5, "'01234567890a0b0c0d0e0f' is not a valid public key", wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["01234567890a0b0c0d0e0f"]})
+ assert_raises_rpc_error(-5, 'Pubkey "not a pubkey" must be a hex string', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["not a pubkey"]})
+ assert_raises_rpc_error(-5, 'Pubkey "01234567890a0b0c0d0e0f" must have a length of either 33 or 65 bytes', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["01234567890a0b0c0d0e0f"]})
assert_raises_rpc_error(-5, "'not a script' is not hex", wallet.fundrawtransaction, raw_tx, solving_data={"scripts":["not a script"]})
assert_raises_rpc_error(-8, "Unable to parse descriptor 'not a descriptor'", wallet.fundrawtransaction, raw_tx, solving_data={"descriptors":["not a descriptor"]})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"]}])
diff --git a/test/lint/README.md b/test/lint/README.md
index 9d167bac72..13c2099808 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -30,13 +30,13 @@ Then you can use:
| Lint test | Dependency |
|-----------|:----------:|
-| [`lint-python.py`](lint/lint-python.py) | [flake8](https://gitlab.com/pycqa/flake8)
-| [`lint-python.py`](lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF)
-| [`lint-python.py`](lint/lint-python.py) | [mypy](https://github.com/python/mypy)
-| [`lint-python.py`](lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq)
-| [`lint-python-dead-code.py`](lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture)
-| [`lint-shell.py`](lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck)
-| [`lint-spelling.py`](lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell)
+| [`lint-python.py`](/test/lint/lint-python.py) | [flake8](https://github.com/PyCQA/flake8)
+| [`lint-python.py`](/test/lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF)
+| [`lint-python.py`](/test/lint/lint-python.py) | [mypy](https://github.com/python/mypy)
+| [`lint-python.py`](/test/lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq)
+| [`lint-python-dead-code.py`](/test/lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture)
+| [`lint-shell.py`](/test/lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck)
+| [`lint-spelling.py`](/test/lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell)
In use versions and install instructions are available in the [CI setup](../../ci/lint/04_install.sh).
diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs
index e22e047e4b..d5dd98effe 100644
--- a/test/lint/test_runner/src/main.rs
+++ b/test/lint/test_runner/src/main.rs
@@ -137,9 +137,9 @@ fn lint_trailing_whitespace() -> LintResult {
if trailing_space {
Err(r#"
^^^
-Trailing whitespace is problematic, because git may warn about it, or editors may remove it by
-default, forcing developers in the future to either undo the changes manually or spend time on
-review.
+Trailing whitespace (including Windows line endings [CR LF]) is problematic, because git may warn
+about it, or editors may remove it by default, forcing developers in the future to either undo the
+changes manually or spend time on review.
Thus, it is best to remove the trailing space now.
@@ -178,7 +178,6 @@ Please add any false positives, such as subtrees, or externally sourced files to
fn lint_includes_build_config() -> LintResult {
let config_path = "./src/config/bitcoin-config.h.in";
- let include_directive = "#include <config/bitcoin-config.h>";
if !Path::new(config_path).is_file() {
assert!(Command::new("./autogen.sh")
.status()
@@ -235,7 +234,11 @@ fn lint_includes_build_config() -> LintResult {
} else {
"--files-with-matches"
},
- include_directive,
+ if mode {
+ "^#include <config/bitcoin-config.h> // IWYU pragma: keep$"
+ } else {
+ "#include <config/bitcoin-config.h>" // Catch redundant includes with and without the IWYU pragma
+ },
"--",
])
.args(defines_files.lines())
@@ -256,6 +259,11 @@ even though bitcoin-config.h indicates that a faster feature is available and sh
If you are unsure which symbol is used, you can find it with this command:
git grep --perl-regexp '{}' -- file_name
+
+Make sure to include it with the IWYU pragma. Otherwise, IWYU may incorrectly suggest removing the
+include again.
+
+#include <config/bitcoin-config.h> // IWYU pragma: keep
"#,
defines_regex
));
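
For clarity, a small Python sketch of how the two grep patterns above differ: the stricter pattern only matches the include when it carries the IWYU pragma, while the looser one also matches it without. The patterns are taken from the diff; the lint itself shells out to git grep from Rust, and the sample lines here are illustrative.

    import re

    strict = r"^#include <config/bitcoin-config.h> // IWYU pragma: keep$"
    loose = "#include <config/bitcoin-config.h>"

    with_pragma = "#include <config/bitcoin-config.h> // IWYU pragma: keep"
    without_pragma = "#include <config/bitcoin-config.h>"

    assert re.search(strict, with_pragma) and not re.search(strict, without_pragma)
    assert loose in with_pragma and loose in without_pragma
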