Diffstat (limited to 'test')
-rwxr-xr-x | test/functional/feature_assumeutxo.py | 246
-rwxr-xr-x | test/functional/p2p_v2_transport.py | 127
-rwxr-xr-x | test/functional/rpc_net.py | 2
-rwxr-xr-x | test/functional/test_framework/messages.py | 1
-rwxr-xr-x | test/functional/test_framework/test_framework.py | 33
-rwxr-xr-x | test/functional/test_runner.py | 12
-rwxr-xr-x | test/lint/lint-assertions.py | 12
-rwxr-xr-x | test/lint/lint-shell.py | 8
8 files changed, 421 insertions, 20 deletions
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
new file mode 100755
index 0000000000..be1aa18993
--- /dev/null
+++ b/test/functional/feature_assumeutxo.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test for assumeutxo, a means of quickly bootstrapping a node using
+a serialized version of the UTXO set at a certain height, which corresponds
+to a hash that has been compiled into bitcoind.
+
+The assumeutxo value generated and used here is committed to in
+`CRegTestParams::m_assumeutxo_data` in `src/chainparams.cpp`.
+
+## Possible test improvements
+
+- TODO: test submitting a transaction and verifying it appears in mempool
+- TODO: test what happens with -reindex and -reindex-chainstate before the
+  snapshot is validated, and make sure it's deleted successfully.
+
+Interesting test cases could be loading an assumeutxo snapshot file with:
+
+- TODO: An invalid hash
+- TODO: Valid hash but invalid snapshot file (bad coin height or truncated file or
+  bad other serialization)
+- TODO: Valid snapshot file, but referencing an unknown block
+- TODO: Valid snapshot file, but referencing a snapshot block that turns out to be
+  invalid, or has an invalid parent
+- TODO: Valid snapshot file and snapshot block, but the block is not on the
+  most-work chain
+
+Interesting starting states could be loading a snapshot when the current chain tip is:
+
+- TODO: An ancestor of snapshot block
+- TODO: Not an ancestor of the snapshot block but has less work
+- TODO: The snapshot block
+- TODO: A descendant of the snapshot block
+- TODO: Not an ancestor or a descendant of the snapshot block and has more work
+
+"""
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal, wait_until_helper
+
+START_HEIGHT = 199
+SNAPSHOT_BASE_HEIGHT = 299
+FINAL_HEIGHT = 399
+COMPLETE_IDX = {'synced': True, 'best_block_height': FINAL_HEIGHT}
+
+
+class AssumeutxoTest(BitcoinTestFramework):
+
+    def set_test_params(self):
+        """Use the pregenerated, deterministic chain up to height 199."""
+        self.num_nodes = 3
+        self.rpc_timeout = 120
+        self.extra_args = [
+            [],
+            ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"],
+            ["-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"],
+        ]
+
+    def setup_network(self):
+        """Start with the nodes disconnected so that one can generate a snapshot
+        including blocks the other hasn't yet seen."""
+        self.add_nodes(3)
+        self.start_nodes(extra_args=self.extra_args)
+
+    def run_test(self):
+        """
+        Bring up two (disconnected) nodes, mine some new blocks on the first,
+        and generate a UTXO snapshot.
+
+        Load the snapshot into the second, ensure it syncs to tip and completes
+        background validation when connected to the first.
+        """
+        n0 = self.nodes[0]
+        n1 = self.nodes[1]
+        n2 = self.nodes[2]
+
+        # Mock time for a deterministic chain
+        for n in self.nodes:
+            n.setmocktime(n.getblockheader(n.getbestblockhash())['time'])
+
+        self.sync_blocks()
+
+        def no_sync():
+            pass
+
+        # Generate a series of blocks that `n0` will have in the snapshot,
+        # but that n1 doesn't yet see. In order for the snapshot to activate,
+        # though, we have to ferry over the new headers to n1 so that it
+        # isn't waiting forever to see the header of the snapshot's base block
+        # while disconnected from n0.
+        for i in range(100):
+            self.generate(n0, nblocks=1, sync_fun=no_sync)
+            newblock = n0.getblock(n0.getbestblockhash(), 0)
+
+            # make n1 aware of the new header, but don't give it the block.
+            n1.submitheader(newblock)
+            n2.submitheader(newblock)
+
+        # Ensure everyone is seeing the same headers.
+        for n in self.nodes:
+            assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT)
+
+        self.log.info("-- Testing assumeutxo + some indexes + pruning")
+
+        assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT)
+        assert_equal(n1.getblockcount(), START_HEIGHT)
+
+        self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}")
+        dump_output = n0.dumptxoutset('utxos.dat')
+
+        assert_equal(
+            dump_output['txoutset_hash'],
+            'ef45ccdca5898b6c2145e4581d2b88c56564dd389e4bd75a1aaf6961d3edd3c0')
+        assert_equal(dump_output['nchaintx'], 300)
+        assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
+
+        # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This
+        # will allow us to test n1's sync-to-tip on top of a snapshot.
+        self.generate(n0, nblocks=100, sync_fun=no_sync)
+
+        assert_equal(n0.getblockcount(), FINAL_HEIGHT)
+        assert_equal(n1.getblockcount(), START_HEIGHT)
+
+        assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+
+        self.log.info(f"Loading snapshot into second node from {dump_output['path']}")
+        loaded = n1.loadtxoutset(dump_output['path'])
+        assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+
+        monitor = n1.getchainstates()
+        assert_equal(monitor['normal']['blocks'], START_HEIGHT)
+        assert_equal(monitor['snapshot']['blocks'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(monitor['snapshot']['snapshot_blockhash'], dump_output['base_hash'])
+
+        assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
+
+        PAUSE_HEIGHT = FINAL_HEIGHT - 40
+
+        self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT)
+        self.restart_node(1, extra_args=[
+            f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]])
+
+        # Finally connect the nodes and let them sync.
+        self.connect_nodes(0, 1)
+
+        n1.wait_until_stopped(timeout=5)
+
+        self.log.info("Checking that blocks are segmented on disk")
+        assert self.has_blockfile(n1, "00000"), "normal blockfile missing"
+        assert self.has_blockfile(n1, "00001"), "assumed blockfile missing"
+        assert not self.has_blockfile(n1, "00002"), "too many blockfiles"
+
+        self.log.info("Restarted node before snapshot validation completed, reloading...")
+        self.restart_node(1, extra_args=self.extra_args[1])
+        self.connect_nodes(0, 1)
+
+        self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})")
+        wait_until_helper(lambda: n1.getchainstates()['snapshot']['blocks'] == FINAL_HEIGHT)
+        self.sync_blocks(nodes=(n0, n1))
+
+        self.log.info("Ensuring background validation completes")
+        # N.B.: the `snapshot` key disappears once the background validation is complete.
+        wait_until_helper(lambda: not n1.getchainstates().get('snapshot'))
+
+        # Ensure indexes have synced.
+        completed_idx_state = {
+            'basic block filter index': COMPLETE_IDX,
+            'coinstatsindex': COMPLETE_IDX,
+        }
+        self.wait_until(lambda: n1.getindexinfo() == completed_idx_state)
+
+
+        for i in (0, 1):
+            n = self.nodes[i]
+            self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")
+            self.restart_node(i, extra_args=self.extra_args[i])
+
+            assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+
+            assert_equal(n.getchainstates()['normal']['blocks'], FINAL_HEIGHT)
+            assert_equal(n.getchainstates().get('snapshot'), None)
+
+            if i != 0:
+                # Ensure indexes have synced for the assumeutxo node
+                self.wait_until(lambda: n.getindexinfo() == completed_idx_state)
+
+
+        # Node 2: all indexes + reindex
+        # -----------------------------
+
+        self.log.info("-- Testing all indexes + reindex")
+        assert_equal(n2.getblockcount(), START_HEIGHT)
+
+        self.log.info(f"Loading snapshot into third node from {dump_output['path']}")
+        loaded = n2.loadtxoutset(dump_output['path'])
+        assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+
+        monitor = n2.getchainstates()
+        assert_equal(monitor['normal']['blocks'], START_HEIGHT)
+        assert_equal(monitor['snapshot']['blocks'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(monitor['snapshot']['snapshot_blockhash'], dump_output['base_hash'])
+
+        self.connect_nodes(0, 2)
+        wait_until_helper(lambda: n2.getchainstates()['snapshot']['blocks'] == FINAL_HEIGHT)
+        self.sync_blocks()
+
+        self.log.info("Ensuring background validation completes")
+        wait_until_helper(lambda: not n2.getchainstates().get('snapshot'))
+
+        completed_idx_state = {
+            'basic block filter index': COMPLETE_IDX,
+            'coinstatsindex': COMPLETE_IDX,
+            'txindex': COMPLETE_IDX,
+        }
+        self.wait_until(lambda: n2.getindexinfo() == completed_idx_state)
+
+        for i in (0, 2):
+            n = self.nodes[i]
+            self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")
+            self.restart_node(i, extra_args=self.extra_args[i])
+
+            assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+
+            assert_equal(n.getchainstates()['normal']['blocks'], FINAL_HEIGHT)
+            assert_equal(n.getchainstates().get('snapshot'), None)
+
+            if i != 0:
+                # Ensure indexes have synced for the assumeutxo node
+                self.wait_until(lambda: n.getindexinfo() == completed_idx_state)
+
+        self.log.info("Test -reindex-chainstate of an assumeutxo-synced node")
+        self.restart_node(2, extra_args=[
+            '-reindex-chainstate=1', *self.extra_args[2]])
+        assert_equal(n2.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+        wait_until_helper(lambda: n2.getblockcount() == FINAL_HEIGHT)
+
+        self.log.info("Test -reindex of an assumeutxo-synced node")
+        self.restart_node(2, extra_args=['-reindex=1', *self.extra_args[2]])
+        self.connect_nodes(0, 2)
+        wait_until_helper(lambda: n2.getblockcount() == FINAL_HEIGHT)
+
+
+if __name__ == '__main__':
+    AssumeutxoTest().main()
diff --git a/test/functional/p2p_v2_transport.py b/test/functional/p2p_v2_transport.py
new file mode 100755
index 0000000000..2455bf2e2d
--- /dev/null
+++ b/test/functional/p2p_v2_transport.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test v2 transport
+"""
+
+from test_framework.messages import NODE_P2P_V2
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+class V2TransportTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 5
+        self.extra_args = [["-v2transport=1"], ["-v2transport=1"], ["-v2transport=0"], ["-v2transport=0"], ["-v2transport=0"]]
+
+    def run_test(self):
+        sending_handshake = "start sending v2 handshake to peer"
+        downgrading_to_v1 = "retrying with v1 transport protocol for peer"
+        self.disconnect_nodes(0, 1)
+        self.disconnect_nodes(1, 2)
+        self.disconnect_nodes(2, 3)
+        self.disconnect_nodes(3, 4)
+
+        # verify local services
+        network_info = self.nodes[2].getnetworkinfo()
+        assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, 0)
+        assert "P2P_V2" not in network_info["localservicesnames"]
+        network_info = self.nodes[1].getnetworkinfo()
+        assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, NODE_P2P_V2)
+        assert "P2P_V2" in network_info["localservicesnames"]
+
+        # V2 nodes can sync with V2 nodes
+        assert_equal(self.nodes[0].getblockcount(), 0)
+        assert_equal(self.nodes[1].getblockcount(), 0)
+        with self.nodes[0].assert_debug_log(expected_msgs=[sending_handshake],
+                                            unexpected_msgs=[downgrading_to_v1]):
+            self.connect_nodes(0, 1, peer_advertises_v2=True)
+        self.generate(self.nodes[0], 5, sync_fun=lambda: self.sync_all(self.nodes[0:2]))
+        assert_equal(self.nodes[1].getblockcount(), 5)
+        # verify there is a v2 connection between node 0 and 1
+        node_0_info = self.nodes[0].getpeerinfo()
+        node_1_info = self.nodes[1].getpeerinfo()
+        assert_equal(len(node_0_info), 1)
+        assert_equal(len(node_1_info), 1)
+        assert_equal(node_0_info[0]["transport_protocol_type"], "v2")
+        assert_equal(node_1_info[0]["transport_protocol_type"], "v2")
+        assert_equal(len(node_0_info[0]["session_id"]), 64)
+        assert_equal(len(node_1_info[0]["session_id"]), 64)
+        assert_equal(node_0_info[0]["session_id"], node_1_info[0]["session_id"])
+
+        # V1 nodes can sync with each other
+        assert_equal(self.nodes[2].getblockcount(), 0)
+        assert_equal(self.nodes[3].getblockcount(), 0)
+        with self.nodes[2].assert_debug_log(expected_msgs=[],
+                                            unexpected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(2, 3, peer_advertises_v2=False)
+        self.generate(self.nodes[2], 8, sync_fun=lambda: self.sync_all(self.nodes[2:4]))
+        assert_equal(self.nodes[3].getblockcount(), 8)
+        assert self.nodes[0].getbestblockhash() != self.nodes[2].getbestblockhash()
+        # verify there is a v1 connection between node 2 and 3
+        node_2_info = self.nodes[2].getpeerinfo()
+        node_3_info = self.nodes[3].getpeerinfo()
+        assert_equal(len(node_2_info), 1)
+        assert_equal(len(node_3_info), 1)
+        assert_equal(node_2_info[0]["transport_protocol_type"], "v1")
+        assert_equal(node_3_info[0]["transport_protocol_type"], "v1")
+        assert_equal(len(node_2_info[0]["session_id"]), 0)
+        assert_equal(len(node_3_info[0]["session_id"]), 0)
+
+        # V1 nodes can sync with V2 nodes
+        self.disconnect_nodes(0, 1)
+        self.disconnect_nodes(2, 3)
+        with self.nodes[2].assert_debug_log(expected_msgs=[],
+                                            unexpected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(2, 1, peer_advertises_v2=False)  # cannot enable v2 on v1 node
+        self.sync_all(self.nodes[1:3])
+        assert_equal(self.nodes[1].getblockcount(), 8)
+        assert self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()
+        # verify there is a v1 connection between node 1 and 2
+        node_1_info = self.nodes[1].getpeerinfo()
+        node_2_info = self.nodes[2].getpeerinfo()
+        assert_equal(len(node_1_info), 1)
+        assert_equal(len(node_2_info), 1)
+        assert_equal(node_1_info[0]["transport_protocol_type"], "v1")
+        assert_equal(node_2_info[0]["transport_protocol_type"], "v1")
+        assert_equal(len(node_1_info[0]["session_id"]), 0)
+        assert_equal(len(node_2_info[0]["session_id"]), 0)
+
+        # V2 nodes can sync with V1 nodes
+        self.disconnect_nodes(1, 2)
+        with self.nodes[0].assert_debug_log(expected_msgs=[],
+                                            unexpected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(0, 3, peer_advertises_v2=False)
+        self.sync_all([self.nodes[0], self.nodes[3]])
+        assert_equal(self.nodes[0].getblockcount(), 8)
+        # verify there is a v1 connection between node 0 and 3
+        node_0_info = self.nodes[0].getpeerinfo()
+        node_3_info = self.nodes[3].getpeerinfo()
+        assert_equal(len(node_0_info), 1)
+        assert_equal(len(node_3_info), 1)
+        assert_equal(node_0_info[0]["transport_protocol_type"], "v1")
+        assert_equal(node_3_info[0]["transport_protocol_type"], "v1")
+        assert_equal(len(node_0_info[0]["session_id"]), 0)
+        assert_equal(len(node_3_info[0]["session_id"]), 0)
+
+        # V2 node mines another block and everyone gets it
+        self.connect_nodes(0, 1, peer_advertises_v2=True)
+        self.connect_nodes(1, 2, peer_advertises_v2=False)
+        self.generate(self.nodes[1], 1, sync_fun=lambda: self.sync_all(self.nodes[0:4]))
+        assert_equal(self.nodes[0].getblockcount(), 9)  # sync_all() verifies tip hashes match
+
+        # V1 node mines another block and everyone gets it
+        self.generate(self.nodes[3], 2, sync_fun=lambda: self.sync_all(self.nodes[0:4]))
+        assert_equal(self.nodes[2].getblockcount(), 11)  # sync_all() verifies tip hashes match
+
+        assert_equal(self.nodes[4].getblockcount(), 0)
+        # Peer 4 is v1 p2p, but is falsely advertised as v2.
+        with self.nodes[1].assert_debug_log(expected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(1, 4, peer_advertises_v2=True)
+        self.sync_all()
+        assert_equal(self.nodes[4].getblockcount(), 11)
+
+if __name__ == '__main__':
+    V2TransportTest().main()
diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py
index da68066fc5..2c7f974d0b 100755
--- a/test/functional/rpc_net.py
+++ b/test/functional/rpc_net.py
@@ -144,11 +144,13 @@ class NetTest(BitcoinTestFramework):
                 "relaytxes": False,
                 "services": "0000000000000000",
                 "servicesnames": [],
+                "session_id": "",
                 "startingheight": -1,
                 "subver": "",
                 "synced_blocks": -1,
                 "synced_headers": -1,
                 "timeoffset": 0,
+                "transport_protocol_type": "v1",
                 "version": 0,
             },
         )
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 4d635556f4..8f3aea8785 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -52,6 +52,7 @@ NODE_BLOOM = (1 << 2)
 NODE_WITNESS = (1 << 3)
 NODE_COMPACT_FILTERS = (1 << 6)
 NODE_NETWORK_LIMITED = (1 << 10)
+NODE_P2P_V2 = (1 << 11)
 
 MSG_TX = 1
 MSG_BLOCK = 2
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 73e7516ea7..ab7fed335c 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -189,6 +189,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         parser.add_argument("--randomseed", type=int,
                             help="set a random seed for deterministically reproducing a previous test run")
         parser.add_argument("--timeout-factor", dest="timeout_factor", type=float, help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts")
+        parser.add_argument("--v2transport", dest="v2transport", default=False, action="store_true",
+                            help="use BIP324 v2 connections between all nodes by default")
         self.add_options(parser)
 
         # Running TestShell in a Jupyter notebook causes an additional -f argument
@@ -504,6 +506,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         assert_equal(len(binary), num_nodes)
         assert_equal(len(binary_cli), num_nodes)
         for i in range(num_nodes):
+            args = list(extra_args[i])
+            if self.options.v2transport and ("-v2transport=0" not in args):
+                args.append("-v2transport=1")
             test_node_i = TestNode(
                 i,
                 get_datadir_path(self.options.tmpdir, i),
@@ -517,7 +522,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
                 coverage_dir=self.options.coveragedir,
                 cwd=self.options.tmpdir,
                 extra_conf=extra_confs[i],
-                extra_args=extra_args[i],
+                extra_args=args,
                 use_cli=self.options.usecli,
                 start_perf=self.options.perf,
                 use_valgrind=self.options.valgrind,
@@ -581,13 +586,23 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
     def wait_for_node_exit(self, i, timeout):
         self.nodes[i].process.wait(timeout)
 
-    def connect_nodes(self, a, b):
+    def connect_nodes(self, a, b, *, peer_advertises_v2=None):
         from_connection = self.nodes[a]
         to_connection = self.nodes[b]
         from_num_peers = 1 + len(from_connection.getpeerinfo())
         to_num_peers = 1 + len(to_connection.getpeerinfo())
         ip_port = "127.0.0.1:" + str(p2p_port(b))
-        from_connection.addnode(ip_port, "onetry")
+
+        if peer_advertises_v2 is None:
+            peer_advertises_v2 = self.options.v2transport
+
+        if peer_advertises_v2:
+            from_connection.addnode(node=ip_port, command="onetry", v2transport=True)
+        else:
+            # skip the optional third argument (default false) for
+            # compatibility with older clients
+            from_connection.addnode(ip_port, "onetry")
+
         # poll until version handshake complete to avoid race conditions
         # with transaction relaying
         # See comments in net_processing:
@@ -595,12 +610,12 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         # * Must have a verack message before anything else
         self.wait_until(lambda: sum(peer['version'] != 0 for peer in from_connection.getpeerinfo()) == from_num_peers)
         self.wait_until(lambda: sum(peer['version'] != 0 for peer in to_connection.getpeerinfo()) == to_num_peers)
-        self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()) == from_num_peers)
-        self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in to_connection.getpeerinfo()) == to_num_peers)
+        self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) >= 21 for peer in from_connection.getpeerinfo()) == from_num_peers)
+        self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) >= 21 for peer in to_connection.getpeerinfo()) == to_num_peers)
         # The message bytes are counted before processing the message, so make
         # sure it was fully processed by waiting for a ping.
-        self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 32 for peer in from_connection.getpeerinfo()) == from_num_peers)
-        self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 32 for peer in to_connection.getpeerinfo()) == to_num_peers)
+        self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in from_connection.getpeerinfo()) == from_num_peers)
+        self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in to_connection.getpeerinfo()) == to_num_peers)
 
     def disconnect_nodes(self, a, b):
         def disconnect_nodes_helper(node_a, node_b):
@@ -979,3 +994,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
     def is_bdb_compiled(self):
         """Checks whether the wallet module was compiled with BDB support."""
         return self.config["components"].getboolean("USE_BDB")
+
+    def has_blockfile(self, node, filenum: str):
+        blocksdir = os.path.join(node.datadir, self.chain, 'blocks', '')
+        return os.path.isfile(os.path.join(blocksdir, f"blk{filenum}.dat"))
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 32aee3aa80..933ea276e7 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -117,6 +117,7 @@ BASE_SCRIPTS = [
     'wallet_backup.py --descriptors',
     'feature_segwit.py --legacy-wallet',
     'feature_segwit.py --descriptors',
+    'feature_segwit.py --descriptors --v2transport',
     'p2p_tx_download.py',
     'wallet_avoidreuse.py --legacy-wallet',
     'wallet_avoidreuse.py --descriptors',
@@ -195,6 +196,7 @@ BASE_SCRIPTS = [
     'wallet_avoid_mixing_output_types.py --descriptors',
     'mempool_reorg.py',
     'p2p_block_sync.py',
+    'p2p_block_sync.py --v2transport',
     'wallet_createwallet.py --legacy-wallet',
     'wallet_createwallet.py --usecli',
     'wallet_createwallet.py --descriptors',
@@ -221,10 +223,13 @@ BASE_SCRIPTS = [
     'wallet_transactiontime_rescan.py --legacy-wallet',
     'p2p_addrv2_relay.py',
     'p2p_compactblocks_hb.py',
+    'p2p_compactblocks_hb.py --v2transport',
     'p2p_disconnect_ban.py',
+    'p2p_disconnect_ban.py --v2transport',
     'feature_posix_fs_permissions.py',
     'rpc_decodescript.py',
     'rpc_blockchain.py',
+    'rpc_blockchain.py --v2transport',
     'rpc_deprecated.py',
     'wallet_disable.py',
     'wallet_change_address.py --legacy-wallet',
@@ -245,7 +250,10 @@ BASE_SCRIPTS = [
     'mining_prioritisetransaction.py',
     'p2p_invalid_locator.py',
     'p2p_invalid_block.py',
+    'p2p_invalid_block.py --v2transport',
     'p2p_invalid_tx.py',
+    'p2p_invalid_tx.py --v2transport',
+    'p2p_v2_transport.py',
     'example_test.py',
     'wallet_txn_doublespend.py --legacy-wallet',
     'wallet_multisig_descriptor_psbt.py --descriptors',
@@ -267,9 +275,12 @@ BASE_SCRIPTS = [
     'wallet_importprunedfunds.py --legacy-wallet',
     'wallet_importprunedfunds.py --descriptors',
     'p2p_leak_tx.py',
+    'p2p_leak_tx.py --v2transport',
     'p2p_eviction.py',
     'p2p_ibd_stalling.py',
+    'p2p_ibd_stalling.py --v2transport',
     'p2p_net_deadlock.py',
+    'p2p_net_deadlock.py --v2transport',
     'wallet_signmessagewithaddress.py',
     'rpc_signmessagewithprivkey.py',
     'rpc_generate.py',
@@ -324,6 +335,7 @@ BASE_SCRIPTS = [
     'wallet_coinbase_category.py --descriptors',
     'feature_filelock.py',
     'feature_loadblock.py',
+    'feature_assumeutxo.py',
     'p2p_dos_header_tree.py',
     'p2p_add_connections.py',
     'feature_bind_port_discover.py',
diff --git a/test/lint/lint-assertions.py b/test/lint/lint-assertions.py
index 6da59b0d48..d9f86b22b8 100755
--- a/test/lint/lint-assertions.py
+++ b/test/lint/lint-assertions.py
@@ -23,20 +23,10 @@ def git_grep(params: [], error_msg: ""):
 
 
 def main():
-    # PRE31-C (SEI CERT C Coding Standard):
-    # "Assertions should not contain assignments, increment, or decrement operators."
-    exit_code = git_grep([
-        "-E",
-        r"[^_]assert\(.*(\+\+|\-\-|[^=!<>]=[^=!<>]).*\);",
-        "--",
-        "*.cpp",
-        "*.h",
-    ], "Assertions should not have side effects:")
-
     # Aborting the whole process is undesirable for RPC code. So nonfatal
     # checks should be used over assert. See: src/util/check.h
     # src/rpc/server.cpp is excluded from this check since it's mostly meta-code.
-    exit_code |= git_grep([
+    exit_code = git_grep([
         "-nE",
         r"\<(A|a)ss(ume|ert) *\(.*\);",
         "--",
diff --git a/test/lint/lint-shell.py b/test/lint/lint-shell.py
index 1646bf0d3e..db84ca3d39 100755
--- a/test/lint/lint-shell.py
+++ b/test/lint/lint-shell.py
@@ -67,9 +67,13 @@ def main():
         '*.sh',
     ]
     files = get_files(files_cmd)
-    # remove everything that doesn't match this regex
     reg = re.compile(r'src/[leveldb,secp256k1,minisketch]')
-    files[:] = [file for file in files if not reg.match(file)]
+
+    def should_exclude(fname: str) -> bool:
+        return bool(reg.match(fname)) or 'test_utxo_snapshots.sh' in fname
+
+    # remove everything that doesn't match this regex
+    files[:] = [file for file in files if not should_exclude(file)]
 
     # build the `shellcheck` command
     shellcheck_cmd = [
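For illustration only, and not part of the commit above: a minimal sketch of how a functional test could use the connect_nodes(..., peer_advertises_v2=...) helper and the -v2transport option introduced in this diff. The class name V2SmokeSketch is hypothetical; the getpeerinfo fields it checks (transport_protocol_type, session_id) are the ones added here.

#!/usr/bin/env python3
# Illustrative sketch, not part of the diff: open a single BIP324 (v2) connection
# between two nodes and inspect the new getpeerinfo fields. The class name is
# hypothetical; the helpers and fields come from the changes above.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class V2SmokeSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        # Both nodes opt in to the v2 transport so either side can accept it.
        self.extra_args = [["-v2transport=1"], ["-v2transport=1"]]

    def run_test(self):
        # Drop the default v1 connection made during setup, then reconnect
        # with the initiating peer advertising the v2 transport.
        self.disconnect_nodes(0, 1)
        self.connect_nodes(0, 1, peer_advertises_v2=True)

        peer = self.nodes[0].getpeerinfo()[0]
        assert_equal(peer["transport_protocol_type"], "v2")
        # v2 connections expose a 32-byte session id (64 hex characters).
        assert_equal(len(peer["session_id"]), 64)


if __name__ == '__main__':
    V2SmokeSketch().main()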