-rw-r--r--.cirrus.yml41
-rw-r--r--REVIEWERS17
-rw-r--r--build-aux/m4/ax_boost_base.m461
-rw-r--r--ci/README.md3
-rwxr-xr-xci/lint/04_install.sh39
-rwxr-xr-xci/lint/06_script.sh6
-rw-r--r--ci/lint/Dockerfile29
-rwxr-xr-xci/lint/docker-entrypoint.sh12
-rwxr-xr-xci/test/00_setup_env.sh7
-rwxr-xr-xci/test/00_setup_env_android.sh2
-rwxr-xr-xci/test/00_setup_env_i686_centos.sh1
-rwxr-xr-xci/test/00_setup_env_native_fuzz_with_msan.sh2
-rwxr-xr-xci/test/00_setup_env_native_msan.sh2
-rwxr-xr-xci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh1
-rwxr-xr-xci/test/00_setup_env_native_qt5.sh1
-rwxr-xr-xci/test/01_base_install.sh33
-rwxr-xr-xci/test/04_install.sh42
-rwxr-xr-xci/test/06_script_b.sh12
-rw-r--r--ci/test_imagefile10
-rw-r--r--configure.ac67
-rwxr-xr-xcontrib/devtools/security-check.py2
-rwxr-xr-xcontrib/devtools/symbol-check.py22
-rwxr-xr-xcontrib/devtools/test-symbol-check.py25
-rwxr-xr-xcontrib/guix/libexec/build.sh7
-rw-r--r--contrib/guix/manifest.scm51
-rw-r--r--contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch62
-rw-r--r--contrib/guix/patches/glibc-2.24-guix-prefix.patch25
-rw-r--r--contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch100
-rw-r--r--contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch87
-rw-r--r--contrib/guix/patches/glibc-2.27-fcommon.patch (renamed from contrib/guix/patches/glibc-2.24-fcommon.patch)10
-rw-r--r--contrib/guix/patches/glibc-2.27-guix-prefix.patch3
-rw-r--r--contrib/guix/patches/glibc-ldd-x86_64.patch4
-rwxr-xr-xcontrib/install_db4.sh259
-rw-r--r--contrib/seeds/README.md2
-rwxr-xr-xcontrib/seeds/makeseeds.py4
-rwxr-xr-xcontrib/signet/getcoins.py2
-rw-r--r--contrib/verify-commits/allow-revsig-commits175
-rw-r--r--contrib/verify-commits/trusted-git-root2
-rw-r--r--contrib/verify-commits/trusted-keys1
-rw-r--r--contrib/verifybinaries/README.md11
-rw-r--r--depends/README.md2
-rw-r--r--depends/funcs.mk10
-rw-r--r--depends/hosts/default.mk5
-rw-r--r--depends/packages/bdb.mk2
-rw-r--r--depends/packages/sqlite.mk5
-rw-r--r--depends/packages/systemtap.mk9
-rw-r--r--depends/packages/zeromq.mk4
-rw-r--r--depends/patches/systemtap/fix_variadic_warning.patch16
-rw-r--r--doc/build-freebsd.md35
-rw-r--r--doc/build-openbsd.md8
-rw-r--r--doc/build-unix.md38
-rw-r--r--doc/dependencies.md4
-rw-r--r--doc/developer-notes.md4
-rw-r--r--doc/fuzzing.md4
-rw-r--r--doc/init.md2
-rw-r--r--doc/reduce-memory.md6
-rw-r--r--doc/release-notes-23395.md8
-rw-r--r--doc/release-notes-25574.md13
-rw-r--r--doc/release-notes-25957.md9
-rw-r--r--doc/release-notes-26471.md13
-rw-r--r--doc/release-notes-26896.md7
-rw-r--r--doc/release-notes-27037.md5
-rw-r--r--doc/release-notes-27068.md6
-rwxr-xr-xshare/rpcauth/rpcauth.py14
-rw-r--r--src/.clang-tidy3
-rw-r--r--src/Makefile.am5
-rw-r--r--src/Makefile.minisketch.include2
-rw-r--r--src/Makefile.test.include5
-rw-r--r--src/Makefile.test_util.include8
-rw-r--r--src/addrdb.cpp9
-rw-r--r--src/addrman.cpp84
-rw-r--r--src/addrman.h10
-rw-r--r--src/addrman_impl.h12
-rw-r--r--src/arith_uint256.h28
-rw-r--r--src/bench/chacha20.cpp4
-rw-r--r--src/bench/coin_selection.cpp2
-rw-r--r--src/bench/crypto_hash.cpp4
-rw-r--r--src/bench/gcs_filter.cpp2
-rw-r--r--src/bench/load_external.cpp2
-rw-r--r--src/bench/nanobench.h463
-rw-r--r--src/bench/prevector.cpp2
-rw-r--r--src/bench/wallet_balance.cpp2
-rw-r--r--src/bench/wallet_create_tx.cpp4
-rw-r--r--src/bench/wallet_loading.cpp2
-rw-r--r--src/bitcoin-chainstate.cpp1
-rw-r--r--src/bitcoin-cli.cpp18
-rw-r--r--src/bitcoin-util.cpp2
-rw-r--r--src/blockencodings.cpp20
-rw-r--r--src/blockencodings.h10
-rw-r--r--src/blockfilter.cpp15
-rw-r--r--src/coins.cpp76
-rw-r--r--src/coins.h30
-rw-r--r--src/common/bloom.cpp4
-rw-r--r--src/consensus/params.h1
-rw-r--r--src/core_io.h3
-rw-r--r--src/core_read.cpp2
-rw-r--r--src/core_write.cpp6
-rw-r--r--src/crypto/chacha20.cpp260
-rw-r--r--src/crypto/chacha20.h66
-rw-r--r--src/crypto/chacha_poly_aead.cpp13
-rw-r--r--src/crypto/muhash.cpp2
-rw-r--r--src/crypto/ripemd160.cpp2
-rw-r--r--src/crypto/ripemd160.h2
-rw-r--r--src/crypto/sha1.cpp2
-rw-r--r--src/crypto/sha1.h2
-rw-r--r--src/crypto/sha256.cpp2
-rw-r--r--src/crypto/sha256.h2
-rw-r--r--src/crypto/sha512.cpp2
-rw-r--r--src/crypto/sha512.h2
-rw-r--r--src/cuckoocache.h11
-rw-r--r--src/dbwrapper.cpp32
-rw-r--r--src/dbwrapper.h49
-rw-r--r--src/external_signer.cpp2
-rw-r--r--src/external_signer.h2
-rw-r--r--src/fs.cpp32
-rw-r--r--src/fs.h2
-rw-r--r--src/hash.h67
-rw-r--r--src/httprpc.cpp2
-rw-r--r--src/httpserver.cpp6
-rw-r--r--src/i2p.cpp19
-rw-r--r--src/index/base.cpp14
-rw-r--r--src/index/blockfilterindex.cpp4
-rw-r--r--src/init.cpp57
-rw-r--r--src/interfaces/node.h7
-rw-r--r--src/kernel/chainstatemanager_opts.h6
-rw-r--r--src/kernel/coinstats.cpp5
-rw-r--r--src/kernel/coinstats.h2
-rw-r--r--src/kernel/cs_main.cpp1
-rw-r--r--src/kernel/mempool_options.h2
-rw-r--r--src/key.cpp3
-rw-r--r--src/key.h6
-rw-r--r--src/logging/timer.h26
-rw-r--r--src/mapport.cpp14
-rw-r--r--src/mapport.h8
-rw-r--r--src/net.cpp138
-rw-r--r--src/net.h40
-rw-r--r--src/net_processing.cpp121
-rw-r--r--src/netaddress.cpp48
-rw-r--r--src/netaddress.h9
-rw-r--r--src/netbase.cpp16
-rw-r--r--src/node/blockstorage.cpp12
-rw-r--r--src/node/blockstorage.h14
-rw-r--r--src/node/chainstate.cpp34
-rw-r--r--src/node/chainstate.h9
-rw-r--r--src/node/chainstatemanager_args.cpp7
-rw-r--r--src/node/coins_view_args.cpp16
-rw-r--r--src/node/coins_view_args.h15
-rw-r--r--src/node/database_args.cpp18
-rw-r--r--src/node/database_args.h15
-rw-r--r--src/node/interfaces.cpp8
-rw-r--r--src/node/miner.cpp39
-rw-r--r--src/node/miner.h19
-rw-r--r--src/node/utxo_snapshot.cpp5
-rw-r--r--src/node/utxo_snapshot.h9
-rw-r--r--src/policy/fees.cpp2
-rw-r--r--src/policy/fees.h18
-rw-r--r--src/psbt.cpp12
-rw-r--r--src/psbt.h12
-rw-r--r--src/qt/askpassphrasedialog.cpp38
-rw-r--r--src/qt/bitcoingui.cpp7
-rw-r--r--src/qt/clientmodel.cpp13
-rw-r--r--src/qt/clientmodel.h7
-rw-r--r--src/qt/optionsdialog.cpp31
-rw-r--r--src/qt/optionsmodel.cpp148
-rw-r--r--src/qt/optionsmodel.h18
-rw-r--r--src/qt/overviewpage.cpp16
-rw-r--r--src/qt/overviewpage.h1
-rw-r--r--src/qt/recentrequeststablemodel.cpp4
-rw-r--r--src/qt/rpcconsole.cpp4
-rw-r--r--src/qt/sendcoinsdialog.cpp4
-rw-r--r--src/qt/test/addressbooktests.cpp2
-rw-r--r--src/qt/test/wallettests.cpp4
-rw-r--r--src/qt/transactionfilterproxy.cpp17
-rw-r--r--src/qt/transactionfilterproxy.h6
-rw-r--r--src/qt/transactiontablemodel.cpp5
-rw-r--r--src/qt/walletmodel.cpp7
-rw-r--r--src/qt/walletmodel.h18
-rw-r--r--src/random.cpp39
-rw-r--r--src/random.h21
-rw-r--r--src/randomenv.cpp14
-rw-r--r--src/rest.cpp20
-rw-r--r--src/rpc/blockchain.cpp21
-rw-r--r--src/rpc/mempool.cpp7
-rw-r--r--src/rpc/mining.cpp6
-rw-r--r--src/rpc/net.cpp36
-rw-r--r--src/rpc/node.cpp26
-rw-r--r--src/rpc/output_script.cpp2
-rw-r--r--src/rpc/rawtransaction.cpp15
-rw-r--r--src/rpc/rawtransaction_util.cpp43
-rw-r--r--src/rpc/rawtransaction_util.h7
-rw-r--r--src/rpc/request.cpp2
-rw-r--r--src/rpc/server.cpp2
-rw-r--r--src/rpc/server_util.cpp14
-rw-r--r--src/rpc/server_util.h3
-rw-r--r--src/rpc/txoutproof.cpp6
-rw-r--r--src/rpc/util.cpp161
-rw-r--r--src/rpc/util.h32
-rw-r--r--src/script/descriptor.cpp15
-rw-r--r--src/script/descriptor.h12
-rw-r--r--src/script/interpreter.cpp31
-rw-r--r--src/script/interpreter.h5
-rw-r--r--src/script/miniscript.cpp76
-rw-r--r--src/script/miniscript.h338
-rw-r--r--src/script/sign.cpp133
-rw-r--r--src/script/sign.h26
-rw-r--r--src/script/standard.cpp25
-rw-r--r--src/script/standard.h10
-rw-r--r--src/serialize.h4
-rw-r--r--src/span.h4
-rw-r--r--src/streams.h80
-rw-r--r--src/support/allocators/secure.h1
-rw-r--r--src/support/lockedpool.cpp7
-rw-r--r--src/support/lockedpool.h6
-rw-r--r--src/sync.h2
-rw-r--r--src/test/addrman_tests.cpp154
-rw-r--r--src/test/base58_tests.cpp4
-rw-r--r--src/test/blockencodings_tests.cpp7
-rw-r--r--src/test/blockfilter_tests.cpp2
-rw-r--r--src/test/bloom_tests.cpp9
-rw-r--r--src/test/checkqueue_tests.cpp1
-rw-r--r--src/test/coins_tests.cpp285
-rw-r--r--src/test/crypto_tests.cpp168
-rw-r--r--src/test/cuckoocache_tests.cpp2
-rw-r--r--src/test/dbwrapper_tests.cpp66
-rw-r--r--src/test/descriptor_tests.cpp69
-rw-r--r--src/test/fuzz/addrman.cpp4
-rw-r--r--src/test/fuzz/coins_view.cpp5
-rw-r--r--src/test/fuzz/coinscache_sim.cpp478
-rw-r--r--src/test/fuzz/crypto_chacha20.cpp118
-rw-r--r--src/test/fuzz/crypto_diff_fuzz_chacha20.cpp33
-rw-r--r--src/test/fuzz/http_request.cpp2
-rw-r--r--src/test/fuzz/integer.cpp6
-rw-r--r--src/test/fuzz/key.cpp2
-rw-r--r--src/test/fuzz/miniscript.cpp808
-rw-r--r--src/test/fuzz/netaddress.cpp7
-rw-r--r--src/test/fuzz/partially_downloaded_block.cpp142
-rw-r--r--src/test/fuzz/prevector.cpp4
-rw-r--r--src/test/fuzz/process_message.cpp4
-rw-r--r--src/test/fuzz/process_messages.cpp4
-rw-r--r--src/test/fuzz/rpc.cpp2
-rw-r--r--src/test/fuzz/script_sign.cpp6
-rw-r--r--src/test/fuzz/string.cpp4
-rw-r--r--src/test/fuzz/tx_in.cpp5
-rw-r--r--src/test/fuzz/tx_out.cpp5
-rw-r--r--src/test/fuzz/tx_pool.cpp4
-rw-r--r--src/test/fuzz/txorphan.cpp10
-rw-r--r--src/test/fuzz/util.h2
-rw-r--r--src/test/hash_tests.cpp1
-rw-r--r--src/test/key_io_tests.cpp3
-rw-r--r--src/test/key_tests.cpp8
-rw-r--r--src/test/logging_tests.cpp15
-rw-r--r--src/test/merkle_tests.cpp7
-rw-r--r--src/test/miner_tests.cpp1
-rw-r--r--src/test/miniscript_tests.cpp232
-rw-r--r--src/test/minisketch_tests.cpp1
-rw-r--r--src/test/multisig_tests.cpp3
-rw-r--r--src/test/net_tests.cpp34
-rw-r--r--src/test/netbase_tests.cpp6
-rw-r--r--src/test/orphanage_tests.cpp7
-rw-r--r--src/test/pmt_tests.cpp3
-rw-r--r--src/test/pow_tests.cpp1
-rw-r--r--src/test/prevector_tests.cpp5
-rw-r--r--src/test/script_p2sh_tests.cpp15
-rw-r--r--src/test/script_standard_tests.cpp5
-rw-r--r--src/test/script_tests.cpp46
-rw-r--r--src/test/serfloat_tests.cpp3
-rw-r--r--src/test/serialize_tests.cpp13
-rw-r--r--src/test/sighash_tests.cpp6
-rw-r--r--src/test/skiplist_tests.cpp1
-rw-r--r--src/test/streams_tests.cpp29
-rw-r--r--src/test/transaction_tests.cpp56
-rw-r--r--src/test/txpackage_tests.cpp1
-rw-r--r--src/test/txrequest_tests.cpp1
-rw-r--r--src/test/uint256_tests.cpp2
-rw-r--r--src/test/util/blockfilter.cpp1
-rw-r--r--src/test/util/coins.cpp27
-rw-r--r--src/test/util/coins.h19
-rw-r--r--src/test/util/json.cpp17
-rw-r--r--src/test/util/json.h14
-rw-r--r--src/test/util/net.cpp7
-rw-r--r--src/test/util/net.h4
-rw-r--r--src/test/util/random.h45
-rw-r--r--src/test/util/setup_common.cpp16
-rw-r--r--src/test/util/setup_common.h6
-rw-r--r--src/test/util/xoroshiro128plusplus.h71
-rw-r--r--src/test/util_tests.cpp1
-rw-r--r--src/test/validation_block_tests.cpp1
-rw-r--r--src/test/validation_chainstate_tests.cpp18
-rw-r--r--src/test/validation_chainstatemanager_tests.cpp2
-rw-r--r--src/test/validation_flush_tests.cpp27
-rw-r--r--src/test/versionbits_tests.cpp3
-rw-r--r--src/test/xoroshiro128plusplus_tests.cpp29
-rw-r--r--src/tinyformat.h9
-rw-r--r--src/torcontrol.cpp10
-rw-r--r--src/torcontrol.h2
-rw-r--r--src/txdb.cpp31
-rw-r--r--src/txdb.h23
-rw-r--r--src/txmempool.cpp2
-rw-r--r--src/txmempool.h2
-rw-r--r--src/txorphanage.cpp25
-rw-r--r--src/txorphanage.h19
-rw-r--r--src/uint256.cpp15
-rw-r--r--src/uint256.h80
-rw-r--r--src/univalue/include/univalue_utffilter.h12
-rw-r--r--src/util/check.cpp7
-rw-r--r--src/util/check.h8
-rw-r--r--src/util/fees.cpp2
-rw-r--r--src/util/fees.h2
-rw-r--r--src/util/hasher.cpp5
-rw-r--r--src/util/hasher.h2
-rw-r--r--src/util/sock.h4
-rw-r--r--src/util/system.cpp9
-rw-r--r--src/util/system.h4
-rw-r--r--src/validation.cpp111
-rw-r--r--src/validation.h12
-rw-r--r--src/validationinterface.cpp2
-rw-r--r--src/wallet/bdb.cpp80
-rw-r--r--src/wallet/bdb.h65
-rw-r--r--src/wallet/db.h64
-rw-r--r--src/wallet/dump.cpp24
-rw-r--r--src/wallet/external_signer_scriptpubkeyman.h8
-rw-r--r--src/wallet/feebumper.cpp21
-rw-r--r--src/wallet/feebumper.h3
-rw-r--r--src/wallet/init.cpp3
-rw-r--r--src/wallet/interfaces.cpp3
-rw-r--r--src/wallet/rpc/addresses.cpp4
-rw-r--r--src/wallet/rpc/backup.cpp21
-rw-r--r--src/wallet/rpc/coins.cpp11
-rw-r--r--src/wallet/rpc/encrypt.cpp63
-rw-r--r--src/wallet/rpc/spend.cpp97
-rw-r--r--src/wallet/rpc/transactions.cpp17
-rw-r--r--src/wallet/rpc/util.cpp1
-rw-r--r--src/wallet/rpc/wallet.cpp45
-rw-r--r--src/wallet/salvage.cpp4
-rw-r--r--src/wallet/scriptpubkeyman.cpp24
-rw-r--r--src/wallet/scriptpubkeyman.h28
-rw-r--r--src/wallet/spend.cpp11
-rw-r--r--src/wallet/sqlite.cpp56
-rw-r--r--src/wallet/sqlite.h26
-rw-r--r--src/wallet/test/coinselector_tests.cpp39
-rw-r--r--src/wallet/test/ismine_tests.cpp76
-rw-r--r--src/wallet/test/scriptpubkeyman_tests.cpp2
-rw-r--r--src/wallet/test/spend_tests.cpp4
-rw-r--r--src/wallet/test/util.cpp16
-rw-r--r--src/wallet/test/util.h2
-rw-r--r--src/wallet/test/wallet_crypto_tests.cpp1
-rw-r--r--src/wallet/test/wallet_test_fixture.cpp2
-rw-r--r--src/wallet/test/wallet_tests.cpp44
-rw-r--r--src/wallet/test/walletload_tests.cpp26
-rw-r--r--src/wallet/transaction.h1
-rw-r--r--src/wallet/wallet.cpp146
-rw-r--r--src/wallet/wallet.h34
-rw-r--r--src/wallet/walletdb.cpp55
-rw-r--r--src/wallet/walletdb.h2
-rw-r--r--src/wallet/wallettool.cpp10
-rw-r--r--src/zmq/zmqabstractnotifier.h4
-rw-r--r--src/zmq/zmqnotificationinterface.cpp2
-rw-r--r--src/zmq/zmqnotificationinterface.h2
-rw-r--r--test/README.md41
-rw-r--r--test/functional/data/rpc_decodescript.json2
-rwxr-xr-xtest/functional/feature_block.py50
-rwxr-xr-xtest/functional/feature_config_args.py38
-rwxr-xr-xtest/functional/feature_dbcrash.py2
-rwxr-xr-xtest/functional/feature_maxuploadtarget.py3
-rwxr-xr-xtest/functional/feature_notifications.py10
-rwxr-xr-xtest/functional/feature_posix_fs_permissions.py43
-rwxr-xr-xtest/functional/feature_pruning.py4
-rwxr-xr-xtest/functional/feature_rbf.py4
-rwxr-xr-xtest/functional/feature_taproot.py18
-rwxr-xr-xtest/functional/mempool_updatefromblock.py57
-rwxr-xr-xtest/functional/p2p_disconnect_ban.py4
-rwxr-xr-xtest/functional/p2p_eviction.py32
-rwxr-xr-xtest/functional/p2p_headers_sync_with_minchainwork.py1
-rwxr-xr-xtest/functional/p2p_ibd_stalling.py164
-rwxr-xr-xtest/functional/p2p_invalid_messages.py34
-rwxr-xr-xtest/functional/p2p_node_network_limited.py2
-rwxr-xr-xtest/functional/p2p_permissions.py10
-rwxr-xr-xtest/functional/p2p_tx_download.py25
-rwxr-xr-xtest/functional/rpc_blockchain.py12
-rwxr-xr-xtest/functional/rpc_decodescript.py15
-rwxr-xr-xtest/functional/rpc_preciousblock.py2
-rw-r--r--test/functional/test_framework/authproxy.py5
-rwxr-xr-xtest/functional/test_framework/p2p.py2
-rwxr-xr-xtest/functional/test_framework/test_framework.py17
-rwxr-xr-xtest/functional/test_framework/test_node.py17
-rwxr-xr-xtest/functional/test_runner.py4
-rwxr-xr-xtest/functional/wallet_backwards_compatibility.py4
-rwxr-xr-xtest/functional/wallet_bumpfee.py22
-rwxr-xr-xtest/functional/wallet_change_address.py108
-rwxr-xr-xtest/functional/wallet_crosschain.py6
-rwxr-xr-xtest/functional/wallet_encryption.py11
-rwxr-xr-xtest/functional/wallet_groups.py5
-rwxr-xr-xtest/functional/wallet_importdescriptors.py42
-rwxr-xr-xtest/functional/wallet_migration.py74
-rwxr-xr-xtest/functional/wallet_miniscript.py211
-rwxr-xr-xtest/functional/wallet_orphanedreward.py37
-rwxr-xr-xtest/functional/wallet_pruning.py11
-rwxr-xr-xtest/functional/wallet_transactiontime_rescan.py39
-rwxr-xr-xtest/get_previous_releases.py9
-rw-r--r--test/lint/README.md18
-rwxr-xr-xtest/lint/lint-locale-dependence.py3
-rwxr-xr-xtest/lint/lint-python.py1
-rw-r--r--test/sanitizer_suppressions/ubsan2
-rwxr-xr-xtest/util/rpcauth-test.py11
-rwxr-xr-xtest/util/test_runner.py4
405 files changed, 7812 insertions, 3490 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 97fe9866ae..673bc427e1 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -1,4 +1,5 @@
env: # Global defaults
+ CIRRUS_CLONE_DEPTH: 1
PACKAGE_MANAGER_INSTALL: "apt-get update && apt-get install -y"
MAKEJOBS: "-j10"
TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache
@@ -27,7 +28,7 @@ base_template: &BASE_TEMPLATE
# Unconditionally install git (used in fingerprint_script).
- bash -c "$PACKAGE_MANAGER_INSTALL git"
- if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
- - git fetch $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge"
+ - git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge"
- git checkout FETCH_HEAD # Use merged changes to detect silent merge conflicts
# Also, the merge commit is used to lint COMMIT_RANGE="HEAD~..HEAD"
@@ -38,7 +39,7 @@ main_template: &MAIN_TEMPLATE
ci_script:
- ./ci/test_run_all.sh
-global_task_template: &GLOBAL_TASK_TEMPLATE
+container_depends_template: &CONTAINER_DEPENDS_TEMPLATE
<< : *BASE_TEMPLATE
container:
# https://cirrus-ci.org/faq/#are-there-any-limits
@@ -48,15 +49,10 @@ global_task_template: &GLOBAL_TASK_TEMPLATE
memory: 8G # Set to 8GB to avoid OOM. https://cirrus-ci.org/guide/linux/#linux-containers
depends_built_cache:
folder: "depends/built"
- fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-list -1 HEAD ./depends)
- << : *MAIN_TEMPLATE
+ fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:depends)
-macos_native_task_template: &MACOS_NATIVE_TASK_TEMPLATE
- << : *BASE_TEMPLATE
- check_clang_script:
- - clang --version
- brew_install_script:
- - brew install boost libevent qt@5 miniupnpc libnatpmp ccache zeromq qrencode libtool automake gnu-getopt
+global_task_template: &GLOBAL_TASK_TEMPLATE
+ << : *CONTAINER_DEPENDS_TEMPLATE
<< : *MAIN_TEMPLATE
compute_credits_template: &CREDITS_TEMPLATE
@@ -76,6 +72,8 @@ task:
python_cache:
folder: "/tmp/python"
fingerprint_script: cat .python-version /etc/os-release
+ unshallow_script:
+ - git fetch --unshallow --no-tags
lint_script:
- ./ci/lint_run_all.sh
env:
@@ -305,13 +303,13 @@ task:
task:
name: 'macOS 10.15 [gui, no tests] [focal]'
- << : *BASE_TEMPLATE
+ << : *CONTAINER_DEPENDS_TEMPLATE
+ container:
+ image: ubuntu:focal
macos_sdk_cache:
folder: "depends/SDKs/$MACOS_SDK"
fingerprint_key: "$MACOS_SDK"
<< : *MAIN_TEMPLATE
- container:
- image: ubuntu:focal
env:
MACOS_SDK: "Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers"
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
@@ -322,7 +320,12 @@ task:
macos_instance:
# Use latest image, but hardcode version to avoid silent upgrades (and breaks)
image: ghcr.io/cirruslabs/macos-ventura-xcode:14.1 # https://cirrus-ci.org/guide/macOS
- << : *MACOS_NATIVE_TASK_TEMPLATE
+ << : *BASE_TEMPLATE
+ check_clang_script:
+ - clang --version
+ brew_install_script:
+ - brew install boost libevent qt@5 miniupnpc libnatpmp ccache zeromq qrencode libtool automake gnu-getopt
+ << : *MAIN_TEMPLATE
env:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
CI_USE_APT_INSTALL: "no"
@@ -330,17 +333,17 @@ task:
FILE_ENV: "./ci/test/00_setup_env_mac_native_arm64.sh"
task:
- name: 'ARM64 Android APK [focal]'
- << : *BASE_TEMPLATE
+ name: 'ARM64 Android APK [jammy]'
+ << : *CONTAINER_DEPENDS_TEMPLATE
+ container:
+ image: ubuntu:jammy
android_sdk_cache:
folder: "depends/SDKs/android"
fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.2.8568313"
depends_sources_cache:
folder: "depends/sources"
- fingerprint_script: git rev-list -1 HEAD ./depends
+ fingerprint_script: git rev-parse HEAD:depends/packages
<< : *MAIN_TEMPLATE
- container:
- image: ubuntu:focal
env:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
FILE_ENV: "./ci/test/00_setup_env_android.sh"
diff --git a/REVIEWERS b/REVIEWERS
deleted file mode 100644
index cb1bafa496..0000000000
--- a/REVIEWERS
+++ /dev/null
@@ -1,17 +0,0 @@
-# ==============================================================================
-# Bitcoin Core REVIEWERS
-# ==============================================================================
-
-# Configuration of automated review requests for the bitcoin/bitcoin repo
-# via DrahtBot.
-
-# Order is not important; if a modified file or directory matches a fnmatch,
-# the reviewer will be mentioned in a PR comment requesting a review.
-
-# Regular contributors are free to add their names to specific directories or
-# files provided that they are willing to provide a review.
-
-# Absence from this list should not be interpreted as a discouragement to
-# review a pull request. Peer review is always welcome and is a critical
-# component of the progress of the codebase. Information on peer review
-# guidelines can be found in the CONTRIBUTING.md doc.
diff --git a/build-aux/m4/ax_boost_base.m4 b/build-aux/m4/ax_boost_base.m4
index 6c944b160f..f6620882a2 100644
--- a/build-aux/m4/ax_boost_base.m4
+++ b/build-aux/m4/ax_boost_base.m4
@@ -8,7 +8,7 @@
#
# DESCRIPTION
#
-# Test for the Boost C++ libraries of a particular version (or newer)
+# Test for the Boost C++ headers of a particular version (or newer)
#
# If no path to the installed boost library is given the macro searchs
# under /usr, /usr/local, /opt, /opt/local and /opt/homebrew and evaluates
@@ -17,12 +17,14 @@
#
# This macro calls:
#
-# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS)
+# AC_SUBST(BOOST_CPPFLAGS)
#
# And sets:
#
# HAVE_BOOST
#
+# Note that this macro has been modified compared to upstream.
+#
# LICENSE
#
# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de>
@@ -59,26 +61,10 @@ AC_ARG_WITH([boost],
],
[want_boost="yes"])
-
-AC_ARG_WITH([boost-libdir],
- [AS_HELP_STRING([--with-boost-libdir=LIB_DIR],
- [Force given directory for boost libraries.
- Note that this will override library path detection,
- so use this parameter only if default library detection fails
- and you know exactly where your boost libraries are located.])],
- [
- AS_IF([test -d "$withval"],
- [_AX_BOOST_BASE_boost_lib_path="$withval"],
- [AC_MSG_ERROR([--with-boost-libdir expected directory name])])
- ],
- [_AX_BOOST_BASE_boost_lib_path=""])
-
-BOOST_LDFLAGS=""
BOOST_CPPFLAGS=""
AS_IF([test "x$want_boost" = "xyes"],
[_AX_BOOST_BASE_RUNDETECT([$1],[$2],[$3])])
AC_SUBST(BOOST_CPPFLAGS)
-AC_SUBST(BOOST_LDFLAGS)
])
@@ -139,7 +125,6 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) lib path in "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"])
AS_IF([test -d "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" && test -r "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" ],[
AC_MSG_RESULT([yes])
- BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp";
break;
],
[AC_MSG_RESULT([no])])
@@ -156,27 +141,17 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
for libsubdir in $search_libsubdirs ; do
if ls "$_AX_BOOST_BASE_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
done
- BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path_tmp/$libsubdir"
BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path_tmp/include"
break;
fi
done
])
- dnl overwrite ld flags if we have required special directory with
- dnl --with-boost-libdir parameter
- AS_IF([test "x$_AX_BOOST_BASE_boost_lib_path" != "x"],
- [BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_lib_path"])
-
- AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION)])
+ AC_MSG_CHECKING([for Boost headers >= $1 ($WANT_BOOST_VERSION)])
CPPFLAGS_SAVED="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
export CPPFLAGS
- LDFLAGS_SAVED="$LDFLAGS"
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
-
AC_REQUIRE([AC_PROG_CXX])
AC_LANG_PUSH(C++)
AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[
@@ -193,11 +168,8 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
dnl built and installed without the --layout=system option or for a staged(not installed) version
if test "x$succeeded" != "xyes" ; then
CPPFLAGS="$CPPFLAGS_SAVED"
- LDFLAGS="$LDFLAGS_SAVED"
BOOST_CPPFLAGS=
- if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
- BOOST_LDFLAGS=
- fi
+
_version=0
if test -n "$_AX_BOOST_BASE_boost_path" ; then
if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path"; then
@@ -216,14 +188,6 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path"
fi
fi
- dnl if we found something and BOOST_LDFLAGS was unset before
- dnl (because "$_AX_BOOST_BASE_boost_lib_path" = ""), set it here.
- if test -n "$BOOST_CPPFLAGS" && test -z "$BOOST_LDFLAGS"; then
- for libsubdir in $libsubdirs ; do
- if ls "$_AX_BOOST_BASE_boost_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
- done
- BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$libsubdir"
- fi
fi
else
if test "x$cross_compiling" != "xyes" ; then
@@ -242,12 +206,6 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE"
- if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
- for libsubdir in $libsubdirs ; do
- if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
- done
- BOOST_LDFLAGS="-L$best_path/$libsubdir"
- fi
fi
if test -n "$BOOST_ROOT" ; then
@@ -259,10 +217,9 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'`
stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'`
V_CHECK=`expr $stage_version_shorten \>\= $_version`
- if test "x$V_CHECK" = "x1" && test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
+ if test "x$V_CHECK" = "x1" ; then
AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT)
BOOST_CPPFLAGS="-I$BOOST_ROOT"
- BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir"
fi
fi
fi
@@ -270,8 +227,6 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
export CPPFLAGS
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
AC_LANG_PUSH(C++)
AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[
@@ -298,6 +253,4 @@ AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
fi
CPPFLAGS="$CPPFLAGS_SAVED"
- LDFLAGS="$LDFLAGS_SAVED"
-
])
diff --git a/ci/README.md b/ci/README.md
index 3c5f04c39e..de798607df 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -8,8 +8,7 @@ Be aware that the tests will be built and run in-place, so please run at your ow
If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first.
The ci needs to perform various sysadmin tasks such as installing packages or writing to the user's home directory.
-While most of the actions are done inside a docker container, this is not possible for all. Thus, cache directories,
-such as the depends cache, previous release binaries, or ccache, are mounted as read-write into the docker container. While it should be fine to run
+While it should be fine to run
the ci system locally on you development box, the ci scripts can generally be assumed to have received less review and
testing compared to other parts of the codebase. If you want to keep the work tree clean, you might want to run the ci
system in a virtual machine with a Linux operating system of your choice.
diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh
index 4ad1ec2fd3..f7147582dc 100755
--- a/ci/lint/04_install.sh
+++ b/ci/lint/04_install.sh
@@ -13,23 +13,25 @@ ${CI_RETRY_EXE} apt-get update
# - gpg (used by verify-commits)
${CI_RETRY_EXE} apt-get install -y curl xz-utils git gpg
-PYTHON_PATH=/tmp/python
-if [ ! -d "${PYTHON_PATH}/bin" ]; then
- (
- git clone https://github.com/pyenv/pyenv.git
- cd pyenv/plugins/python-build || exit 1
- ./install.sh
- )
- # For dependencies see https://github.com/pyenv/pyenv/wiki#suggested-build-environment
- ${CI_RETRY_EXE} apt-get install -y build-essential libssl-dev zlib1g-dev \
- libbz2-dev libreadline-dev libsqlite3-dev curl llvm \
- libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev \
- clang
- env CC=clang python-build "$(cat "${BASE_ROOT_DIR}/.python-version")" "${PYTHON_PATH}"
+if [ -z "${SKIP_PYTHON_INSTALL}" ]; then
+ PYTHON_PATH=/tmp/python
+ if [ ! -d "${PYTHON_PATH}/bin" ]; then
+ (
+ git clone https://github.com/pyenv/pyenv.git
+ cd pyenv/plugins/python-build || exit 1
+ ./install.sh
+ )
+ # For dependencies see https://github.com/pyenv/pyenv/wiki#suggested-build-environment
+ ${CI_RETRY_EXE} apt-get install -y build-essential libssl-dev zlib1g-dev \
+ libbz2-dev libreadline-dev libsqlite3-dev curl llvm \
+ libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev \
+ clang
+ env CC=clang python-build "$(cat "${BASE_ROOT_DIR}/.python-version")" "${PYTHON_PATH}"
+ fi
+ export PATH="${PYTHON_PATH}/bin:${PATH}"
+ command -v python3
+ python3 --version
fi
-export PATH="${PYTHON_PATH}/bin:${PATH}"
-command -v python3
-python3 --version
${CI_RETRY_EXE} pip3 install codespell==2.2.1
${CI_RETRY_EXE} pip3 install flake8==5.0.4
@@ -38,5 +40,6 @@ ${CI_RETRY_EXE} pip3 install pyzmq==24.0.1
${CI_RETRY_EXE} pip3 install vulture==2.6
SHELLCHECK_VERSION=v0.8.0
-curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar --xz -xf - --directory /tmp/
-export PATH="/tmp/shellcheck-${SHELLCHECK_VERSION}:${PATH}"
+curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | \
+ tar --xz -xf - --directory /tmp/
+mv "/tmp/shellcheck-${SHELLCHECK_VERSION}/shellcheck" /usr/bin/
diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh
index c14d7473d3..fa28f6126c 100755
--- a/ci/lint/06_script.sh
+++ b/ci/lint/06_script.sh
@@ -6,7 +6,11 @@
export LC_ALL=C
-if [ -n "$CIRRUS_PR" ]; then
+if [ -n "$LOCAL_BRANCH" ]; then
+ # To faithfully recreate CI linting locally, specify all commits on the current
+ # branch.
+ COMMIT_RANGE="$(git merge-base HEAD master)..HEAD"
+elif [ -n "$CIRRUS_PR" ]; then
COMMIT_RANGE="HEAD~..HEAD"
echo
git log --no-merges --oneline "$COMMIT_RANGE"
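With the LOCAL_BRANCH branch added above, the same script can lint a local branch instead of a Cirrus PR. A minimal sketch, assuming the lint dependencies from ci/lint/04_install.sh are already installed and master is the upstream base:

  LOCAL_BRANCH=1 ./ci/lint/06_script.sh   # lints $(git merge-base HEAD master)..HEAD, i.e. every commit on the current branch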
diff --git a/ci/lint/Dockerfile b/ci/lint/Dockerfile
new file mode 100644
index 0000000000..03c20c7286
--- /dev/null
+++ b/ci/lint/Dockerfile
@@ -0,0 +1,29 @@
+# See test/lint/README.md for usage.
+#
+# This container basically has to live in this directory in order to pull in the CI
+# install scripts. If it lived in the root directory, it would have to pull in the
+# entire repo as docker context during build; if it lived elsewhere, it wouldn't be
+# able to make back-references to pull in the install scripts. So here it lives.
+
+FROM python:3.7-buster
+
+ENV DEBIAN_FRONTEND=noninteractive
+ENV LC_ALL=C.UTF-8
+
+# This is used by the 04_install.sh script; we can't read the Python version from
+# .python-version for the same reasons as above, and it's more efficient to pull a
+# preexisting Python image than it is to build from source.
+ENV SKIP_PYTHON_INSTALL=1
+
+# Must be built from ./ci/lint/ for these paths to work.
+COPY ./docker-entrypoint.sh /entrypoint.sh
+COPY ./04_install.sh /install.sh
+
+RUN /install.sh && \
+ echo 'alias lint="./ci/lint/06_script.sh"' >> ~/.bashrc && \
+ chmod 755 /entrypoint.sh && \
+ rm -rf /var/lib/apt/lists/*
+
+
+WORKDIR /bitcoin
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/ci/lint/docker-entrypoint.sh b/ci/lint/docker-entrypoint.sh
new file mode 100755
index 0000000000..3fdbbb0761
--- /dev/null
+++ b/ci/lint/docker-entrypoint.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+export LC_ALL=C
+
+# Fixes permission issues when there is a container UID/GID mismatch with the owner
+# of the mounted bitcoin src dir.
+git config --global --add safe.directory /bitcoin
+
+if [ -z "$1" ]; then
+ LOCAL_BRANCH=1 bash -ic "./ci/lint/06_script.sh"
+else
+ exec "$@"
+fi
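Taken together, the Dockerfile and entrypoint above let the whole lint suite run in a container. A hedged usage sketch; the image tag is illustrative and test/lint/README.md carries the documented invocation:

  docker build -t bitcoin-linter ./ci/lint                 # build context must be ci/lint/ so the COPY paths resolve
  docker run --rm -v "$(pwd)":/bitcoin bitcoin-linter      # no arguments, so the entrypoint runs 06_script.sh with LOCAL_BRANCH=1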
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 07c20f632d..ab830b8ec0 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -8,11 +8,10 @@ export LC_ALL=C.UTF-8
# The root dir.
# The ci system copies this folder.
-# This is where the depends build is done.
BASE_ROOT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd )
export BASE_ROOT_DIR
# The depends dir.
-# This folder exists on the ci host and ci guest. Changes are propagated back and forth.
+# This folder exists only on the ci guest, and on the ci host as a volume.
export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends}
# A folder for the ci system to put temporary files (ccache, datadirs for tests, ...)
# This folder only exists on the ci host.
@@ -58,12 +57,14 @@ export CCACHE_SIZE=${CCACHE_SIZE:-100M}
export CCACHE_TEMPDIR=${CCACHE_TEMPDIR:-/tmp/.ccache-temp}
export CCACHE_COMPRESS=${CCACHE_COMPRESS:-1}
# The cache dir.
-# This folder exists on the ci host and ci guest. Changes are propagated back and forth.
+# This folder exists only on the ci guest, and on the ci host as a volume.
export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache}
# Folder where the build result is put (bin and lib).
export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out/$HOST}
# Folder where the build is done (dist and out-of-tree build).
export BASE_BUILD_DIR=${BASE_BUILD_DIR:-$BASE_SCRATCH_DIR/build}
+# The folder for previous release binaries.
+# This folder exists only on the ci guest, and on the ci host as a volume.
export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/releases/$HOST}
export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks}
export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps bison}
diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh
index e1830b4f49..1834bd0bc4 100755
--- a/ci/test/00_setup_env_android.sh
+++ b/ci/test/00_setup_env_android.sh
@@ -9,7 +9,7 @@ export LC_ALL=C.UTF-8
export HOST=aarch64-linux-android
export PACKAGES="unzip openjdk-8-jdk gradle"
export CONTAINER_NAME=ci_android
-export CI_IMAGE_NAME_TAG="ubuntu:focal"
+export CI_IMAGE_NAME_TAG="ubuntu:jammy"
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index e8f963c8a9..8a931d44e5 100755
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -12,6 +12,7 @@ export CI_IMAGE_NAME_TAG=quay.io/centos/centos:stream8
export CI_BASE_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python38 python38-pip which patch lbzip2 xz procps-ng dash rsync coreutils bison"
export PIP_PACKAGES="pyzmq"
export GOAL="install"
+export NO_WERROR=1 # GCC 8
export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports"
export CONFIG_SHELL="/bin/dash"
export TEST_RUNNER_ENV="LC_ALL=en_US.UTF-8"
diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh
index d35701160a..7886f6efc9 100755
--- a/ci/test/00_setup_env_native_fuzz_with_msan.sh
+++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh
@@ -15,7 +15,7 @@ export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
export CONTAINER_NAME="ci_native_fuzz_msan"
export PACKAGES="clang-12 llvm-12 cmake"
# BDB generates false-positives and will be removed in future
-export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' libevent_cflags='${MSAN_FLAGS}' sqlite_cflags='${MSAN_FLAGS}' zeromq_cxxflags='-std=c++17 ${MSAN_AND_LIBCXX_FLAGS}'"
+export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="install"
export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,memory --disable-hardening --with-asm=no CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export USE_MEMORY_SANITIZER="true"
diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh
index 48049a0a3c..1f9209bafb 100755
--- a/ci/test/00_setup_env_native_msan.sh
+++ b/ci/test/00_setup_env_native_msan.sh
@@ -15,7 +15,7 @@ export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
export CONTAINER_NAME="ci_native_msan"
export PACKAGES="clang-12 llvm-12 cmake"
# BDB generates false-positives and will be removed in future
-export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' libevent_cflags='${MSAN_FLAGS}' sqlite_cflags='${MSAN_FLAGS}' zeromq_cxxflags='-std=c++17 ${MSAN_AND_LIBCXX_FLAGS}'"
+export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="install"
export BITCOIN_CONFIG="--with-sanitizers=memory --disable-hardening --with-asm=no CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export USE_MEMORY_SANITIZER="true"
diff --git a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
index 08bb5d1eab..06bc2401c5 100755
--- a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
+++ b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
@@ -13,4 +13,5 @@ export PACKAGES="-t buster-backports python3-zmq clang-8 llvm-8 libc++abi-8-dev
export APPEND_APT_SOURCES_LIST="deb http://deb.debian.org/debian buster-backports main"
export DEP_OPTS="NO_WALLET=1 CC=clang-8 CXX='clang++-8 -stdlib=libc++'"
export GOAL="install"
+export NO_WERROR=1
export BITCOIN_CONFIG="--enable-reduce-exports CC=clang-8 CXX='clang++-8 -stdlib=libc++' --enable-experimental-util-chainstate --with-experimental-kernel-lib --enable-shared"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 3f39185ae8..5cc0addd33 100755
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -15,6 +15,7 @@ export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude fe
export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
+export NO_WERROR=1
export DOWNLOAD_PREVIOUS_RELEASES="true"
export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-reduce-exports \
--enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" CC=gcc-8 CXX=g++-8"
diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh
new file mode 100755
index 0000000000..c2469d7ca9
--- /dev/null
+++ b/ci/test/01_base_install.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2018-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+CI_EXEC_ROOT () { bash -c "$*"; }
+export -f CI_EXEC_ROOT
+
+if [ -n "$DPKG_ADD_ARCH" ]; then
+ CI_EXEC_ROOT dpkg --add-architecture "$DPKG_ADD_ARCH"
+fi
+
+if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then
+ ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y install epel-release
+ ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y --allowerasing install "$CI_BASE_PACKAGES" "$PACKAGES"
+elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
+ if [[ "${ADD_UNTRUSTED_BPFCC_PPA}" == "true" ]]; then
+ # Ubuntu 22.04 LTS and Debian 11 both have an outdated bpfcc-tools packages.
+ # The iovisor PPA is outdated as well. The next Ubuntu and Debian releases will contain updated
+ # packages. Meanwhile, use an untrusted PPA to install an up-to-date version of the bpfcc-tools
+ # package.
+ # TODO: drop this once we can use newer images in GCE
+ CI_EXEC_ROOT add-apt-repository ppa:hadret/bpfcc
+ fi
+ if [[ -n "${APPEND_APT_SOURCES_LIST}" ]]; then
+ CI_EXEC_ROOT echo "${APPEND_APT_SOURCES_LIST}" \>\> /etc/apt/sources.list
+ fi
+ ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get update
+ ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get install --no-install-recommends --no-upgrade -y "$PACKAGES" "$CI_BASE_PACKAGES"
+fi
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 9bfe555243..62bc3a963d 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -33,7 +33,15 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
# the name isn't important, so long as we use the same UID
LOCAL_USER=nonroot
- ${CI_RETRY_EXE} docker pull "$CI_IMAGE_NAME_TAG"
+ DOCKER_BUILDKIT=1 ${CI_RETRY_EXE} docker build \
+ --file "${BASE_ROOT_DIR}/ci/test_imagefile" \
+ --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \
+ --build-arg "FILE_ENV=${FILE_ENV}" \
+ --tag="${CONTAINER_NAME}" \
+ "${BASE_ROOT_DIR}"
+ docker volume create "${CONTAINER_NAME}_ccache" || true
+ docker volume create "${CONTAINER_NAME}_depends" || true
+ docker volume create "${CONTAINER_NAME}_previous_releases" || true
if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then
echo "Restart docker before run to stop and clear all containers started with --rm"
@@ -43,13 +51,13 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
# shellcheck disable=SC2086
CI_CONTAINER_ID=$(docker run $CI_CONTAINER_CAP --rm --interactive --detach --tty \
--mount type=bind,src=$BASE_ROOT_DIR,dst=/ro_base,readonly \
- --mount type=bind,src=$CCACHE_DIR,dst=$CCACHE_DIR \
- --mount type=bind,src=$DEPENDS_DIR,dst=$DEPENDS_DIR \
- --mount type=bind,src=$PREVIOUS_RELEASES_DIR,dst=$PREVIOUS_RELEASES_DIR \
+ --mount "type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR" \
+ --mount "type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR" \
+ --mount "type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR" \
-w $BASE_ROOT_DIR \
--env-file /tmp/env \
--name $CONTAINER_NAME \
- $CI_IMAGE_NAME_TAG)
+ $CONTAINER_NAME)
export CI_CONTAINER_ID
# Create a non-root user inside the container which matches the local user.
@@ -63,6 +71,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
export CI_EXEC_CMD_PREFIX="docker exec -u $LOCAL_UID $CI_CONTAINER_ID"
else
echo "Running on host system without docker wrapper"
+ "${BASE_ROOT_DIR}/ci/test/01_base_install.sh"
fi
CI_EXEC () {
@@ -76,29 +85,6 @@ export -f CI_EXEC_ROOT
CI_EXEC mkdir -p "${BINS_SCRATCH_DIR}"
-if [ -n "$DPKG_ADD_ARCH" ]; then
- CI_EXEC_ROOT dpkg --add-architecture "$DPKG_ADD_ARCH"
-fi
-
-if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then
- ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y install epel-release
- ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y --allowerasing install "$CI_BASE_PACKAGES" "$PACKAGES"
-elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
- if [[ "${ADD_UNTRUSTED_BPFCC_PPA}" == "true" ]]; then
- # Ubuntu 22.04 LTS and Debian 11 both have an outdated bpfcc-tools packages.
- # The iovisor PPA is outdated as well. The next Ubuntu and Debian releases will contain updated
- # packages. Meanwhile, use an untrusted PPA to install an up-to-date version of the bpfcc-tools
- # package.
- # TODO: drop this once we can use newer images in GCE
- CI_EXEC_ROOT add-apt-repository ppa:hadret/bpfcc
- fi
- if [[ -n "${APPEND_APT_SOURCES_LIST}" ]]; then
- CI_EXEC_ROOT echo "${APPEND_APT_SOURCES_LIST}" >> /etc/apt/sources.list
- fi
- ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get update
- ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get install --no-install-recommends --no-upgrade -y "$PACKAGES" "$CI_BASE_PACKAGES"
-fi
-
if [ -n "$PIP_PACKAGES" ]; then
if [ "$CI_OS_NAME" == "macos" ]; then
sudo -H pip3 install --upgrade pip
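Because the ccache, depends, and previous-releases caches now live in named Docker volumes rather than bind mounts, they persist across runs and can be inspected or reset with standard volume commands. A small sketch using one of the container names from this branch (ci_native_msan); other jobs follow the same ${CONTAINER_NAME}_* pattern:

  docker volume ls | grep ci_native_msan    # the _ccache, _depends and _previous_releases volumes created above
  docker volume rm ci_native_msan_ccache    # drop a cache to force a clean rebuild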
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index f792a9f192..115d727ca3 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -9,12 +9,14 @@ export LC_ALL=C.UTF-8
if [[ $HOST = *-mingw32 ]]; then
# Generate all binaries, so that they can be wrapped
CI_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1
+ CI_EXEC make "$MAKEJOBS" -C src minisketch/test.exe VERBOSE=1
CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh"
fi
if [ -n "$QEMU_USER_CMD" ]; then
# Generate all binaries, so that they can be wrapped
CI_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1
+ CI_EXEC make "$MAKEJOBS" -C src minisketch/test VERBOSE=1
CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh"
fi
@@ -48,6 +50,7 @@ if [ "${RUN_TIDY}" = "true" ]; then
" src/node/chainstate.cpp"\
" src/node/chainstatemanager_args.cpp"\
" src/node/mempool_args.cpp"\
+ " src/node/utxo_snapshot.cpp"\
" src/node/validation_cache_args.cpp"\
" src/policy/feerate.cpp"\
" src/policy/packages.cpp"\
@@ -57,6 +60,8 @@ if [ "${RUN_TIDY}" = "true" ]; then
" src/rpc/signmessage.cpp"\
" src/test/fuzz/txorphan.cpp"\
" src/test/fuzz/util/"\
+ " src/test/util/coins.cpp"\
+ " src/uint256.cpp"\
" src/util/bip32.cpp"\
" src/util/bytevectorhash.cpp"\
" src/util/check.cpp"\
@@ -72,7 +77,12 @@ if [ "${RUN_TIDY}" = "true" ]; then
" src/util/syserror.cpp"\
" src/util/threadinterrupt.cpp"\
" src/zmq"\
- " -p . ${MAKEJOBS} -- -Xiwyu --cxx17ns -Xiwyu --mapping_file=${BASE_BUILD_DIR}/bitcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp"
+ " -p . ${MAKEJOBS}"\
+ " -- -Xiwyu --cxx17ns -Xiwyu --mapping_file=${BASE_BUILD_DIR}/bitcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp"\
+ " |& tee /tmp/iwyu_ci.out"
+ export P_CI_DIR="${BASE_ROOT_DIR}/src"
+ CI_EXEC "python3 ${DIR_IWYU}/include-what-you-use/fix_includes.py --nosafe_headers < /tmp/iwyu_ci.out"
+ CI_EXEC "git --no-pager diff"
fi
if [ "$RUN_SECURITY_TESTS" = "true" ]; then
diff --git a/ci/test_imagefile b/ci/test_imagefile
new file mode 100644
index 0000000000..4854708d1a
--- /dev/null
+++ b/ci/test_imagefile
@@ -0,0 +1,10 @@
+ARG CI_IMAGE_NAME_TAG
+FROM ${CI_IMAGE_NAME_TAG}
+
+ARG FILE_ENV
+ENV FILE_ENV=${FILE_ENV}
+
+COPY ./ci/retry/retry /usr/bin/retry
+COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_base_install/ci/test/
+
+RUN ["bash", "-c", "cd /ci_base_install/ && set -o errexit && source ./ci/test/00_setup_env.sh && ./ci/test/01_base_install.sh"]
diff --git a/configure.ac b/configure.ac
index c982ee8b8c..72503f2b1c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -163,24 +163,12 @@ AC_ARG_WITH([miniupnpc],
[use_upnp=$withval],
[use_upnp=auto])
-AC_ARG_ENABLE([upnp-default],
- [AS_HELP_STRING([--enable-upnp-default],
- [if UPNP is enabled, turn it on at startup (default is no)])],
- [use_upnp_default=$enableval],
- [use_upnp_default=no])
-
AC_ARG_WITH([natpmp],
[AS_HELP_STRING([--with-natpmp],
[enable NAT-PMP (default is yes if libnatpmp is found)])],
[use_natpmp=$withval],
[use_natpmp=auto])
-AC_ARG_ENABLE([natpmp-default],
- [AS_HELP_STRING([--enable-natpmp-default],
- [if NAT-PMP is enabled, turn it on at startup (default is no)])],
- [use_natpmp_default=$enableval],
- [use_natpmp_default=no])
-
AC_ARG_ENABLE(tests,
AS_HELP_STRING([--disable-tests],[do not compile tests (default is to compile)]),
[use_tests=$enableval],
@@ -448,7 +436,7 @@ if test "$CXXFLAGS_overridden" = "no"; then
AX_CHECK_COMPILE_FLAG([-Wvla], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wvla"], [], [$CXXFLAG_WERROR])
AX_CHECK_COMPILE_FLAG([-Wshadow-field], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wshadow-field"], [], [$CXXFLAG_WERROR])
AX_CHECK_COMPILE_FLAG([-Wthread-safety], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wthread-safety"], [], [$CXXFLAG_WERROR])
- AX_CHECK_COMPILE_FLAG([-Wloop-analysis], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"], [], [$CXXFLAG_WERROR])
+ AX_CHECK_COMPILE_FLAG([-Wloop-analysis], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wloop-analysis"], [], [$CXXFLAG_WERROR])
AX_CHECK_COMPILE_FLAG([-Wredundant-decls], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"], [], [$CXXFLAG_WERROR])
AX_CHECK_COMPILE_FLAG([-Wunused-member-function], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-member-function"], [], [$CXXFLAG_WERROR])
AX_CHECK_COMPILE_FLAG([-Wdate-time], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdate-time"], [], [$CXXFLAG_WERROR])
@@ -947,7 +935,9 @@ if test "$TARGET_OS" != "windows"; then
AX_CHECK_COMPILE_FLAG([-fPIC], [PIC_FLAGS="-fPIC"])
fi
-dnl All versions of gcc that we commonly use for building are subject to bug
+dnl Versions of gcc prior to 12.1 (commit
+dnl https://github.com/gcc-mirror/gcc/commit/551aa75778a4c5165d9533cd447c8fc822f583e1)
+dnl are subject to a bug, see the gccbug_90348 test case and
dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set
dnl -fstack-reuse=none for all gcc builds. (Only gcc understands this flag)
AX_CHECK_COMPILE_FLAG([-fstack-reuse=none], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-reuse=none"])
@@ -973,11 +963,11 @@ if test "$use_hardening" != "no"; then
dnl However, FORTIFY_SOURCE requires that there is some level of optimization, otherwise it does nothing and just creates a compiler warning.
dnl Since FORTIFY_SOURCE is a no-op without optimizations, do not enable it when enable_debug is yes.
if test "$enable_debug" != "yes"; then
- AX_CHECK_PREPROC_FLAG([-D_FORTIFY_SOURCE=2],[
+ AX_CHECK_PREPROC_FLAG([-D_FORTIFY_SOURCE=3],[
AX_CHECK_PREPROC_FLAG([-U_FORTIFY_SOURCE],[
HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -U_FORTIFY_SOURCE"
])
- HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -D_FORTIFY_SOURCE=2"
+ HARDENED_CPPFLAGS="$HARDENED_CPPFLAGS -D_FORTIFY_SOURCE=3"
])
fi
@@ -1428,14 +1418,15 @@ if test "$use_upnp" != "no"; then
[AC_CHECK_LIB([miniupnpc], [upnpDiscover], [MINIUPNPC_LIBS="$MINIUPNPC_LIBS -lminiupnpc"], [have_miniupnpc=no], [$MINIUPNPC_LIBS])],
[have_miniupnpc=no]
)
- dnl The minimum supported miniUPnPc API version is set to 10. This keeps compatibility
- dnl with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages.
+
+ dnl The minimum supported miniUPnPc API version is set to 17. This excludes
+ dnl versions with known vulnerabilities.
if test "$have_miniupnpc" != "no"; then
AC_MSG_CHECKING([whether miniUPnPc API version is supported])
AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[
@%:@include <miniupnpc/miniupnpc.h>
]], [[
- #if MINIUPNPC_API_VERSION >= 10
+ #if MINIUPNPC_API_VERSION >= 17
// Everything is okay
#else
# error miniUPnPc API version is too old
@@ -1444,7 +1435,7 @@ if test "$use_upnp" != "no"; then
AC_MSG_RESULT([yes])
],[
AC_MSG_RESULT([no])
- AC_MSG_WARN([miniUPnPc API version < 10 is unsupported, disabling UPnP support.])
+ AC_MSG_WARN([miniUPnPc API version < 17 is unsupported, disabling UPnP support.])
have_miniupnpc=no
])
fi
@@ -1461,7 +1452,7 @@ if test "$use_natpmp" != "no"; then
CPPFLAGS="$TEMP_CPPFLAGS"
fi
-if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench" = "nononononononono"; then
+if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench$enable_fuzz_binary" = "nonononononononono"; then
use_boost=no
else
use_boost=yes
@@ -1479,9 +1470,11 @@ if test "$use_boost" = "yes"; then
BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION"
dnl Prevent use of std::unary_function, which was removed in C++17,
- dnl and will generate warnings with newer compilers.
- dnl See: https://github.com/boostorg/container_hash/issues/22.
- BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_CXX98_FUNCTION_BASE"
+ dnl and will generate warnings with newer compilers for Boost
+ dnl older than 1.80.
+ dnl See: https://github.com/boostorg/config/pull/430.
+ AX_CHECK_PREPROC_FLAG([-DBOOST_NO_CXX98_FUNCTION_BASE], [BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_CXX98_FUNCTION_BASE"], [], [$CXXFLAG_WERROR],
+ [AC_LANG_PROGRAM([[#include <boost/config.hpp>]])])
if test "$enable_debug" = "yes" || test "$enable_fuzz" = "yes"; then
BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE"
@@ -1765,15 +1758,8 @@ if test "$have_miniupnpc" = "no"; then
else
if test "$use_upnp" != "no"; then
AC_MSG_RESULT([yes])
- AC_MSG_CHECKING([whether to build with UPnP enabled by default])
use_upnp=yes
- upnp_setting=0
- if test "$use_upnp_default" != "no"; then
- use_upnp_default=yes
- upnp_setting=1
- fi
- AC_MSG_RESULT([$use_upnp_default])
- AC_DEFINE_UNQUOTED([USE_UPNP],[$upnp_setting],[UPnP support not compiled if undefined, otherwise value (0 or 1) determines default state])
+ AC_DEFINE([USE_UPNP], [1], [Define to 1 if UPnP support should be compiled in.])
if test "$TARGET_OS" = "windows"; then
MINIUPNPC_CPPFLAGS="$MINIUPNPC_CPPFLAGS -DSTATICLIB -DMINIUPNP_STATICLIB"
fi
@@ -1793,15 +1779,8 @@ if test "$have_natpmp" = "no"; then
else
if test "$use_natpmp" != "no"; then
AC_MSG_RESULT([yes])
- AC_MSG_CHECKING([whether to build with NAT-PMP enabled by default])
use_natpmp=yes
- natpmp_setting=0
- if test "$use_natpmp_default" != "no"; then
- use_natpmp_default=yes
- natpmp_setting=1
- fi
- AC_MSG_RESULT($use_natpmp_default)
- AC_DEFINE_UNQUOTED([USE_NATPMP], [$natpmp_setting], [NAT-PMP support not compiled if undefined, otherwise value (0 or 1) determines default state])
+ AC_DEFINE([USE_NATPMP], [1], [Define to 1 if UPnP support should be compiled in.])
if test "$TARGET_OS" = "windows"; then
NATPMP_CPPFLAGS="$NATPMP_CPPFLAGS -DSTATICLIB -DNATPMP_STATICLIB"
fi
@@ -2004,14 +1983,6 @@ CPPFLAGS_TEMP="$CPPFLAGS"
unset CPPFLAGS
CPPFLAGS="$CPPFLAGS_TEMP"
-LDFLAGS_TEMP="$LDFLAGS"
-unset LDFLAGS
-LDFLAGS="$LDFLAGS_TEMP"
-
-LIBS_TEMP="$LIBS"
-unset LIBS
-LIBS="$LIBS_TEMP"
-
ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --enable-module-recovery --disable-module-ecdh"
AC_CONFIG_SUBDIRS([src/secp256k1])
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 8377b92736..6cd022ef17 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -34,7 +34,7 @@ def check_ELF_RELRO(binary) -> bool:
flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS)
if flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW:
have_bindnow = True
- except:
+ except Exception:
have_bindnow = False
return have_gnu_relro and have_bindnow
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 4b1cceb57c..f26236dd59 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -15,19 +15,19 @@ from typing import List, Dict
import lief #type:ignore
-# Debian 9 (Stretch) EOL: 2022. https://wiki.debian.org/DebianReleases#Production_Releases
+# Debian 10 (Buster) EOL: 2024. https://wiki.debian.org/LTS
#
-# - g++ version 6.3.0 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=g%2B%2B)
-# - libc version 2.24 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=libc6)
+# - libgcc version 8.3.0 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libgcc1)
+# - libc version 2.28 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libc6)
#
-# Ubuntu 16.04 (Xenial) EOL: 2026. https://wiki.ubuntu.com/Releases
+# Ubuntu 18.04 (Bionic) EOL: 2028. https://wiki.ubuntu.com/ReleaseTeam
#
-# - g++ version 5.3.1
-# - libc version 2.23
+# - libgcc version 8.4.0 (https://packages.ubuntu.com/bionic/libgcc1)
+# - libc version 2.27 (https://packages.ubuntu.com/bionic/libc6)
#
# CentOS Stream 8 EOL: 2024. https://wiki.centos.org/About/Product
#
-# - g++ version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
+# - libgcc version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
# - libc version 2.28 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
#
# See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info.
@@ -35,10 +35,10 @@ import lief #type:ignore
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': {
- lief.ELF.ARCH.x86_64: (2,18),
- lief.ELF.ARCH.ARM: (2,18),
- lief.ELF.ARCH.AARCH64:(2,18),
- lief.ELF.ARCH.PPC64: (2,18),
+ lief.ELF.ARCH.x86_64: (2,27),
+ lief.ELF.ARCH.ARM: (2,27),
+ lief.ELF.ARCH.AARCH64:(2,27),
+ lief.ELF.ARCH.PPC64: (2,27),
lief.ELF.ARCH.RISCV: (2,27),
},
'LIBATOMIC': (1,0),
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py
index de73b02090..e304880140 100755
--- a/contrib/devtools/test-symbol-check.py
+++ b/contrib/devtools/test-symbol-check.py
@@ -38,31 +38,6 @@ class TestSymbolChecks(unittest.TestCase):
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
- # there's no way to do this test for RISC-V at the moment; we build for
- # RISC-V in a glibc 2.27 environment and we allow all symbols from 2.27.
- if 'riscv' in get_machine(cc):
- self.skipTest("test not available for RISC-V")
-
- # nextup was introduced in GLIBC 2.24, so is newer than our supported
- # glibc (2.18), and available in our release build environment (2.24).
- with open(source, 'w', encoding="utf8") as f:
- f.write('''
- #define _GNU_SOURCE
- #include <math.h>
-
- double nextup(double x);
-
- int main()
- {
- nextup(3.14);
- return 0;
- }
- ''')
-
- self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']),
- (1, executable + ': symbol nextup from unsupported version GLIBC_2.24(3)\n' +
- executable + ': failed IMPORTED_SYMBOLS'))
-
# -lutil is part of the libc6 package so a safe bet that it's installed
# it's also out of context enough that it's unlikely to ever become a real dependency
source = 'test2.c'
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index f2be3677eb..08a6c72a95 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -238,13 +238,6 @@ case "$HOST" in
*mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;;
esac
-# Using --no-tls-get-addr-optimize retains compatibility with glibc 2.18, by
-# avoiding a PowerPC64 optimisation available in glibc 2.22 and later.
-# https://sourceware.org/binutils/docs-2.35/ld/PowerPC64-ELF64.html
-case "$HOST" in
- *powerpc64*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,--no-tls-get-addr-optimize" ;;
-esac
-
# Make $HOST-specific native binaries from depends available in $PATH
export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}"
mkdir -p "$DISTSRC"
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 8e5c89cc5e..379ad898c4 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -147,7 +147,7 @@ chain for " target " development."))
#:key
(base-gcc-for-libc base-gcc)
(base-kernel-headers base-linux-kernel-headers)
- (base-libc (make-glibc-with-bind-now (make-glibc-without-werror glibc-2.24)))
+ (base-libc (hardened-glibc (make-glibc-without-werror glibc-2.27)))
(base-gcc (make-gcc-rpath-link (hardened-gcc base-gcc))))
"Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values
desirable for building Bitcoin Core release binaries."
@@ -537,33 +537,14 @@ inspecting signatures in Mach-O binaries.")
(define (make-glibc-without-werror glibc)
(package-with-extra-configure-variable glibc "enable_werror" "no"))
-(define (make-glibc-with-stack-protector glibc)
- (package-with-extra-configure-variable glibc "--enable-stack-protector" "all"))
-
-(define (make-glibc-with-bind-now glibc)
- (package-with-extra-configure-variable glibc "--enable-bind-now" "yes"))
-
-(define-public glibc-2.24
- (package
- (inherit glibc-2.31)
- (version "2.24")
- (source (origin
- (method git-fetch)
- (uri (git-reference
- (url "https://sourceware.org/git/glibc.git")
- (commit "0d7f1ed30969886c8dde62fbf7d2c79967d4bace")))
- (file-name (git-file-name "glibc" "0d7f1ed30969886c8dde62fbf7d2c79967d4bace"))
- (sha256
- (base32
- "0g5hryia5v1k0qx97qffgwzrz4lr4jw3s5kj04yllhswsxyjbic3"))
- (patches (search-our-patches "glibc-ldd-x86_64.patch"
- "glibc-versioned-locpath.patch"
- "glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch"
- "glibc-2.24-no-build-time-cxx-header-run.patch"
- "glibc-2.24-fcommon.patch"
- "glibc-2.24-guix-prefix.patch"))))))
+;; https://www.gnu.org/software/libc/manual/html_node/Configuring-and-compiling.html
+(define (hardened-glibc glibc)
+ (package-with-extra-configure-variable (
+ package-with-extra-configure-variable glibc
+ "--enable-stack-protector" "all")
+ "--enable-bind-now" "yes"))
-(define-public glibc-2.27/bitcoin-patched
+(define-public glibc-2.27
(package
(inherit glibc-2.31)
(version "2.27")
@@ -571,14 +552,15 @@ inspecting signatures in Mach-O binaries.")
(method git-fetch)
(uri (git-reference
(url "https://sourceware.org/git/glibc.git")
- (commit "23158b08a0908f381459f273a984c6fd328363cb")))
- (file-name (git-file-name "glibc" "23158b08a0908f381459f273a984c6fd328363cb"))
+ (commit "73886db6218e613bd6d4edf529f11e008a6c2fa6")))
+ (file-name (git-file-name "glibc" "73886db6218e613bd6d4edf529f11e008a6c2fa6"))
(sha256
(base32
- "1b2n1gxv9f4fd5yy68qjbnarhf8mf4vmlxk10i3328c1w5pmp0ca"))
+ "0azpb9cvnbv25zg8019rqz48h8i2257ngyjg566dlnp74ivrs9vq"))
(patches (search-our-patches "glibc-ldd-x86_64.patch"
+ "glibc-versioned-locpath.patch"
"glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch"
- "glibc-2.27-dont-redefine-nss-database.patch"
+ "glibc-2.27-fcommon.patch"
"glibc-2.27-guix-prefix.patch"))))))
(packages->manifest
@@ -627,12 +609,7 @@ inspecting signatures in Mach-O binaries.")
(make-nsis-for-gcc-10 nsis-x86_64)
osslsigncode))
((string-contains target "-linux-")
- (list (cond ((string-contains target "riscv64-")
- (make-bitcoin-cross-toolchain target
- #:base-libc (make-glibc-with-stack-protector
- (make-glibc-with-bind-now (make-glibc-without-werror glibc-2.27/bitcoin-patched)))))
- (else
- (make-bitcoin-cross-toolchain target)))))
+ (list (make-bitcoin-cross-toolchain target)))
((string-contains target "darwin")
(list clang-toolchain-10 binutils cmake xorriso python-signapple))
(else '())))))
diff --git a/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch
deleted file mode 100644
index 5c4d0c6ebe..0000000000
--- a/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-https://sourceware.org/git/?p=glibc.git;a=commit;h=a68ba2f3cd3cbe32c1f31e13c20ed13487727b32
-
-commit 6b02af31e9a721bb15a11380cd22d53b621711f8
-Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
-Date: Wed Oct 18 17:26:23 2017 +0100
-
- [AARCH64] Rewrite elf_machine_load_address using _DYNAMIC symbol
-
- This patch rewrites aarch64 elf_machine_load_address to use special _DYNAMIC
- symbol instead of _dl_start.
-
- The static address of _DYNAMIC symbol is stored in the first GOT entry.
- Here is the change which makes this solution work (part of binutils 2.24):
- https://sourceware.org/ml/binutils/2013-06/msg00248.html
-
- i386, x86_64 targets use the same method to do this as well.
-
- The original implementation relies on a trick that R_AARCH64_ABS32 relocation
- being resolved at link time and the static address fits in the 32bits.
- However, in LP64, normally, the address is defined to be 64 bit.
-
- Here is the C version one which should be portable in all cases.
-
- * sysdeps/aarch64/dl-machine.h (elf_machine_load_address): Use
- _DYNAMIC symbol to calculate load address.
-
-diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
-index e86d8b5b63..5a5b8a5de5 100644
---- a/sysdeps/aarch64/dl-machine.h
-+++ b/sysdeps/aarch64/dl-machine.h
-@@ -49,26 +49,11 @@ elf_machine_load_address (void)
- /* To figure out the load address we use the definition that for any symbol:
- dynamic_addr(symbol) = static_addr(symbol) + load_addr
-
-- The choice of symbol is arbitrary. The static address we obtain
-- by constructing a non GOT reference to the symbol, the dynamic
-- address of the symbol we compute using adrp/add to compute the
-- symbol's address relative to the PC.
-- This depends on 32bit relocations being resolved at link time
-- and that the static address fits in the 32bits. */
--
-- ElfW(Addr) static_addr;
-- ElfW(Addr) dynamic_addr;
--
-- asm (" \n"
--" adrp %1, _dl_start; \n"
--" add %1, %1, #:lo12:_dl_start \n"
--" ldr %w0, 1f \n"
--" b 2f \n"
--"1: \n"
--" .word _dl_start \n"
--"2: \n"
-- : "=r" (static_addr), "=r" (dynamic_addr));
-- return dynamic_addr - static_addr;
-+ _DYNAMIC sysmbol is used here as its link-time address stored in
-+ the special unrelocated first GOT entry. */
-+
-+ extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
-+ return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
- }
-
- /* Set up the loaded object described by L so its unrelocated PLT
diff --git a/contrib/guix/patches/glibc-2.24-guix-prefix.patch b/contrib/guix/patches/glibc-2.24-guix-prefix.patch
deleted file mode 100644
index 875e8cd611..0000000000
--- a/contrib/guix/patches/glibc-2.24-guix-prefix.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Without ffile-prefix-map, the debug symbols will contain paths for the
-guix store which will include the hashes of each package. However, the
-hash for the same package will differ when on different architectures.
-In order to be reproducible regardless of the architecture used to build
-the package, map all guix store prefixes to something fixed, e.g. /usr.
-
-We might be able to drop this in favour of using --with-nonshared-cflags
-when we being using newer versions of glibc.
-
---- a/Makeconfig
-+++ b/Makeconfig
-@@ -950,6 +950,10 @@ object-suffixes-for-libc += .oS
- # shared objects. We don't want to use CFLAGS-os because users may, for
- # example, make that processor-specific.
- CFLAGS-.oS = $(CFLAGS-.o) $(PIC-ccflag)
-+
-+# Map Guix store paths to /usr
-+CFLAGS-.oS += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;`
-+
- CPPFLAGS-.oS = $(CPPFLAGS-.o) -DPIC -DLIBC_NONSHARED=1
- libtype.oS = lib%_nonshared.a
- endif
---
-2.35.1
-
diff --git a/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch
deleted file mode 100644
index 11fe7fdc99..0000000000
--- a/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-https://sourceware.org/git/?p=glibc.git;a=commit;h=fc3e1337be1c6935ab58bd13520f97a535cf70cc
-
-commit dc23a45db566095e83ff0b7a57afc87fb5ca89a1
-Author: Florian Weimer <fweimer@redhat.com>
-Date: Wed Sep 21 10:45:32 2016 +0200
-
- Avoid running $(CXX) during build to obtain header file paths
-
- This reduces the build time somewhat and is particularly noticeable
- during rebuilds with few code changes.
-
-diff --git a/Makerules b/Makerules
-index 7e4077ee50..c338850de5 100644
---- a/Makerules
-+++ b/Makerules
-@@ -121,14 +121,10 @@ ifneq (,$(CXX))
- # will be used instead of /usr/include/stdlib.h and /usr/include/math.h.
- before-compile := $(common-objpfx)cstdlib $(common-objpfx)cmath \
- $(before-compile)
--cstdlib=$(shell echo "\#include <cstdlib>" | $(CXX) -M -MP -x c++ - \
-- | sed -n "/cstdlib:/{s/:$$//;p}")
--$(common-objpfx)cstdlib: $(cstdlib)
-+$(common-objpfx)cstdlib: $(c++-cstdlib-header)
- $(INSTALL_DATA) $< $@T
- $(move-if-change) $@T $@
--cmath=$(shell echo "\#include <cmath>" | $(CXX) -M -MP -x c++ - \
-- | sed -n "/cmath:/{s/:$$//;p}")
--$(common-objpfx)cmath: $(cmath)
-+$(common-objpfx)cmath: $(c++-cmath-header)
- $(INSTALL_DATA) $< $@T
- $(move-if-change) $@T $@
- endif
-diff --git a/config.make.in b/config.make.in
-index 95c6f36876..04a8b3ed7f 100644
---- a/config.make.in
-+++ b/config.make.in
-@@ -45,6 +45,8 @@ defines = @DEFINES@
- sysheaders = @sysheaders@
- sysincludes = @SYSINCLUDES@
- c++-sysincludes = @CXX_SYSINCLUDES@
-+c++-cstdlib-header = @CXX_CSTDLIB_HEADER@
-+c++-cmath-header = @CXX_CMATH_HEADER@
- all-warnings = @all_warnings@
- enable-werror = @enable_werror@
-
-diff --git a/configure b/configure
-index 17625e1041..6ff252744b 100755
---- a/configure
-+++ b/configure
-@@ -635,6 +635,8 @@ BISON
- INSTALL_INFO
- PERL
- BASH_SHELL
-+CXX_CMATH_HEADER
-+CXX_CSTDLIB_HEADER
- CXX_SYSINCLUDES
- SYSINCLUDES
- AUTOCONF
-@@ -5054,6 +5056,18 @@ fi
-
-
-
-+# Obtain some C++ header file paths. This is used to make a local
-+# copy of those headers in Makerules.
-+if test -n "$CXX"; then
-+ find_cxx_header () {
-+ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}"
-+ }
-+ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)"
-+ CXX_CMATH_HEADER="$(find_cxx_header cmath)"
-+fi
-+
-+
-+
- # Test if LD_LIBRARY_PATH contains the notation for the current directory
- # since this would lead to problems installing/building glibc.
- # LD_LIBRARY_PATH contains the current directory if one of the following
-diff --git a/configure.ac b/configure.ac
-index 33bcd62180..9938ab0dc2 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -1039,6 +1039,18 @@ fi
- AC_SUBST(SYSINCLUDES)
- AC_SUBST(CXX_SYSINCLUDES)
-
-+# Obtain some C++ header file paths. This is used to make a local
-+# copy of those headers in Makerules.
-+if test -n "$CXX"; then
-+ find_cxx_header () {
-+ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}"
-+ }
-+ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)"
-+ CXX_CMATH_HEADER="$(find_cxx_header cmath)"
-+fi
-+AC_SUBST(CXX_CSTDLIB_HEADER)
-+AC_SUBST(CXX_CMATH_HEADER)
-+
- # Test if LD_LIBRARY_PATH contains the notation for the current directory
- # since this would lead to problems installing/building glibc.
- # LD_LIBRARY_PATH contains the current directory if one of the following
diff --git a/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch b/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch
deleted file mode 100644
index 16a595d613..0000000000
--- a/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-commit 78a90c2f74a2012dd3eff302189e47ff6779a757
-Author: Andreas Schwab <schwab@linux-m68k.org>
-Date: Fri Mar 2 23:07:14 2018 +0100
-
- Fix multiple definitions of __nss_*_database (bug 22918)
-
- (cherry picked from commit eaf6753f8aac33a36deb98c1031d1bad7b593d2d)
-
-diff --git a/nscd/gai.c b/nscd/gai.c
-index d081747797..576fd0045b 100644
---- a/nscd/gai.c
-+++ b/nscd/gai.c
-@@ -45,3 +45,6 @@
- #ifdef HAVE_LIBIDN
- # include <libidn/idn-stub.c>
- #endif
-+
-+/* Some variables normally defined in libc. */
-+service_user *__nss_hosts_database attribute_hidden;
-diff --git a/nss/nsswitch.c b/nss/nsswitch.c
-index d5e655974f..b0f0c11a3e 100644
---- a/nss/nsswitch.c
-+++ b/nss/nsswitch.c
-@@ -62,7 +62,7 @@ static service_library *nss_new_service (name_database *database,
-
- /* Declare external database variables. */
- #define DEFINE_DATABASE(name) \
-- extern service_user *__nss_##name##_database attribute_hidden; \
-+ service_user *__nss_##name##_database attribute_hidden; \
- weak_extern (__nss_##name##_database)
- #include "databases.def"
- #undef DEFINE_DATABASE
-diff --git a/nss/nsswitch.h b/nss/nsswitch.h
-index eccb535ef5..63573b9ebc 100644
---- a/nss/nsswitch.h
-+++ b/nss/nsswitch.h
-@@ -226,10 +226,10 @@ libc_hidden_proto (__nss_hostname_digits_dots)
- #define MAX_NR_ADDRS 48
-
- /* Prototypes for __nss_*_lookup2 functions. */
--#define DEFINE_DATABASE(arg) \
-- service_user *__nss_##arg##_database attribute_hidden; \
-- int __nss_##arg##_lookup2 (service_user **, const char *, \
-- const char *, void **); \
-+#define DEFINE_DATABASE(arg) \
-+ extern service_user *__nss_##arg##_database attribute_hidden; \
-+ int __nss_##arg##_lookup2 (service_user **, const char *, \
-+ const char *, void **); \
- libc_hidden_proto (__nss_##arg##_lookup2)
- #include "databases.def"
- #undef DEFINE_DATABASE
-diff --git a/posix/tst-rfc3484-2.c b/posix/tst-rfc3484-2.c
-index f509534ca9..8c64ac59ff 100644
---- a/posix/tst-rfc3484-2.c
-+++ b/posix/tst-rfc3484-2.c
-@@ -58,6 +58,7 @@ _res_hconf_init (void)
- #undef USE_NSCD
- #include "../sysdeps/posix/getaddrinfo.c"
-
-+service_user *__nss_hosts_database attribute_hidden;
-
- /* This is the beginning of the real test code. The above defines
- (among other things) the function rfc3484_sort. */
-diff --git a/posix/tst-rfc3484-3.c b/posix/tst-rfc3484-3.c
-index ae44087a10..1c61aaf844 100644
---- a/posix/tst-rfc3484-3.c
-+++ b/posix/tst-rfc3484-3.c
-@@ -58,6 +58,7 @@ _res_hconf_init (void)
- #undef USE_NSCD
- #include "../sysdeps/posix/getaddrinfo.c"
-
-+service_user *__nss_hosts_database attribute_hidden;
-
- /* This is the beginning of the real test code. The above defines
- (among other things) the function rfc3484_sort. */
-diff --git a/posix/tst-rfc3484.c b/posix/tst-rfc3484.c
-index 7f191abbbc..8f45848e44 100644
---- a/posix/tst-rfc3484.c
-+++ b/posix/tst-rfc3484.c
-@@ -58,6 +58,7 @@ _res_hconf_init (void)
- #undef USE_NSCD
- #include "../sysdeps/posix/getaddrinfo.c"
-
-+service_user *__nss_hosts_database attribute_hidden;
-
- /* This is the beginning of the real test code. The above defines
- (among other things) the function rfc3484_sort. */
diff --git a/contrib/guix/patches/glibc-2.24-fcommon.patch b/contrib/guix/patches/glibc-2.27-fcommon.patch
index 2bc32ede90..f3baacab98 100644
--- a/contrib/guix/patches/glibc-2.24-fcommon.patch
+++ b/contrib/guix/patches/glibc-2.27-fcommon.patch
@@ -18,15 +18,15 @@ Date: Fri May 6 11:03:04 2022 +0100
https://sourceware.org/git/?p=glibc.git;a=commit;h=7650321ce037302bfc2f026aa19e0213b8d02fe6
diff --git a/Makeconfig b/Makeconfig
-index ee379f5852..63c4a2f234 100644
+index 86a71e5802..aa2166be60 100644
--- a/Makeconfig
+++ b/Makeconfig
-@@ -824,7 +824,7 @@ ifeq "$(strip $(+cflags))" ""
- +cflags := $(default_cflags)
+@@ -896,7 +896,7 @@ ifeq "$(strip $(+cflags))" ""
endif # $(+cflags) == ""
--+cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags)
-++cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) -fcommon
+ +cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) \
+- $(+stack-protector)
++ $(+stack-protector) -fcommon
+gcc-nowarn := -w
# Don't duplicate options if we inherited variables from the parent.
diff --git a/contrib/guix/patches/glibc-2.27-guix-prefix.patch b/contrib/guix/patches/glibc-2.27-guix-prefix.patch
index d777af74f0..6648bc6c05 100644
--- a/contrib/guix/patches/glibc-2.27-guix-prefix.patch
+++ b/contrib/guix/patches/glibc-2.27-guix-prefix.patch
@@ -20,6 +20,3 @@ when we being using newer versions of glibc.
libtype.o := lib%.a
object-suffixes += .o
ifeq (yes,$(build-shared))
---
-2.35.1
-
diff --git a/contrib/guix/patches/glibc-ldd-x86_64.patch b/contrib/guix/patches/glibc-ldd-x86_64.patch
index b1b6d5a548..a23b095caa 100644
--- a/contrib/guix/patches/glibc-ldd-x86_64.patch
+++ b/contrib/guix/patches/glibc-ldd-x86_64.patch
@@ -1,8 +1,8 @@
By default, 'RTDLLIST' in 'ldd' refers to 'lib64/ld-linux-x86-64.so', whereas
it's in 'lib/' for us. This patch fixes that.
---- glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2012-12-25 04:02:13.000000000 +0100
-+++ glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2013-09-15 23:08:03.000000000 +0200
+--- a/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed
++++ b/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed
@@ -1,3 +1,3 @@
/LD_TRACE_LOADED_OBJECTS=1/a\
add_env="$add_env LD_LIBRARY_VERSION=\\$verify_out"
diff --git a/contrib/install_db4.sh b/contrib/install_db4.sh
deleted file mode 100755
index c7d39f5b99..0000000000
--- a/contrib/install_db4.sh
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2017-2021 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-# Install libdb4.8 (Berkeley DB).
-
-export LC_ALL=C
-set -e
-
-if [ -z "${1}" ]; then
- echo "Usage: $0 <base-dir> [<extra-bdb-configure-flag> ...]"
- echo
- echo "Must specify a single argument: the directory in which db4 will be built."
- echo "This is probably \`pwd\` if you're at the root of the bitcoin repository."
- exit 1
-fi
-
-expand_path() {
- cd "${1}" && pwd -P
-}
-
-BDB_PREFIX="$(expand_path "${1}")/db4"; shift;
-BDB_VERSION='db-4.8.30.NC'
-BDB_HASH='12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef'
-BDB_URL="https://download.oracle.com/berkeley-db/${BDB_VERSION}.tar.gz"
-
-check_exists() {
- command -v "$1" >/dev/null
-}
-
-sha256_check() {
- # Args: <sha256_hash> <filename>
- #
- if [ "$(uname)" = "FreeBSD" ]; then
- # sha256sum exists on FreeBSD, but takes different arguments than the GNU version
- sha256 -c "${1}" "${2}"
- elif check_exists sha256sum; then
- echo "${1} ${2}" | sha256sum -c
- elif check_exists sha256; then
- echo "${1} ${2}" | sha256 -c
- else
- echo "${1} ${2}" | shasum -a 256 -c
- fi
-}
-
-http_get() {
- # Args: <url> <filename> <sha256_hash>
- #
- # It's acceptable that we don't require SSL here because we manually verify
- # content hashes below.
- #
- if [ -f "${2}" ]; then
- echo "File ${2} already exists; not downloading again"
- elif check_exists curl; then
- curl --insecure --retry 5 "${1}" -o "${2}"
- elif check_exists wget; then
- wget --no-check-certificate "${1}" -O "${2}"
- else
- echo "Simple transfer utilities 'curl' and 'wget' not found. Please install one of them and try again."
- exit 1
- fi
-
- sha256_check "${3}" "${2}"
-}
-
-# Ensure the commands we use exist on the system
-if ! check_exists patch; then
- echo "Command-line tool 'patch' not found. Install patch and try again."
- exit 1
-fi
-
-mkdir -p "${BDB_PREFIX}"
-http_get "${BDB_URL}" "${BDB_VERSION}.tar.gz" "${BDB_HASH}"
-tar -xzvf ${BDB_VERSION}.tar.gz -C "$BDB_PREFIX"
-cd "${BDB_PREFIX}/${BDB_VERSION}/"
-
-# Apply a patch necessary when building with clang and c++11 (see https://community.oracle.com/thread/3952592)
-patch --ignore-whitespace -p1 << 'EOF'
-commit 3311d68f11d1697565401eee6efc85c34f022ea7
-Author: fanquake <fanquake@gmail.com>
-Date: Mon Aug 17 20:03:56 2020 +0800
-
- Fix C++11 compatibility
-
-diff --git a/dbinc/atomic.h b/dbinc/atomic.h
-index 0034dcc..7c11d4a 100644
---- a/dbinc/atomic.h
-+++ b/dbinc/atomic.h
-@@ -70,7 +70,7 @@ typedef struct {
- * These have no memory barriers; the caller must include them when necessary.
- */
- #define atomic_read(p) ((p)->value)
--#define atomic_init(p, val) ((p)->value = (val))
-+#define atomic_init_db(p, val) ((p)->value = (val))
-
- #ifdef HAVE_ATOMIC_SUPPORT
-
-@@ -144,7 +144,7 @@ typedef LONG volatile *interlocked_val;
- #define atomic_inc(env, p) __atomic_inc(p)
- #define atomic_dec(env, p) __atomic_dec(p)
- #define atomic_compare_exchange(env, p, o, n) \
-- __atomic_compare_exchange((p), (o), (n))
-+ __atomic_compare_exchange_db((p), (o), (n))
- static inline int __atomic_inc(db_atomic_t *p)
- {
- int temp;
-@@ -176,7 +176,7 @@ static inline int __atomic_dec(db_atomic_t *p)
- * http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
- * which configure could be changed to use.
- */
--static inline int __atomic_compare_exchange(
-+static inline int __atomic_compare_exchange_db(
- db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval)
- {
- atomic_value_t was;
-@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange(
- #define atomic_dec(env, p) (--(p)->value)
- #define atomic_compare_exchange(env, p, oldval, newval) \
- (DB_ASSERT(env, atomic_read(p) == (oldval)), \
-- atomic_init(p, (newval)), 1)
-+ atomic_init_db(p, (newval)), 1)
- #else
- #define atomic_inc(env, p) __atomic_inc(env, p)
- #define atomic_dec(env, p) __atomic_dec(env, p)
-diff --git a/mp/mp_fget.c b/mp/mp_fget.c
-index 5fdee5a..0b75f57 100644
---- a/mp/mp_fget.c
-+++ b/mp/mp_fget.c
-@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */
-
- /* Initialize enough so we can call __memp_bhfree. */
- alloc_bhp->flags = 0;
-- atomic_init(&alloc_bhp->ref, 1);
-+ atomic_init_db(&alloc_bhp->ref, 1);
- #ifdef DIAGNOSTIC
- if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
- __db_errx(env,
-@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. */
- MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize,
- PROT_READ);
-
-- atomic_init(&alloc_bhp->ref, 1);
-+ atomic_init_db(&alloc_bhp->ref, 1);
- MUTEX_LOCK(env, alloc_bhp->mtx_buf);
- alloc_bhp->priority = bhp->priority;
- alloc_bhp->pgno = bhp->pgno;
-diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c
-index 34467d2..f05aa0c 100644
---- a/mp/mp_mvcc.c
-+++ b/mp/mp_mvcc.c
-@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp)
- #else
- memcpy(frozen_bhp, bhp, SSZA(BH, buf));
- #endif
-- atomic_init(&frozen_bhp->ref, 0);
-+ atomic_init_db(&frozen_bhp->ref, 0);
- if (mutex != MUTEX_INVALID)
- frozen_bhp->mtx_buf = mutex;
- else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH,
-@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp)
- #endif
- alloc_bhp->mtx_buf = mutex;
- MUTEX_LOCK(env, alloc_bhp->mtx_buf);
-- atomic_init(&alloc_bhp->ref, 1);
-+ atomic_init_db(&alloc_bhp->ref, 1);
- F_CLR(alloc_bhp, BH_FROZEN);
- }
-
-diff --git a/mp/mp_region.c b/mp/mp_region.c
-index e6cece9..ddbe906 100644
---- a/mp/mp_region.c
-+++ b/mp/mp_region.c
-@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
- MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0)
- return (ret);
- SH_TAILQ_INIT(&htab[i].hash_bucket);
-- atomic_init(&htab[i].hash_page_dirty, 0);
-+ atomic_init_db(&htab[i].hash_page_dirty, 0);
- }
-
- /*
-@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
- hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? MUTEX_INVALID :
- mtx_base + i;
- SH_TAILQ_INIT(&hp->hash_bucket);
-- atomic_init(&hp->hash_page_dirty, 0);
-+ atomic_init_db(&hp->hash_page_dirty, 0);
- #ifdef HAVE_STATISTICS
- hp->hash_io_wait = 0;
- hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0;
-diff --git a/mutex/mut_method.c b/mutex/mut_method.c
-index 2588763..5c6d516 100644
---- a/mutex/mut_method.c
-+++ b/mutex/mut_method.c
-@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval)
- MUTEX_LOCK(env, mtx);
- ret = atomic_read(v) == oldval;
- if (ret)
-- atomic_init(v, newval);
-+ atomic_init_db(v, newval);
- MUTEX_UNLOCK(env, mtx);
-
- return (ret);
-diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c
-index f3922e0..e40fcdf 100644
---- a/mutex/mut_tas.c
-+++ b/mutex/mut_tas.c
-@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags)
-
- #ifdef HAVE_SHARED_LATCHES
- if (F_ISSET(mutexp, DB_MUTEX_SHARED))
-- atomic_init(&mutexp->sharecount, 0);
-+ atomic_init_db(&mutexp->sharecount, 0);
- else
- #endif
- if (MUTEX_INIT(&mutexp->tas)) {
-@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex)
- F_CLR(mutexp, DB_MUTEX_LOCKED);
- /* Flush flag update before zeroing count */
- MEMBAR_EXIT();
-- atomic_init(&mutexp->sharecount, 0);
-+ atomic_init_db(&mutexp->sharecount, 0);
- } else {
- DB_ASSERT(env, sharecount > 0);
- MEMBAR_EXIT();
-EOF
-
-# The packaged config.guess and config.sub are ancient (2009) and can cause build issues.
-# Replace them with modern versions.
-# See https://github.com/bitcoin/bitcoin/issues/16064
-CONFIG_GUESS_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f'
-CONFIG_GUESS_HASH='c8f530e01840719871748a8071113435bdfdf75b74c57e78e47898edea8754ae'
-CONFIG_SUB_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f'
-CONFIG_SUB_HASH='3969f7d5f6967ccc6f792401b8ef3916a1d1b1d0f0de5a4e354c95addb8b800e'
-
-rm -f "dist/config.guess"
-rm -f "dist/config.sub"
-
-http_get "${CONFIG_GUESS_URL}" dist/config.guess "${CONFIG_GUESS_HASH}"
-http_get "${CONFIG_SUB_URL}" dist/config.sub "${CONFIG_SUB_HASH}"
-
-cd build_unix/
-
-"${BDB_PREFIX}/${BDB_VERSION}/dist/configure" \
- --enable-cxx --disable-shared --disable-replication --with-pic --prefix="${BDB_PREFIX}" \
- "${@}"
-
-make install
-
-echo
-echo "db4 build complete."
-echo
-# shellcheck disable=SC2016
-echo 'When compiling bitcoind, run `./configure` in the following way:'
-echo
-echo " export BDB_PREFIX='${BDB_PREFIX}'"
-# shellcheck disable=SC2016
-echo ' ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include" ...'
diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md
index b2ea7522ac..b0bbe96493 100644
--- a/contrib/seeds/README.md
+++ b/contrib/seeds/README.md
@@ -13,6 +13,6 @@ data. Run the following commands from the `/contrib/seeds` directory:
curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt
curl https://bitcoin.sipa.be/asmap-filled.dat > asmap-filled.dat
- python3 makeseeds.py -a asmap-filled.dat < seeds_main.txt > nodes_main.txt
+ python3 makeseeds.py -a asmap-filled.dat -s seeds_main.txt > nodes_main.txt
cat nodes_main_manual.txt >> nodes_main.txt
python3 generate-seeds.py . > ../../src/chainparamsseeds.h
diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py
index eda58c370f..23d38ee48d 100755
--- a/contrib/seeds/makeseeds.py
+++ b/contrib/seeds/makeseeds.py
@@ -173,6 +173,7 @@ def ip_stats(ips: List[Dict]) -> str:
def parse_args():
argparser = argparse.ArgumentParser(description='Generate a list of bitcoin node seed ip addresses.')
argparser.add_argument("-a","--asmap", help='the location of the asmap asn database file (required)', required=True)
+ argparser.add_argument("-s","--seeds", help='the location of the DNS seeds file (required)', required=True)
return argparser.parse_args()
def main():
@@ -184,7 +185,8 @@ def main():
print('Done.', file=sys.stderr)
print('Loading and parsing DNS seeds…', end='', file=sys.stderr, flush=True)
- lines = sys.stdin.readlines()
+ with open(args.seeds, 'r', encoding='utf8') as f:
+ lines = f.readlines()
ips = [parseline(line) for line in lines]
print('Done.', file=sys.stderr)
diff --git a/contrib/signet/getcoins.py b/contrib/signet/getcoins.py
index d4e436626f..19751ae269 100755
--- a/contrib/signet/getcoins.py
+++ b/contrib/signet/getcoins.py
@@ -142,7 +142,7 @@ if args.captcha != '': # Retrieve a captcha
try:
res = session.post(args.faucet, data=data)
-except:
+except Exception:
raise SystemExit(f"Unexpected error when contacting faucet: {sys.exc_info()[0]}")
# Display the output as per the returned status code
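For context on the lint fix above: a bare `except:` also catches `SystemExit` and `KeyboardInterrupt`, which derive from `BaseException` rather than `Exception`. A minimal, hypothetical sketch (not taken from `getcoins.py`) of the narrower pattern:

```python
# Hypothetical sketch: catching Exception keeps Ctrl-C (KeyboardInterrupt) and
# sys.exit() (SystemExit) working, while still reporting unexpected failures.
def post_with_report(do_post):
    try:
        return do_post()
    except Exception as exc:
        raise SystemExit(f"Unexpected error when contacting faucet: {exc!r}")
```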
diff --git a/contrib/verify-commits/allow-revsig-commits b/contrib/verify-commits/allow-revsig-commits
index 0bb299b8fa..0c43d9cce5 100644
--- a/contrib/verify-commits/allow-revsig-commits
+++ b/contrib/verify-commits/allow-revsig-commits
@@ -643,3 +643,178 @@ b7365f0545b1a6862e3277b2b2139ee0d5aee1cf
4bd0e9b90a39c5c6a016b83882ae44cb4d28f1f8
7438ceac716fdfe6621728c05e718eaa89dd89aa
4e3efd47e0d50c6cd1dc81ccc9669a5b2658f495
+5ab6a942764bf6577ae311f2551153dde3d4830c
+b04f42efe31e23e15cc945efe0de906ed2eadb2b
+ceae0eb7e31f9d3495a13a23df7372e5e870b572
+5bf65ec66e5986c9188e3f6234f1c5c0f8dc7f90
+55c9e2d790fa2e137ccd0d91e6cf3e2d0bff4813
+ba29911e21c88f49780c6c87f94ff8ed6e764a9d
+fffff0abb9c71f0af83a7925db3c293b3bb12158
+aaeb315ff0f7956449a92736160795f0140369e3
+0dd34773334c7f4db7b05df30ee61b011795b46d
+2598720d6c1ef15288045599de7e65f8707d09bc
+bc83710fdcc09d8e427e77457df107acc9db1be5
+ddd7a39aa960ee3639ef1e59b2e53852e0862c52
+0808c88d7bd992d5c9ded0009c9563f6177b4035
+a085a554913ae8f4ed83afac830ce6dc39c9cc65
+b1a824dd06aa58618947783edee2dd891b5204dc
+a4e066af8573dcefb11dff120e1c09e8cf7f40c2
+58b9d6cf9e9b801be9c677a3ae121e5d2950ce66
+7377ed778c6d832ecd291e65b2789af7bac2ae2b
+c3a41ad980cc5149de3f9ec8414962c183b1fed9
+5884a47c367f6ff1aff3ae1ef6894881c5a5e0b7
+1d39c9ca0672e7ad4c1f0959f9d58d2fcc7dc46b
+e16f6441044fc2123e0cbdcbd8a5842ec3aae7a0
+6c6cc7989cac79450bf83b932ca82d390a37e17b
+bc28ca3afb7f6656a0bf50038a5e383ee7f9b219
+57a491bee17af88f75c2cea8c109d93b1cdbc9a8
+f8586b25f6a4f1e30a54e58f45dd28aaf580bbc6
+e5df0ba0d97e5f8cfd748f09d6ed77b7bfc45471
+1b0469199bdaedfd452eea718268be7fd50db3c0
+015717e2b873b7a2ce433bd3be2328a782aa5d91
+3b3c66f85959f3393a3a9e87a29004b526f91b93
+874529665c1c326fc86fc0d0d6c3512fab087ab8
+7f2c983e1cfdb58b6f84eabe5ff6a16f143f39aa
+0ea92cad5274f3939f09d6890da31a21b8481282
+489b5876698f9bb2d93b1b1d62d514148b31effd
+faf25b09d9e78f2ff129e25b90f67930d2fc1c4f
+df933596e7e9aa17f7e5cd6e1c850520f5b56f1b
+9e4fbebcc8e497016563e46de4c64fa094edab2d
+1557014378cc5a6234a9244fa60132955206fd27
+c5fbcf5f8d7b36bee54ac80d1027d0dccea2aa75
+cccbc5fe3ea5ae52426203f4485b11071fbe4b6e
+5174a139c92c1238f9700d06e362dc628d81a0a9
+9dae9f5f1e2bf29f58d3f49b0c612063d883b8b3
+e282764e049523439bc8adaadc002a1420122830
+d8ae5044488248d5eb134aa7c0a15c813a2f8715
+06ea2783a2c11e7b171e2809c3211bb3091d894d
+00ce8543f16f4357926eb6dc701ac6229142be80
+1f63b460a8506675ccacb4647941f07d391735e3
+a100c42a136da5ddfd09aa442543ec2190f24faf
+636991d0c0f969968c790d490c82c1d2fa4e8047
+dd52f79a73eca18301db1569d517197160018dbb
+e157b98640c7cfb94cae7e0faca3bcffc2dde990
+ad9e5eaf77bf7e19a926a43407c88386a8a1e58f
+c5e67be03bb06a5d7885c55db1f016fbf2333fe3
+48eec32347494da781f26478fa488b28336afbd2
+c324b07a541a04698954ece94e5879ae7131c1c7
+4901631dac6a883c6ddd0d4e5e3edd08b10d7609
+cacbdbaa95317b45cf2100702bca92410fb43b9a
+b4f686952a60bbadc7ed2250651d0d6af0959f4d
+90e49c1ececd6296c6ec6109cea525a208c0626e
+700808754884919916a5518e7ecfdabadef956d8
+0cd1a2eff9e0020ec1052a931f3863794d1a95d9
+51527ec1ec4264f7e24ef548bb049db07a89fc7f
+ed4eeafbb6e2e73ff9fb9c03bd66bbb049b8aacd
+d4475ea7ae70ad1a1f9374b88c68f01590a88d54
+5e1aacab576b8d8918da129097a9ac0816b6ead2
+fe6a299fc0020cd62156d4b7dd9c8dac358c69c5
+0047d9b89b9fa6be660c363961cf0af72fa62ecf
+037c5e511fe2185d244049cae25a98f99b878787
+8730bd3fc87c8a7d44347267e1ff3c7a8674201b
+47b8256da872722953693c4037d1b9e07caadcb1
+85aea18ae660b5edf7b6c1415f033cfcb15307f9
+132d5f8c2f2397a4600a42203f413dafdb6bcc37
+23ebd7a8027f12e722834d214113892fe8561fe1
+a19f641a80cb2841ca9f593e9be589dbc4b4ae7c
+1e7db37e76c510d373c4404eea2b97508b367aca
+16fa967d3cca66eef0f17b41fd8aaee6a1420fbc
+9eedbe98c86ff2a9214c24c37f6524ce67fd129b
+0342ae1d395ca82614f6d3b8fabb6a44403baf2a
+777b89b3008e53374eb13fdee70db315cd61a703
+8b686776ef5cbd6ef9d5281c3136eded25ea35a4
+c90b42bcdb594638c5759ef5ef0773314d0a1379
+7134327be5c1bdcef7919ed735049a6bbfc457ec
+e88a52e9a2fda971d34425bb80e42ad2d6623d68
+173c79626867e9f89d49be7dcbb0c2042c480553
+2513499348fa955d0e4b0970b08ba9e715e6316e
+43bb10661360d9f35d921d493a1f94ac95df00e2
+6f55ab57cbfa414d57a8e9fb9a47f9bcd8c836d7
+6300b9556ec927a61371053fafe1a4045f5afb00
+f8b2e9bcfc76fede05f5e12f7b15f0d9c9d0add5
+b297b945f7610772434817181ad12067b2832565
+57a73d71a36ce212977607d3e94de6ef55521bfc
+5fdf37e14bb3b66264a7e6868250c2084ac39054
+3059d4dd72af73b654077d9f72019c47edd47674
+333a41882c5ccd5f0c7f884f97d25449bdeec07b
+7da4f65a00a8d96da2119de613ed7fbee2a28a0d
+e14f0fa6a346afecbb1d5470aef5226a8cc33e57
+cf0a8b9c4870cc88254a757286140d9632e7b70c
+b69fd5eaa99f84b62a49d7c7f48d8cee1227592a
+1e3ed01faa77215a7c36308237280aaa58895532
+6c9bc14a3f2cfa50144607c820ebab5288f9571c
+8e3c266a4f02093d57d563f32ba73d3ab4b5f208
+decde9bba6f9d3671bdf0af4fe6ff4bf28992d1d
+9b7eb584ade2ce73dbfcda080935172c3857b758
+3bbc46ddafb61f68785c7e581817db952f99d93a
+bbb83f0b2b2671980c06453fd243b1f2801a1cc4
+6c9460edaeb6c89692b71f51be7b7ee386f4f5c4
+b3072799248fae8fc16f910b642edb9c5acf8bac
+696d39410fc3372d120a6e89695c1543ac2fc052
+c5c4fb31828107a5ded88627632e19e05b2c7e83
+9ce1c506a3a5d20b1bf254235bfae48af592d86c
+fe66dad8a779ed928b1c2fc0c3accf594b042877
+f421de5be611f874a027392d5fee7e113dce4f54
+d492dc1cdaabdc52b0766bf4cba4bd73178325d0
+6348bc61b533705a229f2c2ddcff2bdd98849d07
+83b26cb97cb46516aa4fdee3bcbfa751d28c1233
+afac75f140a3e7d89877f03420e1bc64a8d8c6cd
+171f6f2699dc27e77843318be2fefdfcd9e589fb
+50c806f001d66e20f314777b9fa7fefa01dc6893
+bdbabc50ba6c87ded97ea2bbacd3605c59cd12d0
+9e32adbb5c543885b2c01a984bf1e4b80e8cec16
+7c08d81e119570792648fe95bbacddbb1d5f9ae2
+65e9ca22785f4a799cbcff6d95cbe1ce4b4a6bd2
+2948d6dea098bf722828b969838668f833c2cb00
+deb847b75710d600e5b0d3d5c77fa5166d80808a
+05e5af5a6c884d2ade3d7acc766ad5380cb85b64
+cba41db327a241f992f9329b214d9070888255b8
+f6d335e82822ed8f2da56c1bcaddf7f99acd4997
+30308cc380a8176a5ec0e0bd2beed8b9c482ccf7
+8b6cd42c6226dea28c182a48a214d1c091b9b5bb
+267917f5632a99bb51fc3fe516d8308e79d31ed1
+ba11eb354b9f3420ebb8608227062fb639a07496
+848b11615b67a3c49f76ebbcaa241a322d8014d8
+25290071c434638e2719a99784572deef44542ad
+159f89c118645c0f9e23e453f01cede84abac795
+37637bea3a9a48c0d52d68d3f78f154f8249a009
+0a76bf848c72211f986a6cc5b1c13de820b861dd
+358fe779cbb2681666ae5ab23a19662db21a2c46
+c44e734dca64a15fae92255a5d848c04adaad2fa
+8add59d77dd621be57059229f378822e4b707318
+922c49a1389531d9fba30168257c466bd413f625
+df0825046acc7cb496c47666e36af18118beb030
+c23bf06492dddacfc0eece3d4dd12cce81496dd0
+3eec29ed3aa1c8eb293a7a7a6be356fc014f8813
+a7e80449c0811b361cdaea39b6bab78ca5fbf668
+5e8e0b3d7f6055e326bda61e60712b530e8920f0
+a5edd191be93aff8f9c0f60f04e711e2e78ecc77
+515200298b555845696a07ae2bc0a84a5dd02ae4
+e8a3882e20f0ffeeb9b3964c2c09d8aa5eb53fd4
+c545a7aeb1d559377933c7b2e6edc2d4a37b33fb
+df669230cf2001dd869e897bb4f2d9c46f9accd9
+56a0fbf8365343d73cdff2b0a0e16542294d7577
+196b4599201dbce3e0317e9b98753fa6a244b82d
+cf5bb048e80d4cde8828787b266b7f5f2e3b6d7b
+b94d0c7af11bd91dad4f180ce2a2ffa09e4b5668
+792d0d8d512cf8ddca200317b74ce550c1a1a428
+767ee2e3a1082468b4e2248bac3ef8bd54bb2ddb
+31db3dd874dfbba88616c96a5767e2c9861d9a7a
+018fd9620293582f0ce43d344ac3110e19c4dedc
+801aaac2b39564aa14009785146ba26d2506fb53
+121d47afe3e67ff7f94d26e08a39573dccf652aa
+af7fba3af788e91a460582351d40f8f5e2118760
+8f1c28a609b203e0d0a844d9cc5ada9eb9160a5e
+8319c4e906e6df5f2048e7c048942fde285a93a2
+66be456d93a66526322b7f36fd734a8dbd5e5524
+c006ab29ceec9274dc85a0de7f7d0502021a4b87
+1220af5e6d1072ea306f6ecaaa7effe3d386c379
+14ba286556faad794f288ef38493c540382897cb
+784a21d35466736a7a372364498ed94482a12a2a
+4ad59042b359f473d5888ecee0c9288dcf98f1c9
+fee16b15fa3425871670239c25d4e61ae961e0c5
+216f4ca9e7ccb1f0fcb9bab0f9940992a87ae55f
+2d0bdb2089644f5904629413423cdc897911b081
+50c502f54abd9eb15c8ddca013f0fdfae3d132a9
+c840ab0231bc29057172179f005001c9ab299554
+aab5e48d422d396aec09bd6389de68613b19def5
diff --git a/contrib/verify-commits/trusted-git-root b/contrib/verify-commits/trusted-git-root
index 1c42195961..efb6b9f7b4 100644
--- a/contrib/verify-commits/trusted-git-root
+++ b/contrib/verify-commits/trusted-git-root
@@ -1 +1 @@
-577bd51a4b8de066466a445192c1c653872657e2
+8ef096d4f8e08ac691502e3fd34721a8bdfa9044
diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys
index 5ca65e7b0d..eeafcdf205 100644
--- a/contrib/verify-commits/trusted-keys
+++ b/contrib/verify-commits/trusted-keys
@@ -1,4 +1,3 @@
-71A3B16735405025D447E8F274810B012346C9A6
B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B
E777299FC265DD04793070EB944D35F9AC3DB76A
D1DBF2C4B96F2DEBF4C16654410108112E7EA81F
diff --git a/contrib/verifybinaries/README.md b/contrib/verifybinaries/README.md
index c50d4bef71..ab831eea28 100644
--- a/contrib/verifybinaries/README.md
+++ b/contrib/verifybinaries/README.md
@@ -1,16 +1,5 @@
### Verify Binaries
-#### Preparation:
-
-Make sure you obtain the proper release signing key and verify the fingerprint with several independent sources.
-
-```sh
-$ gpg --fingerprint "Bitcoin Core binary release signing key"
-pub 4096R/36C2E964 2015-06-24 [expires: YYYY-MM-DD]
- Key fingerprint = 01EA 5486 DE18 A882 D4C2 6845 90C8 019E 36C2 E964
-uid Wladimir J. van der Laan (Bitcoin Core binary release signing key) <laanwj@gmail.com>
-```
-
#### Usage:
This script attempts to download the signature file `SHA256SUMS.asc` from https://bitcoin.org.
diff --git a/depends/README.md b/depends/README.md
index a8831eb0fc..11abbbd90d 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -98,6 +98,8 @@ The following can be set when running make: `make FOO=bar`
- `FALLBACK_DOWNLOAD_PATH`: If a source file can't be fetched, try here before giving up
- `C_STANDARD`: Set the C standard version used. Defaults to `c11`.
- `CXX_STANDARD`: Set the C++ standard version used. Defaults to `c++17`.
+- `NO_BOOST`: Don't download/build/cache Boost
+- `NO_LIBEVENT`: Don't download/build/cache Libevent
- `NO_QT`: Don't download/build/cache Qt and its dependencies
- `NO_QR`: Don't download/build/cache packages needed for enabling qrencode
- `NO_ZMQ`: Don't download/build/cache packages needed for enabling ZeroMQ
diff --git a/depends/funcs.mk b/depends/funcs.mk
index 2b21d053b1..f0bbf4a168 100644
--- a/depends/funcs.mk
+++ b/depends/funcs.mk
@@ -76,7 +76,7 @@ $(1)_extracted=$$($(1)_extract_dir)/.stamp_extracted
$(1)_preprocessed=$$($(1)_extract_dir)/.stamp_preprocessed
$(1)_cleaned=$$($(1)_extract_dir)/.stamp_cleaned
$(1)_built=$$($(1)_build_dir)/.stamp_built
-$(1)_configured=$$($(1)_build_dir)/.stamp_configured
+$(1)_configured=$(host_prefix)/.$(1)_stamp_configured
$(1)_staged=$$($(1)_staging_dir)/.stamp_staged
$(1)_postprocessed=$$($(1)_staging_prefix_dir)/.stamp_postprocessed
$(1)_download_path_fixed=$(subst :,\:,$$($(1)_download_path))
@@ -214,8 +214,8 @@ $($(1)_preprocessed): | $($(1)_extracted)
$($(1)_configured): | $($(1)_dependencies) $($(1)_preprocessed)
echo Configuring $(1)...
rm -rf $(host_prefix); mkdir -p $(host_prefix)/lib; cd $(host_prefix); $(foreach package,$($(1)_all_dependencies), $(build_TAR) --no-same-owner -xf $($(package)_cached); )
- mkdir -p $$(@D)
- +{ cd $$(@D); export $($(1)_config_env); $($(1)_config_cmds); } $$($(1)_logging)
+ mkdir -p $$($(1)_build_dir)
+ +{ cd $$($(1)_build_dir); export $($(1)_config_env); $($(1)_config_cmds); } $$($(1)_logging)
touch $$@
$($(1)_built): | $($(1)_configured)
echo Building $(1)...
@@ -234,7 +234,9 @@ $($(1)_postprocessed): | $($(1)_staged)
touch $$@
$($(1)_cached): | $($(1)_dependencies) $($(1)_postprocessed)
echo Caching $(1)...
- cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | $(build_TAR) --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T -
+ cd $$($(1)_staging_dir)/$(host_prefix); \
+ find . ! -name '.stamp_postprocessed' -print0 | TZ=UTC xargs -0r touch -h -m -t 200001011200; \
+ find . ! -name '.stamp_postprocessed' | LC_ALL=C sort | $(build_TAR) --numeric-owner --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T -
mkdir -p $$(@D)
rm -rf $$(@D) && mkdir -p $$(@D)
mv $$($(1)_staging_dir)/$$(@F) $$(@)
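The new cache rule normalises timestamps (`touch -h -m -t 200001011200` under `TZ=UTC`), sorts entries with `LC_ALL=C`, and records numeric ownership so the cached tarball is byte-identical across builders. A rough Python analogue of the same idea, purely illustrative and not part of the build system:

```python
# Illustrative only: build an archive whose bytes do not depend on the machine,
# user or time of the build (fixed mtimes, sorted member order, numeric ownership).
import os
import tarfile

FIXED_MTIME = 946728000  # 2000-01-01 12:00 UTC, matching `touch -t 200001011200`

def deterministic_tar(src_dir: str, out_path: str) -> None:
    def normalise(info: tarfile.TarInfo) -> tarfile.TarInfo:
        info.mtime = FIXED_MTIME
        info.uid = info.gid = 0
        info.uname = info.gname = ''
        return info

    with tarfile.open(out_path, 'w') as tar:  # plain tar; gzip would also need a fixed mtime
        for root, dirs, files in os.walk(src_dir):
            dirs.sort()                       # stable traversal order
            for name in sorted(files):
                path = os.path.join(root, name)
                tar.add(path, arcname=os.path.relpath(path, src_dir),
                        recursive=False, filter=normalise)
```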
diff --git a/depends/hosts/default.mk b/depends/hosts/default.mk
index 7c76331ab4..bad4568bcb 100644
--- a/depends/hosts/default.mk
+++ b/depends/hosts/default.mk
@@ -28,8 +28,13 @@ host_$1=$$($(host_arch)_$(host_os)_$1)
endef
define add_host_flags_func
+ifeq ($(filter $(origin $1),undefined default),)
+$(host_arch)_$(host_os)_$1 =
+$(host_arch)_$(host_os)_$(release_type)_$1 = $($1)
+else
$(host_arch)_$(host_os)_$1 += $($(host_os)_$1)
$(host_arch)_$(host_os)_$(release_type)_$1 += $($(host_os)_$(release_type)_$1)
+endif
host_$1 = $$($(host_arch)_$(host_os)_$1)
host_$(release_type)_$1 = $$($(host_arch)_$(host_os)_$(release_type)_$1)
endef
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk
index 262587690c..d607336059 100644
--- a/depends/packages/bdb.mk
+++ b/depends/packages/bdb.mk
@@ -15,7 +15,7 @@ $(package)_config_opts_netbsd=--with-pic
$(package)_config_opts_openbsd=--with-pic
$(package)_config_opts_android=--with-pic
$(package)_cflags+=-Wno-error=implicit-function-declaration -Wno-error=format-security
-$(package)_cppflags_freebsd=-D_XOPEN_SOURCE=600
+$(package)_cppflags_freebsd=-D_XOPEN_SOURCE=600 -D__BSD_VISIBLE=1
$(package)_cppflags_netbsd=-D_XOPEN_SOURCE=600
$(package)_cppflags_openbsd=-D_XOPEN_SOURCE=600
$(package)_cppflags_mingw32=-DUNICODE -D_UNICODE
diff --git a/depends/packages/sqlite.mk b/depends/packages/sqlite.mk
index 820d724214..a8ec89c6c6 100644
--- a/depends/packages/sqlite.mk
+++ b/depends/packages/sqlite.mk
@@ -6,10 +6,15 @@ $(package)_sha256_hash=5af07de982ba658fd91a03170c945f99c971f6955bc79df3266544373
define $(package)_set_vars
$(package)_config_opts=--disable-shared --disable-readline --disable-dynamic-extensions --enable-option-checking
+$(package)_config_opts+= --disable-rtree --disable-fts4 --disable-fts5
$(package)_config_opts_linux=--with-pic
$(package)_config_opts_freebsd=--with-pic
$(package)_config_opts_netbsd=--with-pic
$(package)_config_opts_openbsd=--with-pic
+$(package)_config_opts_debug=--enable-debug
+$(package)_cflags+=-DSQLITE_DQS=0 -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_OMIT_DEPRECATED
+$(package)_cflags+=-DSQLITE_OMIT_SHARED_CACHE -DSQLITE_OMIT_JSON -DSQLITE_LIKE_DOESNT_MATCH_BLOBS
+$(package)_cflags+=-DSQLITE_OMIT_DECLTYPE -DSQLITE_OMIT_PROGRESS_CALLBACK -DSQLITE_OMIT_AUTOINIT
endef
define $(package)_preprocess_cmds
diff --git a/depends/packages/systemtap.mk b/depends/packages/systemtap.mk
index a57f1b6d36..541ebeee01 100644
--- a/depends/packages/systemtap.mk
+++ b/depends/packages/systemtap.mk
@@ -1,12 +1,13 @@
package=systemtap
-$(package)_version=4.7
-$(package)_download_path=https://sourceware.org/systemtap/ftp/releases/
+$(package)_version=4.8
+$(package)_download_path=https://sourceware.org/ftp/systemtap/releases/
$(package)_file_name=$(package)-$($(package)_version).tar.gz
-$(package)_sha256_hash=43a0a3db91aa4d41e28015b39a65e62059551f3cc7377ebf3a3a5ca7339e7b1f
-$(package)_patches=remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch
+$(package)_sha256_hash=cbd50a4eba5b261394dc454c12448ddec73e55e6742fda7f508f9fbc1331c223
+$(package)_patches=remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch fix_variadic_warning.patch
define $(package)_preprocess_cmds
patch -p1 < $($(package)_patch_dir)/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch && \
+ patch -p1 < $($(package)_patch_dir)/fix_variadic_warning.patch && \
mkdir -p $($(package)_staging_prefix_dir)/include/sys && \
cp includes/sys/sdt.h $($(package)_staging_prefix_dir)/include/sys/sdt.h
endef
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index 267ed11253..d715232793 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -20,12 +20,12 @@ endef
define $(package)_preprocess_cmds
patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \
- patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch && \
- cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config
+ patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch
endef
define $(package)_config_cmds
./autogen.sh && \
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config && \
$($(package)_autoconf)
endef
diff --git a/depends/patches/systemtap/fix_variadic_warning.patch b/depends/patches/systemtap/fix_variadic_warning.patch
new file mode 100644
index 0000000000..93cc2d6081
--- /dev/null
+++ b/depends/patches/systemtap/fix_variadic_warning.patch
@@ -0,0 +1,16 @@
+Could be dropped after a migration to C++20.
+See: https://github.com/bitcoin/bitcoin/issues/26916.
+
+diff --git a/includes/sys/sdt.h b/includes/sys/sdt.h
+index 4075a5f..7c6138c 100644
+--- a/includes/sys/sdt.h
++++ b/includes/sys/sdt.h
+@@ -276,7 +276,7 @@ __extension__ extern unsigned long long __sdt_unsp;
+ _SDT_ASM_1(.purgem _SDT_TYPE_) \
+ _SDT_ASM_1(.purgem _SDT_TYPE)
+
+-#define _SDT_ASM_BODY(provider, name, pack_args, args, ...) \
++#define _SDT_ASM_BODY(provider, name, pack_args, args) \
+ _SDT_DEF_MACROS \
+ _SDT_ASM_1(990: _SDT_NOP) \
+ _SDT_ASM_3( .pushsection .note.stapsdt,_SDT_ASM_AUTOGROUP,"note") \
diff --git a/doc/build-freebsd.md b/doc/build-freebsd.md
index d45e9c4d0d..aa10e4a891 100644
--- a/doc/build-freebsd.md
+++ b/doc/build-freebsd.md
@@ -36,13 +36,30 @@ pkg install sqlite3
```
###### Legacy Wallet Support
-`db5` is only required to support legacy wallets.
-Skip if you don't intend to use legacy wallets.
+BerkeleyDB is only required for legacy wallet support.
+
+It is required to use Berkeley DB 4.8. You **cannot** use the BerkeleyDB library
+from ports. However, you can build DB 4.8 yourself [using depends](/depends).
-```bash
-pkg install db5
```
----
+gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_NATPMP=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
+```
+
+When the build is complete, the Berkeley DB installation location will be displayed:
+
+```
+to: /path/to/bitcoin/depends/x86_64-unknown-freebsd[release-number]
+```
+
+Finally, set `BDB_PREFIX` to this path according to your shell:
+
+```
+csh: setenv BDB_PREFIX [path displayed above]
+```
+
+```
+sh/bash: export BDB_PREFIX=[path displayed above]
+```
#### GUI Dependencies
###### Qt5
@@ -91,12 +108,12 @@ This explicitly enables the GUI and disables legacy wallet support, assuming `sq
##### Descriptor & Legacy Wallet. No GUI:
This enables support for both wallet types and disables the GUI, assuming
-`sqlite3` and `db5` are both installed.
+`sqlite3` and `db4` are both installed.
```bash
./autogen.sh
-./configure --with-gui=no --with-incompatible-bdb \
- BDB_LIBS="-ldb_cxx-5" \
- BDB_CFLAGS="-I/usr/local/include/db5" \
+./configure --with-gui=no \
+ BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
+ BDB_CFLAGS="-I${BDB_PREFIX}/include" \
MAKE=gmake
```
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index afbb5c8e75..255995a517 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -41,16 +41,18 @@ pkg_add sqlite3
BerkeleyDB is only required to support legacy wallets.
It is recommended to use Berkeley DB 4.8. You cannot use the BerkeleyDB library
-from ports. However you can build it yourself, [using the installation script included in contrib/](/contrib/install_db4.sh), like so, from the root of the repository.
+from ports. However, you can build it yourself [using depends](/depends).
```bash
-./contrib/install_db4.sh `pwd`
+gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_NATPMP=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
+...
+to: /path/to/bitcoin/depends/x86_64-unknown-openbsd
```
Then set `BDB_PREFIX`:
```bash
-export BDB_PREFIX="$PWD/db4"
+export BDB_PREFIX="/path/to/bitcoin/depends/x86_64-unknown-openbsd"
```
#### GUI Dependencies
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 874015707a..0960ae1577 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -72,7 +72,7 @@ executables, which are based on BerkeleyDB 4.8. If you do not care about wallet
To build Bitcoin Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode)
-Optional port mapping libraries (see: `--with-miniupnpc`, `--enable-upnp-default`, and `--with-natpmp`, `--enable-natpmp-default`):
+Optional port mapping libraries (see: `--with-miniupnpc` and `--with-natpmp`):
sudo apt install libminiupnpc-dev libnatpmp-dev
@@ -133,7 +133,7 @@ pass `--with-incompatible-bdb` to configure. Otherwise, you can build Berkeley D
To build Bitcoin Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode)
-Optional port mapping libraries (see: `--with-miniupnpc`, `--enable-upnp-default`, and `--with-natpmp`, `--enable-natpmp-default`):
+Optional port mapping libraries (see: `--with-miniupnpc` and `--with-natpmp`):
sudo dnf install miniupnpc-devel libnatpmp-devel
@@ -176,38 +176,34 @@ miniupnpc
[miniupnpc](https://miniupnp.tuxfamily.org) may be used for UPnP port mapping. It can be downloaded from [here](
https://miniupnp.tuxfamily.org/files/). UPnP support is compiled in and
-turned off by default. See the configure options for UPnP behavior desired:
-
- --without-miniupnpc No UPnP support, miniupnp not required
- --disable-upnp-default (the default) UPnP support turned off by default at runtime
- --enable-upnp-default UPnP support turned on by default at runtime
+turned off by default.
libnatpmp
---------
[libnatpmp](https://miniupnp.tuxfamily.org/libnatpmp.html) may be used for NAT-PMP port mapping. It can be downloaded
from [here](https://miniupnp.tuxfamily.org/files/). NAT-PMP support is compiled in and
-turned off by default. See the configure options for NAT-PMP behavior desired:
-
- --without-natpmp No NAT-PMP support, libnatpmp not required
- --disable-natpmp-default (the default) NAT-PMP support turned off by default at runtime
- --enable-natpmp-default NAT-PMP support turned on by default at runtime
+turned off by default.
Berkeley DB
-----------
The legacy wallet uses Berkeley DB. To ensure backwards compatibility it is
-recommended to use Berkeley DB 4.8. If you have to build it yourself, you can
-use [the installation script included in contrib/](/contrib/install_db4.sh)
-like so:
-
-```shell
-./contrib/install_db4.sh `pwd`
+recommended to use Berkeley DB 4.8. If you have to build it yourself, and don't
+want to use any other libraries built in depends, you can do:
+```bash
+make -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_NATPMP=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
+...
+to: /path/to/bitcoin/depends/x86_64-pc-linux-gnu
```
+and configure using the following:
+```bash
+export BDB_PREFIX="/path/to/bitcoin/depends/x86_64-pc-linux-gnu"
-from the root of the repository.
-
-Otherwise, you can build Bitcoin Core from self-compiled [depends](/depends/README.md).
+./configure \
+ BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
+ BDB_CFLAGS="-I${BDB_PREFIX}/include"
+```
**Note**: You only need Berkeley DB if the legacy wallet is enabled (see [*Disable-wallet mode*](#disable-wallet-mode)).
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 3349c81c46..a9ca5b3e7a 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -19,7 +19,7 @@ You can find installation instructions in the `build-*.md` file for your platfor
| --- | --- | --- | --- | --- |
| [Boost](../depends/packages/boost.mk) | [link](https://www.boost.org/users/download/) | [1.81.0](https://github.com/bitcoin/bitcoin/pull/26557) | [1.64.0](https://github.com/bitcoin/bitcoin/pull/22320) | No |
| [libevent](../depends/packages/libevent.mk) | [link](https://github.com/libevent/libevent/releases) | [2.1.12-stable](https://github.com/bitcoin/bitcoin/pull/21991) | [2.1.8](https://github.com/bitcoin/bitcoin/pull/24681) | No |
-| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.18](https://github.com/bitcoin/bitcoin/pull/23511) | Yes |
+| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.27](https://github.com/bitcoin/bitcoin/pull/27029) | Yes |
| Linux Kernel | [link](https://www.kernel.org/) | N/A | 3.2.0 | Yes |
## Optional
@@ -36,7 +36,7 @@ You can find installation instructions in the `build-*.md` file for your platfor
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [libnatpmp](../depends/packages/libnatpmp.mk) | [link](https://github.com/miniupnp/libnatpmp/) | commit [07004b9...](https://github.com/bitcoin/bitcoin/pull/25917) | | No |
-| [MiniUPnPc](../depends/packages/miniupnpc.mk) | [link](https://miniupnp.tuxfamily.org/) | [2.2.2](https://github.com/bitcoin/bitcoin/pull/20421) | 1.9 | No |
+| [MiniUPnPc](../depends/packages/miniupnpc.mk) | [link](https://miniupnp.tuxfamily.org/) | [2.2.2](https://github.com/bitcoin/bitcoin/pull/20421) | 2.1 | No |
### Notifications
| Dependency | Releases | Version used | Minimum required | Runtime |
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 00c68911ef..e2e54e13d3 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -109,6 +109,10 @@ code.
- `++i` is preferred over `i++`.
- `nullptr` is preferred over `NULL` or `(void*)0`.
- `static_assert` is preferred over `assert` where possible. Generally; compile-time checking is preferred over run-time checking.
+ - Use a named cast or functional cast, not a C-style cast. When casting
+ between integer types, use functional casts such as `int(x)` or `int{x}`
+ instead of `(int) x`. When casting between more complex types, use `static_cast`.
+ Use `reinterpret_cast` and `const_cast` as appropriate.
For function calls a namespace should be specified explicitly, unless such functions have been declared within it.
Otherwise, [argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl), also known as ADL, could be
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index b498f2f41b..84ebb0986d 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -136,10 +136,10 @@ You may also need to take care of giving the correct path for `clang` and
`clang++`, like `CC=/path/to/clang CXX=/path/to/clang++` if the non-systems
`clang` does not come first in your path.
-Full configure that was tested on macOS Catalina with `brew` installed `llvm`:
+Full configure that was tested on macOS with `brew` installed `llvm`:
```sh
-./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=/usr/local/opt/llvm/bin/clang CXX=/usr/local/opt/llvm/bin/clang++ --disable-asm
+./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined --disable-asm CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++
```
Read the [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html) for more information. This [libFuzzer tutorial](https://github.com/google/fuzzing/blob/master/tutorial/libFuzzerTutorial.md) might also be of interest.
diff --git a/doc/init.md b/doc/init.md
index 399b819bf4..7f79027718 100644
--- a/doc/init.md
+++ b/doc/init.md
@@ -70,7 +70,7 @@ NOTE: When using the systemd .service file, the creation of the aforementioned
directories and the setting of their permissions is automatically handled by
systemd. Directories are given a permission of 710, giving the bitcoin group
access to files under it _if_ the files themselves give permission to the
-bitcoin group to do so (e.g. when `-sysperms` is specified). This does not allow
+bitcoin group to do so. This does not allow
for the listing of files under the directory.
NOTE: It is not currently possible to override `datadir` in
diff --git a/doc/reduce-memory.md b/doc/reduce-memory.md
index 296b172bde..25205258b8 100644
--- a/doc/reduce-memory.md
+++ b/doc/reduce-memory.md
@@ -16,11 +16,11 @@ The size of some in-memory caches can be reduced. As caches trade off memory usa
- The minimum value for `-maxmempool` is 5.
- A lower maximum mempool size means that transactions will be evicted sooner. This will affect any uses of `bitcoind` that process unconfirmed transactions.
-- To completely disable mempool functionality there is the option `-blocksonly`. This will make the client opt out of receiving (and thus relaying) transactions completely, except as part of blocks.
+- Since `0.14.0`, unused memory allocated to the mempool (default: 300MB) is shared with the UTXO cache, so when trying to reduce memory usage you should limit the mempool, with the `-maxmempool` command line argument.
- - Do not use this when using the client to broadcast transactions as any transaction sent will stick out like a sore thumb, affecting privacy. When used with the wallet it should be combined with `-walletbroadcast=0` and `-spendzeroconfchange=0`. Another mechanism for broadcasting outgoing transactions (if any) should be used.
+- To disable most of the mempool functionality there is the `-blocksonly` option. This will reduce the default memory usage to 5MB and make the client opt out of receiving (and thus relaying) transactions, except from peers who have the `relay` permission set (e.g. whitelisted peers), and as part of blocks.
-- Since `0.14.0`, unused memory allocated to the mempool (default: 300MB) is shared with the UTXO cache, so when trying to reduce memory usage you should limit the mempool, with the `-maxmempool` command line argument.
+ - Do not use this when using the client to broadcast transactions as any transaction sent will stick out like a sore thumb, affecting privacy. When used with the wallet it should be combined with `-walletbroadcast=0` and `-spendzeroconfchange=0`. Another mechanism for broadcasting outgoing transactions (if any) should be used.
## Number of peers
diff --git a/doc/release-notes-23395.md b/doc/release-notes-23395.md
new file mode 100644
index 0000000000..b9d7d9409c
--- /dev/null
+++ b/doc/release-notes-23395.md
@@ -0,0 +1,8 @@
+Notable changes
+===============
+
+New settings
+------------
+
+- The `shutdownnotify` option is used to specify a command to execute synchronously
+before Bitcoin Core has begun its shutdown sequence. (#23395)
diff --git a/doc/release-notes-25574.md b/doc/release-notes-25574.md
new file mode 100644
index 0000000000..312a99d95b
--- /dev/null
+++ b/doc/release-notes-25574.md
@@ -0,0 +1,13 @@
+Updated settings
+----------------
+
+If the `-checkblocks` or `-checklevel` options are explicitly provided by the
+user, but the verification checks cannot be completed due to an insufficient
+dbcache, Bitcoin Core will now return an error at startup. (#25574)
+
+RPC
+---
+The `verifychain` RPC will now return `false` if the checks didn't fail,
+but couldn't be completed at the desired depth and level. This could be due
+to missing data while pruning, due to an insufficient dbcache or due to
+the node being shut down before the call could finish. (#25574)
diff --git a/doc/release-notes-25957.md b/doc/release-notes-25957.md
new file mode 100644
index 0000000000..c71afa2c2e
--- /dev/null
+++ b/doc/release-notes-25957.md
@@ -0,0 +1,9 @@
+Wallet
+------
+
+- Rescans for descriptor wallets are now significantly faster if compact
+ block filters (BIP158) are available. Since those are not constructed
+ by default, the configuration option "-blockfilterindex=1" has to be
+ provided to take advantage of the optimization. This improves the
+ performance of the RPC calls `rescanblockchain`, `importdescriptors`
+ and `restorewallet`. (#25957)
diff --git a/doc/release-notes-26471.md b/doc/release-notes-26471.md
new file mode 100644
index 0000000000..2cb74804ca
--- /dev/null
+++ b/doc/release-notes-26471.md
@@ -0,0 +1,13 @@
+Updated settings
+----------------
+
+- Setting `-blocksonly` will now reduce the maximum mempool memory
+ to 5MB (users may still use `-maxmempool` to override). Previously,
+ the default 300MB would be used, leading to unexpected memory usage
+ for users running with `-blocksonly` expecting it to eliminate
+ mempool memory usage.
+
+ As unused mempool memory is shared with dbcache, this also reduces
+ the dbcache size for users running with `-blocksonly`, potentially
+ impacting performance.
+
diff --git a/doc/release-notes-26896.md b/doc/release-notes-26896.md
new file mode 100644
index 0000000000..ff4ab44e27
--- /dev/null
+++ b/doc/release-notes-26896.md
@@ -0,0 +1,7 @@
+Build System
+------------
+
+The --enable-upnp-default and --enable-natpmp-default options
+have been removed. If you want to use port mapping, you can
+configure it using a .conf file, or by passing the relevant
+options at runtime. \ No newline at end of file
diff --git a/doc/release-notes-27037.md b/doc/release-notes-27037.md
new file mode 100644
index 0000000000..ee30e64010
--- /dev/null
+++ b/doc/release-notes-27037.md
@@ -0,0 +1,5 @@
+RPC
+---
+
+- `decodescript` may now infer a Miniscript descriptor under P2WSH context, provided it is not
+  lacking any information.
diff --git a/doc/release-notes-27068.md b/doc/release-notes-27068.md
new file mode 100644
index 0000000000..3f5c5dba37
--- /dev/null
+++ b/doc/release-notes-27068.md
@@ -0,0 +1,6 @@
+Wallet
+------
+
+- Wallet passphrases may now contain null characters.
+ Prior to this change, only characters up to the first
+ null character were recognized and accepted. (#27068) \ No newline at end of file
diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py
index d441d5f21d..cc7bba1f8b 100755
--- a/share/rpcauth/rpcauth.py
+++ b/share/rpcauth/rpcauth.py
@@ -4,22 +4,20 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
-from base64 import urlsafe_b64encode
from getpass import getpass
-from os import urandom
-
+from secrets import token_hex, token_urlsafe
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
- return urandom(size).hex()
+ return token_hex(size)
def generate_password():
"""Create 32 byte b64 password"""
- return urlsafe_b64encode(urandom(32)).decode('utf-8')
+ return token_urlsafe(32)
def password_to_hmac(salt, password):
- m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
+ m = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256')
return m.hexdigest()
def main():
@@ -38,8 +36,8 @@ def main():
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to bitcoin.conf:')
- print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
- print('Your password:\n{0}'.format(args.password))
+ print(f'rpcauth={args.username}:{salt}${password_hmac}')
+ print(f'Your password:\n{args.password}')
if __name__ == '__main__':
main()
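For context, the credential line printed by `rpcauth.py` is later verified by the node by recomputing HMAC-SHA256 over the password with the salt as key. A minimal C++ sketch of that recomputation, assuming Bitcoin Core's `CHMAC_SHA256` helper from `crypto/hmac_sha256.h` (the function name `RpcauthHmacHex` is illustrative and not part of this diff):

```cpp
#include <crypto/hmac_sha256.h>

#include <cstdio>
#include <string>

// Recompute HMAC-SHA256(salt, password) and return it as lowercase hex, the
// same value that rpcauth.py's password_to_hmac() emits after the '$' in
// rpcauth=<user>:<salt>$<hmac>.
std::string RpcauthHmacHex(const std::string& salt, const std::string& password)
{
    unsigned char out[CHMAC_SHA256::OUTPUT_SIZE];
    CHMAC_SHA256(reinterpret_cast<const unsigned char*>(salt.data()), salt.size())
        .Write(reinterpret_cast<const unsigned char*>(password.data()), password.size())
        .Finalize(out);

    std::string hex;
    for (unsigned char b : out) {
        char buf[3];
        std::snprintf(buf, sizeof(buf), "%02x", b);
        hex += buf;
    }
    return hex;
}
```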
diff --git a/src/.clang-tidy b/src/.clang-tidy
index b2e6914548..b2c1b49588 100644
--- a/src/.clang-tidy
+++ b/src/.clang-tidy
@@ -9,6 +9,7 @@ performance-for-range-copy,
performance-move-const-arg,
performance-no-automatic-move,
performance-unnecessary-copy-initialization,
+readability-const-return-type,
readability-redundant-declaration,
readability-redundant-string-init,
'
@@ -16,4 +17,4 @@ WarningsAsErrors: '*'
CheckOptions:
- key: performance-move-const-arg.CheckTriviallyCopyableMove
value: false
-HeaderFilterRegex: './qt'
+HeaderFilterRegex: '.'
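The newly enabled `readability-const-return-type` check flags `const`-qualified by-value return types, which is what the `arith_uint256.h` cleanup later in this diff addresses. A small, hypothetical illustration (not code from the tree) of why such returns are undesirable:

```cpp
#include <string>
#include <vector>

// A const-qualified by-value return type prevents callers from moving out of
// the returned temporary, which is what readability-const-return-type warns about.
struct Payload {
    std::string data{std::string(1024, 'x')};
};

const Payload MakeConstPayload() { return {}; } // flagged: const return by value
Payload MakePayload() { return {}; }            // preferred

int main()
{
    std::vector<Payload> v;
    v.push_back(MakeConstPayload()); // const rvalue binds to push_back(const Payload&): copy
    v.push_back(MakePayload());      // non-const rvalue binds to push_back(Payload&&): move
}
```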
diff --git a/src/Makefile.am b/src/Makefile.am
index 35b0ad24c3..7b9ffe427d 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -20,6 +20,7 @@ noinst_LTLIBRARIES =
bin_PROGRAMS =
noinst_PROGRAMS =
+check_PROGRAMS =
TESTS =
BENCHMARKS =
@@ -203,8 +204,10 @@ BITCOIN_CORE_H = \
node/chainstate.h \
node/chainstatemanager_args.h \
node/coin.h \
+ node/coins_view_args.h \
node/connection_types.h \
node/context.h \
+ node/database_args.h \
node/eviction.h \
node/interface_ui.h \
node/mempool_args.h \
@@ -387,8 +390,10 @@ libbitcoin_node_a_SOURCES = \
node/chainstate.cpp \
node/chainstatemanager_args.cpp \
node/coin.cpp \
+ node/coins_view_args.cpp \
node/connection_types.cpp \
node/context.cpp \
+ node/database_args.cpp \
node/eviction.cpp \
node/interface_ui.cpp \
node/interfaces.cpp \
diff --git a/src/Makefile.minisketch.include b/src/Makefile.minisketch.include
index b337f48349..1363bec34e 100644
--- a/src/Makefile.minisketch.include
+++ b/src/Makefile.minisketch.include
@@ -31,7 +31,7 @@ if ENABLE_TESTS
if !ENABLE_FUZZ
MINISKETCH_TEST = minisketch/test
TESTS += $(MINISKETCH_TEST)
-noinst_PROGRAMS += $(MINISKETCH_TEST)
+check_PROGRAMS += $(MINISKETCH_TEST)
minisketch_test_SOURCES = $(MINISKETCH_TEST_SOURCES_INT)
minisketch_test_CPPFLAGS = $(AM_CPPFLAGS) $(LIBMINISKETCH_CPPFLAGS)
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 1a29e9a47a..fa77e28736 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -162,7 +162,8 @@ BITCOIN_TESTS =\
test/validation_flush_tests.cpp \
test/validation_tests.cpp \
test/validationinterface_tests.cpp \
- test/versionbits_tests.cpp
+ test/versionbits_tests.cpp \
+ test/xoroshiro128plusplus_tests.cpp
if ENABLE_WALLET
BITCOIN_TESTS += \
@@ -248,6 +249,7 @@ test_fuzz_fuzz_SOURCES = \
test/fuzz/chain.cpp \
test/fuzz/checkqueue.cpp \
test/fuzz/coins_view.cpp \
+ test/fuzz/coinscache_sim.cpp \
test/fuzz/connman.cpp \
test/fuzz/crypto.cpp \
test/fuzz/crypto_aes256.cpp \
@@ -293,6 +295,7 @@ test_fuzz_fuzz_SOURCES = \
test/fuzz/parse_numbers.cpp \
test/fuzz/parse_script.cpp \
test/fuzz/parse_univalue.cpp \
+ test/fuzz/partially_downloaded_block.cpp \
test/fuzz/policy_estimator.cpp \
test/fuzz/policy_estimator_io.cpp \
test/fuzz/pow.cpp \
diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include
index a4e8b3f842..aefefe789a 100644
--- a/src/Makefile.test_util.include
+++ b/src/Makefile.test_util.include
@@ -10,15 +10,19 @@ EXTRA_LIBRARIES += \
TEST_UTIL_H = \
test/util/blockfilter.h \
test/util/chainstate.h \
+ test/util/coins.h \
+ test/util/json.h \
test/util/logging.h \
test/util/mining.h \
test/util/net.h \
+ test/util/random.h \
test/util/script.h \
test/util/setup_common.h \
test/util/str.h \
test/util/transaction_utils.h \
test/util/txmempool.h \
- test/util/validation.h
+ test/util/validation.h \
+ test/util/xoroshiro128plusplus.h
if ENABLE_WALLET
TEST_UTIL_H += wallet/test/util.h
@@ -28,6 +32,8 @@ libtest_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(BOOST_CPPFLAGS)
libtest_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libtest_util_a_SOURCES = \
test/util/blockfilter.cpp \
+ test/util/coins.cpp \
+ test/util/json.cpp \
test/util/logging.cpp \
test/util/mining.cpp \
test/util/net.cpp \
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index d95c07d6a8..7be13c8f1e 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -34,10 +34,9 @@ bool SerializeDB(Stream& stream, const Data& data)
{
// Write and commit header, data
try {
- CHashWriter hasher(stream.GetType(), stream.GetVersion());
- stream << Params().MessageStart() << data;
- hasher << Params().MessageStart() << data;
- stream << hasher.GetHash();
+ HashedSourceWriter hashwriter{stream};
+ hashwriter << Params().MessageStart() << data;
+ stream << hashwriter.GetHash();
} catch (const std::exception& e) {
return error("%s: Serialize or I/O error - %s", __func__, e.what());
}
@@ -191,7 +190,7 @@ std::optional<bilingual_str> LoadAddrman(const NetGroupManager& netgroupman, con
const auto path_addr{args.GetDataDirNet() / "peers.dat"};
try {
DeserializeFileDB(path_addr, *addrman, CLIENT_VERSION);
- LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman->size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
+ LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman->Size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
} catch (const DbNotFoundError&) {
// Addrman can be in an inconsistent state after failure, reset it
addrman = std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman);
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 91eedeebe1..f5ca9a5c34 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -291,6 +291,7 @@ void AddrManImpl::Unserialize(Stream& s_)
mapAddr[info] = n;
info.nRandomPos = vRandom.size();
vRandom.push_back(n);
+ m_network_counts[info.GetNetwork()].n_new++;
}
nIdCount = nNew;
@@ -310,6 +311,7 @@ void AddrManImpl::Unserialize(Stream& s_)
mapAddr[info] = nIdCount;
vvTried[nKBucket][nKBucketPos] = nIdCount;
nIdCount++;
+ m_network_counts[info.GetNetwork()].n_tried++;
} else {
nLost++;
}
@@ -425,6 +427,8 @@ AddrInfo* AddrManImpl::Create(const CAddress& addr, const CNetAddr& addrSource,
mapAddr[addr] = nId;
mapInfo[nId].nRandomPos = vRandom.size();
vRandom.push_back(nId);
+ nNew++;
+ m_network_counts[addr.GetNetwork()].n_new++;
if (pnId)
*pnId = nId;
return &mapInfo[nId];
@@ -464,6 +468,7 @@ void AddrManImpl::Delete(int nId)
assert(info.nRefCount == 0);
SwapRandom(info.nRandomPos, vRandom.size() - 1);
+ m_network_counts[info.GetNetwork()].n_new--;
vRandom.pop_back();
mapAddr.erase(info);
mapInfo.erase(nId);
@@ -481,7 +486,7 @@ void AddrManImpl::ClearNew(int nUBucket, int nUBucketPos)
assert(infoDelete.nRefCount > 0);
infoDelete.nRefCount--;
vvNew[nUBucket][nUBucketPos] = -1;
- LogPrint(BCLog::ADDRMAN, "Removed %s from new[%i][%i]\n", infoDelete.ToString(), nUBucket, nUBucketPos);
+ LogPrint(BCLog::ADDRMAN, "Removed %s from new[%i][%i]\n", infoDelete.ToStringAddrPort(), nUBucket, nUBucketPos);
if (infoDelete.nRefCount == 0) {
Delete(nIdDelete);
}
@@ -504,6 +509,7 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId)
}
}
nNew--;
+ m_network_counts[info.GetNetwork()].n_new--;
assert(info.nRefCount == 0);
@@ -522,6 +528,7 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId)
infoOld.fInTried = false;
vvTried[nKBucket][nKBucketPos] = -1;
nTried--;
+ m_network_counts[infoOld.GetNetwork()].n_tried--;
// find which new bucket it belongs to
int nUBucket = infoOld.GetNewBucket(nKey, m_netgroupman);
@@ -533,14 +540,16 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId)
infoOld.nRefCount = 1;
vvNew[nUBucket][nUBucketPos] = nIdEvict;
nNew++;
+ m_network_counts[infoOld.GetNetwork()].n_new++;
LogPrint(BCLog::ADDRMAN, "Moved %s from tried[%i][%i] to new[%i][%i] to make space\n",
- infoOld.ToString(), nKBucket, nKBucketPos, nUBucket, nUBucketPos);
+ infoOld.ToStringAddrPort(), nKBucket, nKBucketPos, nUBucket, nUBucketPos);
}
assert(vvTried[nKBucket][nKBucketPos] == -1);
vvTried[nKBucket][nKBucketPos] = nId;
nTried++;
info.fInTried = true;
+ m_network_counts[info.GetNetwork()].n_tried++;
}
bool AddrManImpl::AddSingle(const CAddress& addr, const CNetAddr& source, std::chrono::seconds time_penalty)
@@ -591,7 +600,6 @@ bool AddrManImpl::AddSingle(const CAddress& addr, const CNetAddr& source, std::c
} else {
pinfo = Create(addr, source, &nId);
pinfo->nTime = std::max(NodeSeconds{0s}, pinfo->nTime - time_penalty);
- nNew++;
}
int nUBucket = pinfo->GetNewBucket(nKey, source, m_netgroupman);
@@ -610,7 +618,7 @@ bool AddrManImpl::AddSingle(const CAddress& addr, const CNetAddr& source, std::c
pinfo->nRefCount++;
vvNew[nUBucket][nUBucketPos] = nId;
LogPrint(BCLog::ADDRMAN, "Added %s mapped to AS%i to new[%i][%i]\n",
- addr.ToString(), m_netgroupman.GetMappedAS(addr), nUBucket, nUBucketPos);
+ addr.ToStringAddrPort(), m_netgroupman.GetMappedAS(addr), nUBucket, nUBucketPos);
} else {
if (pinfo->nRefCount == 0) {
Delete(nId);
@@ -661,15 +669,15 @@ bool AddrManImpl::Good_(const CService& addr, bool test_before_evict, NodeSecond
// Output the entry we'd be colliding with, for debugging purposes
auto colliding_entry = mapInfo.find(vvTried[tried_bucket][tried_bucket_pos]);
LogPrint(BCLog::ADDRMAN, "Collision with %s while attempting to move %s to tried table. Collisions=%d\n",
- colliding_entry != mapInfo.end() ? colliding_entry->second.ToString() : "",
- addr.ToString(),
+ colliding_entry != mapInfo.end() ? colliding_entry->second.ToStringAddrPort() : "",
+ addr.ToStringAddrPort(),
m_tried_collisions.size());
return false;
} else {
// move nId to the tried tables
MakeTried(info, nId);
LogPrint(BCLog::ADDRMAN, "Moved %s mapped to AS%i to tried[%i][%i]\n",
- addr.ToString(), m_netgroupman.GetMappedAS(addr), tried_bucket, tried_bucket_pos);
+ addr.ToStringAddrPort(), m_netgroupman.GetMappedAS(addr), tried_bucket, tried_bucket_pos);
return true;
}
}
@@ -681,7 +689,7 @@ bool AddrManImpl::Add_(const std::vector<CAddress>& vAddr, const CNetAddr& sourc
added += AddSingle(*it, source, time_penalty) ? 1 : 0;
}
if (added > 0) {
- LogPrint(BCLog::ADDRMAN, "Added %i addresses (of %i) from %s: %i tried, %i new\n", added, vAddr.size(), source.ToString(), nTried, nNew);
+ LogPrint(BCLog::ADDRMAN, "Added %i addresses (of %i) from %s: %i tried, %i new\n", added, vAddr.size(), source.ToStringAddr(), nTried, nNew);
}
return added > 0;
}
@@ -738,7 +746,7 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool newOnly) const
const AddrInfo& info{it_found->second};
// With probability GetChance() * fChanceFactor, return the entry.
if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30)) {
- LogPrint(BCLog::ADDRMAN, "Selected %s from tried\n", info.ToString());
+ LogPrint(BCLog::ADDRMAN, "Selected %s from tried\n", info.ToStringAddrPort());
return {info, info.m_last_try};
}
// Otherwise start over with a (likely) different bucket, and increased chance factor.
@@ -766,7 +774,7 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool newOnly) const
const AddrInfo& info{it_found->second};
// With probability GetChance() * fChanceFactor, return the entry.
if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30)) {
- LogPrint(BCLog::ADDRMAN, "Selected %s from new\n", info.ToString());
+ LogPrint(BCLog::ADDRMAN, "Selected %s from new\n", info.ToStringAddrPort());
return {info, info.m_last_try};
}
// Otherwise start over with a (likely) different bucket, and increased chance factor.
@@ -883,7 +891,7 @@ void AddrManImpl::ResolveCollisions_()
// Give address at least 60 seconds to successfully connect
if (current_time - info_old.m_last_try > 60s) {
- LogPrint(BCLog::ADDRMAN, "Replacing %s with %s in tried table\n", info_old.ToString(), info_new.ToString());
+ LogPrint(BCLog::ADDRMAN, "Replacing %s with %s in tried table\n", info_old.ToStringAddrPort(), info_new.ToStringAddrPort());
// Replaces an existing address already in the tried table with the new address
Good_(info_new, false, current_time);
@@ -893,7 +901,7 @@ void AddrManImpl::ResolveCollisions_()
// If the collision hasn't resolved in some reasonable amount of time,
// just evict the old entry -- we must not be able to
// connect to it for some reason.
- LogPrint(BCLog::ADDRMAN, "Unable to test; replacing %s with %s in tried table anyway\n", info_old.ToString(), info_new.ToString());
+ LogPrint(BCLog::ADDRMAN, "Unable to test; replacing %s with %s in tried table anyway\n", info_old.ToStringAddrPort(), info_new.ToStringAddrPort());
Good_(info_new, false, current_time);
erase_collision = true;
}
@@ -962,6 +970,28 @@ std::optional<AddressPosition> AddrManImpl::FindAddressEntry_(const CAddress& ad
}
}
+size_t AddrManImpl::Size_(std::optional<Network> net, std::optional<bool> in_new) const
+{
+ AssertLockHeld(cs);
+
+ if (!net.has_value()) {
+ if (in_new.has_value()) {
+ return *in_new ? nNew : nTried;
+ } else {
+ return vRandom.size();
+ }
+ }
+ if (auto it = m_network_counts.find(*net); it != m_network_counts.end()) {
+ auto net_count = it->second;
+ if (in_new.has_value()) {
+ return *in_new ? net_count.n_new : net_count.n_tried;
+ } else {
+ return net_count.n_new + net_count.n_tried;
+ }
+ }
+ return 0;
+}
+
void AddrManImpl::Check() const
{
AssertLockHeld(cs);
@@ -986,6 +1016,7 @@ int AddrManImpl::CheckAddrman() const
std::unordered_set<int> setTried;
std::unordered_map<int, int> mapNew;
+ std::unordered_map<Network, NewTriedCount> local_counts;
if (vRandom.size() != (size_t)(nTried + nNew))
return -7;
@@ -1000,12 +1031,14 @@ int AddrManImpl::CheckAddrman() const
if (info.nRefCount)
return -2;
setTried.insert(n);
+ local_counts[info.GetNetwork()].n_tried++;
} else {
if (info.nRefCount < 0 || info.nRefCount > ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
return -3;
if (!info.nRefCount)
return -4;
mapNew[n] = info.nRefCount;
+ local_counts[info.GetNetwork()].n_new++;
}
const auto it{mapAddr.find(info)};
if (it == mapAddr.end() || it->second != n) {
@@ -1065,13 +1098,27 @@ int AddrManImpl::CheckAddrman() const
if (nKey.IsNull())
return -16;
+ // It's possible that m_network_counts may have all-zero entries that local_counts
+ // doesn't have if addrs from a network were being added and then removed again in the past.
+ if (m_network_counts.size() < local_counts.size()) {
+ return -20;
+ }
+ for (const auto& [net, count] : m_network_counts) {
+ if (local_counts[net].n_new != count.n_new || local_counts[net].n_tried != count.n_tried) {
+ return -21;
+ }
+ }
+
return 0;
}
-size_t AddrManImpl::size() const
+size_t AddrManImpl::Size(std::optional<Network> net, std::optional<bool> in_new) const
{
- LOCK(cs); // TODO: Cache this in an atomic to avoid this overhead
- return vRandom.size();
+ LOCK(cs);
+ Check();
+ auto ret = Size_(net, in_new);
+ Check();
+ return ret;
}
bool AddrManImpl::Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, std::chrono::seconds time_penalty)
@@ -1178,17 +1225,16 @@ void AddrMan::Unserialize(Stream& s_)
}
// explicit instantiation
-template void AddrMan::Serialize(CHashWriter& s) const;
-template void AddrMan::Serialize(CAutoFile& s) const;
+template void AddrMan::Serialize(HashedSourceWriter<CAutoFile>& s) const;
template void AddrMan::Serialize(CDataStream& s) const;
template void AddrMan::Unserialize(CAutoFile& s);
template void AddrMan::Unserialize(CHashVerifier<CAutoFile>& s);
template void AddrMan::Unserialize(CDataStream& s);
template void AddrMan::Unserialize(CHashVerifier<CDataStream>& s);
-size_t AddrMan::size() const
+size_t AddrMan::Size(std::optional<Network> net, std::optional<bool> in_new) const
{
- return m_impl->size();
+ return m_impl->Size(net, in_new);
}
bool AddrMan::Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, std::chrono::seconds time_penalty)
diff --git a/src/addrman.h b/src/addrman.h
index 0f1f808fa1..4985fc764c 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -99,8 +99,14 @@ public:
template <typename Stream>
void Unserialize(Stream& s_);
- //! Return the number of (unique) addresses in all tables.
- size_t size() const;
+ /**
+ * Return size information about addrman.
+ *
+ * @param[in] net Select addresses only from specified network (nullopt = all)
+ * @param[in] in_new Select addresses only from one table (true = new, false = tried, nullopt = both)
+ * @return Number of unique addresses that match specified options.
+ */
+ size_t Size(std::optional<Network> net = std::nullopt, std::optional<bool> in_new = std::nullopt) const;
/**
* Attempt to add one or more addresses to addrman's new table.
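A short usage sketch of the new `Size()` overload documented above, assuming an existing `AddrMan` instance and the `Network` enum from `netaddress.h` (the surrounding function is illustrative only):

```cpp
#include <addrman.h>
#include <logging.h>
#include <netaddress.h>

#include <optional>

// Illustrative only: querying the new per-network / per-table counts.
void LogAddrmanCounts(const AddrMan& addrman)
{
    const size_t total      = addrman.Size();                    // all unique addresses, both tables
    const size_t tried_only = addrman.Size(std::nullopt, false); // tried table, all networks
    const size_t onion_new  = addrman.Size(NET_ONION, true);     // new-table entries for onion peers
    LogPrintf("addrman: %d total, %d tried, %d new onion\n", total, tried_only, onion_new);
}
```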
diff --git a/src/addrman_impl.h b/src/addrman_impl.h
index 39754b673e..94fe81aca9 100644
--- a/src/addrman_impl.h
+++ b/src/addrman_impl.h
@@ -112,7 +112,7 @@ public:
template <typename Stream>
void Unserialize(Stream& s_) EXCLUSIVE_LOCKS_REQUIRED(!cs);
- size_t size() const EXCLUSIVE_LOCKS_REQUIRED(!cs);
+ size_t Size(std::optional<Network> net, std::optional<bool> in_new) const EXCLUSIVE_LOCKS_REQUIRED(!cs);
bool Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, std::chrono::seconds time_penalty)
EXCLUSIVE_LOCKS_REQUIRED(!cs);
@@ -215,6 +215,14 @@ private:
/** Reference to the netgroup manager. netgroupman must be constructed before addrman and destructed after. */
const NetGroupManager& m_netgroupman;
+ struct NewTriedCount {
+ size_t n_new;
+ size_t n_tried;
+ };
+
+ /** Number of entries in addrman per network and new/tried table. */
+ std::unordered_map<Network, NewTriedCount> m_network_counts GUARDED_BY(cs);
+
//! Find an entry.
AddrInfo* Find(const CService& addr, int* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -257,6 +265,8 @@ private:
std::optional<AddressPosition> FindAddressEntry_(const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ size_t Size_(std::optional<Network> net, std::optional<bool> in_new) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
//! Consistency check, taking into account m_consistency_check_ratio.
//! Will std::abort if an inconsistency is detected.
void Check() const EXCLUSIVE_LOCKS_REQUIRED(cs);
diff --git a/src/arith_uint256.h b/src/arith_uint256.h
index a6065dd9bd..c710fe9471 100644
--- a/src/arith_uint256.h
+++ b/src/arith_uint256.h
@@ -58,7 +58,7 @@ public:
explicit base_uint(const std::string& str);
- const base_uint operator~() const
+ base_uint operator~() const
{
base_uint ret;
for (int i = 0; i < WIDTH; i++)
@@ -66,7 +66,7 @@ public:
return ret;
}
- const base_uint operator-() const
+ base_uint operator-() const
{
base_uint ret;
for (int i = 0; i < WIDTH; i++)
@@ -171,7 +171,7 @@ public:
return *this;
}
- const base_uint operator++(int)
+ base_uint operator++(int)
{
// postfix operator
const base_uint ret = *this;
@@ -188,7 +188,7 @@ public:
return *this;
}
- const base_uint operator--(int)
+ base_uint operator--(int)
{
// postfix operator
const base_uint ret = *this;
@@ -199,16 +199,16 @@ public:
int CompareTo(const base_uint& b) const;
bool EqualTo(uint64_t b) const;
- friend inline const base_uint operator+(const base_uint& a, const base_uint& b) { return base_uint(a) += b; }
- friend inline const base_uint operator-(const base_uint& a, const base_uint& b) { return base_uint(a) -= b; }
- friend inline const base_uint operator*(const base_uint& a, const base_uint& b) { return base_uint(a) *= b; }
- friend inline const base_uint operator/(const base_uint& a, const base_uint& b) { return base_uint(a) /= b; }
- friend inline const base_uint operator|(const base_uint& a, const base_uint& b) { return base_uint(a) |= b; }
- friend inline const base_uint operator&(const base_uint& a, const base_uint& b) { return base_uint(a) &= b; }
- friend inline const base_uint operator^(const base_uint& a, const base_uint& b) { return base_uint(a) ^= b; }
- friend inline const base_uint operator>>(const base_uint& a, int shift) { return base_uint(a) >>= shift; }
- friend inline const base_uint operator<<(const base_uint& a, int shift) { return base_uint(a) <<= shift; }
- friend inline const base_uint operator*(const base_uint& a, uint32_t b) { return base_uint(a) *= b; }
+ friend inline base_uint operator+(const base_uint& a, const base_uint& b) { return base_uint(a) += b; }
+ friend inline base_uint operator-(const base_uint& a, const base_uint& b) { return base_uint(a) -= b; }
+ friend inline base_uint operator*(const base_uint& a, const base_uint& b) { return base_uint(a) *= b; }
+ friend inline base_uint operator/(const base_uint& a, const base_uint& b) { return base_uint(a) /= b; }
+ friend inline base_uint operator|(const base_uint& a, const base_uint& b) { return base_uint(a) |= b; }
+ friend inline base_uint operator&(const base_uint& a, const base_uint& b) { return base_uint(a) &= b; }
+ friend inline base_uint operator^(const base_uint& a, const base_uint& b) { return base_uint(a) ^= b; }
+ friend inline base_uint operator>>(const base_uint& a, int shift) { return base_uint(a) >>= shift; }
+ friend inline base_uint operator<<(const base_uint& a, int shift) { return base_uint(a) <<= shift; }
+ friend inline base_uint operator*(const base_uint& a, uint32_t b) { return base_uint(a) *= b; }
friend inline bool operator==(const base_uint& a, const base_uint& b) { return memcmp(a.pn, b.pn, sizeof(a.pn)) == 0; }
friend inline bool operator!=(const base_uint& a, const base_uint& b) { return memcmp(a.pn, b.pn, sizeof(a.pn)) != 0; }
friend inline bool operator>(const base_uint& a, const base_uint& b) { return a.CompareTo(b) > 0; }
diff --git a/src/bench/chacha20.cpp b/src/bench/chacha20.cpp
index 656fb833e7..115cd064bd 100644
--- a/src/bench/chacha20.cpp
+++ b/src/bench/chacha20.cpp
@@ -14,9 +14,9 @@ static const uint64_t BUFFER_SIZE_LARGE = 1024*1024;
static void CHACHA20(benchmark::Bench& bench, size_t buffersize)
{
std::vector<uint8_t> key(32,0);
- ChaCha20 ctx(key.data(), key.size());
+ ChaCha20 ctx(key.data());
ctx.SetIV(0);
- ctx.Seek(0);
+ ctx.Seek64(0);
std::vector<uint8_t> in(buffersize,0);
std::vector<uint8_t> out(buffersize,0);
bench.batch(in.size()).unit("byte").run([&] {
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index 087e1442fe..11b0e0dee2 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -45,7 +45,7 @@ static void CoinSelection(benchmark::Bench& bench)
{
NodeContext node;
auto chain = interfaces::MakeChain(node);
- CWallet wallet(chain.get(), "", gArgs, CreateDummyWalletDatabase());
+ CWallet wallet(chain.get(), "", CreateDummyWalletDatabase());
std::vector<std::unique_ptr<CWalletTx>> wtxs;
LOCK(wallet.cs_wallet);
diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp
index bd524e7458..cf8d807d7b 100644
--- a/src/bench/crypto_hash.cpp
+++ b/src/bench/crypto_hash.cpp
@@ -18,7 +18,7 @@
/* Number of bytes to hash per iteration */
static const uint64_t BUFFER_SIZE = 1000*1000;
-static void RIPEMD160(benchmark::Bench& bench)
+static void BenchRIPEMD160(benchmark::Bench& bench)
{
uint8_t hash[CRIPEMD160::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
@@ -150,7 +150,7 @@ static void MuHashPrecompute(benchmark::Bench& bench)
});
}
-BENCHMARK(RIPEMD160, benchmark::PriorityLevel::HIGH);
+BENCHMARK(BenchRIPEMD160, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA1, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA512, benchmark::PriorityLevel::HIGH);
diff --git a/src/bench/gcs_filter.cpp b/src/bench/gcs_filter.cpp
index 51fbe15760..0af4ee98fe 100644
--- a/src/bench/gcs_filter.cpp
+++ b/src/bench/gcs_filter.cpp
@@ -5,7 +5,7 @@
#include <bench/bench.h>
#include <blockfilter.h>
-static const GCSFilter::ElementSet GenerateGCSTestElements()
+static GCSFilter::ElementSet GenerateGCSTestElements()
{
GCSFilter::ElementSet elements;
diff --git a/src/bench/load_external.cpp b/src/bench/load_external.cpp
index be01b2a483..0fd842c7c3 100644
--- a/src/bench/load_external.cpp
+++ b/src/bench/load_external.cpp
@@ -27,7 +27,7 @@ static void LoadExternalBlockFile(benchmark::Bench& bench)
// Create a single block as in the blocks files (magic bytes, block size,
// block data) as a stream object.
const fs::path blkfile{testing_setup.get()->m_path_root / "blk.dat"};
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
auto params{testing_setup->m_node.chainman->GetParams()};
ss << params.MessageStart();
ss << static_cast<uint32_t>(benchmark::data::block413567.size());
diff --git a/src/bench/nanobench.h b/src/bench/nanobench.h
index 70e02083c9..8b3dc6c71c 100644
--- a/src/bench/nanobench.h
+++ b/src/bench/nanobench.h
@@ -7,7 +7,7 @@
//
// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
// SPDX-License-Identifier: MIT
-// Copyright (c) 2019-2021 Martin Ankerl <martin.ankerl@gmail.com>
+// Copyright (c) 2019-2023 Martin Leitner-Ankerl <martin.ankerl@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -31,19 +31,20 @@
#define ANKERL_NANOBENCH_H_INCLUDED
// see https://semver.org/
-#define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes
-#define ANKERL_NANOBENCH_VERSION_MINOR 3 // backwards-compatible changes
-#define ANKERL_NANOBENCH_VERSION_PATCH 6 // backwards-compatible bug fixes
+#define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes
+#define ANKERL_NANOBENCH_VERSION_MINOR 3 // backwards-compatible changes
+#define ANKERL_NANOBENCH_VERSION_PATCH 10 // backwards-compatible bug fixes
///////////////////////////////////////////////////////////////////////////////////////////////////
// public facing api - as minimal as possible
///////////////////////////////////////////////////////////////////////////////////////////////////
-#include <chrono> // high_resolution_clock
-#include <cstring> // memcpy
-#include <iosfwd> // for std::ostream* custom output target in Config
-#include <string> // all names
-#include <vector> // holds all results
+#include <chrono> // high_resolution_clock
+#include <cstring> // memcpy
+#include <iosfwd> // for std::ostream* custom output target in Config
+#include <string> // all names
+#include <unordered_map> // holds context information of results
+#include <vector> // holds all results
#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
@@ -91,7 +92,7 @@
#define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0
#if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS)
# include <linux/version.h>
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
// PERF_COUNT_HW_REF_CPU_CYCLES only available since kernel 3.3
// PERF_FLAG_FD_CLOEXEC since kernel 3.14
# undef ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS
@@ -144,43 +145,45 @@ class BigO;
* * `{{#result}}` Marks the begin of the result layer. Whatever comes after this will be instantiated as often as
* a benchmark result is available. Within it, you can use these tags:
*
- * * `{{title}}` See Bench::title().
+ * * `{{title}}` See Bench::title.
*
- * * `{{name}}` Benchmark name, usually directly provided with Bench::run(), but can also be set with Bench::name().
+ * * `{{name}}` Benchmark name, usually directly provided with Bench::run, but can also be set with Bench::name.
*
- * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::title().
+ * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::unit.
*
- * * `{{batch}}` Batch size, see Bench::batch().
+ * * `{{batch}}` Batch size, see Bench::batch.
*
- * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN().
+ * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN.
*
- * * `{{epochs}}` Number of epochs, see Bench::epochs().
+ * * `{{epochs}}` Number of epochs, see Bench::epochs.
*
* * `{{clockResolution}}` Accuracy of the clock, i.e. what's the smallest time possible to measure with the clock.
* For modern systems, this can be around 20 ns. This value is automatically determined by nanobench at the first
* benchmark that is run, and used as a static variable throughout the application's runtime.
*
- * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. See Bench::clockResolutionMultiple().
+ * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. See Bench::clockResolutionMultiple.
* This is the target runtime for each measurement (epoch). That means the more accurate your clock is, the faster
* will be the benchmark. Basing the measurement's runtime on the clock resolution is the main reason why nanobench is so fast.
*
* * `{{maxEpochTime}}` Configuration for a maximum time each measurement (epoch) is allowed to take. Note that at least
- * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime().
+ * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime.
*
- * * `{{minEpochTime}}` Minimum epoch time, usually not set. See Bench::minEpochTime().
+ * * `{{minEpochTime}}` Minimum epoch time, defaults to 1ms. See Bench::minEpochTime.
*
- * * `{{minEpochIterations}}` See Bench::minEpochIterations().
+ * * `{{minEpochIterations}}` See Bench::minEpochIterations.
*
- * * `{{epochIterations}}` See Bench::epochIterations().
+ * * `{{epochIterations}}` See Bench::epochIterations.
*
- * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup().
+ * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup.
*
- * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative().
+ * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative.
+ *
+ * * `{{context(variableName)}}` See Bench::context.
*
* Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations
* are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`. If performance counters
* are available (currently only on current Linux systems), you also have `pagefaults`, `cpucycles`,
- * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. All the measuers (except `iterations`) are
+ * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. All the measures (except `iterations`) are
* provided for a single iteration (so `elapsed` is the time a single iteration took). The following tags are available:
*
* * `{{median(<name>)}}` Calculate median of a measurement data set, e.g. `{{median(elapsed)}}`.
@@ -201,7 +204,7 @@ class BigO;
* This measurement is a bit hard to interpret, but it is very robust against outliers. E.g. a value of 5% means that half of the
* measurements deviate less than 5% from the median, and the other deviate more than 5% from the median.
*
- * * `{{sum(<name>)}}` Sums of all the measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations
+ * * `{{sum(<name>)}}` Sum of all the measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations
* measured in this benchmark.
*
* * `{{minimum(<name>)}}` Minimum of all measurements.
@@ -244,21 +247,21 @@ class BigO;
* For the layer tags *result* and *measurement* you additionally can use these special markers:
*
* * ``{{#-first}}`` - Begin marker of a template that will be instantiated *only for the first* entry in the layer. Use is only
- * allowed between the begin and end marker of the layer allowed. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
* ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-first}}``.
*
* * ``{{^-first}}`` - Begin marker of a template that will be instantiated *for each except the first* entry in the layer. This,
- * this is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer allowed.
+ * this is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer.
* So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
*
* * ``{{/-first}}`` - End marker for either ``{{#-first}}`` or ``{{^-first}}``.
*
* * ``{{#-last}}`` - Begin marker of a template that will be instantiated *only for the last* entry in the layer. Use is only
- * allowed between the begin and end marker of the layer allowed. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
* ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-last}}``.
*
* * ``{{^-last}}`` - Begin marker of a template that will be instantiated *for each except the last* entry in the layer. This,
- * this is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer allowed.
+ * this is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer.
* So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
*
* * ``{{/-last}}`` - End marker for either ``{{#-last}}`` or ``{{^-last}}``.
@@ -316,12 +319,12 @@ char const* csv() noexcept;
See the tutorial at :ref:`tutorial-template-html` for an example.
@endverbatim
- @see ankerl::nanobench::render()
+ @see also ankerl::nanobench::render()
*/
char const* htmlBoxplot() noexcept;
/*!
- @brief Output in pyperf compatible JSON format, which can be used for more analyzations.
+    @brief Output in pyperf compatible JSON format, which can be used for further analysis.
@verbatim embed:rst
See the tutorial at :ref:`tutorial-template-pyperf` for an example how to further analyze the output.
@endverbatim
@@ -378,30 +381,32 @@ struct PerfCountSet {
ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
struct Config {
// actual benchmark config
- std::string mBenchmarkTitle = "benchmark";
- std::string mBenchmarkName = "noname";
- std::string mUnit = "op";
- double mBatch = 1.0;
- double mComplexityN = -1.0;
- size_t mNumEpochs = 11;
- size_t mClockResolutionMultiple = static_cast<size_t>(1000);
- std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100);
- std::chrono::nanoseconds mMinEpochTime{};
- uint64_t mMinEpochIterations{1};
- uint64_t mEpochIterations{0}; // If not 0, run *exactly* these number of iterations per epoch.
- uint64_t mWarmup = 0;
- std::ostream* mOut = nullptr;
- std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1};
- std::string mTimeUnitName = "ns";
- bool mShowPerformanceCounters = true;
- bool mIsRelative = false;
+ std::string mBenchmarkTitle = "benchmark"; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::string mBenchmarkName = "noname"; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::string mUnit = "op"; // NOLINT(misc-non-private-member-variables-in-classes)
+ double mBatch = 1.0; // NOLINT(misc-non-private-member-variables-in-classes)
+ double mComplexityN = -1.0; // NOLINT(misc-non-private-member-variables-in-classes)
+ size_t mNumEpochs = 11; // NOLINT(misc-non-private-member-variables-in-classes)
+ size_t mClockResolutionMultiple = static_cast<size_t>(1000); // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100); // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mMinEpochTime = std::chrono::milliseconds(1); // NOLINT(misc-non-private-member-variables-in-classes)
+ uint64_t mMinEpochIterations{1}; // NOLINT(misc-non-private-member-variables-in-classes)
+ // If not 0, run *exactly* these number of iterations per epoch.
+ uint64_t mEpochIterations{0}; // NOLINT(misc-non-private-member-variables-in-classes)
+ uint64_t mWarmup = 0; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::ostream* mOut = nullptr; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1}; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::string mTimeUnitName = "ns"; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool mShowPerformanceCounters = true; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool mIsRelative = false; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::unordered_map<std::string, std::string> mContext{}; // NOLINT(misc-non-private-member-variables-in-classes)
Config();
~Config();
- Config& operator=(Config const&);
- Config& operator=(Config&&);
- Config(Config const&);
- Config(Config&&) noexcept;
+ Config& operator=(Config const& other);
+ Config& operator=(Config&& other) noexcept;
+ Config(Config const& other);
+ Config(Config&& other) noexcept;
};
ANKERL_NANOBENCH(IGNORE_PADDED_POP)
@@ -421,13 +426,13 @@ public:
_size
};
- explicit Result(Config const& benchmarkConfig);
+ explicit Result(Config benchmarkConfig);
~Result();
- Result& operator=(Result const&);
- Result& operator=(Result&&);
- Result(Result const&);
- Result(Result&&) noexcept;
+ Result& operator=(Result const& other);
+ Result& operator=(Result&& other) noexcept;
+ Result(Result const& other);
+ Result(Result&& other) noexcept;
// adds new measurement results
// all values are scaled by iters (except iters...)
@@ -442,6 +447,8 @@ public:
ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::string const& context(char const* variableName) const;
+ ANKERL_NANOBENCH(NODISCARD) std::string const& context(std::string const& variableName) const;
ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const;
@@ -485,9 +492,9 @@ public:
static constexpr uint64_t(max)();
/**
- * As a safety precausion, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the
+ * As a safety precaution, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the
* same sequence, which is generally not what one wants. Instead create a new rng with the default constructor Rng(), which is
- * automatically seeded from `std::random_device`. If you really need a copy, use copy().
+ * automatically seeded from `std::random_device`. If you really need a copy, use `copy()`.
*/
Rng(Rng const&) = delete;
@@ -528,7 +535,7 @@ public:
*/
explicit Rng(uint64_t seed) noexcept;
Rng(uint64_t x, uint64_t y) noexcept;
- Rng(std::vector<uint64_t> const& data);
+ explicit Rng(std::vector<uint64_t> const& data);
/**
* Creates a copy of the Rng, thus the copy provides exactly the same random sequence as the original.
@@ -620,8 +627,8 @@ public:
*/
Bench();
- Bench(Bench&& other);
- Bench& operator=(Bench&& other);
+ Bench(Bench&& other) noexcept;
+ Bench& operator=(Bench&& other) noexcept;
Bench(Bench const& other);
Bench& operator=(Bench const& other);
~Bench() noexcept;
@@ -667,6 +674,10 @@ public:
*/
Bench& title(char const* benchmarkTitle);
Bench& title(std::string const& benchmarkTitle);
+
+ /**
+ * @brief Gets the title of the benchmark
+ */
ANKERL_NANOBENCH(NODISCARD) std::string const& title() const noexcept;
/// Name of the benchmark, will be shown in the table row.
@@ -675,6 +686,31 @@ public:
ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
/**
+ * @brief Set context information.
+ *
+ * The information can be accessed using custom render templates via `{{context(variableName)}}`.
+ * Trying to render a variable that hasn't been set before raises an exception.
+ * Not included in (default) markdown table.
+ *
+ * @see clearContext, render
+ *
+ * @param variableName The name of the context variable.
+ * @param variableValue The value of the context variable.
+ */
+ Bench& context(char const* variableName, char const* variableValue);
+ Bench& context(std::string const& variableName, std::string const& variableValue);
+
+ /**
+ * @brief Reset context information.
+ *
+ * This may improve efficiency when using many context entries,
+ * or improve robustness by removing spurious context entries.
+ *
+ * @see context
+ */
+ Bench& clearContext();
+
+ /**
* @brief Sets the batch size.
*
* E.g. number of processed byte, or some other metric for the size of the processed data in each iteration. If you benchmark
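A hedged sketch of how the new `context()` hook pairs with a custom render template via `{{context(variableName)}}`; the benchmark body and the `input_size` key are made up for illustration, and the include path follows Bitcoin Core's vendored copy:

```cpp
#define ANKERL_NANOBENCH_IMPLEMENT // in exactly one translation unit
#include <bench/nanobench.h>

#include <cmath>
#include <iostream>

int main()
{
    ankerl::nanobench::Bench bench;
    bench.title("context demo")
         .context("input_size", "4096") // arbitrary key/value attached to subsequent runs
         .run("sqrt loop", [] {
             double x = 0.0;
             for (int i = 1; i <= 4096; ++i) x += std::sqrt(static_cast<double>(i));
             ankerl::nanobench::doNotOptimizeAway(x);
         });

    // Pull the context value back out in a custom mustache template.
    ankerl::nanobench::render("{{#result}}{{name}}: input_size={{context(input_size)}}\n{{/result}}",
                              bench.results(), std::cout);
}
```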
@@ -754,9 +790,9 @@ public:
* representation of the benchmarked code's runtime stability.
*
* Choose the value wisely. In practice, 11 has been shown to be a reasonable choice between runtime performance and accuracy.
- * This setting goes hand in hand with minEpocIterations() (or minEpochTime()). If you are more interested in *median* runtime, you
- * might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase minEpochIterations()
- * instead.
+ * This setting goes hand in hand with minEpochIterations() (or minEpochTime()). If you are more interested in *median* runtime,
+ * you might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase
+ * minEpochIterations() instead.
*
* @param numEpochs Number of epochs.
*/
@@ -766,10 +802,10 @@ public:
/**
* @brief Upper limit for the runtime of each epoch.
*
- * As a safety precausion if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per
+ * As a safety precaution if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per
* epoch. Default is 100ms. At least a single evaluation of the benchmark is performed.
*
- * @see minEpochTime(), minEpochIterations()
+ * @see minEpochTime, minEpochIterations
*
* @param t Maximum target runtime for a single epoch.
*/
@@ -782,7 +818,7 @@ public:
* Default is zero, so we are fully relying on clockResolutionMultiple(). In most cases this is exactly what you want. If you see
* that the evaluation is unreliable with a high `err%`, you can increase either minEpochTime() or minEpochIterations().
*
- * @see maxEpochTime(), minEpochIterations()
+     * @see maxEpochTime, minEpochIterations
*
* @param t Minimum time each epoch should take.
*/
@@ -793,9 +829,9 @@ public:
* @brief Sets the minimum number of iterations each epoch should take.
*
* Default is 1, and we rely on clockResolutionMultiple(). If the `err%` is high and you want a more smooth result, you might want
- * to increase the minimum number or iterations, or increase the minEpochTime().
+ * to increase the minimum number of iterations, or increase the minEpochTime().
*
- * @see minEpochTime(), maxEpochTime(), minEpochIterations()
+ * @see minEpochTime, maxEpochTime, minEpochIterations
*
* @param numIters Minimum number of iterations per epoch.
*/
@@ -886,10 +922,10 @@ public:
@endverbatim
@tparam T Any type is cast to `double`.
- @param b Length of N for the next benchmark run, so it is possible to calculate `bigO`.
+ @param n Length of N for the next benchmark run, so it is possible to calculate `bigO`.
*/
template <typename T>
- Bench& complexityN(T b) noexcept;
+ Bench& complexityN(T n) noexcept;
ANKERL_NANOBENCH(NODISCARD) double complexityN() const noexcept;
/*!
@@ -993,7 +1029,7 @@ void doNotOptimizeAway(T const& val);
#else
// These assembly magic is directly from what Google Benchmark is doing. I have previously used what facebook's folly was doing, but
-// this seemd to have compilation problems in some cases. Google Benchmark seemed to be the most well tested anyways.
+// this seemed to have compilation problems in some cases. Google Benchmark seemed to be the most well tested anyways.
// see https://github.com/google/benchmark/blob/master/include/benchmark/benchmark.h#L307
template <typename T>
void doNotOptimizeAway(T const& val) {
@@ -1019,7 +1055,11 @@ void doNotOptimizeAway(T& val) {
ANKERL_NANOBENCH(IGNORE_EFFCPP_PUSH)
class IterationLogic {
public:
- explicit IterationLogic(Bench const& config) noexcept;
+ explicit IterationLogic(Bench const& bench);
+ IterationLogic(IterationLogic&&) = delete;
+ IterationLogic& operator=(IterationLogic&&) = delete;
+ IterationLogic(IterationLogic const&) = delete;
+ IterationLogic& operator=(IterationLogic const&) = delete;
~IterationLogic();
ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept;
@@ -1036,7 +1076,9 @@ ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
class PerformanceCounters {
public:
PerformanceCounters(PerformanceCounters const&) = delete;
+ PerformanceCounters(PerformanceCounters&&) = delete;
PerformanceCounters& operator=(PerformanceCounters const&) = delete;
+ PerformanceCounters& operator=(PerformanceCounters&&) = delete;
PerformanceCounters();
~PerformanceCounters();
@@ -1081,11 +1123,11 @@ public:
: BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
template <typename Op>
- BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
- : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
+ BigO(std::string bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
+ : BigO(std::move(bigOName), mapRangeMeasure(rangeMeasure, rangeToN)) {}
BigO(char const* bigOName, RangeMeasure const& scaledRangeMeasure);
- BigO(std::string const& bigOName, RangeMeasure const& scaledRangeMeasure);
+ BigO(std::string bigOName, RangeMeasure const& scaledRangeMeasure);
ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
ANKERL_NANOBENCH(NODISCARD) double constant() const noexcept;
ANKERL_NANOBENCH(NODISCARD) double normalizedRootMeanSquare() const noexcept;
@@ -1127,7 +1169,7 @@ uint64_t Rng::operator()() noexcept {
ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined")
uint32_t Rng::bounded(uint32_t range) noexcept {
- uint64_t r32 = static_cast<uint32_t>(operator()());
+ uint64_t const r32 = static_cast<uint32_t>(operator()());
auto multiresult = r32 * range;
return static_cast<uint32_t>(multiresult >> 32U);
}
@@ -1136,18 +1178,23 @@ double Rng::uniform01() noexcept {
auto i = (UINT64_C(0x3ff) << 52U) | (operator()() >> 12U);
// can't use union in c++ here for type puning, it's undefined behavior.
// std::memcpy is optimized anyways.
- double d;
+ double d{};
std::memcpy(&d, &i, sizeof(double));
return d - 1.0;
}
template <typename Container>
void Rng::shuffle(Container& container) noexcept {
- auto size = static_cast<uint32_t>(container.size());
- for (auto i = size; i > 1U; --i) {
+ auto i = container.size();
+ while (i > 1U) {
using std::swap;
- auto p = bounded(i); // number in [0, i)
- swap(container[i - 1], container[p]);
+ auto n = operator()();
+ // using decltype(i) instead of size_t to be compatible to containers with 32bit index (see #80)
+ auto b1 = static_cast<decltype(i)>((static_cast<uint32_t>(n) * static_cast<uint64_t>(i)) >> 32U);
+ swap(container[--i], container[b1]);
+
+ auto b2 = static_cast<decltype(i)>(((n >> 32U) * static_cast<uint64_t>(i)) >> 32U);
+ swap(container[--i], container[b2]);
}
}
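The reworked `shuffle()` above draws one 64-bit random value per loop iteration and derives two bounded indices from it with a multiply-shift reduction instead of a modulo. A standalone sketch of that index derivation, with arbitrarily chosen values:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t n = 0x9e3779b97f4a7c15ULL; // stand-in for one Rng::operator()() draw
    uint64_t i = 10;                          // remaining number of unshuffled elements

    // Low 32 bits scaled into [0, i): (uint32(n) * i) >> 32, no modulo needed.
    const uint64_t b1 = (static_cast<uint32_t>(n) * i) >> 32U;
    --i; // the first swap consumed one slot
    // High 32 bits scaled into [0, i) for the second swap of the same iteration.
    const uint64_t b2 = ((n >> 32U) * i) >> 32U;

    std::printf("b1=%llu b2=%llu\n", (unsigned long long)b1, (unsigned long long)b2);
}
```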
@@ -1165,11 +1212,11 @@ Bench& Bench::run(Op&& op) {
while (auto n = iterationLogic.numIters()) {
pc.beginMeasure();
- Clock::time_point before = Clock::now();
+ Clock::time_point const before = Clock::now();
while (n-- > 0) {
op();
}
- Clock::time_point after = Clock::now();
+ Clock::time_point const after = Clock::now();
pc.endMeasure();
pc.updateResults(iterationLogic.numIters());
iterationLogic.add(after - before, pc);
@@ -1270,7 +1317,6 @@ void doNotOptimizeAway(T const& val) {
# include <linux/perf_event.h>
# include <sys/ioctl.h>
# include <sys/syscall.h>
-# include <unistd.h>
# endif
// declarations ///////////////////////////////////////////////////////////////////////////////////
@@ -1436,31 +1482,37 @@ struct Node {
template <size_t N>
// NOLINTNEXTLINE(hicpp-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
bool operator==(char const (&str)[N]) const noexcept {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay)
return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1);
}
};
ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+// NOLINTNEXTLINE(misc-no-recursion)
static std::vector<Node> parseMustacheTemplate(char const** tpl) {
std::vector<Node> nodes;
while (true) {
- auto begin = std::strstr(*tpl, "{{");
- auto end = begin;
+ auto const* begin = std::strstr(*tpl, "{{");
+ auto const* end = begin;
if (begin != nullptr) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
begin += 2;
end = std::strstr(begin, "}}");
}
if (begin == nullptr || end == nullptr) {
// nothing found, finish node
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content});
return nodes;
}
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content});
// we found a tag
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
*tpl = end + 2;
switch (*begin) {
case '/':
@@ -1468,10 +1520,12 @@ static std::vector<Node> parseMustacheTemplate(char const** tpl) {
return nodes;
case '#':
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section});
break;
case '^':
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section});
break;
@@ -1484,8 +1538,8 @@ static std::vector<Node> parseMustacheTemplate(char const** tpl) {
static bool generateFirstLast(Node const& n, size_t idx, size_t size, std::ostream& out) {
ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
- bool matchFirst = n == "-first";
- bool matchLast = n == "-last";
+ bool const matchFirst = n == "-first";
+ bool const matchLast = n == "-last";
if (!matchFirst && !matchLast) {
return false;
}
@@ -1518,7 +1572,7 @@ static bool matchCmdArgs(std::string const& str, std::vector<std::string>& match
matchResult.emplace_back(str.substr(0, idxOpen));
// split by comma
- matchResult.emplace_back(std::string{});
+ matchResult.emplace_back();
for (size_t i = idxOpen + 1; i != idxClose; ++i) {
if (str[i] == ' ' || str[i] == '\t') {
// skip whitespace
@@ -1526,7 +1580,7 @@ static bool matchCmdArgs(std::string const& str, std::vector<std::string>& match
}
if (str[i] == ',') {
// got a comma => new string
- matchResult.emplace_back(std::string{});
+ matchResult.emplace_back();
continue;
}
// no whitespace no comma, append
@@ -1541,49 +1595,63 @@ static bool generateConfigTag(Node const& n, Config const& config, std::ostream&
if (n == "title") {
out << config.mBenchmarkTitle;
return true;
- } else if (n == "name") {
+ }
+ if (n == "name") {
out << config.mBenchmarkName;
return true;
- } else if (n == "unit") {
+ }
+ if (n == "unit") {
out << config.mUnit;
return true;
- } else if (n == "batch") {
+ }
+ if (n == "batch") {
out << config.mBatch;
return true;
- } else if (n == "complexityN") {
+ }
+ if (n == "complexityN") {
out << config.mComplexityN;
return true;
- } else if (n == "epochs") {
+ }
+ if (n == "epochs") {
out << config.mNumEpochs;
return true;
- } else if (n == "clockResolution") {
+ }
+ if (n == "clockResolution") {
out << d(detail::clockResolution());
return true;
- } else if (n == "clockResolutionMultiple") {
+ }
+ if (n == "clockResolutionMultiple") {
out << config.mClockResolutionMultiple;
return true;
- } else if (n == "maxEpochTime") {
+ }
+ if (n == "maxEpochTime") {
out << d(config.mMaxEpochTime);
return true;
- } else if (n == "minEpochTime") {
+ }
+ if (n == "minEpochTime") {
out << d(config.mMinEpochTime);
return true;
- } else if (n == "minEpochIterations") {
+ }
+ if (n == "minEpochIterations") {
out << config.mMinEpochIterations;
return true;
- } else if (n == "epochIterations") {
+ }
+ if (n == "epochIterations") {
out << config.mEpochIterations;
return true;
- } else if (n == "warmup") {
+ }
+ if (n == "warmup") {
out << config.mWarmup;
return true;
- } else if (n == "relative") {
+ }
+ if (n == "relative") {
out << config.mIsRelative;
return true;
}
return false;
}
+// NOLINTNEXTLINE(readability-function-cognitive-complexity)
static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostream& out) {
if (generateConfigTag(n, r.config(), out)) {
return out;
@@ -1596,6 +1664,10 @@ static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostr
std::vector<std::string> matchResult;
if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
if (matchResult.size() == 2) {
+ if (matchResult[0] == "context") {
+ return out << r.context(matchResult[1]);
+ }
+
auto m = Result::fromString(matchResult[1]);
if (m == Result::Measure::_size) {
return out << 0.0;
@@ -1712,7 +1784,7 @@ template <typename T>
T parseFile(std::string const& filename);
void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations);
-void printStabilityInformationOnce(std::ostream* os);
+void printStabilityInformationOnce(std::ostream* outStream);
// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry.
uint64_t& singletonHeaderHash() noexcept;
@@ -1779,13 +1851,13 @@ private:
};
// helper replacement for std::to_string of signed/unsigned numbers so we are locale independent
-std::string to_s(uint64_t s);
+std::string to_s(uint64_t n);
std::ostream& operator<<(std::ostream& os, Number const& n);
class MarkDownColumn {
public:
- MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val);
+ MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val);
std::string title() const;
std::string separator() const;
std::string invalid() const;
@@ -1823,8 +1895,9 @@ std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode);
namespace ankerl {
namespace nanobench {
+// NOLINTNEXTLINE(readability-function-cognitive-complexity)
void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out) {
- detail::fmt::StreamStateRestorer restorer(out);
+ detail::fmt::StreamStateRestorer const restorer(out);
out.precision(std::numeric_limits<double>::digits10);
auto nodes = templates::parseMustacheTemplate(&mustacheTemplate);
@@ -1905,7 +1978,7 @@ PerformanceCounters& performanceCounters() {
// Windows version of doNotOptimizeAway
// see https://github.com/google/benchmark/blob/master/include/benchmark/benchmark.h#L307
// see https://github.com/facebook/folly/blob/master/folly/Benchmark.h#L280
-// see https://learn.microsoft.com/en-us/cpp/preprocessor/optimize
+// see https://docs.microsoft.com/en-us/cpp/preprocessor/optimize
# if defined(_MSC_VER)
# pragma optimize("", off)
void doNotOptimizeAwaySink(void const*) {}
@@ -1914,7 +1987,7 @@ void doNotOptimizeAwaySink(void const*) {}
template <typename T>
T parseFile(std::string const& filename) {
- std::ifstream fin(filename);
+ std::ifstream fin(filename); // NOLINT(misc-const-correctness)
T num{};
fin >> num;
return num;
@@ -1925,20 +1998,20 @@ char const* getEnv(char const* name) {
# pragma warning(push)
# pragma warning(disable : 4996) // getenv': This function or variable may be unsafe.
# endif
- return std::getenv(name);
+ return std::getenv(name); // NOLINT(concurrency-mt-unsafe)
# if defined(_MSC_VER)
# pragma warning(pop)
# endif
}
bool isEndlessRunning(std::string const& name) {
- auto endless = getEnv("NANOBENCH_ENDLESS");
+ auto const* const endless = getEnv("NANOBENCH_ENDLESS");
return nullptr != endless && endless == name;
}
// True when environment variable NANOBENCH_SUPPRESS_WARNINGS is either not set at all, or set to "0"
bool isWarningsEnabled() {
- auto suppression = getEnv("NANOBENCH_SUPPRESS_WARNINGS");
+ auto const* const suppression = getEnv("NANOBENCH_SUPPRESS_WARNINGS");
return nullptr == suppression || suppression == std::string("0");
}
@@ -1946,11 +2019,11 @@ void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<
warnings.clear();
recommendations.clear();
- bool recommendCheckFlags = false;
-
# if defined(DEBUG)
warnings.emplace_back("DEBUG defined");
- recommendCheckFlags = true;
+ bool const recommendCheckFlags = true;
+# else
+ bool const recommendCheckFlags = false;
# endif
bool recommendPyPerf = false;
@@ -2000,7 +2073,7 @@ void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<
void printStabilityInformationOnce(std::ostream* outStream) {
static bool shouldPrint = true;
- if (shouldPrint && outStream && isWarningsEnabled()) {
+ if (shouldPrint && (nullptr != outStream) && isWarningsEnabled()) {
auto& os = *outStream;
shouldPrint = false;
std::vector<std::string> warnings;
@@ -2050,7 +2123,7 @@ Clock::duration calcClockResolution(size_t numEvaluations) noexcept {
// Calculates clock resolution once, and remembers the result
Clock::duration clockResolution() noexcept {
- static Clock::duration sResolution = calcClockResolution(20);
+ static Clock::duration const sResolution = calcClockResolution(20);
return sResolution;
}
@@ -2183,6 +2256,7 @@ struct IterationLogic::Impl {
<< ", mState=" << static_cast<int>(mState));
}
+ // NOLINTNEXTLINE(readability-function-cognitive-complexity)
void showResult(std::string const& errorMessage) const {
ANKERL_NANOBENCH_LOG(errorMessage);
@@ -2208,7 +2282,7 @@ struct IterationLogic::Impl {
rMedian / (mBench.timeUnit().count() * mBench.batch()));
columns.emplace_back(22, 2, mBench.unit() + "/s", "", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian);
- double rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed);
+ double const rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed);
columns.emplace_back(10, 1, "err%", "%", rErrorMedian * 100.0);
double rInsMedian = -1.0;
@@ -2226,7 +2300,7 @@ struct IterationLogic::Impl {
columns.emplace_back(9, 3, "IPC", "", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian);
}
if (mBench.performanceCounters() && mResult.has(Result::Measure::branchinstructions)) {
- double rBraMedian = mResult.median(Result::Measure::branchinstructions);
+ double const rBraMedian = mResult.median(Result::Measure::branchinstructions);
columns.emplace_back(17, 2, "bra/" + mBench.unit(), "", rBraMedian / mBench.batch());
if (mResult.has(Result::Measure::branchmisses)) {
double p = 0.0;
@@ -2299,25 +2373,22 @@ struct IterationLogic::Impl {
return elapsed * 3 >= mTargetRuntimePerEpoch * 2;
}
- uint64_t mNumIters = 1;
- Bench const& mBench;
- std::chrono::nanoseconds mTargetRuntimePerEpoch{};
- Result mResult;
- Rng mRng{123};
- std::chrono::nanoseconds mTotalElapsed{};
- uint64_t mTotalNumIters = 0;
-
- State mState = State::upscaling_runtime;
+ uint64_t mNumIters = 1; // NOLINT(misc-non-private-member-variables-in-classes)
+ Bench const& mBench; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mTargetRuntimePerEpoch{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ Result mResult; // NOLINT(misc-non-private-member-variables-in-classes)
+ Rng mRng{123}; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mTotalElapsed{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ uint64_t mTotalNumIters = 0; // NOLINT(misc-non-private-member-variables-in-classes)
+ State mState = State::upscaling_runtime; // NOLINT(misc-non-private-member-variables-in-classes)
};
ANKERL_NANOBENCH(IGNORE_PADDED_POP)
-IterationLogic::IterationLogic(Bench const& bench) noexcept
+IterationLogic::IterationLogic(Bench const& bench)
: mPimpl(new Impl(bench)) {}
IterationLogic::~IterationLogic() {
- if (mPimpl) {
- delete mPimpl;
- }
+ delete mPimpl;
}
uint64_t IterationLogic::numIters() const noexcept {
@@ -2344,11 +2415,16 @@ public:
, correctMeasuringOverhead(correctMeasuringOverhead_)
, correctLoopOverhead(correctLoopOverhead_) {}
- uint64_t* targetValue{};
- bool correctMeasuringOverhead{};
- bool correctLoopOverhead{};
+ uint64_t* targetValue{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool correctMeasuringOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool correctLoopOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes)
};
+ LinuxPerformanceCounters() = default;
+ LinuxPerformanceCounters(LinuxPerformanceCounters const&) = delete;
+ LinuxPerformanceCounters(LinuxPerformanceCounters&&) = delete;
+ LinuxPerformanceCounters& operator=(LinuxPerformanceCounters const&) = delete;
+ LinuxPerformanceCounters& operator=(LinuxPerformanceCounters&&) = delete;
~LinuxPerformanceCounters();
// quick operation
@@ -2370,13 +2446,13 @@ public:
return;
}
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
if (mHasError) {
return;
}
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}
@@ -2385,7 +2461,7 @@ public:
return;
}
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP));
if (mHasError) {
return;
@@ -2406,9 +2482,9 @@ public:
ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined")
static inline uint32_t mix(uint32_t x) noexcept {
- x ^= x << 13;
- x ^= x >> 17;
- x ^= x << 5;
+ x ^= x << 13U;
+ x ^= x >> 17U;
+ x ^= x << 5U;
return x;
}
@@ -2448,7 +2524,7 @@ public:
// marsaglia's xorshift: mov, sal/shr, xor. Times 3.
// This has the nice property that the compiler doesn't seem to be able to optimize multiple calls any further.
// see https://godbolt.org/z/49RVQ5
- uint64_t const numIters = 100000U + (std::random_device{}() & 3);
+ uint64_t const numIters = 100000U + (std::random_device{}() & 3U);
uint64_t n = numIters;
uint32_t x = 1234567;
@@ -2582,6 +2658,7 @@ bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target t
const unsigned long flags = 0;
# endif
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
auto fd = static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd, flags));
if (-1 == fd) {
return false;
@@ -2591,7 +2668,7 @@ bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target t
mFd = fd;
}
uint64_t id = 0;
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &id)) {
// couldn't get id
return false;
@@ -2639,9 +2716,8 @@ PerformanceCounters::PerformanceCounters()
}
PerformanceCounters::~PerformanceCounters() {
- if (nullptr != mPc) {
- delete mPc;
- }
+ // no need to check for nullptr; deleting a null pointer has no effect
+ delete mPc;
}
void PerformanceCounters::beginMeasure() {
@@ -2721,7 +2797,7 @@ Number::Number(int width, int precision, double value)
, mValue(value) {}
std::ostream& Number::write(std::ostream& os) const {
- StreamStateRestorer restorer(os);
+ StreamStateRestorer const restorer(os);
os.imbue(std::locale(os.getloc(), new NumSep(',')));
os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue;
return os;
@@ -2747,11 +2823,11 @@ std::ostream& operator<<(std::ostream& os, Number const& n) {
return n.write(os);
}
-MarkDownColumn::MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val)
+MarkDownColumn::MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val)
: mWidth(w)
, mPrecision(prec)
- , mTitle(tit)
- , mSuffix(suff)
+ , mTitle(std::move(tit))
+ , mSuffix(std::move(suff))
, mValue(val) {}
std::string MarkDownColumn::title() const {
@@ -2785,7 +2861,7 @@ std::string MarkDownColumn::value() const {
MarkDownCode::MarkDownCode(std::string const& what) {
mWhat.reserve(what.size() + 2);
mWhat.push_back('`');
- for (char c : what) {
+ for (char const c : what) {
mWhat.push_back(c);
if ('`' == c) {
mWhat.push_back('`');
@@ -2808,14 +2884,14 @@ std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode) {
Config::Config() = default;
Config::~Config() = default;
Config& Config::operator=(Config const&) = default;
-Config& Config::operator=(Config&&) = default;
+Config& Config::operator=(Config&&) noexcept = default;
Config::Config(Config const&) = default;
Config::Config(Config&&) noexcept = default;
// provide implementation here so it's only generated once
Result::~Result() = default;
Result& Result::operator=(Result const&) = default;
-Result& Result::operator=(Result&&) = default;
+Result& Result::operator=(Result&&) noexcept = default;
Result::Result(Result const&) = default;
Result::Result(Result&&) noexcept = default;
@@ -2827,15 +2903,15 @@ inline constexpr typename std::underlying_type<T>::type u(T val) noexcept {
} // namespace detail
// Result returned after a benchmark has finished. Can be used as a baseline for relative().
-Result::Result(Config const& benchmarkConfig)
- : mConfig(benchmarkConfig)
+Result::Result(Config benchmarkConfig)
+ : mConfig(std::move(benchmarkConfig))
, mNameToMeasurements{detail::u(Result::Measure::_size)} {}
void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc) {
using detail::d;
using detail::u;
- double dIters = d(iters);
+ double const dIters = d(iters);
mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters);
mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters);
@@ -2987,27 +3063,41 @@ double Result::maximum(Measure m) const noexcept {
return *std::max_element(data.begin(), data.end());
}
+std::string const& Result::context(char const* variableName) const {
+ return mConfig.mContext.at(variableName);
+}
+
+std::string const& Result::context(std::string const& variableName) const {
+ return mConfig.mContext.at(variableName);
+}
+
Result::Measure Result::fromString(std::string const& str) {
if (str == "elapsed") {
return Measure::elapsed;
- } else if (str == "iterations") {
+ }
+ if (str == "iterations") {
return Measure::iterations;
- } else if (str == "pagefaults") {
+ }
+ if (str == "pagefaults") {
return Measure::pagefaults;
- } else if (str == "cpucycles") {
+ }
+ if (str == "cpucycles") {
return Measure::cpucycles;
- } else if (str == "contextswitches") {
+ }
+ if (str == "contextswitches") {
return Measure::contextswitches;
- } else if (str == "instructions") {
+ }
+ if (str == "instructions") {
return Measure::instructions;
- } else if (str == "branchinstructions") {
+ }
+ if (str == "branchinstructions") {
return Measure::branchinstructions;
- } else if (str == "branchmisses") {
+ }
+ if (str == "branchmisses") {
return Measure::branchmisses;
- } else {
- // not found, return _size
- return Measure::_size;
}
+ // not found, return _size
+ return Measure::_size;
}
// Configuration of a microbenchmark.
@@ -3015,8 +3105,8 @@ Bench::Bench() {
mConfig.mOut = &std::cout;
}
-Bench::Bench(Bench&&) = default;
-Bench& Bench::operator=(Bench&&) = default;
+Bench::Bench(Bench&&) noexcept = default;
+Bench& Bench::operator=(Bench&&) noexcept = default;
Bench::Bench(Bench const&) = default;
Bench& Bench::operator=(Bench const&) = default;
Bench::~Bench() noexcept = default;
@@ -3114,6 +3204,21 @@ std::string const& Bench::name() const noexcept {
return mConfig.mBenchmarkName;
}
+Bench& Bench::context(char const* variableName, char const* variableValue) {
+ mConfig.mContext[variableName] = variableValue;
+ return *this;
+}
+
+Bench& Bench::context(std::string const& variableName, std::string const& variableValue) {
+ mConfig.mContext[variableName] = variableValue;
+ return *this;
+}
+
+Bench& Bench::clearContext() {
+ mConfig.mContext.clear();
+ return *this;
+}
+
// Number of epochs to evaluate. The reported result will be the median of evaluation of each epoch.
Bench& Bench::epochs(size_t numEpochs) noexcept {
mConfig.mNumEpochs = numEpochs;
@@ -3295,27 +3400,27 @@ BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result> const& results)
return rangeMeasure;
}
-BigO::BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure)
- : mName(bigOName) {
+BigO::BigO(std::string bigOName, RangeMeasure const& rangeMeasure)
+ : mName(std::move(bigOName)) {
// estimate the constant factor
double sumRangeMeasure = 0.0;
double sumRangeRange = 0.0;
- for (size_t i = 0; i < rangeMeasure.size(); ++i) {
- sumRangeMeasure += rangeMeasure[i].first * rangeMeasure[i].second;
- sumRangeRange += rangeMeasure[i].first * rangeMeasure[i].first;
+ for (const auto& rm : rangeMeasure) {
+ sumRangeMeasure += rm.first * rm.second;
+ sumRangeRange += rm.first * rm.first;
}
mConstant = sumRangeMeasure / sumRangeRange;
// calculate root mean square
double err = 0.0;
double sumMeasure = 0.0;
- for (size_t i = 0; i < rangeMeasure.size(); ++i) {
- auto diff = mConstant * rangeMeasure[i].first - rangeMeasure[i].second;
+ for (const auto& rm : rangeMeasure) {
+ auto diff = mConstant * rm.first - rm.second;
err += diff * diff;
- sumMeasure += rangeMeasure[i].second;
+ sumMeasure += rm.second;
}
auto n = static_cast<double>(rangeMeasure.size());
@@ -3347,7 +3452,7 @@ std::ostream& operator<<(std::ostream& os, BigO const& bigO) {
}
std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs) {
- detail::fmt::StreamStateRestorer restorer(os);
+ detail::fmt::StreamStateRestorer const restorer(os);
os << std::endl << "| coefficient | err% | complexity" << std::endl << "|--------------:|-------:|------------" << std::endl;
for (auto const& bigO : bigOs) {
os << "|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() << " ";
diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp
index ef1ea1162b..59c4af086e 100644
--- a/src/bench/prevector.cpp
+++ b/src/bench/prevector.cpp
@@ -61,7 +61,7 @@ static void PrevectorResize(benchmark::Bench& bench)
template <typename T>
static void PrevectorDeserialize(benchmark::Bench& bench)
{
- CDataStream s0(SER_NETWORK, 0);
+ DataStream s0{};
prevector<28, T> t0;
t0.resize(28);
for (auto x = 0; x < 900; ++x) {
diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp
index ea272b2120..d5d057e96d 100644
--- a/src/bench/wallet_balance.cpp
+++ b/src/bench/wallet_balance.cpp
@@ -28,7 +28,7 @@ static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const b
const auto& ADDRESS_WATCHONLY = ADDRESS_BCRT1_UNSPENDABLE;
- CWallet wallet{test_setup->m_node.chain.get(), "", gArgs, CreateMockWalletDatabase()};
+ CWallet wallet{test_setup->m_node.chain.get(), "", CreateMockWalletDatabase()};
{
LOCK(wallet.cs_wallet);
wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
diff --git a/src/bench/wallet_create_tx.cpp b/src/bench/wallet_create_tx.cpp
index 820c9d5d50..bd32a5abdc 100644
--- a/src/bench/wallet_create_tx.cpp
+++ b/src/bench/wallet_create_tx.cpp
@@ -83,7 +83,7 @@ static void WalletCreateTx(benchmark::Bench& bench, const OutputType output_type
{
const auto test_setup = MakeNoLogFileContext<const TestingSetup>();
- CWallet wallet{test_setup->m_node.chain.get(), "", gArgs, CreateMockWalletDatabase()};
+ CWallet wallet{test_setup->m_node.chain.get(), "", CreateMockWalletDatabase()};
{
LOCK(wallet.cs_wallet);
wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -136,7 +136,7 @@ static void WalletCreateTx(benchmark::Bench& bench, const OutputType output_type
static void AvailableCoins(benchmark::Bench& bench, const std::vector<OutputType>& output_type)
{
const auto test_setup = MakeNoLogFileContext<const TestingSetup>();
- CWallet wallet{test_setup->m_node.chain.get(), "", gArgs, CreateMockWalletDatabase()};
+ CWallet wallet{test_setup->m_node.chain.get(), "", CreateMockWalletDatabase()};
{
LOCK(wallet.cs_wallet);
wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
diff --git a/src/bench/wallet_loading.cpp b/src/bench/wallet_loading.cpp
index 2f7dc53b0c..6b09adcc9d 100644
--- a/src/bench/wallet_loading.cpp
+++ b/src/bench/wallet_loading.cpp
@@ -24,7 +24,7 @@ using wallet::WALLET_FLAG_DESCRIPTORS;
using wallet::WalletContext;
using wallet::WalletDatabase;
-static const std::shared_ptr<CWallet> BenchLoadWallet(std::unique_ptr<WalletDatabase> database, WalletContext& context, DatabaseOptions& options)
+static std::shared_ptr<CWallet> BenchLoadWallet(std::unique_ptr<WalletDatabase> database, WalletContext& context, DatabaseOptions& options)
{
bilingual_str error;
std::vector<bilingual_str> warnings;
diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp
index d972b71a65..423fa79c6f 100644
--- a/src/bitcoin-chainstate.cpp
+++ b/src/bitcoin-chainstate.cpp
@@ -82,6 +82,7 @@ int main(int argc, char* argv[])
// SETUP: Chainstate
const ChainstateManager::Options chainman_opts{
.chainparams = chainparams,
+ .datadir = gArgs.GetDataDirNet(),
.adjusted_time_callback = NodeClock::now,
};
ChainstateManager chainman{chainman_opts};
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index e6e33007d5..df8fb7cece 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -55,7 +55,10 @@ static constexpr int DEFAULT_WAIT_CLIENT_TIMEOUT = 0;
static const bool DEFAULT_NAMED=false;
static const int CONTINUE_EXECUTION=-1;
static constexpr int8_t UNKNOWN_NETWORK{-1};
-static constexpr std::array NETWORKS{"ipv4", "ipv6", "onion", "i2p", "cjdns"};
+// See GetNetworkName() in netbase.cpp
+static constexpr std::array NETWORKS{"not_publicly_routable", "ipv4", "ipv6", "onion", "i2p", "cjdns", "internal"};
+static constexpr std::array NETWORK_SHORT_NAMES{"npr", "ipv4", "ipv6", "onion", "i2p", "cjdns", "int"};
+static constexpr std::array UNREACHABLE_NETWORK_IDS{/*not_publicly_routable*/0, /*internal*/6};
/** Default number of blocks to generate for RPC generatetoaddress. */
static const std::string DEFAULT_NBLOCKS = "1";
@@ -289,7 +292,7 @@ public:
// Prepare result to return to user.
UniValue result{UniValue::VOBJ}, addresses{UniValue::VOBJ};
uint64_t total{0}; // Total address count
- for (size_t i = 0; i < NETWORKS.size(); ++i) {
+ for (size_t i = 1; i < NETWORKS.size() - 1; ++i) {
addresses.pushKV(NETWORKS[i], counts.at(i));
total += counts.at(i);
}
@@ -506,7 +509,7 @@ public:
const bool is_addr_relay_enabled{peer["addr_relay_enabled"].isNull() ? false : peer["addr_relay_enabled"].get_bool()};
const bool is_bip152_hb_from{peer["bip152_hb_from"].get_bool()};
const bool is_bip152_hb_to{peer["bip152_hb_to"].get_bool()};
- m_peers.push_back({addr, sub_version, conn_type, network, age, min_ping, ping, addr_processed, addr_rate_limited, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_addr_relay_enabled, is_bip152_hb_from, is_bip152_hb_to, is_outbound, is_tx_relay});
+ m_peers.push_back({addr, sub_version, conn_type, NETWORK_SHORT_NAMES[network_id], age, min_ping, ping, addr_processed, addr_rate_limited, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_addr_relay_enabled, is_bip152_hb_from, is_bip152_hb_to, is_outbound, is_tx_relay});
m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length);
m_max_addr_processed_length = std::max(ToString(addr_processed).length(), m_max_addr_processed_length);
m_max_addr_rate_limited_length = std::max(ToString(addr_rate_limited).length(), m_max_addr_rate_limited_length);
@@ -571,6 +574,13 @@ public:
reachable_networks.push_back(network_id);
}
};
+
+ for (const size_t network_id : UNREACHABLE_NETWORK_IDS) {
+ if (m_counts.at(2).at(network_id) == 0) continue;
+ result += strprintf("%8s", NETWORK_SHORT_NAMES.at(network_id)); // column header
+ reachable_networks.push_back(network_id);
+ }
+
result += " total block";
if (m_manual_peers_count) result += " manual";
@@ -636,7 +646,7 @@ public:
" \"manual\" - peer we manually added using RPC addnode or the -addnode/-connect config options\n"
" \"feeler\" - short-lived connection for testing addresses\n"
" \"addr\" - address fetch; short-lived connection for requesting addresses\n"
- " net Network the peer connected through (\"ipv4\", \"ipv6\", \"onion\", \"i2p\", or \"cjdns\")\n"
+ " net Network the peer connected through (\"ipv4\", \"ipv6\", \"onion\", \"i2p\", \"cjdns\", or \"npr\" (not publicly routable))\n"
" mping Minimum observed ping time, in milliseconds (ms)\n"
" ping Last observed ping time, in milliseconds (ms)\n"
" send Time since last message sent to the peer, in seconds\n"
diff --git a/src/bitcoin-util.cpp b/src/bitcoin-util.cpp
index 7327875b64..61d4b9c6f1 100644
--- a/src/bitcoin-util.cpp
+++ b/src/bitcoin-util.cpp
@@ -139,7 +139,7 @@ static int Grind(const std::vector<std::string>& args, std::string& strPrint)
return EXIT_FAILURE;
}
- CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ss{};
ss << header;
strPrint = HexStr(ss);
return EXIT_SUCCESS;
diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp
index bcb86d75cc..a29e4f794e 100644
--- a/src/blockencodings.cpp
+++ b/src/blockencodings.cpp
@@ -29,7 +29,7 @@ CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) :
}
void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << header << nonce;
CSHA256 hasher;
hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
@@ -52,7 +52,8 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
return READ_STATUS_INVALID;
- assert(header.IsNull() && txn_available.empty());
+ if (!header.IsNull() || !txn_available.empty()) return READ_STATUS_INVALID;
+
header = cmpctblock.header;
txn_available.resize(cmpctblock.BlockTxCount());
@@ -167,14 +168,18 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
return READ_STATUS_OK;
}
-bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const {
- assert(!header.IsNull());
+bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
+{
+ if (header.IsNull()) return false;
+
assert(index < txn_available.size());
return txn_available[index] != nullptr;
}
-ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) {
- assert(!header.IsNull());
+ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
+{
+ if (header.IsNull()) return READ_STATUS_INVALID;
+
uint256 hash = header.GetHash();
block = header;
block.vtx.resize(txn_available.size());
@@ -197,7 +202,8 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
return READ_STATUS_INVALID;
BlockValidationState state;
- if (!CheckBlock(block, state, Params().GetConsensus())) {
+ CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;
+ if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
// TODO: We really want to just check merkle tree manually here,
// but that is expensive, and CheckBlock caches a block's
// "checked-status" (in the CBlock?). CBlock should be able to
diff --git a/src/blockencodings.h b/src/blockencodings.h
index e60c1e3db4..afdfa426f1 100644
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -7,8 +7,13 @@
#include <primitives/block.h>
+#include <functional>
class CTxMemPool;
+class BlockValidationState;
+namespace Consensus {
+struct Params;
+};
// Transaction compression schemes for compact block relay can be introduced by writing
// an actual formatter here.
@@ -129,6 +134,11 @@ protected:
const CTxMemPool* pool;
public:
CBlockHeader header;
+
+ // Can be overridden for testing
+ using CheckBlockFn = std::function<bool(const CBlock&, BlockValidationState&, const Consensus::Params&, bool, bool)>;
+ CheckBlockFn m_check_block_mock{nullptr};
+
explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {}
// extra_txn is a list of extra transactions to look at, in <witness hash, reference> form
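The new m_check_block_mock member lets a test substitute its own validation function for CheckBlock when FillBlock() runs. A hedged sketch of such an override; the mempool object is assumed to come from the surrounding test fixture, and the always-true lambda is purely illustrative:

PartiallyDownloadedBlock pdb{&mempool};
pdb.m_check_block_mock = [](const CBlock&, BlockValidationState&, const Consensus::Params&,
                            bool /*fCheckPOW*/, bool /*fCheckMerkleRoot*/) {
    return true; // pretend every reconstructed block passes CheckBlock
};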
diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp
index fc6dde20f9..88c7526b9e 100644
--- a/src/blockfilter.cpp
+++ b/src/blockfilter.cpp
@@ -247,21 +247,10 @@ bool BlockFilter::BuildParams(GCSFilter::Params& params) const
uint256 BlockFilter::GetHash() const
{
- const std::vector<unsigned char>& data = GetEncodedFilter();
-
- uint256 result;
- CHash256().Write(data).Finalize(result);
- return result;
+ return Hash(GetEncodedFilter());
}
uint256 BlockFilter::ComputeHeader(const uint256& prev_header) const
{
- const uint256& filter_hash = GetHash();
-
- uint256 result;
- CHash256()
- .Write(filter_hash)
- .Write(prev_header)
- .Finalize(result);
- return result;
+ return Hash(GetHash(), prev_header);
}
diff --git a/src/coins.cpp b/src/coins.cpp
index 976118e23c..5a6ae525a7 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -13,7 +13,7 @@
bool CCoinsView::GetCoin(const COutPoint &outpoint, Coin &coin) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
std::vector<uint256> CCoinsView::GetHeadBlocks() const { return std::vector<uint256>(); }
-bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return false; }
+bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) { return false; }
std::unique_ptr<CCoinsViewCursor> CCoinsView::Cursor() const { return nullptr; }
bool CCoinsView::HaveCoin(const COutPoint &outpoint) const
@@ -28,11 +28,14 @@ bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
std::vector<uint256> CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
-bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return base->BatchWrite(mapCoins, hashBlock); }
+bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) { return base->BatchWrite(mapCoins, hashBlock, erase); }
std::unique_ptr<CCoinsViewCursor> CCoinsViewBacked::Cursor() const { return base->Cursor(); }
size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); }
-CCoinsViewCache::CCoinsViewCache(CCoinsView *baseIn) : CCoinsViewBacked(baseIn), cachedCoinsUsage(0) {}
+CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn, bool deterministic) :
+ CCoinsViewBacked(baseIn), m_deterministic(deterministic),
+ cacheCoins(0, SaltedOutpointHasher(/*deterministic=*/deterministic))
+{}
size_t CCoinsViewCache::DynamicMemoryUsage() const {
return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage;
@@ -176,8 +179,10 @@ void CCoinsViewCache::SetBestBlock(const uint256 &hashBlockIn) {
hashBlock = hashBlockIn;
}
-bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn) {
- for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); it = mapCoins.erase(it)) {
+bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn, bool erase) {
+ for (CCoinsMap::iterator it = mapCoins.begin();
+ it != mapCoins.end();
+ it = erase ? mapCoins.erase(it) : std::next(it)) {
// Ignore non-dirty entries (optimization).
if (!(it->second.flags & CCoinsCacheEntry::DIRTY)) {
continue;
@@ -190,7 +195,14 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn
// Create the coin in the parent cache, move the data up
// and mark it as dirty.
CCoinsCacheEntry& entry = cacheCoins[it->first];
- entry.coin = std::move(it->second.coin);
+ if (erase) {
+ // The `move` call here is purely an optimization; we rely on the
+ // `mapCoins.erase` call in the `for` expression to actually remove
+ // the entry from the child map.
+ entry.coin = std::move(it->second.coin);
+ } else {
+ entry.coin = it->second.coin;
+ }
cachedCoinsUsage += entry.coin.DynamicMemoryUsage();
entry.flags = CCoinsCacheEntry::DIRTY;
// We can mark it FRESH in the parent if it was FRESH in the child
@@ -218,7 +230,14 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn
} else {
// A normal modification.
cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage();
- itUs->second.coin = std::move(it->second.coin);
+ if (erase) {
+ // The `move` call here is purely an optimization; we rely on the
+ // `mapCoins.erase` call in the `for` expression to actually remove
+ // the entry from the child map.
+ itUs->second.coin = std::move(it->second.coin);
+ } else {
+ itUs->second.coin = it->second.coin;
+ }
cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage();
itUs->second.flags |= CCoinsCacheEntry::DIRTY;
// NOTE: It isn't safe to mark the coin as FRESH in the parent
@@ -233,12 +252,32 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn
}
bool CCoinsViewCache::Flush() {
- bool fOk = base->BatchWrite(cacheCoins, hashBlock);
- cacheCoins.clear();
+ bool fOk = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/true);
+ if (fOk && !cacheCoins.empty()) {
+ /* BatchWrite must erase all cacheCoins elements when erase=true. */
+ throw std::logic_error("Not all cached coins were erased");
+ }
cachedCoinsUsage = 0;
return fOk;
}
+bool CCoinsViewCache::Sync()
+{
+ bool fOk = base->BatchWrite(cacheCoins, hashBlock, /*erase=*/false);
+ // Instead of clearing `cacheCoins` as we would in Flush(), just clear the
+ // FRESH/DIRTY flags of any coin that isn't spent.
+ for (auto it = cacheCoins.begin(); it != cacheCoins.end(); ) {
+ if (it->second.coin.IsSpent()) {
+ cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
+ it = cacheCoins.erase(it);
+ } else {
+ it->second.flags = 0;
+ ++it;
+ }
+ }
+ return fOk;
+}
+
void CCoinsViewCache::Uncache(const COutPoint& hash)
{
CCoinsMap::iterator it = cacheCoins.find(hash);
@@ -275,7 +314,24 @@ void CCoinsViewCache::ReallocateCache()
// Cache should be empty when we're calling this.
assert(cacheCoins.size() == 0);
cacheCoins.~CCoinsMap();
- ::new (&cacheCoins) CCoinsMap();
+ ::new (&cacheCoins) CCoinsMap(0, SaltedOutpointHasher(/*deterministic=*/m_deterministic));
+}
+
+void CCoinsViewCache::SanityCheck() const
+{
+ size_t recomputed_usage = 0;
+ for (const auto& [_, entry] : cacheCoins) {
+ unsigned attr = 0;
+ if (entry.flags & CCoinsCacheEntry::DIRTY) attr |= 1;
+ if (entry.flags & CCoinsCacheEntry::FRESH) attr |= 2;
+ if (entry.coin.IsSpent()) attr |= 4;
+ // Only 5 combinations are possible.
+ assert(attr != 2 && attr != 4 && attr != 7);
+
+ // Recompute cachedCoinsUsage.
+ recomputed_usage += entry.coin.DynamicMemoryUsage();
+ }
+ assert(recomputed_usage == cachedCoinsUsage);
}
static const size_t MIN_TRANSACTION_OUTPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxOut(), PROTOCOL_VERSION);
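Together with the Flush() path above, the new Sync() gives a cache two ways to push its DIRTY entries to the parent: Flush() wipes the child afterwards, while Sync() keeps unspent entries cached (with flags cleared) and only drops spent ones. A minimal sketch, assuming a parent view named base and an outpoint op already added to the child:

CCoinsViewCache cache{&base};
// ... AddCoin(op, ...) / SpendCoin(...) on cache ...
cache.Sync();                          // written to base, but op stays cached
assert(cache.HaveCoinInCache(op));
cache.Flush();                         // written to base, child cache is emptied
assert(!cache.HaveCoinInCache(op));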
diff --git a/src/coins.h b/src/coins.h
index b0d6bdf333..dd336b210a 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -176,7 +176,7 @@ public:
//! Do a bulk modification (multiple Coin changes + BestBlock change).
//! The passed mapCoins can be modified.
- virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock);
+ virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true);
//! Get a cursor to iterate over the whole state
virtual std::unique_ptr<CCoinsViewCursor> Cursor() const;
@@ -202,7 +202,7 @@ public:
uint256 GetBestBlock() const override;
std::vector<uint256> GetHeadBlocks() const override;
void SetBackend(CCoinsView &viewIn);
- bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
+ bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override;
std::unique_ptr<CCoinsViewCursor> Cursor() const override;
size_t EstimateSize() const override;
};
@@ -211,6 +211,9 @@ public:
/** CCoinsView that adds a memory cache for transactions to another CCoinsView */
class CCoinsViewCache : public CCoinsViewBacked
{
+private:
+ const bool m_deterministic;
+
protected:
/**
* Make mutable so that we can "fill the cache" even from Get-methods
@@ -220,10 +223,10 @@ protected:
mutable CCoinsMap cacheCoins;
/* Cached dynamic memory usage for the inner Coin objects. */
- mutable size_t cachedCoinsUsage;
+ mutable size_t cachedCoinsUsage{0};
public:
- CCoinsViewCache(CCoinsView *baseIn);
+ CCoinsViewCache(CCoinsView *baseIn, bool deterministic = false);
/**
* By deleting the copy constructor, we prevent accidentally using it when one intends to create a cache on top of a base cache.
@@ -235,7 +238,7 @@ public:
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
void SetBestBlock(const uint256 &hashBlock);
- bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
+ bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override;
std::unique_ptr<CCoinsViewCursor> Cursor() const override {
throw std::logic_error("CCoinsViewCache cursor iteration not supported.");
}
@@ -282,13 +285,23 @@ public:
bool SpendCoin(const COutPoint &outpoint, Coin* moveto = nullptr);
/**
- * Push the modifications applied to this cache to its base.
- * Failure to call this method before destruction will cause the changes to be forgotten.
+ * Push the modifications applied to this cache to its base and wipe local state.
+ * Failure to call this method or Sync() before destruction will cause the changes
+ * to be forgotten.
* If false is returned, the state of this cache (and its backing view) will be undefined.
*/
bool Flush();
/**
+ * Push the modifications applied to this cache to its base while retaining
+ * the contents of this cache (except for spent coins, which we erase).
+ * Failure to call this method or Flush() before destruction will cause the changes
+ * to be forgotten.
+ * If false is returned, the state of this cache (and its backing view) will be undefined.
+ */
+ bool Sync();
+
+ /**
* Removes the UTXO with the given outpoint from the cache, if it is
* not modified.
*/
@@ -310,6 +323,9 @@ public:
//! See: https://stackoverflow.com/questions/42114044/how-to-release-unordered-map-memory
void ReallocateCache();
+ //! Run an internal sanity check on the cache data structure.
+ void SanityCheck() const;
+
private:
/**
* @note this is marked const, but may actually append to `cacheCoins`, increasing
diff --git a/src/common/bloom.cpp b/src/common/bloom.cpp
index 3ba0414b31..fd3276b5a7 100644
--- a/src/common/bloom.cpp
+++ b/src/common/bloom.cpp
@@ -60,7 +60,7 @@ void CBloomFilter::insert(Span<const unsigned char> vKey)
void CBloomFilter::insert(const COutPoint& outpoint)
{
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << outpoint;
insert(MakeUCharSpan(stream));
}
@@ -81,7 +81,7 @@ bool CBloomFilter::contains(Span<const unsigned char> vKey) const
bool CBloomFilter::contains(const COutPoint& outpoint) const
{
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << outpoint;
return contains(MakeUCharSpan(stream));
}
diff --git a/src/consensus/params.h b/src/consensus/params.h
index be92556611..25f53eb620 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -11,6 +11,7 @@
#include <chrono>
#include <limits>
#include <map>
+#include <vector>
namespace Consensus {
diff --git a/src/core_io.h b/src/core_io.h
index 33e1ad82fc..997f3bfd5b 100644
--- a/src/core_io.h
+++ b/src/core_io.h
@@ -15,6 +15,7 @@ class CBlockHeader;
class CScript;
class CTransaction;
struct CMutableTransaction;
+class SigningProvider;
class uint256;
class UniValue;
class CTxUndo;
@@ -52,7 +53,7 @@ UniValue ValueFromAmount(const CAmount amount);
std::string FormatScript(const CScript& script);
std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags = 0);
std::string SighashToStr(unsigned char sighash_type);
-void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex = true, bool include_address = false);
+void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex = true, bool include_address = false, const SigningProvider* provider = nullptr);
void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry, bool include_hex = true, int serialize_flags = 0, const CTxUndo* txundo = nullptr, TxVerbosity verbosity = TxVerbosity::SHOW_DETAILS);
#endif // BITCOIN_CORE_IO_H
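The extra SigningProvider parameter only affects the inferred descriptor: with the default nullptr the dummy provider is used as before, while a caller that has keys (for example a wallet's solving provider) can pass its own so InferDescriptor can fill in what it knows. Sketch, with script and provider as placeholders:

UniValue o{UniValue::VOBJ};
ScriptToUniv(script, o, /*include_hex=*/true, /*include_address=*/true);            // old behaviour
ScriptToUniv(script, o, /*include_hex=*/true, /*include_address=*/true, &provider); // descriptor inferred with caller's keys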
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 7bab171c89..84cd559b7f 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -207,7 +207,7 @@ bool DecodeHexBlockHeader(CBlockHeader& header, const std::string& hex_header)
if (!IsHex(hex_header)) return false;
const std::vector<unsigned char> header_data{ParseHex(hex_header)};
- CDataStream ser_header(header_data, SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ser_header{header_data};
try {
ser_header >> header;
} catch (const std::exception&) {
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 91a6eb2864..b0e3b0b3c4 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -147,13 +147,13 @@ std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags)
return HexStr(ssTx);
}
-void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex, bool include_address)
+void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex, bool include_address, const SigningProvider* provider)
{
CTxDestination address;
out.pushKV("asm", ScriptToAsmStr(script));
if (include_address) {
- out.pushKV("desc", InferDescriptor(script, DUMMY_SIGNING_PROVIDER)->ToString());
+ out.pushKV("desc", InferDescriptor(script, provider ? *provider : DUMMY_SIGNING_PROVIDER)->ToString());
}
if (include_hex) {
out.pushKV("hex", HexStr(script));
@@ -170,6 +170,8 @@ void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex, bool i
void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry, bool include_hex, int serialize_flags, const CTxUndo* txundo, TxVerbosity verbosity)
{
+ CHECK_NONFATAL(verbosity >= TxVerbosity::SHOW_DETAILS);
+
entry.pushKV("txid", tx.GetHash().GetHex());
entry.pushKV("hash", tx.GetWitnessHash().GetHex());
// Transaction version is actually unsigned in consensus checks, just signed in memory,
diff --git a/src/crypto/chacha20.cpp b/src/crypto/chacha20.cpp
index 25d7baa8cc..6934cef163 100644
--- a/src/crypto/chacha20.cpp
+++ b/src/crypto/chacha20.cpp
@@ -8,6 +8,7 @@
#include <crypto/common.h>
#include <crypto/chacha20.h>
+#include <algorithm>
#include <string.h>
constexpr static inline uint32_t rotl32(uint32_t v, int c) { return (v << c) | (v >> (32 - c)); }
@@ -20,95 +21,69 @@ constexpr static inline uint32_t rotl32(uint32_t v, int c) { return (v << c) | (
#define REPEAT10(a) do { {a}; {a}; {a}; {a}; {a}; {a}; {a}; {a}; {a}; {a}; } while(0)
-static const unsigned char sigma[] = "expand 32-byte k";
-static const unsigned char tau[] = "expand 16-byte k";
-
-void ChaCha20::SetKey(const unsigned char* k, size_t keylen)
+void ChaCha20Aligned::SetKey32(const unsigned char* k)
{
- const unsigned char *constants;
-
- input[4] = ReadLE32(k + 0);
- input[5] = ReadLE32(k + 4);
- input[6] = ReadLE32(k + 8);
- input[7] = ReadLE32(k + 12);
- if (keylen == 32) { /* recommended */
- k += 16;
- constants = sigma;
- } else { /* keylen == 16 */
- constants = tau;
- }
- input[8] = ReadLE32(k + 0);
- input[9] = ReadLE32(k + 4);
- input[10] = ReadLE32(k + 8);
- input[11] = ReadLE32(k + 12);
- input[0] = ReadLE32(constants + 0);
- input[1] = ReadLE32(constants + 4);
- input[2] = ReadLE32(constants + 8);
- input[3] = ReadLE32(constants + 12);
- input[12] = 0;
- input[13] = 0;
- input[14] = 0;
- input[15] = 0;
+ input[0] = ReadLE32(k + 0);
+ input[1] = ReadLE32(k + 4);
+ input[2] = ReadLE32(k + 8);
+ input[3] = ReadLE32(k + 12);
+ input[4] = ReadLE32(k + 16);
+ input[5] = ReadLE32(k + 20);
+ input[6] = ReadLE32(k + 24);
+ input[7] = ReadLE32(k + 28);
+ input[8] = 0;
+ input[9] = 0;
+ input[10] = 0;
+ input[11] = 0;
}
-ChaCha20::ChaCha20()
+ChaCha20Aligned::ChaCha20Aligned()
{
memset(input, 0, sizeof(input));
}
-ChaCha20::ChaCha20(const unsigned char* k, size_t keylen)
+ChaCha20Aligned::ChaCha20Aligned(const unsigned char* key32)
{
- SetKey(k, keylen);
+ SetKey32(key32);
}
-void ChaCha20::SetIV(uint64_t iv)
+void ChaCha20Aligned::SetIV(uint64_t iv)
{
- input[14] = iv;
- input[15] = iv >> 32;
+ input[10] = iv;
+ input[11] = iv >> 32;
}
-void ChaCha20::Seek(uint64_t pos)
+void ChaCha20Aligned::Seek64(uint64_t pos)
{
- input[12] = pos;
- input[13] = pos >> 32;
+ input[8] = pos;
+ input[9] = pos >> 32;
}
-void ChaCha20::Keystream(unsigned char* c, size_t bytes)
+inline void ChaCha20Aligned::Keystream64(unsigned char* c, size_t blocks)
{
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
- uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
- unsigned char *ctarget = nullptr;
- unsigned char tmp[64];
- unsigned int i;
-
- if (!bytes) return;
-
- j0 = input[0];
- j1 = input[1];
- j2 = input[2];
- j3 = input[3];
- j4 = input[4];
- j5 = input[5];
- j6 = input[6];
- j7 = input[7];
- j8 = input[8];
- j9 = input[9];
- j10 = input[10];
- j11 = input[11];
- j12 = input[12];
- j13 = input[13];
- j14 = input[14];
- j15 = input[15];
+ uint32_t j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
+
+ if (!blocks) return;
+
+ j4 = input[0];
+ j5 = input[1];
+ j6 = input[2];
+ j7 = input[3];
+ j8 = input[4];
+ j9 = input[5];
+ j10 = input[6];
+ j11 = input[7];
+ j12 = input[8];
+ j13 = input[9];
+ j14 = input[10];
+ j15 = input[11];
for (;;) {
- if (bytes < 64) {
- ctarget = c;
- c = tmp;
- }
- x0 = j0;
- x1 = j1;
- x2 = j2;
- x3 = j3;
+ x0 = 0x61707865;
+ x1 = 0x3320646e;
+ x2 = 0x79622d32;
+ x3 = 0x6b206574;
x4 = j4;
x5 = j5;
x6 = j6;
@@ -134,10 +109,10 @@ void ChaCha20::Keystream(unsigned char* c, size_t bytes)
QUARTERROUND( x3, x4, x9,x14);
);
- x0 += j0;
- x1 += j1;
- x2 += j2;
- x3 += j3;
+ x0 += 0x61707865;
+ x1 += 0x3320646e;
+ x2 += 0x79622d32;
+ x3 += 0x6b206574;
x4 += j4;
x5 += j5;
x6 += j6;
@@ -171,59 +146,41 @@ void ChaCha20::Keystream(unsigned char* c, size_t bytes)
WriteLE32(c + 56, x14);
WriteLE32(c + 60, x15);
- if (bytes <= 64) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i) ctarget[i] = c[i];
- }
- input[12] = j12;
- input[13] = j13;
+ if (blocks == 1) {
+ input[8] = j12;
+ input[9] = j13;
return;
}
- bytes -= 64;
+ blocks -= 1;
c += 64;
}
}
-void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
+inline void ChaCha20Aligned::Crypt64(const unsigned char* m, unsigned char* c, size_t blocks)
{
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
- uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
- unsigned char *ctarget = nullptr;
- unsigned char tmp[64];
- unsigned int i;
-
- if (!bytes) return;
-
- j0 = input[0];
- j1 = input[1];
- j2 = input[2];
- j3 = input[3];
- j4 = input[4];
- j5 = input[5];
- j6 = input[6];
- j7 = input[7];
- j8 = input[8];
- j9 = input[9];
- j10 = input[10];
- j11 = input[11];
- j12 = input[12];
- j13 = input[13];
- j14 = input[14];
- j15 = input[15];
+ uint32_t j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
+
+ if (!blocks) return;
+
+ j4 = input[0];
+ j5 = input[1];
+ j6 = input[2];
+ j7 = input[3];
+ j8 = input[4];
+ j9 = input[5];
+ j10 = input[6];
+ j11 = input[7];
+ j12 = input[8];
+ j13 = input[9];
+ j14 = input[10];
+ j15 = input[11];
for (;;) {
- if (bytes < 64) {
- // if m has fewer than 64 bytes available, copy m to tmp and
- // read from tmp instead
- for (i = 0;i < bytes;++i) tmp[i] = m[i];
- m = tmp;
- ctarget = c;
- c = tmp;
- }
- x0 = j0;
- x1 = j1;
- x2 = j2;
- x3 = j3;
+ x0 = 0x61707865;
+ x1 = 0x3320646e;
+ x2 = 0x79622d32;
+ x3 = 0x6b206574;
x4 = j4;
x5 = j5;
x6 = j6;
@@ -249,10 +206,10 @@ void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
QUARTERROUND( x3, x4, x9,x14);
);
- x0 += j0;
- x1 += j1;
- x2 += j2;
- x3 += j3;
+ x0 += 0x61707865;
+ x1 += 0x3320646e;
+ x2 += 0x79622d32;
+ x3 += 0x6b206574;
x4 += j4;
x5 += j5;
x6 += j6;
@@ -303,16 +260,65 @@ void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
WriteLE32(c + 56, x14);
WriteLE32(c + 60, x15);
- if (bytes <= 64) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i) ctarget[i] = c[i];
- }
- input[12] = j12;
- input[13] = j13;
+ if (blocks == 1) {
+ input[8] = j12;
+ input[9] = j13;
return;
}
- bytes -= 64;
+ blocks -= 1;
c += 64;
m += 64;
}
}
+
+void ChaCha20::Keystream(unsigned char* c, size_t bytes)
+{
+ if (!bytes) return;
+ if (m_bufleft) {
+ unsigned reuse = std::min<size_t>(m_bufleft, bytes);
+ memcpy(c, m_buffer + 64 - m_bufleft, reuse);
+ m_bufleft -= reuse;
+ bytes -= reuse;
+ c += reuse;
+ }
+ if (bytes >= 64) {
+ size_t blocks = bytes / 64;
+ m_aligned.Keystream64(c, blocks);
+ c += blocks * 64;
+ bytes -= blocks * 64;
+ }
+ if (bytes) {
+ m_aligned.Keystream64(m_buffer, 1);
+ memcpy(c, m_buffer, bytes);
+ m_bufleft = 64 - bytes;
+ }
+}
+
+void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
+{
+ if (!bytes) return;
+ if (m_bufleft) {
+ unsigned reuse = std::min<size_t>(m_bufleft, bytes);
+ for (unsigned i = 0; i < reuse; i++) {
+ c[i] = m[i] ^ m_buffer[64 - m_bufleft + i];
+ }
+ m_bufleft -= reuse;
+ bytes -= reuse;
+ c += reuse;
+ m += reuse;
+ }
+ if (bytes >= 64) {
+ size_t blocks = bytes / 64;
+ m_aligned.Crypt64(m, c, blocks);
+ c += blocks * 64;
+ m += blocks * 64;
+ bytes -= blocks * 64;
+ }
+ if (bytes) {
+ m_aligned.Keystream64(m_buffer, 1);
+ for (unsigned i = 0; i < bytes; i++) {
+ c[i] = m[i] ^ m_buffer[i];
+ }
+ m_bufleft = 64 - bytes;
+ }
+}
diff --git a/src/crypto/chacha20.h b/src/crypto/chacha20.h
index 624c083191..b286ef59fe 100644
--- a/src/crypto/chacha20.h
+++ b/src/crypto/chacha20.h
@@ -8,19 +8,69 @@
#include <cstdlib>
#include <stdint.h>
-/** A class for ChaCha20 256-bit stream cipher developed by Daniel J. Bernstein
- https://cr.yp.to/chacha/chacha-20080128.pdf */
+// classes for ChaCha20 256-bit stream cipher developed by Daniel J. Bernstein
+// https://cr.yp.to/chacha/chacha-20080128.pdf
+
+/** ChaCha20 cipher that only operates on multiples of 64 bytes. */
+class ChaCha20Aligned
+{
+private:
+ uint32_t input[12];
+
+public:
+ ChaCha20Aligned();
+
+ /** Initialize a cipher with specified 32-byte key. */
+ ChaCha20Aligned(const unsigned char* key32);
+
+ /** set 32-byte key. */
+ void SetKey32(const unsigned char* key32);
+
+ /** set the 64-bit nonce. */
+ void SetIV(uint64_t iv);
+
+ /** set the 64bit block counter (pos seeks to byte position 64*pos). */
+ void Seek64(uint64_t pos);
+
+ /** outputs the keystream of size <64*blocks> into <c> */
+ void Keystream64(unsigned char* c, size_t blocks);
+
+ /** enciphers the message <input> of length <64*blocks> and writes the enciphered representation into <output>
+ * Used for encryption and decryption (XOR)
+ */
+ void Crypt64(const unsigned char* input, unsigned char* output, size_t blocks);
+};
+
+/** Unrestricted ChaCha20 cipher. */
class ChaCha20
{
private:
- uint32_t input[16];
+ ChaCha20Aligned m_aligned;
+ unsigned char m_buffer[64] = {0};
+ unsigned m_bufleft{0};
public:
- ChaCha20();
- ChaCha20(const unsigned char* key, size_t keylen);
- void SetKey(const unsigned char* key, size_t keylen); //!< set key with flexible keylength; 256bit recommended */
- void SetIV(uint64_t iv); // set the 64bit nonce
- void Seek(uint64_t pos); // set the 64bit block counter
+ ChaCha20() = default;
+
+ /** Initialize a cipher with specified 32-byte key. */
+ ChaCha20(const unsigned char* key32) : m_aligned(key32) {}
+
+ /** set 32-byte key. */
+ void SetKey32(const unsigned char* key32)
+ {
+ m_aligned.SetKey32(key32);
+ m_bufleft = 0;
+ }
+
+ /** set the 64-bit nonce. */
+ void SetIV(uint64_t iv) { m_aligned.SetIV(iv); }
+
+ /** set the 64bit block counter (pos seeks to byte position 64*pos). */
+ void Seek64(uint64_t pos)
+ {
+ m_aligned.Seek64(pos);
+ m_bufleft = 0;
+ }
/** outputs the keystream of size <bytes> into <c> */
void Keystream(unsigned char* c, size_t bytes);
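The split leaves ChaCha20Aligned as the block-only core and turns ChaCha20 into a thin wrapper that buffers a partial keystream block between calls, so arbitrary byte counts keep working across call boundaries. A small sketch; the all-zero key is purely for illustration:

unsigned char key32[32] = {0};
unsigned char out[100];

ChaCha20 c{key32};
c.SetIV(0);
c.Seek64(0);
c.Keystream(out, 100);        // one full block plus 36 bytes; 28 bytes stay buffered
c.Keystream(out, 28);         // served entirely from the buffered remainder

ChaCha20Aligned ca{key32};
unsigned char blocks[2 * 64];
ca.Keystream64(blocks, 2);    // aligned core: whole 64-byte blocks only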
diff --git a/src/crypto/chacha_poly_aead.cpp b/src/crypto/chacha_poly_aead.cpp
index 6511f46adc..119ad6902f 100644
--- a/src/crypto/chacha_poly_aead.cpp
+++ b/src/crypto/chacha_poly_aead.cpp
@@ -36,8 +36,9 @@ ChaCha20Poly1305AEAD::ChaCha20Poly1305AEAD(const unsigned char* K_1, size_t K_1_
assert(K_1_len == CHACHA20_POLY1305_AEAD_KEY_LEN);
assert(K_2_len == CHACHA20_POLY1305_AEAD_KEY_LEN);
- m_chacha_header.SetKey(K_1, CHACHA20_POLY1305_AEAD_KEY_LEN);
- m_chacha_main.SetKey(K_2, CHACHA20_POLY1305_AEAD_KEY_LEN);
+ static_assert(CHACHA20_POLY1305_AEAD_KEY_LEN == 32);
+ m_chacha_header.SetKey32(K_1);
+ m_chacha_main.SetKey32(K_2);
// set the cached sequence number to uint64 max which hints for an unset cache.
// we can't hit uint64 max since the rekey rule (which resets the sequence number) is 1GB
@@ -62,7 +63,7 @@ bool ChaCha20Poly1305AEAD::Crypt(uint64_t seqnr_payload, uint64_t seqnr_aad, int
// block counter 0 for the poly1305 key
// use lower 32bytes for the poly1305 key
// (throws away 32 unused bytes (upper 32) from this ChaCha20 round)
- m_chacha_main.Seek(0);
+ m_chacha_main.Seek64(0);
m_chacha_main.Crypt(poly_key, poly_key, sizeof(poly_key));
// if decrypting, verify the tag prior to decryption
@@ -85,7 +86,7 @@ bool ChaCha20Poly1305AEAD::Crypt(uint64_t seqnr_payload, uint64_t seqnr_aad, int
if (m_cached_aad_seqnr != seqnr_aad) {
m_cached_aad_seqnr = seqnr_aad;
m_chacha_header.SetIV(seqnr_aad);
- m_chacha_header.Seek(0);
+ m_chacha_header.Seek64(0);
m_chacha_header.Keystream(m_aad_keystream_buffer, CHACHA20_ROUND_OUTPUT);
}
// crypt the AAD (3 bytes message length) with given position in AAD cipher instance keystream
@@ -94,7 +95,7 @@ bool ChaCha20Poly1305AEAD::Crypt(uint64_t seqnr_payload, uint64_t seqnr_aad, int
dest[2] = src[2] ^ m_aad_keystream_buffer[aad_pos + 2];
// Set the payload ChaCha instance block counter to 1 and crypt the payload
- m_chacha_main.Seek(1);
+ m_chacha_main.Seek64(1);
m_chacha_main.Crypt(src + CHACHA20_POLY1305_AEAD_AAD_LEN, dest + CHACHA20_POLY1305_AEAD_AAD_LEN, src_len - CHACHA20_POLY1305_AEAD_AAD_LEN);
// If encrypting, calculate and append tag
@@ -117,7 +118,7 @@ bool ChaCha20Poly1305AEAD::GetLength(uint32_t* len24_out, uint64_t seqnr_aad, in
// we need to calculate the 64 keystream bytes since we reached a new aad sequence number
m_cached_aad_seqnr = seqnr_aad;
m_chacha_header.SetIV(seqnr_aad); // use LE for the nonce
- m_chacha_header.Seek(0); // block counter 0
+ m_chacha_header.Seek64(0); // block counter 0
m_chacha_header.Keystream(m_aad_keystream_buffer, CHACHA20_ROUND_OUTPUT); // write keystream to the cache
}
diff --git a/src/crypto/muhash.cpp b/src/crypto/muhash.cpp
index 26f0248663..471ee6af97 100644
--- a/src/crypto/muhash.cpp
+++ b/src/crypto/muhash.cpp
@@ -299,7 +299,7 @@ Num3072 MuHash3072::ToNum3072(Span<const unsigned char> in) {
unsigned char tmp[Num3072::BYTE_SIZE];
uint256 hashed_in{(HashWriter{} << in).GetSHA256()};
- ChaCha20(hashed_in.data(), hashed_in.size()).Keystream(tmp, Num3072::BYTE_SIZE);
+ ChaCha20Aligned(hashed_in.data()).Keystream64(tmp, Num3072::BYTE_SIZE / 64);
Num3072 out{tmp};
return out;
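This switch works because Num3072::BYTE_SIZE is 3072 / 8 = 384 bytes, an exact multiple of the 64-byte ChaCha20 block, so the aligned variant can be asked for 384 / 64 = 6 whole blocks. An illustrative guard, not part of the patch:

static_assert(Num3072::BYTE_SIZE % 64 == 0, "Keystream64() only produces whole 64-byte blocks");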
diff --git a/src/crypto/ripemd160.cpp b/src/crypto/ripemd160.cpp
index 29a4ad906f..a2f7c6e156 100644
--- a/src/crypto/ripemd160.cpp
+++ b/src/crypto/ripemd160.cpp
@@ -239,7 +239,7 @@ void Transform(uint32_t* s, const unsigned char* chunk)
////// RIPEMD160
-CRIPEMD160::CRIPEMD160() : bytes(0)
+CRIPEMD160::CRIPEMD160()
{
ripemd160::Initialize(s);
}
diff --git a/src/crypto/ripemd160.h b/src/crypto/ripemd160.h
index ae9c339181..fb631a66d2 100644
--- a/src/crypto/ripemd160.h
+++ b/src/crypto/ripemd160.h
@@ -14,7 +14,7 @@ class CRIPEMD160
private:
uint32_t s[5];
unsigned char buf[64];
- uint64_t bytes;
+ uint64_t bytes{0};
public:
static const size_t OUTPUT_SIZE = 20;
diff --git a/src/crypto/sha1.cpp b/src/crypto/sha1.cpp
index 1fb9bb2b72..2610108f60 100644
--- a/src/crypto/sha1.cpp
+++ b/src/crypto/sha1.cpp
@@ -146,7 +146,7 @@ void Transform(uint32_t* s, const unsigned char* chunk)
////// SHA1
-CSHA1::CSHA1() : bytes(0)
+CSHA1::CSHA1()
{
sha1::Initialize(s);
}
diff --git a/src/crypto/sha1.h b/src/crypto/sha1.h
index 4bd6c331a8..741cdaad58 100644
--- a/src/crypto/sha1.h
+++ b/src/crypto/sha1.h
@@ -14,7 +14,7 @@ class CSHA1
private:
uint32_t s[5];
unsigned char buf[64];
- uint64_t bytes;
+ uint64_t bytes{0};
public:
static const size_t OUTPUT_SIZE = 20;
diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp
index 7cd5b3661b..a4eef36480 100644
--- a/src/crypto/sha256.cpp
+++ b/src/crypto/sha256.cpp
@@ -673,7 +673,7 @@ std::string SHA256AutoDetect()
////// SHA-256
-CSHA256::CSHA256() : bytes(0)
+CSHA256::CSHA256()
{
sha256::Initialize(s);
}
diff --git a/src/crypto/sha256.h b/src/crypto/sha256.h
index 9fd73becfd..7625508665 100644
--- a/src/crypto/sha256.h
+++ b/src/crypto/sha256.h
@@ -15,7 +15,7 @@ class CSHA256
private:
uint32_t s[8];
unsigned char buf[64];
- uint64_t bytes;
+ uint64_t bytes{0};
public:
static const size_t OUTPUT_SIZE = 32;
diff --git a/src/crypto/sha512.cpp b/src/crypto/sha512.cpp
index 8a822e0e7e..2713f06210 100644
--- a/src/crypto/sha512.cpp
+++ b/src/crypto/sha512.cpp
@@ -151,7 +151,7 @@ void Transform(uint64_t* s, const unsigned char* chunk)
////// SHA-512
-CSHA512::CSHA512() : bytes(0)
+CSHA512::CSHA512()
{
sha512::Initialize(s);
}
diff --git a/src/crypto/sha512.h b/src/crypto/sha512.h
index d8fa8d2e39..d2f7d6a05e 100644
--- a/src/crypto/sha512.h
+++ b/src/crypto/sha512.h
@@ -14,7 +14,7 @@ class CSHA512
private:
uint64_t s[8];
unsigned char buf[128];
- uint64_t bytes;
+ uint64_t bytes{0};
public:
static constexpr size_t OUTPUT_SIZE = 64;
diff --git a/src/cuckoocache.h b/src/cuckoocache.h
index 6adcc74516..cb0b362143 100644
--- a/src/cuckoocache.h
+++ b/src/cuckoocache.h
@@ -166,7 +166,7 @@ private:
std::vector<Element> table;
/** size stores the total available slots in the hash table */
- uint32_t size;
+ uint32_t size{0};
/** The bit_packed_atomic_flags array is marked mutable because we want
* garbage collection to be allowed to occur from const methods */
@@ -183,7 +183,7 @@ private:
* decremented on insert and reset to the new number of inserts which would
* cause the epoch to reach epoch_size when it reaches zero.
*/
- uint32_t epoch_heuristic_counter;
+ uint32_t epoch_heuristic_counter{0};
/** epoch_size is set to be the number of elements supposed to be in an
* epoch. When the number of non-erased elements in an epoch
@@ -193,12 +193,12 @@ private:
* one "dead" which has been erased, one "dying" which has been marked to be
* erased next, and one "living" which new inserts add to.
*/
- uint32_t epoch_size;
+ uint32_t epoch_size{0};
/** depth_limit determines how many elements insert should try to replace.
* Should be set to log2(n).
*/
- uint8_t depth_limit;
+ uint8_t depth_limit{0};
/** hash_function is a const instance of the hash function. It cannot be
* static or initialized at call time as it may have internal state (such as
@@ -322,8 +322,7 @@ public:
/** You must always construct a cache with some elements via a subsequent
* call to setup or setup_bytes, otherwise operations may segfault.
*/
- cache() : table(), size(), collection_flags(0), epoch_flags(),
- epoch_heuristic_counter(), epoch_size(), depth_limit(0), hash_function()
+ cache() : table(), collection_flags(0), epoch_flags(), hash_function()
{
}
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index 6efaf2ec19..0c6debfa80 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -127,40 +127,40 @@ static leveldb::Options GetOptions(size_t nCacheSize)
return options;
}
-CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bool fWipe, bool obfuscate)
- : m_name{fs::PathToString(path.stem())}, m_path{path}, m_is_memory{fMemory}
+CDBWrapper::CDBWrapper(const DBParams& params)
+ : m_name{fs::PathToString(params.path.stem())}, m_path{params.path}, m_is_memory{params.memory_only}
{
penv = nullptr;
readoptions.verify_checksums = true;
iteroptions.verify_checksums = true;
iteroptions.fill_cache = false;
syncoptions.sync = true;
- options = GetOptions(nCacheSize);
+ options = GetOptions(params.cache_bytes);
options.create_if_missing = true;
- if (fMemory) {
+ if (params.memory_only) {
penv = leveldb::NewMemEnv(leveldb::Env::Default());
options.env = penv;
} else {
- if (fWipe) {
- LogPrintf("Wiping LevelDB in %s\n", fs::PathToString(path));
- leveldb::Status result = leveldb::DestroyDB(fs::PathToString(path), options);
+ if (params.wipe_data) {
+ LogPrintf("Wiping LevelDB in %s\n", fs::PathToString(params.path));
+ leveldb::Status result = leveldb::DestroyDB(fs::PathToString(params.path), options);
dbwrapper_private::HandleError(result);
}
- TryCreateDirectories(path);
- LogPrintf("Opening LevelDB in %s\n", fs::PathToString(path));
+ TryCreateDirectories(params.path);
+ LogPrintf("Opening LevelDB in %s\n", fs::PathToString(params.path));
}
// PathToString() return value is safe to pass to leveldb open function,
// because on POSIX leveldb passes the byte string directly to ::open(), and
// on Windows it converts from UTF-8 to UTF-16 before calling ::CreateFileW
// (see env_posix.cc and env_windows.cc).
- leveldb::Status status = leveldb::DB::Open(options, fs::PathToString(path), &pdb);
+ leveldb::Status status = leveldb::DB::Open(options, fs::PathToString(params.path), &pdb);
dbwrapper_private::HandleError(status);
LogPrintf("Opened LevelDB successfully\n");
- if (gArgs.GetBoolArg("-forcecompactdb", false)) {
- LogPrintf("Starting database compaction of %s\n", fs::PathToString(path));
+ if (params.options.force_compact) {
+ LogPrintf("Starting database compaction of %s\n", fs::PathToString(params.path));
pdb->CompactRange(nullptr, nullptr);
- LogPrintf("Finished database compaction of %s\n", fs::PathToString(path));
+ LogPrintf("Finished database compaction of %s\n", fs::PathToString(params.path));
}
// The base-case obfuscation key, which is a noop.
@@ -168,7 +168,7 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key);
- if (!key_exists && obfuscate && IsEmpty()) {
+ if (!key_exists && params.obfuscate && IsEmpty()) {
// Initialize non-degenerate obfuscation if it won't upset
// existing, non-obfuscated data.
std::vector<unsigned char> new_key = CreateObfuscateKey();
@@ -177,10 +177,10 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
Write(OBFUSCATE_KEY_KEY, new_key);
obfuscate_key = new_key;
- LogPrintf("Wrote new obfuscate key for %s: %s\n", fs::PathToString(path), HexStr(obfuscate_key));
+ LogPrintf("Wrote new obfuscate key for %s: %s\n", fs::PathToString(params.path), HexStr(obfuscate_key));
}
- LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(path), HexStr(obfuscate_key));
+ LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(params.path), HexStr(obfuscate_key));
}
CDBWrapper::~CDBWrapper()
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index 3d3eee32ce..578d9880ac 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -31,6 +31,29 @@ class Env;
static const size_t DBWRAPPER_PREALLOC_KEY_SIZE = 64;
static const size_t DBWRAPPER_PREALLOC_VALUE_SIZE = 1024;
+//! User-controlled performance and debug options.
+struct DBOptions {
+ //! Compact database on startup.
+ bool force_compact = false;
+};
+
+//! Application-specific storage settings.
+struct DBParams {
+ //! Location in the filesystem where leveldb data will be stored.
+ fs::path path;
+ //! Configures various leveldb cache settings.
+ size_t cache_bytes;
+ //! If true, use leveldb's memory environment.
+ bool memory_only = false;
+ //! If true, remove all existing data.
+ bool wipe_data = false;
+ //! If true, store data obfuscated via simple XOR. If false, XOR with a
+ //! zero'd byte array.
+ bool obfuscate = false;
+ //! Passed-through options.
+ DBOptions options{};
+};
+
class dbwrapper_error : public std::runtime_error
{
public:
@@ -68,16 +91,16 @@ private:
const CDBWrapper &parent;
leveldb::WriteBatch batch;
- CDataStream ssKey;
+ DataStream ssKey{};
CDataStream ssValue;
- size_t size_estimate;
+ size_t size_estimate{0};
public:
/**
* @param[in] _parent CDBWrapper that this batch is to be submitted to
*/
- explicit CDBBatch(const CDBWrapper &_parent) : parent(_parent), ssKey(SER_DISK, CLIENT_VERSION), ssValue(SER_DISK, CLIENT_VERSION), size_estimate(0) { };
+ explicit CDBBatch(const CDBWrapper& _parent) : parent(_parent), ssValue(SER_DISK, CLIENT_VERSION){};
void Clear()
{
@@ -151,7 +174,7 @@ public:
void SeekToFirst();
template<typename K> void Seek(const K& key) {
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size());
@@ -163,7 +186,7 @@ public:
template<typename K> bool GetKey(K& key) {
leveldb::Slice slKey = piter->key();
try {
- CDataStream ssKey{MakeByteSpan(slKey), SER_DISK, CLIENT_VERSION};
+ DataStream ssKey{MakeByteSpan(slKey)};
ssKey >> key;
} catch (const std::exception&) {
return false;
@@ -230,15 +253,7 @@ private:
bool m_is_memory;
public:
- /**
- * @param[in] path Location in the filesystem where leveldb data will be stored.
- * @param[in] nCacheSize Configures various leveldb cache settings.
- * @param[in] fMemory If true, use leveldb's memory environment.
- * @param[in] fWipe If true, remove all existing data.
- * @param[in] obfuscate If true, store data obfuscated via simple XOR. If false, XOR
- * with a zero'd byte array.
- */
- CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory = false, bool fWipe = false, bool obfuscate = false);
+ CDBWrapper(const DBParams& params);
~CDBWrapper();
CDBWrapper(const CDBWrapper&) = delete;
@@ -247,7 +262,7 @@ public:
template <typename K, typename V>
bool Read(const K& key, V& value) const
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size());
@@ -289,7 +304,7 @@ public:
template <typename K>
bool Exists(const K& key) const
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size());
@@ -331,7 +346,7 @@ public:
template<typename K>
size_t EstimateSize(const K& key_begin, const K& key_end) const
{
- CDataStream ssKey1(SER_DISK, CLIENT_VERSION), ssKey2(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey1{}, ssKey2{};
ssKey1.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey1 << key_begin;
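
With the options gathered into DBParams, call sites construct the wrapper with designated initializers and only name the fields they care about; everything else keeps its declared default. A minimal sketch of such a call, assuming a datadir path and cache size chosen elsewhere (the subdirectory name is made up):

    // Illustrative construction; mirrors the call-site pattern adopted in this change.
    CDBWrapper db{DBParams{
        .path = datadir / "example_db",           // hypothetical location under the datadir
        .cache_bytes = size_t{8} << 20,           // 8 MiB leveldb cache
        .obfuscate = true,                        // memory_only and wipe_data keep their defaults
        .options = DBOptions{.force_compact = false},
    }};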
diff --git a/src/external_signer.cpp b/src/external_signer.cpp
index 8a3e17a292..5524b943f4 100644
--- a/src/external_signer.cpp
+++ b/src/external_signer.cpp
@@ -16,7 +16,7 @@
ExternalSigner::ExternalSigner(const std::string& command, const std::string chain, const std::string& fingerprint, const std::string name): m_command(command), m_chain(chain), m_fingerprint(fingerprint), m_name(name) {}
-const std::string ExternalSigner::NetworkArg() const
+std::string ExternalSigner::NetworkArg() const
{
return " --chain " + m_chain;
}
diff --git a/src/external_signer.h b/src/external_signer.h
index e40fd7f010..90f07478e3 100644
--- a/src/external_signer.h
+++ b/src/external_signer.h
@@ -24,7 +24,7 @@ private:
//! Bitcoin mainnet, testnet, etc
std::string m_chain;
- const std::string NetworkArg() const;
+ std::string NetworkArg() const;
public:
//! @param[in] command the command which handles interaction with the external signer
diff --git a/src/fs.cpp b/src/fs.cpp
index 0429b8cd0f..64411fe41f 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -60,36 +60,20 @@ FileLock::~FileLock()
}
}
-static bool IsWSL()
-{
- struct utsname uname_data;
- return uname(&uname_data) == 0 && std::string(uname_data.version).find("Microsoft") != std::string::npos;
-}
-
bool FileLock::TryLock()
{
if (fd == -1) {
return false;
}
- // Exclusive file locking is broken on WSL using fcntl (issue #18622)
- // This workaround can be removed once the bug on WSL is fixed
- static const bool is_wsl = IsWSL();
- if (is_wsl) {
- if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
- reason = GetErrorReason();
- return false;
- }
- } else {
- struct flock lock;
- lock.l_type = F_WRLCK;
- lock.l_whence = SEEK_SET;
- lock.l_start = 0;
- lock.l_len = 0;
- if (fcntl(fd, F_SETLK, &lock) == -1) {
- reason = GetErrorReason();
- return false;
- }
+ struct flock lock;
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0;
+ if (fcntl(fd, F_SETLK, &lock) == -1) {
+ reason = GetErrorReason();
+ return false;
}
return true;
diff --git a/src/fs.h b/src/fs.h
index 1a790e0682..0ece256acb 100644
--- a/src/fs.h
+++ b/src/fs.h
@@ -35,7 +35,7 @@ public:
// Allow path objects arguments for compatibility.
path(std::filesystem::path path) : std::filesystem::path::path(std::move(path)) {}
path& operator=(std::filesystem::path path) { std::filesystem::path::operator=(std::move(path)); return *this; }
- path& operator/=(std::filesystem::path path) { std::filesystem::path::operator/=(std::move(path)); return *this; }
+ path& operator/=(std::filesystem::path path) { std::filesystem::path::operator/=(path); return *this; }
// Allow literal string arguments, which are safe as long as the literals are ASCII.
path(const char* c) : std::filesystem::path(c) {}
diff --git a/src/hash.h b/src/hash.h
index b18a031268..2e3ed11b43 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -6,11 +6,13 @@
#ifndef BITCOIN_HASH_H
#define BITCOIN_HASH_H
+#include <attributes.h>
#include <crypto/common.h>
#include <crypto/ripemd160.h>
#include <crypto/sha256.h>
#include <prevector.h>
#include <serialize.h>
+#include <span.h>
#include <uint256.h>
#include <version.h>
@@ -165,6 +167,39 @@ public:
};
/** Reads data from an underlying stream, while hashing the read data. */
+template <typename Source>
+class HashVerifier : public HashWriter
+{
+private:
+ Source& m_source;
+
+public:
+ explicit HashVerifier(Source& source LIFETIMEBOUND) : m_source{source} {}
+
+ void read(Span<std::byte> dst)
+ {
+ m_source.read(dst);
+ this->write(dst);
+ }
+
+ void ignore(size_t num_bytes)
+ {
+ std::byte data[1024];
+ while (num_bytes > 0) {
+ size_t now = std::min<size_t>(num_bytes, 1024);
+ read({data, now});
+ num_bytes -= now;
+ }
+ }
+
+ template <typename T>
+ HashVerifier<Source>& operator>>(T&& obj)
+ {
+ ::Unserialize(*this, obj);
+ return *this;
+ }
+};
+
template<typename Source>
class CHashVerifier : public CHashWriter
{
@@ -199,6 +234,30 @@ public:
}
};
+/** Writes data to an underlying source stream, while hashing the written data. */
+template <typename Source>
+class HashedSourceWriter : public CHashWriter
+{
+private:
+ Source& m_source;
+
+public:
+ explicit HashedSourceWriter(Source& source LIFETIMEBOUND) : CHashWriter{source.GetType(), source.GetVersion()}, m_source{source} {}
+
+ void write(Span<const std::byte> src)
+ {
+ m_source.write(src);
+ CHashWriter::write(src);
+ }
+
+ template <typename T>
+ HashedSourceWriter& operator<<(const T& obj)
+ {
+ ::Serialize(*this, obj);
+ return *this;
+ }
+};
+
/** Compute the 256-bit hash of an object's serialization. */
template<typename T>
uint256 SerializeHash(const T& obj, int nType=SER_GETHASH, int nVersion=PROTOCOL_VERSION)
@@ -223,4 +282,12 @@ void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char he
*/
HashWriter TaggedHash(const std::string& tag);
+/** Compute the 160-bit RIPEMD-160 hash of an array. */
+inline uint160 RIPEMD160(Span<const unsigned char> data)
+{
+ uint160 result;
+ CRIPEMD160().Write(data.data(), data.size()).Finalize(result.begin());
+ return result;
+}
+
#endif // BITCOIN_HASH_H
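
The new helpers compose the same way the legacy CHashVerifier/CHashWriter pair does: HashVerifier wraps a readable stream so that everything unserialized through it is also fed into the hash, and the free RIPEMD160() helper collapses the usual write-then-finalize sequence into one call. A sketch, assuming a DataStream that already holds a serialized object and a hypothetical serializable type Obj:

    // Illustrative only; variable names and Obj are made up.
    DataStream stream{serialized_bytes};
    HashVerifier verifier{stream};                   // hashes whatever is read through it
    Obj obj;
    verifier >> obj;                                 // unserialize and hash in one pass
    const uint256 digest{verifier.GetSHA256()};

    const uint160 key_id{RIPEMD160(pubkey_bytes)};   // pubkey_bytes: Span<const unsigned char>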
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 33a75df68e..86166a5ca4 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -160,7 +160,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req)
JSONRPCRequest jreq;
jreq.context = context;
- jreq.peerAddr = req->GetPeer().ToString();
+ jreq.peerAddr = req->GetPeer().ToStringAddrPort();
if (!RPCAuthorized(authHeader.second, jreq.authUser)) {
LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr);
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 720f5c9353..4e4f27f1be 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -222,7 +222,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
// Early address-based allow check
if (!ClientAllowed(hreq->GetPeer())) {
LogPrint(BCLog::HTTP, "HTTP request from %s rejected: Client network is not allowed RPC access\n",
- hreq->GetPeer().ToString());
+ hreq->GetPeer().ToStringAddrPort());
hreq->WriteReply(HTTP_FORBIDDEN);
return;
}
@@ -230,13 +230,13 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
// Early reject unknown HTTP methods
if (hreq->GetRequestMethod() == HTTPRequest::UNKNOWN) {
LogPrint(BCLog::HTTP, "HTTP request from %s rejected: Unknown HTTP request method\n",
- hreq->GetPeer().ToString());
+ hreq->GetPeer().ToStringAddrPort());
hreq->WriteReply(HTTP_BAD_METHOD);
return;
}
LogPrint(BCLog::HTTP, "Received a %s request for %s from %s\n",
- RequestMethodString(hreq->GetRequestMethod()), SanitizeString(hreq->GetURI(), SAFE_CHARS_URI).substr(0, 100), hreq->GetPeer().ToString());
+ RequestMethodString(hreq->GetRequestMethod()), SanitizeString(hreq->GetURI(), SAFE_CHARS_URI).substr(0, 100), hreq->GetPeer().ToStringAddrPort());
// Find registered handler for prefix
std::string strURI = hreq->GetURI();
diff --git a/src/i2p.cpp b/src/i2p.cpp
index 586ee649a7..a3bfc23a65 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -206,7 +206,7 @@ bool Session::Connect(const CService& to, Connection& conn, bool& proxy_error)
}
const Reply& lookup_reply =
- SendRequestAndGetReply(*sock, strprintf("NAMING LOOKUP NAME=%s", to.ToStringIP()));
+ SendRequestAndGetReply(*sock, strprintf("NAMING LOOKUP NAME=%s", to.ToStringAddr()));
const std::string& dest = lookup_reply.Get("VALUE");
@@ -233,7 +233,7 @@ bool Session::Connect(const CService& to, Connection& conn, bool& proxy_error)
throw std::runtime_error(strprintf("\"%s\"", connect_reply.full));
} catch (const std::runtime_error& e) {
- Log("Error connecting to %s: %s", to.ToString(), e.what());
+ Log("Error connecting to %s: %s", to.ToStringAddrPort(), e.what());
CheckControlSock();
return false;
}
@@ -302,7 +302,7 @@ std::unique_ptr<Sock> Session::Hello() const
}
if (!ConnectSocketDirectly(m_control_host, *sock, nConnectTimeout, true)) {
- throw std::runtime_error(strprintf("Cannot connect to %s", m_control_host.ToString()));
+ throw std::runtime_error(strprintf("Cannot connect to %s", m_control_host.ToStringAddrPort()));
}
SendRequestAndGetReply(*sock, "HELLO VERSION MIN=3.1 MAX=3.1");
@@ -336,7 +336,7 @@ void Session::GenerateAndSavePrivateKey(const Sock& sock)
{
DestGenerate(sock);
- // umask is set to 077 in init.cpp, which is ok (unless -sysperms is given)
+ // umask is set to 0077 in util/system.cpp, which is ok.
if (!WriteBinaryFile(m_private_key_file,
std::string(m_private_key.begin(), m_private_key.end()))) {
throw std::runtime_error(
@@ -371,7 +371,7 @@ void Session::CreateIfNotCreatedAlready()
const auto session_type = m_transient ? "transient" : "persistent";
const auto session_id = GetRandHash().GetHex().substr(0, 10); // full is overkill, too verbose in the logs
- Log("Creating %s SAM session %s with %s", session_type, session_id, m_control_host.ToString());
+ Log("Creating %s SAM session %s with %s", session_type, session_id, m_control_host.ToStringAddrPort());
auto sock = Hello();
@@ -380,7 +380,9 @@ void Session::CreateIfNotCreatedAlready()
// in the reply in DESTINATION=.
const Reply& reply = SendRequestAndGetReply(
*sock,
- strprintf("SESSION CREATE STYLE=STREAM ID=%s DESTINATION=TRANSIENT SIGNATURE_TYPE=7", session_id));
+ strprintf("SESSION CREATE STYLE=STREAM ID=%s DESTINATION=TRANSIENT SIGNATURE_TYPE=7 "
+ "inbound.quantity=1 outbound.quantity=1",
+ session_id));
m_private_key = DecodeI2PBase64(reply.Get("DESTINATION"));
} else {
@@ -396,7 +398,8 @@ void Session::CreateIfNotCreatedAlready()
const std::string& private_key_b64 = SwapBase64(EncodeBase64(m_private_key));
SendRequestAndGetReply(*sock,
- strprintf("SESSION CREATE STYLE=STREAM ID=%s DESTINATION=%s",
+ strprintf("SESSION CREATE STYLE=STREAM ID=%s DESTINATION=%s "
+ "inbound.quantity=3 outbound.quantity=3",
session_id,
private_key_b64));
}
@@ -408,7 +411,7 @@ void Session::CreateIfNotCreatedAlready()
Log("%s SAM session %s created, my address=%s",
Capitalize(session_type),
m_session_id,
- m_my_addr.ToString());
+ m_my_addr.ToStringAddrPort());
}
std::unique_ptr<Sock> Session::StreamAccept()
diff --git a/src/index/base.cpp b/src/index/base.cpp
index a8b8cbe8a9..6f2ce2efe4 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -8,6 +8,7 @@
#include <kernel/chain.h>
#include <node/blockstorage.h>
#include <node/context.h>
+#include <node/database_args.h>
#include <node/interface_ui.h>
#include <shutdown.h>
#include <tinyformat.h>
@@ -48,7 +49,13 @@ CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
}
BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
- CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate)
+ CDBWrapper{DBParams{
+ .path = path,
+ .cache_bytes = n_cache_size,
+ .memory_only = f_memory,
+ .wipe_data = f_wipe,
+ .obfuscate = f_obfuscate,
+ .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
{}
bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
@@ -415,8 +422,9 @@ IndexSummary BaseIndex::GetSummary() const
return summary;
}
-void BaseIndex::SetBestBlockIndex(const CBlockIndex* block) {
- assert(!node::fPruneMode || AllowPrune());
+void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
+{
+ assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
if (AllowPrune() && block) {
node::PruneLockInfo prune_lock;
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index 07b4cdc06b..59bf6d34cf 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -157,9 +157,7 @@ bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, const uint256&
std::vector<uint8_t> encoded_filter;
try {
filein >> block_hash >> encoded_filter;
- uint256 result;
- CHash256().Write(encoded_filter).Finalize(result);
- if (result != hash) return error("Checksum mismatch in filter decode.");
+ if (Hash(encoded_filter) != hash) return error("Checksum mismatch in filter decode.");
filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter), /*skip_decode_check=*/true);
}
catch (const std::exception& e) {
diff --git a/src/init.cpp b/src/init.cpp
index 5160718eaa..4e06d44cb0 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -191,8 +191,24 @@ static fs::path GetPidFile(const ArgsManager& args)
// shutdown thing.
//
+#if HAVE_SYSTEM
+static void ShutdownNotify(const ArgsManager& args)
+{
+ std::vector<std::thread> threads;
+ for (const auto& cmd : args.GetArgs("-shutdownnotify")) {
+ threads.emplace_back(runCommand, cmd);
+ }
+ for (auto& t : threads) {
+ t.join();
+ }
+}
+#endif
+
void Interrupt(NodeContext& node)
{
+#if HAVE_SYSTEM
+ ShutdownNotify(*node.args);
+#endif
InterruptHTTPServer();
InterruptHTTPRPC();
InterruptRPC();
@@ -441,11 +457,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#if HAVE_SYSTEM
argsman.AddArg("-startupnotify=<cmd>", "Execute command on startup.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-#endif
-#ifndef WIN32
- argsman.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-#else
- hidden_args.emplace_back("-sysperms");
+ argsman.AddArg("-shutdownnotify=<cmd>", "Execute command immediately before beginning shutdown. The need for shutdown may be urgent, so be careful not to delay it long (if the command doesn't require interaction with the server, consider having it fork into the background).", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#endif
argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-blockfilterindex=<type>",
@@ -586,7 +598,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
argsman.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
argsman.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- argsman.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
argsman.AddArg("-rpcdoccheck", strprintf("Throw a non-fatal error at runtime if the documentation for an RPC is incorrect (default: %u)", DEFAULT_RPC_DOC_CHECK), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
@@ -719,10 +731,13 @@ void InitParameterInteraction(ArgsManager& args)
LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__);
}
- // disable whitelistrelay in blocksonly mode
if (args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
+ // disable whitelistrelay in blocksonly mode
if (args.SoftSetBoolArg("-whitelistrelay", false))
LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__);
+ // Reduce default mempool size in blocksonly mode to avoid unexpected resource usage
+ if (args.SoftSetArg("-maxmempool", ToString(DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB)))
+ LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -maxmempool=%d\n", __func__, DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB);
}
// Forcing relay from whitelisted hosts implies we will accept relays from them in the first place.
@@ -801,10 +816,6 @@ bool AppInitBasicSetup(const ArgsManager& args)
}
#ifndef WIN32
- if (!args.GetBoolArg("-sysperms", false)) {
- umask(077);
- }
-
// Clean shutdown on SIGTERM
registerSignalHandler(SIGTERM, HandleSIGTERM);
registerSignalHandler(SIGINT, HandleSIGTERM);
@@ -1035,6 +1046,7 @@ bool AppInitParameterInteraction(const ArgsManager& args, bool use_syscall_sandb
{
ChainstateManager::Options chainman_opts_dummy{
.chainparams = chainparams,
+ .datadir = args.GetDataDirNet(),
};
if (const auto error{ApplyArgsManOptions(args, chainman_opts_dummy)}) {
return InitError(*error);
@@ -1433,6 +1445,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
bool fReindexChainState = args.GetBoolArg("-reindex-chainstate", false);
ChainstateManager::Options chainman_opts{
.chainparams = chainparams,
+ .datadir = args.GetDataDirNet(),
.adjusted_time_callback = GetAdjustedTime,
};
Assert(!ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction
@@ -1479,9 +1492,10 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
options.mempool = Assert(node.mempool.get());
options.reindex = node::fReindex;
options.reindex_chainstate = fReindexChainState;
- options.prune = node::fPruneMode;
+ options.prune = chainman.m_blockman.IsPruneMode();
options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
options.check_level = args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
+ options.require_full_verification = args.IsArgSet("-checkblocks") || args.IsArgSet("-checklevel");
options.check_interrupt = ShutdownRequested;
options.coins_error_cb = [] {
uiInterface.ThreadSafeMessageBox(
@@ -1513,7 +1527,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}
}
- if (status == node::ChainstateLoadStatus::FAILURE_INCOMPATIBLE_DB) {
+ if (status == node::ChainstateLoadStatus::FAILURE_INCOMPATIBLE_DB || status == node::ChainstateLoadStatus::FAILURE_INSUFFICIENT_DBCACHE) {
return InitError(error);
}
@@ -1589,7 +1603,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// if pruning, perform the initial blockstore prune
// after any wallet rescanning has taken place.
- if (fPruneMode) {
+ if (chainman.m_blockman.IsPruneMode()) {
if (!fReindex) {
LOCK(cs_main);
for (Chainstate* chainstate : chainman.GetAll()) {
@@ -1617,8 +1631,10 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// On first startup, warn on low block storage space
if (!fReindex && !fReindexChainState && chain_active_height <= 1) {
- uint64_t additional_bytes_needed = fPruneMode ? nPruneTarget
- : chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024;
+ uint64_t additional_bytes_needed{
+ chainman.m_blockman.IsPruneMode() ?
+ chainman.m_blockman.GetPruneTarget() :
+ chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024};
if (!CheckDiskSpace(args.GetBlocksDirPath(), additional_bytes_needed)) {
InitWarning(strprintf(_(
@@ -1785,7 +1801,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
if (connOptions.onion_binds.size() > 1) {
InitWarning(strprintf(_("More than one onion bind address is provided. Using %s "
"for the automatically created Tor onion service."),
- onion_service_target.ToStringIPPort()));
+ onion_service_target.ToStringAddrPort()));
}
StartTorControl(onion_service_target);
}
@@ -1812,6 +1828,13 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
if (connect.size() != 1 || connect[0] != "0") {
connOptions.m_specified_outgoing = connect;
}
+ if (!connOptions.m_specified_outgoing.empty() && !connOptions.vSeedNodes.empty()) {
+ LogPrintf("-seednode is ignored when -connect is used\n");
+ }
+
+ if (args.IsArgSet("-dnsseed") && args.GetBoolArg("-dnsseed", DEFAULT_DNSSEED) && args.IsArgSet("-proxy")) {
+ LogPrintf("-dnsseed is ignored when -connect is used and -proxy is specified\n");
+ }
}
const std::string& i2psam_arg = args.GetArg("-i2psam", "");
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index ce6c44e2bc..7e87d5a523 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -177,11 +177,8 @@ public:
//! Is initial block download.
virtual bool isInitialBlockDownload() = 0;
- //! Get reindex.
- virtual bool getReindex() = 0;
-
- //! Get importing.
- virtual bool getImporting() = 0;
+ //! Is loading blocks.
+ virtual bool isLoadingBlocks() = 0;
//! Set network active.
virtual void setNetworkActive(bool active) = 0;
diff --git a/src/kernel/chainstatemanager_opts.h b/src/kernel/chainstatemanager_opts.h
index 226bb6031e..2395f60164 100644
--- a/src/kernel/chainstatemanager_opts.h
+++ b/src/kernel/chainstatemanager_opts.h
@@ -6,6 +6,8 @@
#define BITCOIN_KERNEL_CHAINSTATEMANAGER_OPTS_H
#include <arith_uint256.h>
+#include <dbwrapper.h>
+#include <txdb.h>
#include <uint256.h>
#include <util/time.h>
@@ -27,6 +29,7 @@ namespace kernel {
*/
struct ChainstateManagerOpts {
const CChainParams& chainparams;
+ fs::path datadir;
const std::function<NodeClock::time_point()> adjusted_time_callback{nullptr};
std::optional<bool> check_block_index{};
bool checkpoints_enabled{DEFAULT_CHECKPOINTS_ENABLED};
@@ -36,6 +39,9 @@ struct ChainstateManagerOpts {
std::optional<uint256> assumed_valid_block{};
//! If the tip is older than this, the node is considered to be in initial block download.
std::chrono::seconds max_tip_age{DEFAULT_MAX_TIP_AGE};
+ DBOptions block_tree_db{};
+ DBOptions coins_db{};
+ CoinsViewOptions coins_view{};
};
} // namespace kernel
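
Since the struct is aggregate-initialized at its call sites, the new datadir field and the per-database option members simply default unless named explicitly. A sketch of such an initialization, assuming an ArgsManager named args and a CChainParams reference named chainparams:

    // Hypothetical construction; only fields confirmed by the call sites in this change are set.
    kernel::ChainstateManagerOpts opts{
        .chainparams = chainparams,
        .datadir = args.GetDataDirNet(),
        .adjusted_time_callback = GetAdjustedTime,
        // block_tree_db, coins_db and coins_view keep their value-initialized defaults
    };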
diff --git a/src/kernel/coinstats.cpp b/src/kernel/coinstats.cpp
index 06a4b8c974..82d7d8c46b 100644
--- a/src/kernel/coinstats.cpp
+++ b/src/kernel/coinstats.cpp
@@ -48,8 +48,9 @@ uint64_t GetBogoSize(const CScript& script_pub_key)
script_pub_key.size() /* scriptPubKey */;
}
-CDataStream TxOutSer(const COutPoint& outpoint, const Coin& coin) {
- CDataStream ss(SER_DISK, PROTOCOL_VERSION);
+DataStream TxOutSer(const COutPoint& outpoint, const Coin& coin)
+{
+ DataStream ss{};
ss << outpoint;
ss << static_cast<uint32_t>(coin.nHeight * 2 + coin.fCoinBase);
ss << coin.out;
diff --git a/src/kernel/coinstats.h b/src/kernel/coinstats.h
index b7c1328e93..54d0e4f664 100644
--- a/src/kernel/coinstats.h
+++ b/src/kernel/coinstats.h
@@ -72,7 +72,7 @@ struct CCoinsStats {
uint64_t GetBogoSize(const CScript& script_pub_key);
-CDataStream TxOutSer(const COutPoint& outpoint, const Coin& coin);
+DataStream TxOutSer(const COutPoint& outpoint, const Coin& coin);
std::optional<CCoinsStats> ComputeUTXOStats(CoinStatsHashType hash_type, CCoinsView* view, node::BlockManager& blockman, const std::function<void()>& interruption_point = {});
} // namespace kernel
diff --git a/src/kernel/cs_main.cpp b/src/kernel/cs_main.cpp
index c3a08c9695..d27cb7caf3 100644
--- a/src/kernel/cs_main.cpp
+++ b/src/kernel/cs_main.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <kernel/cs_main.h>
#include <sync.h>
RecursiveMutex cs_main;
diff --git a/src/kernel/mempool_options.h b/src/kernel/mempool_options.h
index dad6f14c39..beb5fca5e9 100644
--- a/src/kernel/mempool_options.h
+++ b/src/kernel/mempool_options.h
@@ -18,6 +18,8 @@ class CBlockPolicyEstimator;
/** Default for -maxmempool, maximum megabytes of mempool memory usage */
static constexpr unsigned int DEFAULT_MAX_MEMPOOL_SIZE_MB{300};
+/** Default for -maxmempool when blocksonly is set */
+static constexpr unsigned int DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB{5};
/** Default for -mempoolexpiry, expiration time for mempool transactions in hours */
static constexpr unsigned int DEFAULT_MEMPOOL_EXPIRY_HOURS{336};
/** Default for -mempoolfullrbf, if the transaction replaceability signaling is ignored */
diff --git a/src/key.cpp b/src/key.cpp
index 33913ed461..3a3f0b2bc2 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -245,8 +245,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const {
unsigned char rnd[8];
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd);
- uint256 hash;
- CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
+ uint256 hash{Hash(str, rnd)};
std::vector<unsigned char> vchSig;
Sign(hash, vchSig);
return pubkey.Verify(hash, vchSig);
diff --git a/src/key.h b/src/key.h
index d5b662c6fc..4e092fffea 100644
--- a/src/key.h
+++ b/src/key.h
@@ -42,10 +42,10 @@ public:
private:
//! Whether this private key is valid. We check for correctness when modifying the key
//! data, so fValid should always correspond to the actual state.
- bool fValid;
+ bool fValid{false};
//! Whether the public key corresponding to this private key is (to be) compressed.
- bool fCompressed;
+ bool fCompressed{false};
//! The actual byte data
std::vector<unsigned char, secure_allocator<unsigned char> > keydata;
@@ -55,7 +55,7 @@ private:
public:
//! Construct an invalid private key.
- CKey() : fValid(false), fCompressed(false)
+ CKey()
{
// Important: vch must be 32 bytes in length to not break serialization
keydata.resize(32);
diff --git a/src/logging/timer.h b/src/logging/timer.h
index ea0821dede..993ba99c25 100644
--- a/src/logging/timer.h
+++ b/src/logging/timer.h
@@ -12,6 +12,7 @@
#include <util/types.h>
#include <chrono>
+#include <optional>
#include <string>
@@ -28,14 +29,14 @@ public:
std::string prefix,
std::string end_msg,
BCLog::LogFlags log_category = BCLog::LogFlags::ALL,
- bool msg_on_completion = true) :
- m_prefix(std::move(prefix)),
- m_title(std::move(end_msg)),
- m_log_category(log_category),
- m_message_on_completion(msg_on_completion)
+ bool msg_on_completion = true)
+ : m_prefix(std::move(prefix)),
+ m_title(std::move(end_msg)),
+ m_log_category(log_category),
+ m_message_on_completion(msg_on_completion)
{
this->Log(strprintf("%s started", m_title));
- m_start_t = GetTime<std::chrono::microseconds>();
+ m_start_t = std::chrono::steady_clock::now();
}
~Timer()
@@ -60,24 +61,25 @@ public:
std::string LogMsg(const std::string& msg)
{
- const auto end_time = GetTime<std::chrono::microseconds>() - m_start_t;
- if (m_start_t.count() <= 0) {
+ const auto end_time{std::chrono::steady_clock::now()};
+ if (!m_start_t) {
return strprintf("%s: %s", m_prefix, msg);
}
+ const auto duration{end_time - *m_start_t};
if constexpr (std::is_same<TimeType, std::chrono::microseconds>::value) {
- return strprintf("%s: %s (%iμs)", m_prefix, msg, end_time.count());
+ return strprintf("%s: %s (%iμs)", m_prefix, msg, Ticks<std::chrono::microseconds>(duration));
} else if constexpr (std::is_same<TimeType, std::chrono::milliseconds>::value) {
- return strprintf("%s: %s (%.2fms)", m_prefix, msg, end_time.count() * 0.001);
+ return strprintf("%s: %s (%.2fms)", m_prefix, msg, Ticks<MillisecondsDouble>(duration));
} else if constexpr (std::is_same<TimeType, std::chrono::seconds>::value) {
- return strprintf("%s: %s (%.2fs)", m_prefix, msg, end_time.count() * 0.000001);
+ return strprintf("%s: %s (%.2fs)", m_prefix, msg, Ticks<SecondsDouble>(duration));
} else {
static_assert(ALWAYS_FALSE<TimeType>, "Error: unexpected time type");
}
}
private:
- std::chrono::microseconds m_start_t{};
+ std::optional<std::chrono::steady_clock::time_point> m_start_t{};
//! Log prefix; usually the name of the function this was created in.
const std::string m_prefix;
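
Storing the start time as an optional steady_clock time point makes the elapsed-time computation monotonic, and the Ticks<> helpers perform the unit conversion that the manual count() arithmetic used to. A minimal sketch of the same conversion outside the Timer class:

    // Illustrative only: measuring a duration the way LogMsg() now does.
    const auto start{std::chrono::steady_clock::now()};
    // ... timed work ...
    const auto duration{std::chrono::steady_clock::now() - start};
    LogPrintf("elapsed: %.2fms\n", Ticks<MillisecondsDouble>(duration));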
diff --git a/src/mapport.cpp b/src/mapport.cpp
index e6a473c185..994fd12cf5 100644
--- a/src/mapport.cpp
+++ b/src/mapport.cpp
@@ -27,9 +27,9 @@
#include <miniupnpc/miniupnpc.h>
#include <miniupnpc/upnpcommands.h>
#include <miniupnpc/upnperrors.h>
-// The minimum supported miniUPnPc API version is set to 10. This keeps compatibility
-// with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages.
-static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed");
+// The minimum supported miniUPnPc API version is set to 17. This excludes
+// versions with known vulnerabilities.
+static_assert(MINIUPNPC_API_VERSION >= 17, "miniUPnPc API version >= 17 assumed");
#endif // USE_UPNP
#include <atomic>
@@ -104,7 +104,7 @@ static bool NatpmpMapping(natpmp_t* natpmp, const struct in_addr& external_ipv4_
AddLocal(external, LOCAL_MAPPED);
external_ip_discovered = true;
}
- LogPrintf("natpmp: Port mapping successful. External address = %s\n", external.ToString());
+ LogPrintf("natpmp: Port mapping successful. External address = %s\n", external.ToStringAddrPort());
return true;
} else {
LogPrintf("natpmp: Port mapping failed.\n");
@@ -159,11 +159,7 @@ static bool ProcessUpnp()
char lanaddr[64];
int error = 0;
-#if MINIUPNPC_API_VERSION < 14
- devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error);
-#else
devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error);
-#endif
struct UPNPUrls urls;
struct IGDdatas data;
@@ -181,7 +177,7 @@ static bool ProcessUpnp()
if (externalIPAddress[0]) {
CNetAddr resolved;
if (LookupHost(externalIPAddress, resolved, false)) {
- LogPrintf("UPnP: ExternalIPAddress = %s\n", resolved.ToString());
+ LogPrintf("UPnP: ExternalIPAddress = %s\n", resolved.ToStringAddr());
AddLocal(resolved, LOCAL_MAPPED);
}
} else {
diff --git a/src/mapport.h b/src/mapport.h
index 279d65167f..6f55c46f6c 100644
--- a/src/mapport.h
+++ b/src/mapport.h
@@ -5,17 +5,9 @@
#ifndef BITCOIN_MAPPORT_H
#define BITCOIN_MAPPORT_H
-#ifdef USE_UPNP
-static constexpr bool DEFAULT_UPNP = USE_UPNP;
-#else
static constexpr bool DEFAULT_UPNP = false;
-#endif // USE_UPNP
-#ifdef USE_NATPMP
-static constexpr bool DEFAULT_NATPMP = USE_NATPMP;
-#else
static constexpr bool DEFAULT_NATPMP = false;
-#endif // USE_NATPMP
enum MapPortProtoFlag : unsigned int {
NONE = 0x00,
diff --git a/src/net.cpp b/src/net.cpp
index 960d0ee841..4e4f2f78be 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -196,7 +196,7 @@ static std::vector<CAddress> ConvertSeeds(const std::vector<uint8_t> &vSeedsIn)
s >> endpoint;
CAddress addr{endpoint, GetDesirableServiceFlags(NODE_NONE)};
addr.nTime = rng.rand_uniform_delay(Now<NodeSeconds>() - one_week, -one_week);
- LogPrint(BCLog::NET, "Added hardcoded seed: %s\n", addr.ToString());
+ LogPrint(BCLog::NET, "Added hardcoded seed: %s\n", addr.ToStringAddrPort());
vSeedsOut.push_back(addr);
}
return vSeedsOut;
@@ -258,7 +258,7 @@ std::optional<CService> GetLocalAddrForPeer(CNode& node)
}
if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false))
{
- LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n", addrLocal.ToString(), node.GetId());
+ LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n", addrLocal.ToStringAddrPort(), node.GetId());
return addrLocal;
}
// Address is unroutable. Don't advertise.
@@ -295,7 +295,7 @@ bool AddLocal(const CService& addr_, int nScore)
if (!IsReachable(addr))
return false;
- LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore);
+ LogPrintf("AddLocal(%s,%i)\n", addr.ToStringAddrPort(), nScore);
{
LOCK(g_maplocalhost_mutex);
@@ -318,7 +318,7 @@ bool AddLocal(const CNetAddr &addr, int nScore)
void RemoveLocal(const CService& addr)
{
LOCK(g_maplocalhost_mutex);
- LogPrintf("RemoveLocal(%s)\n", addr.ToString());
+ LogPrintf("RemoveLocal(%s)\n", addr.ToStringAddrPort());
mapLocalHost.erase(addr);
}
@@ -405,7 +405,7 @@ CNode* CConnman::FindNode(const CService& addr)
bool CConnman::AlreadyConnectedToAddress(const CAddress& addr)
{
- return FindNode(static_cast<CNetAddr>(addr)) || FindNode(addr.ToStringIPPort());
+ return FindNode(static_cast<CNetAddr>(addr)) || FindNode(addr.ToStringAddrPort());
}
bool CConnman::CheckIncomingNonce(uint64_t nonce)
@@ -436,6 +436,7 @@ static CAddress GetBindAddress(const Sock& sock)
CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type)
{
+ AssertLockNotHeld(m_unused_i2p_sessions_mutex);
assert(conn_type != ConnectionType::INBOUND);
if (pszDest == nullptr) {
@@ -452,7 +453,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
}
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying connection %s lastseen=%.1fhrs\n",
- pszDest ? pszDest : addrConnect.ToString(),
+ pszDest ? pszDest : addrConnect.ToStringAddrPort(),
Ticks<HoursDouble>(pszDest ? 0h : Now<NodeSeconds>() - addrConnect.nTime));
// Resolve
@@ -464,7 +465,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
const CService rnd{resolved[GetRand(resolved.size())]};
addrConnect = CAddress{MaybeFlipIPv6toCJDNS(rnd), NODE_NONE};
if (!addrConnect.IsValid()) {
- LogPrint(BCLog::NET, "Resolver returned invalid address %s for %s\n", addrConnect.ToString(), pszDest);
+ LogPrint(BCLog::NET, "Resolver returned invalid address %s for %s\n", addrConnect.ToStringAddrPort(), pszDest);
return nullptr;
}
// It is possible that we already have a connection to the IP/port pszDest resolved to.
@@ -496,8 +497,23 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
if (m_i2p_sam_session) {
connected = m_i2p_sam_session->Connect(addrConnect, conn, proxyConnectionFailed);
} else {
- i2p_transient_session = std::make_unique<i2p::sam::Session>(proxy.proxy, &interruptNet);
+ {
+ LOCK(m_unused_i2p_sessions_mutex);
+ if (m_unused_i2p_sessions.empty()) {
+ i2p_transient_session =
+ std::make_unique<i2p::sam::Session>(proxy.proxy, &interruptNet);
+ } else {
+ i2p_transient_session.swap(m_unused_i2p_sessions.front());
+ m_unused_i2p_sessions.pop();
+ }
+ }
connected = i2p_transient_session->Connect(addrConnect, conn, proxyConnectionFailed);
+ if (!connected) {
+ LOCK(m_unused_i2p_sessions_mutex);
+ if (m_unused_i2p_sessions.size() < MAX_UNUSED_I2P_SESSIONS_SIZE) {
+ m_unused_i2p_sessions.emplace(i2p_transient_session.release());
+ }
+ }
}
if (connected) {
@@ -509,7 +525,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
if (!sock) {
return nullptr;
}
- connected = ConnectThroughProxy(proxy, addrConnect.ToStringIP(), addrConnect.GetPort(),
+ connected = ConnectThroughProxy(proxy, addrConnect.ToStringAddr(), addrConnect.GetPort(),
*sock, nConnectTimeout, proxyConnectionFailed);
} else {
// no proxy needed (none set for target network)
@@ -593,7 +609,7 @@ void CNode::SetAddrLocal(const CService& addrLocalIn) {
AssertLockNotHeld(m_addr_local_mutex);
LOCK(m_addr_local_mutex);
if (addrLocal.IsValid()) {
- error("Addr local already set for node: %i. Refusing to change from %s to %s", id, addrLocal.ToString(), addrLocalIn.ToString());
+ error("Addr local already set for node: %i. Refusing to change from %s to %s", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort());
} else {
addrLocal = addrLocalIn;
}
@@ -644,7 +660,7 @@ void CNode::CopyStats(CNodeStats& stats)
// Leave string empty if addrLocal invalid (not filled in yet)
CService addrLocalUnlocked = GetAddrLocal();
- stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : "";
+ stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToStringAddrPort() : "";
X(m_conn_type);
}
@@ -825,7 +841,13 @@ size_t CConnman::SocketSendData(CNode& node) const
if (!node.m_sock) {
break;
}
- nBytes = node.m_sock->Send(reinterpret_cast<const char*>(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
+ int flags = MSG_NOSIGNAL | MSG_DONTWAIT;
+#ifdef MSG_MORE
+ if (it + 1 != node.vSendMsg.end()) {
+ flags |= MSG_MORE;
+ }
+#endif
+ nBytes = node.m_sock->Send(reinterpret_cast<const char*>(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, flags);
}
if (nBytes > 0) {
node.m_last_send = GetTime<std::chrono::seconds>();
@@ -967,12 +989,12 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
}
if (!fNetworkActive) {
- LogPrint(BCLog::NET, "connection from %s dropped: not accepting new connections\n", addr.ToString());
+ LogPrint(BCLog::NET, "connection from %s dropped: not accepting new connections\n", addr.ToStringAddrPort());
return;
}
if (!sock->IsSelectable()) {
- LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString());
+ LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToStringAddrPort());
return;
}
@@ -981,14 +1003,14 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
const int on{1};
if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) {
LogPrint(BCLog::NET, "connection from %s: unable to set TCP_NODELAY, continuing anyway\n",
- addr.ToString());
+ addr.ToStringAddrPort());
}
// Don't accept connections from banned peers.
bool banned = m_banman && m_banman->IsBanned(addr);
if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && banned)
{
- LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToString());
+ LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToStringAddrPort());
return;
}
@@ -996,7 +1018,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
bool discouraged = m_banman && m_banman->IsDiscouraged(addr);
if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && nInbound + 1 >= nMaxInbound && discouraged)
{
- LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToString());
+ LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToStringAddrPort());
return;
}
@@ -1034,7 +1056,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
pnode->AddRef();
m_msgproc->InitializeNode(*pnode, nodeServices);
- LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString());
+ LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToStringAddrPort());
{
LOCK(m_nodes_mutex);
@@ -1047,6 +1069,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
bool CConnman::AddConnection(const std::string& address, ConnectionType conn_type)
{
+ AssertLockNotHeld(m_unused_i2p_sessions_mutex);
std::optional<int> max_connections;
switch (conn_type) {
case ConnectionType::INBOUND:
@@ -1305,15 +1328,14 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
RecordBytesRecv(nBytes);
if (notify) {
size_t nSizeAdded = 0;
- auto it(pnode->vRecvMsg.begin());
- for (; it != pnode->vRecvMsg.end(); ++it) {
+ for (const auto& msg : pnode->vRecvMsg) {
// vRecvMsg contains only completed CNetMessage
// the single possible partially deserialized message are held by TransportDeserializer
- nSizeAdded += it->m_raw_message_size;
+ nSizeAdded += msg.m_raw_message_size;
}
{
LOCK(pnode->cs_vProcessMsg);
- pnode->vProcessMsg.splice(pnode->vProcessMsg.end(), pnode->vRecvMsg, pnode->vRecvMsg.begin(), it);
+ pnode->vProcessMsg.splice(pnode->vProcessMsg.end(), pnode->vRecvMsg);
pnode->nProcessQueueSize += nSizeAdded;
pnode->fPauseRecv = pnode->nProcessQueueSize > nReceiveFloodSize;
}
@@ -1399,7 +1421,7 @@ void CConnman::ThreadDNSAddressSeed()
if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) {
// When -forcednsseed is provided, query all.
seeds_right_now = seeds.size();
- } else if (addrman.size() == 0) {
+ } else if (addrman.Size() == 0) {
// If we have no known peers, query all.
// This will occur on the first run, or if peers.dat has been
// deleted.
@@ -1418,13 +1440,13 @@ void CConnman::ThreadDNSAddressSeed()
// * If we continue having problems, eventually query all the
// DNS seeds, and if that fails too, also try the fixed seeds.
// (done in ThreadOpenConnections)
- const std::chrono::seconds seeds_wait_time = (addrman.size() >= DNSSEEDS_DELAY_PEER_THRESHOLD ? DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS);
+ const std::chrono::seconds seeds_wait_time = (addrman.Size() >= DNSSEEDS_DELAY_PEER_THRESHOLD ? DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS);
for (const std::string& seed : seeds) {
if (seeds_right_now == 0) {
seeds_right_now += DNSSEEDS_TO_QUERY_AT_ONCE;
- if (addrman.size() > 0) {
+ if (addrman.Size() > 0) {
LogPrintf("Waiting %d seconds before querying DNS seeds.\n", seeds_wait_time.count());
std::chrono::seconds to_wait = seeds_wait_time;
while (to_wait.count() > 0) {
@@ -1466,6 +1488,8 @@ void CConnman::ThreadDNSAddressSeed()
}
LogPrintf("Loading addresses from DNS seed %s\n", seed);
+ // If -proxy is in use, we make an ADDR_FETCH connection to the DNS resolved peer address
+ // for the base dns seed domain in chainparams
if (HaveNameProxy()) {
AddAddrFetch(seed);
} else {
@@ -1487,8 +1511,9 @@ void CConnman::ThreadDNSAddressSeed()
}
addrman.Add(vAdd, resolveSource);
} else {
- // We now avoid directly using results from DNS Seeds which do not support service bit filtering,
- // instead using them as a addrfetch to get nodes with our desired service bits.
+ // If the seed does not support a subdomain with our desired service bits,
+ // we make an ADDR_FETCH connection to the DNS resolved peer address for the
+ // base dns seed domain in chainparams
AddAddrFetch(seed);
}
}
@@ -1504,11 +1529,12 @@ void CConnman::DumpAddresses()
DumpPeerAddresses(::gArgs, addrman);
LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n",
- addrman.size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
+ addrman.Size(), Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
}
void CConnman::ProcessAddrFetch()
{
+ AssertLockNotHeld(m_unused_i2p_sessions_mutex);
std::string strDest;
{
LOCK(m_addr_fetches_mutex);
@@ -1575,8 +1601,22 @@ int CConnman::GetExtraBlockRelayCount() const
return std::max(block_relay_peers - m_max_outbound_block_relay, 0);
}
+std::unordered_set<Network> CConnman::GetReachableEmptyNetworks() const
+{
+ std::unordered_set<Network> networks{};
+ for (int n = 0; n < NET_MAX; n++) {
+ enum Network net = (enum Network)n;
+ if (net == NET_UNROUTABLE || net == NET_INTERNAL) continue;
+ if (IsReachable(net) && addrman.Size(net, std::nullopt) == 0) {
+ networks.insert(net);
+ }
+ }
+ return networks;
+}
+
void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
{
+ AssertLockNotHeld(m_unused_i2p_sessions_mutex);
SetSyscallSandboxPolicy(SyscallSandboxPolicy::NET_OPEN_CONNECTION);
FastRandomContext rng;
// Connect to specific addresses
@@ -1584,7 +1624,6 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
{
for (int64_t nLoop = 0;; nLoop++)
{
- ProcessAddrFetch();
for (const std::string& strAddr : connect)
{
CAddress addr(CService(), NODE_NONE);
@@ -1624,7 +1663,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
if (interruptNet)
return;
- if (add_fixed_seeds && addrman.size() == 0) {
+ const std::unordered_set<Network> fixed_seed_networks{GetReachableEmptyNetworks()};
+ if (add_fixed_seeds && !fixed_seed_networks.empty()) {
// When the node starts with an empty peers.dat, there are a few other sources of peers before
// we fallback on to fixed seeds: -dnsseed, -seednode, -addnode
// If none of those are available, we fallback on to fixed seeds immediately, else we allow
@@ -1633,7 +1673,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// It is cheapest to check if enough time has passed first.
if (GetTime<std::chrono::seconds>() > start + std::chrono::minutes{1}) {
add_fixed_seeds_now = true;
- LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty\n");
+ LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty for at least one reachable network\n");
}
// Checking !dnsseed is cheaper before locking 2 mutexes.
@@ -1650,14 +1690,12 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// We will not make outgoing connections to peers that are unreachable
// (e.g. because of -onlynet configuration).
// Therefore, we do not add them to addrman in the first place.
- // Note that if you change -onlynet setting from one network to another,
- // peers.dat will contain only peers of unreachable networks and
- // manual intervention will be needed (either delete peers.dat after
- // configuration change or manually add some reachable peer using addnode),
- // see <https://github.com/bitcoin/bitcoin/issues/26035> for details.
+ // In case previously unreachable networks become reachable
+ // (e.g. in case of -onlynet changes by the user), fixed seeds will
+ // be loaded only for networks for which we have no addresses.
seed_addrs.erase(std::remove_if(seed_addrs.begin(), seed_addrs.end(),
- [](const CAddress& addr) { return !IsReachable(addr); }),
- seed_addrs.end());
+ [&fixed_seed_networks](const CAddress& addr) { return fixed_seed_networks.count(addr.GetNetwork()) == 0; }),
+ seed_addrs.end());
CNetAddr local;
local.SetInternal("fixedseeds");
addrman.Add(seed_addrs, local);
@@ -1771,7 +1809,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
!HasAllDesirableServiceFlags(addr.nServices) ||
setConnected.count(m_netgroupman.GetGroup(addr))) continue;
addrConnect = addr;
- LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToString());
+ LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToStringAddrPort());
break;
}
@@ -1851,7 +1889,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
if (!interruptNet.sleep_for(rng.rand_uniform_duration<CThreadInterrupt::Clock>(FEELER_SLEEP_WINDOW))) {
return;
}
- LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString());
+ LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToStringAddrPort());
}
OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, conn_type);
@@ -1928,6 +1966,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const
void CConnman::ThreadOpenAddedConnections()
{
+ AssertLockNotHeld(m_unused_i2p_sessions_mutex);
SetSyscallSandboxPolicy(SyscallSandboxPolicy::NET_ADD_CONNECTION);
while (true)
{
@@ -1957,6 +1996,7 @@ void CConnman::ThreadOpenAddedConnections()
// if successful, this moves the passed grant to the constructed node
void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type)
{
+ AssertLockNotHeld(m_unused_i2p_sessions_mutex);
assert(conn_type != ConnectionType::INBOUND);
//
@@ -2080,7 +2120,7 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError,
socklen_t len = sizeof(sockaddr);
if (!addrBind.GetSockAddr((struct sockaddr*)&sockaddr, &len))
{
- strError = strprintf(Untranslated("Bind address family for %s not supported"), addrBind.ToString());
+ strError = strprintf(Untranslated("Bind address family for %s not supported"), addrBind.ToStringAddrPort());
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
return false;
}
@@ -2120,13 +2160,13 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError,
if (sock->Bind(reinterpret_cast<struct sockaddr*>(&sockaddr), len) == SOCKET_ERROR) {
int nErr = WSAGetLastError();
if (nErr == WSAEADDRINUSE)
- strError = strprintf(_("Unable to bind to %s on this computer. %s is probably already running."), addrBind.ToString(), PACKAGE_NAME);
+ strError = strprintf(_("Unable to bind to %s on this computer. %s is probably already running."), addrBind.ToStringAddrPort(), PACKAGE_NAME);
else
- strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr));
+ strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToStringAddrPort(), NetworkErrorString(nErr));
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
return false;
}
- LogPrintf("Bound to %s\n", addrBind.ToString());
+ LogPrintf("Bound to %s\n", addrBind.ToStringAddrPort());
// Listen for incoming connections
if (sock->Listen(SOMAXCONN) == SOCKET_ERROR)
@@ -2156,7 +2196,7 @@ void Discover()
for (const CNetAddr &addr : vaddr)
{
if (AddLocal(addr, LOCAL_IF))
- LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString());
+ LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToStringAddr());
}
}
}
@@ -2176,14 +2216,14 @@ void Discover()
struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
CNetAddr addr(s4->sin_addr);
if (AddLocal(addr, LOCAL_IF))
- LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToString());
+ LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToStringAddr());
}
else if (ifa->ifa_addr->sa_family == AF_INET6)
{
struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
CNetAddr addr(s6->sin6_addr);
if (AddLocal(addr, LOCAL_IF))
- LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToString());
+ LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToStringAddr());
}
}
freeifaddrs(myaddrs);
@@ -2742,7 +2782,7 @@ CNode::CNode(NodeId idIn,
m_connected{GetTime<std::chrono::seconds>()},
addr{addrIn},
addrBind{addrBindIn},
- m_addr_name{addrNameIn.empty() ? addr.ToStringIPPort() : addrNameIn},
+ m_addr_name{addrNameIn.empty() ? addr.ToStringAddrPort() : addrNameIn},
m_inbound_onion{inbound_onion},
m_prefer_evict{node_opts.prefer_evict},
nKeyedNetGroup{nKeyedNetGroupIn},
@@ -2848,7 +2888,7 @@ void CaptureMessageToFile(const CAddress& addr,
auto now = GetTime<std::chrono::microseconds>();
// Windows folder names cannot include a colon
- std::string clean_addr = addr.ToString();
+ std::string clean_addr = addr.ToStringAddrPort();
std::replace(clean_addr.begin(), clean_addr.end(), ':', '_');
fs::path base_path = gArgs.GetDataDirNet() / "message_capture" / fs::u8path(clean_addr);
diff --git a/src/net.h b/src/net.h
index 31d17ea76c..2025dfdb05 100644
--- a/src/net.h
+++ b/src/net.h
@@ -38,7 +38,9 @@
#include <map>
#include <memory>
#include <optional>
+#include <queue>
#include <thread>
+#include <unordered_set>
#include <vector>
class AddrMan;
@@ -743,7 +745,7 @@ public:
bool GetNetworkActive() const { return fNetworkActive; };
bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; };
void SetNetworkActive(bool active);
- void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound, const char* strDest, ConnectionType conn_type);
+ void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound, const char* strDest, ConnectionType conn_type) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
bool CheckIncomingNonce(uint64_t nonce);
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
@@ -819,7 +821,7 @@ public:
* - Max total outbound connection capacity filled
* - Max connection capacity for type is filled
*/
- bool AddConnection(const std::string& address, ConnectionType conn_type);
+ bool AddConnection(const std::string& address, ConnectionType conn_type) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
size_t GetNodeCount(ConnectionDirection) const;
void GetNodeStats(std::vector<CNodeStats>& vstats) const;
@@ -885,10 +887,10 @@ private:
bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions);
bool InitBinds(const Options& options);
- void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
+ void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_unused_i2p_sessions_mutex);
void AddAddrFetch(const std::string& strDest) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex);
- void ProcessAddrFetch() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex);
- void ThreadOpenConnections(std::vector<std::string> connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex);
+ void ProcessAddrFetch() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_unused_i2p_sessions_mutex);
+ void ThreadOpenConnections(std::vector<std::string> connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex);
void ThreadMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void ThreadI2PAcceptIncoming();
void AcceptConnection(const ListenSocket& hListenSocket);
@@ -955,7 +957,7 @@ private:
bool AlreadyConnectedToAddress(const CAddress& addr);
bool AttemptToEvictConnection();
- CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type);
+ CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const;
void DeleteNode(CNode* pnode);
@@ -970,6 +972,12 @@ private:
void RecordBytesSent(uint64_t bytes) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
/**
+ Return reachable networks for which we have no addresses in addrman and therefore
+ may require loading fixed seeds.
+ */
+ std::unordered_set<Network> GetReachableEmptyNetworks() const;
+
+ /**
* Return vector of current BLOCK_RELAY peers.
*/
std::vector<CAddress> GetCurrentBlockRelayOnlyConns() const;
@@ -1127,6 +1135,26 @@ private:
std::vector<CService> m_onion_binds;
/**
+ * Mutex protecting m_i2p_sam_sessions.
+ */
+ Mutex m_unused_i2p_sessions_mutex;
+
+ /**
+ * A pool of created I2P SAM transient sessions that should be used instead
+ * of creating new ones in order to reduce the load on the I2P network.
+ * Creating a session in I2P is not cheap, thus if this is not empty, then
+ * pick an entry from it instead of creating a new session. If connecting to
+ * a host fails, then the created session is put into this pool for reuse.
+ */
+ std::queue<std::unique_ptr<i2p::sam::Session>> m_unused_i2p_sessions GUARDED_BY(m_unused_i2p_sessions_mutex);
+
+ /**
+ * Cap on the size of `m_unused_i2p_sessions`, to ensure it does not
+ * unexpectedly use too much memory.
+ */
+ static constexpr size_t MAX_UNUSED_I2P_SESSIONS_SIZE{10};
+
+ /**
* RAII helper to atomically create a copy of `m_nodes` and add a reference
* to each of the nodes. The nodes are released when this object is destroyed.
*/
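
The new members above implement a small bounded pool: transient I2P sessions whose connection attempt failed are parked instead of being destroyed, and later connection attempts draw from the pool before creating a fresh session, capped at MAX_UNUSED_I2P_SESSIONS_SIZE entries. A rough generic sketch of the same bounded-reuse pattern, with placeholder types rather than the real i2p::sam::Session API:

```cpp
#include <cstddef>
#include <memory>
#include <mutex>
#include <queue>

struct Session {
    // Placeholder for an expensive-to-create transient session.
};

class SessionPool {
public:
    // Reuse a parked session if one exists, otherwise create a new one.
    std::unique_ptr<Session> Acquire()
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        if (!m_unused.empty()) {
            auto s = std::move(m_unused.front());
            m_unused.pop();
            return s;
        }
        return std::make_unique<Session>();
    }

    // Park a session for later reuse, dropping it if the pool is already full.
    void Release(std::unique_ptr<Session> s)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        if (m_unused.size() < MAX_UNUSED) m_unused.push(std::move(s));
    }

private:
    static constexpr std::size_t MAX_UNUSED{10};
    std::mutex m_mutex;
    std::queue<std::unique_ptr<Session>> m_unused;
};
```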
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 21a49bdebd..25c65c7090 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -53,9 +53,6 @@
using node::ReadBlockFromDisk;
using node::ReadRawBlockFromDisk;
-using node::fImporting;
-using node::fPruneMode;
-using node::fReindex;
/** How long to cache transactions in mapRelay for normal relay */
static constexpr auto RELAY_TX_CACHE_TIME = 15min;
@@ -113,8 +110,11 @@ static constexpr auto GETDATA_TX_INTERVAL{60s};
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
-/** Time during which a peer must stall block download progress before being disconnected. */
-static constexpr auto BLOCK_STALLING_TIMEOUT{2s};
+/** Default time during which a peer must stall block download progress before being disconnected.
+ * The actual timeout is increased temporarily if peers are disconnected for hitting the timeout. */
+static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
+/** Maximum timeout for stalling block download. */
+static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
@@ -584,14 +584,17 @@ private:
/**
* Reconsider orphan transactions after a parent has been accepted to the mempool.
*
- * @peer[in] peer The peer whose orphan transactions we will reconsider. Generally only one
- * orphan will be reconsidered on each call of this function. This set
- * may be added to if accepting an orphan causes its children to be
- * reconsidered.
- * @return True if there are still orphans in this peer's work set.
+ * @peer[in] peer The peer whose orphan transactions we will reconsider. Generally only
+ * one orphan will be reconsidered on each call of this function. If an
+ * accepted orphan has orphaned children, those will need to be
+ * reconsidered, creating more work, possibly for other peers.
+ * @return True if meaningful work was done (an orphan was accepted/rejected).
+ * If no meaningful work was done, then the work set for this peer
+ * will be empty.
*/
bool ProcessOrphanTx(Peer& peer)
- EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
+ EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
+
/** Process a single headers message from a peer.
*
* @param[in] pfrom CNode of the peer
@@ -771,6 +774,9 @@ private:
/** Number of preferable block download peers. */
int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
+ /** Stalling timeout for blocks in IBD */
+ std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
+
bool AlreadyHaveTx(const GenTxid& gtxid)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);
@@ -1391,7 +1397,7 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
nonce, strSubVersion, nNodeStartingHeight, tx_relay));
if (fLogIPs) {
- LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToString(), tx_relay, nodeid);
+ LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);
} else {
LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
}
@@ -1730,8 +1736,7 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
{
- if (fImporting) return "Importing...";
- if (fReindex) return "Reindexing...";
+ if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";
// Ensure this peer exists and hasn't been disconnected
PeerRef peer = GetPeerRef(peer_id);
@@ -1809,7 +1814,8 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
/**
* Evict orphan txn pool entries based on a newly connected
* block, remember the recently confirmed transactions, and delete tracked
- * announcements for them. Also save the time of the last tip update.
+ * announcements for them. Also save the time of the last tip update and
+ * possibly reduce the dynamic block stalling timeout.
*/
void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
{
@@ -1832,6 +1838,16 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock
m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
}
}
+
+ // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
+ auto stalling_timeout = m_block_stalling_timeout.load();
+ Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
+ if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
+ const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
+ if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
+ LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
+ }
+ }
}
void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
@@ -2897,13 +2913,11 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
{
AssertLockHeld(g_msgproc_mutex);
- AssertLockHeld(cs_main);
+ LOCK(cs_main);
CTransactionRef porphanTx = nullptr;
- NodeId from_peer = -1;
- bool more = false;
- while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id, from_peer, more)) {
+ while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) {
const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
const TxValidationState& state = result.m_state;
const uint256& orphanHash = porphanTx->GetHash();
@@ -2911,20 +2925,20 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
- m_orphanage.AddChildrenToWorkSet(*porphanTx, peer.m_id);
+ m_orphanage.AddChildrenToWorkSet(*porphanTx);
m_orphanage.EraseTx(orphanHash);
for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
AddToCompactExtraTransactions(removedTx);
}
- break;
+ return true;
} else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
if (state.IsInvalid()) {
LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
orphanHash.ToString(),
- from_peer,
+ peer.m_id,
state.ToString());
// Maybe punish peer that gave us an invalid orphan tx
- MaybePunishNodeForTx(from_peer, state);
+ MaybePunishNodeForTx(peer.m_id, state);
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee
@@ -2959,11 +2973,11 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
}
}
m_orphanage.EraseTx(orphanHash);
- break;
+ return true;
}
}
- return more;
+ return false;
}
bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
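
With this rewrite, ProcessOrphanTx takes cs_main itself, handles at most one accepted or definitively rejected orphan per call, and returns true only when such meaningful work was done, letting the message loop decide whether to come back for more. A stripped-down sketch of that one-item-per-call loop, using a toy queue and a placeholder validation step rather than the real TxOrphanage and mempool types:

```cpp
#include <deque>
#include <optional>

struct Tx {
    int id;
};

// Toy per-peer work queue of orphan candidates.
std::optional<Tx> NextCandidate(std::deque<Tx>& queue)
{
    if (queue.empty()) return std::nullopt;
    Tx tx = queue.front();
    queue.pop_front();
    return tx;
}

// Handle at most one decided orphan; return true only if meaningful work was done.
bool ProcessOne(std::deque<Tx>& queue)
{
    while (auto tx = NextCandidate(queue)) {
        const bool decided = (tx->id % 2 == 0); // placeholder for acceptance or definitive rejection
        if (decided) {
            return true; // accepted or rejected: stop and let the caller reschedule
        }
        // Still missing inputs: the real code keeps it in the orphanage; here we
        // simply move on to the next candidate.
    }
    return false; // queue drained without doing meaningful work
}
```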
@@ -3217,7 +3231,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Disconnect if we connected to ourself
if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
{
- LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
+ LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
pfrom.fDisconnect = true;
return;
}
@@ -3344,11 +3358,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
std::string remoteAddr;
if (fLogIPs)
- remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
+ remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort();
LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s\n",
cleanSubVer, pfrom.nVersion,
- peer->m_starting_height, addrMe.ToString(), fRelay, pfrom.GetId(),
+ peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
remoteAddr);
int64_t nTimeOffset = nTime - GetTime();
@@ -3361,7 +3375,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// If the peer is old enough to have the old alert system, send it the final alert.
if (greatest_common_version <= 70012) {
- CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
+ DataStream finalAlert{ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50")};
m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", finalAlert));
}
@@ -3391,7 +3405,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (!pfrom.IsInboundConn()) {
LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
pfrom.nVersion.load(), peer->m_starting_height,
- pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
+ pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""),
pfrom.ConnectionTypeAsString());
}
@@ -3679,7 +3693,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
UpdateBlockAvailability(pfrom.GetId(), inv.hash);
- if (!fAlreadyHave && !fImporting && !fReindex && !IsBlockRequested(inv.hash)) {
+ if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
// Headers-first is the primary method of announcement on
// the network. If a node fell back to sending blocks by
// inv, it may be for a re-org, or because we haven't
@@ -3812,8 +3826,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// If pruning, don't inv blocks unless we have on disk and are likely to still have
// for some reasonable time window (1 hour) that block relay might require.
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
- if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave))
- {
+ if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
@@ -3889,7 +3902,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
return;
}
@@ -4033,7 +4046,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
- m_orphanage.AddChildrenToWorkSet(tx, peer->m_id);
+ m_orphanage.AddChildrenToWorkSet(tx);
pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
@@ -4045,9 +4058,6 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
AddToCompactExtraTransactions(removedTx);
}
-
- // Recursively process any orphan transactions that depended on this one
- ProcessOrphanTx(*peer);
}
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
{
@@ -4171,7 +4181,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::CMPCTBLOCK)
{
// Ignore cmpctblock received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
return;
}
@@ -4387,7 +4397,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::BLOCKTXN)
{
// Ignore blocktxn received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
return;
}
@@ -4462,7 +4472,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::HEADERS)
{
// Ignore headers received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
return;
}
@@ -4507,7 +4517,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::BLOCK)
{
// Ignore block received while importing
- if (fImporting || fReindex) {
+ if (m_chainman.m_blockman.LoadingBlocks()) {
LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
return;
}
@@ -4856,16 +4866,12 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
}
}
- bool has_more_orphans;
- {
- LOCK(cs_main);
- has_more_orphans = ProcessOrphanTx(*peer);
- }
+ const bool processed_orphan = ProcessOrphanTx(*peer);
if (pfrom->fDisconnect)
return false;
- if (has_more_orphans) return true;
+ if (processed_orphan) return true;
// this maintains the order of responses
// and prevents m_getdata_requests to grow unbounded
@@ -4911,6 +4917,12 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
LOCK(peer->m_getdata_requests_mutex);
if (!peer->m_getdata_requests.empty()) fMoreWork = true;
}
+ // Does this peer have an orphan ready to reconsider?
+ // (Note: we may have provided a parent for an orphan provided
+ // by another peer that was already processed; in that case,
+ // the extra work may not be noticed, possibly resulting in an
+ // unnecessary 100ms delay)
+ if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true;
} catch (const std::exception& e) {
LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
} catch (...) {
@@ -5092,7 +5104,7 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
if (now > m_stale_tip_check_time) {
// Check whether our tip is stale, and if so, allow using an extra
// outbound peer
- if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
+ if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
count_seconds(now - m_last_tip_update.load()));
m_connman.SetTryNewOutboundPeer(true);
@@ -5399,7 +5411,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
}
}
- if (!state.fSyncStarted && CanServeBlocks(*peer) && !fImporting && !fReindex) {
+ if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
const CBlockIndex* pindexStart = m_chainman.m_best_header;
@@ -5713,12 +5725,19 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
// Detect whether we're stalling
- if (state.m_stalling_since.count() && state.m_stalling_since < current_time - BLOCK_STALLING_TIMEOUT) {
+ auto stalling_timeout = m_block_stalling_timeout.load();
+ if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
pto->fDisconnect = true;
+ // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
+ // bandwidth is insufficient.
+ const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
+ if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
+ LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
+ }
return true;
}
// In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
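
Taken together, the hunks above give the stalling timeout a multiplicative-increase/multiplicative-decrease shape: it doubles (capped at 64s) each time a peer is disconnected for stalling, and decays by a factor of 0.85 back toward the 2s default each time a block connects. A small standalone model of that arithmetic, with the atomic compare-exchange bookkeeping left out:

```cpp
#include <algorithm>
#include <chrono>
#include <cstdio>

using namespace std::chrono_literals;

constexpr std::chrono::seconds TIMEOUT_DEFAULT{2s};
constexpr std::chrono::seconds TIMEOUT_MAX{64s};

// Called when a peer is disconnected for stalling: back off for the next peer.
std::chrono::seconds OnStallDisconnect(std::chrono::seconds current)
{
    return std::min(2 * current, TIMEOUT_MAX);
}

// Called when a block connects: decay slowly back toward the default.
std::chrono::seconds OnBlockConnected(std::chrono::seconds current)
{
    const auto decayed = std::chrono::duration_cast<std::chrono::seconds>(current * 0.85);
    return std::max(decayed, TIMEOUT_DEFAULT);
}

int main()
{
    auto t = TIMEOUT_DEFAULT;
    t = OnStallDisconnect(t); // 4s
    t = OnStallDisconnect(t); // 8s
    t = OnBlockConnected(t);  // 6s (8 * 0.85 = 6.8, truncated by the cast)
    std::printf("timeout now %llds\n", static_cast<long long>(t.count()));
}
```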
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 782b692d30..85ae8fab36 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -599,7 +599,7 @@ std::string OnionToString(Span<const uint8_t> addr)
return EncodeBase32(address) + ".onion";
}
-std::string CNetAddr::ToStringIP() const
+std::string CNetAddr::ToStringAddr() const
{
switch (m_net) {
case NET_IPV4:
@@ -622,11 +622,6 @@ std::string CNetAddr::ToStringIP() const
assert(false);
}
-std::string CNetAddr::ToString() const
-{
- return ToStringIP();
-}
-
bool operator==(const CNetAddr& a, const CNetAddr& b)
{
return a.m_net == b.m_net && a.m_addr == b.m_addr;
@@ -916,25 +911,17 @@ std::vector<unsigned char> CService::GetKey() const
return key;
}
-std::string CService::ToStringPort() const
+std::string CService::ToStringAddrPort() const
{
- return strprintf("%u", port);
-}
+ const auto port_str = strprintf("%u", port);
-std::string CService::ToStringIPPort() const
-{
if (IsIPv4() || IsTor() || IsI2P() || IsInternal()) {
- return ToStringIP() + ":" + ToStringPort();
+ return ToStringAddr() + ":" + port_str;
} else {
- return "[" + ToStringIP() + "]:" + ToStringPort();
+ return "[" + ToStringAddr() + "]:" + port_str;
}
}
-std::string CService::ToString() const
-{
- return ToStringIPPort();
-}
-
CSubNet::CSubNet():
valid(false)
{
@@ -1098,7 +1085,7 @@ std::string CSubNet::ToString() const
break;
}
- return network.ToString() + suffix;
+ return network.ToStringAddr() + suffix;
}
bool CSubNet::IsValid() const
@@ -1106,29 +1093,6 @@ bool CSubNet::IsValid() const
return valid;
}
-bool CSubNet::SanityCheck() const
-{
- switch (network.m_net) {
- case NET_IPV4:
- case NET_IPV6:
- break;
- case NET_ONION:
- case NET_I2P:
- case NET_CJDNS:
- return true;
- case NET_INTERNAL:
- case NET_UNROUTABLE:
- case NET_MAX:
- return false;
- }
-
- for (size_t x = 0; x < network.m_addr.size(); ++x) {
- if (network.m_addr[x] & ~netmask[x]) return false;
- }
-
- return true;
-}
-
bool operator==(const CSubNet& a, const CSubNet& b)
{
return a.valid == b.valid && a.network == b.network && !memcmp(a.netmask, b.netmask, 16);
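
The consolidated ToStringAddrPort keeps the long-standing formatting rule: plain host:port for IPv4, Tor, I2P and internal addresses, and [host]:port for IPv6 and CJDNS so the colons inside the address cannot be confused with the port separator. A tiny illustration of that rule with a hypothetical helper (not the Bitcoin Core function):

```cpp
#include <cstdint>
#include <cstdio>
#include <string>

// Hypothetical helper mirroring the bracket rule used by CService::ToStringAddrPort.
std::string FormatAddrPort(const std::string& addr, uint16_t port, bool needs_brackets)
{
    const std::string port_str = std::to_string(port);
    return needs_brackets ? "[" + addr + "]:" + port_str : addr + ":" + port_str;
}

int main()
{
    std::printf("%s\n", FormatAddrPort("203.0.113.7", 8333, /*needs_brackets=*/false).c_str());
    std::printf("%s\n", FormatAddrPort("2001:db8::1", 8333, /*needs_brackets=*/true).c_str());
}
```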
diff --git a/src/netaddress.h b/src/netaddress.h
index 7f782674d3..3d15b0b123 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -193,8 +193,7 @@ public:
bool IsAddrV1Compatible() const;
enum Network GetNetwork() const;
- std::string ToString() const;
- std::string ToStringIP() const;
+ std::string ToStringAddr() const;
bool GetInAddr(struct in_addr* pipv4Addr) const;
Network GetNetClass() const;
@@ -476,8 +475,6 @@ protected:
/// Is this value valid? (only used to signal parse errors)
bool valid;
- bool SanityCheck() const;
-
public:
/**
* Construct an invalid subnet (empty, `Match()` always returns false).
@@ -536,9 +533,7 @@ public:
friend bool operator!=(const CService& a, const CService& b) { return !(a == b); }
friend bool operator<(const CService& a, const CService& b);
std::vector<unsigned char> GetKey() const;
- std::string ToString() const;
- std::string ToStringPort() const;
- std::string ToStringIPPort() const;
+ std::string ToStringAddrPort() const;
CService(const struct in6_addr& ipv6Addr, uint16_t port);
explicit CService(const struct sockaddr_in6& addr);
diff --git a/src/netbase.cpp b/src/netbase.cpp
index fac4b3b5d5..797f1e17f2 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -488,7 +488,7 @@ std::unique_ptr<Sock> CreateSockTCP(const CService& address_family)
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
if (!address_family.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
- LogPrintf("Cannot create socket for %s: unsupported network\n", address_family.ToString());
+ LogPrintf("Cannot create socket for %s: unsupported network\n", address_family.ToStringAddrPort());
return nullptr;
}
@@ -549,11 +549,11 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
if (sock.Get() == INVALID_SOCKET) {
- LogPrintf("Cannot connect to %s: invalid socket\n", addrConnect.ToString());
+ LogPrintf("Cannot connect to %s: invalid socket\n", addrConnect.ToStringAddrPort());
return false;
}
if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
- LogPrintf("Cannot connect to %s: unsupported network\n", addrConnect.ToString());
+ LogPrintf("Cannot connect to %s: unsupported network\n", addrConnect.ToStringAddrPort());
return false;
}
@@ -570,11 +570,11 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT
Sock::Event occurred;
if (!sock.Wait(std::chrono::milliseconds{nTimeout}, requested, &occurred)) {
LogPrintf("wait for connect to %s failed: %s\n",
- addrConnect.ToString(),
+ addrConnect.ToStringAddrPort(),
NetworkErrorString(WSAGetLastError()));
return false;
} else if (occurred == 0) {
- LogPrint(BCLog::NET, "connection attempt to %s timed out\n", addrConnect.ToString());
+ LogPrint(BCLog::NET, "connection attempt to %s timed out\n", addrConnect.ToStringAddrPort());
return false;
}
@@ -586,13 +586,13 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT
socklen_t sockerr_len = sizeof(sockerr);
if (sock.GetSockOpt(SOL_SOCKET, SO_ERROR, (sockopt_arg_type)&sockerr, &sockerr_len) ==
SOCKET_ERROR) {
- LogPrintf("getsockopt() for %s failed: %s\n", addrConnect.ToString(), NetworkErrorString(WSAGetLastError()));
+ LogPrintf("getsockopt() for %s failed: %s\n", addrConnect.ToStringAddrPort(), NetworkErrorString(WSAGetLastError()));
return false;
}
if (sockerr != 0) {
LogConnectFailure(manual_connection,
"connect() to %s failed after wait: %s",
- addrConnect.ToString(),
+ addrConnect.ToStringAddrPort(),
NetworkErrorString(sockerr));
return false;
}
@@ -603,7 +603,7 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT
else
#endif
{
- LogConnectFailure(manual_connection, "connect() to %s failed: %s", addrConnect.ToString(), NetworkErrorString(WSAGetLastError()));
+ LogConnectFailure(manual_connection, "connect() to %s failed: %s", addrConnect.ToStringAddrPort(), NetworkErrorString(WSAGetLastError()));
return false;
}
}
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index b8a57acf80..a81099a26c 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -352,7 +352,7 @@ bool BlockManager::LoadBlockIndexDB(const Consensus::Params& consensus_params)
}
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
FlatFilePos pos(*it, 0);
- if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
+ if (AutoFile{OpenBlockFile(pos, true)}.IsNull()) {
return false;
}
}
@@ -454,13 +454,13 @@ CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
- CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
+ AutoFile fileout{OpenUndoFile(pos)};
if (fileout.IsNull()) {
return error("%s: OpenUndoFile failed", __func__);
}
// Write index header
- unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
+ unsigned int nSize = GetSerializeSize(blockundo, CLIENT_VERSION);
fileout << messageStart << nSize;
// Write undo data
@@ -489,14 +489,14 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
}
// Open history file to read
- CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
+ AutoFile filein{OpenUndoFile(pos, true)};
if (filein.IsNull()) {
return error("%s: OpenUndoFile failed", __func__);
}
// Read block
uint256 hashChecksum;
- CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
+ HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, cf. commit d342424301013ec47dc146a4beb49d5c9319d80a
try {
verifier << pindex->pprev->GetBlockHash();
verifier >> blockundo;
@@ -768,7 +768,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
{
FlatFilePos hpos = pos;
hpos.nPos -= 8; // Seek back 8 bytes for meta header
- CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
+ AutoFile filein{OpenBlockFile(hpos, true)};
if (filein.IsNull()) {
return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
}
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index cdf667c754..b6007897df 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -48,10 +48,7 @@ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = CMessageHeader::MESSAG
extern std::atomic_bool fImporting;
extern std::atomic_bool fReindex;
-/** Pruning-related variables and constants */
-/** True if we're running in -prune mode. */
extern bool fPruneMode;
-/** Number of bytes of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
// Because validation code takes pointers to the map's CBlockIndex objects, if
@@ -176,6 +173,17 @@ public:
/** Store block on disk. If dbp is not nullptr, then it provides the known position of the block within a block file on disk. */
FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp);
+ /** Whether running in -prune mode. */
+ [[nodiscard]] bool IsPruneMode() const { return fPruneMode; }
+
+ /** Attempt to stay below this number of bytes of block files. */
+ [[nodiscard]] uint64_t GetPruneTarget() const { return nPruneTarget; }
+
+ [[nodiscard]] bool LoadingBlocks() const
+ {
+ return fImporting || fReindex;
+ }
+
/** Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage();
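
Call sites that used to read the free globals fImporting, fReindex and fPruneMode now go through these BlockManager accessors, which keeps the flags behind a single object even though, per the header above, they are still backed by globals for now. A hedged sketch of that accessor-over-globals shape with illustrative names:

```cpp
#include <atomic>
#include <cstdint>

// Globals kept while call sites migrate (illustrative, not the real declarations).
std::atomic_bool g_importing{false};
std::atomic_bool g_reindex{false};
bool g_prune_mode{false};
uint64_t g_prune_target{0};

class BlockStore {
public:
    bool LoadingBlocks() const { return g_importing || g_reindex; }
    bool IsPruneMode() const { return g_prune_mode; }
    uint64_t GetPruneTarget() const { return g_prune_target; }
};

// Callers can now be written against the object instead of the globals:
bool ReadyToServeHeaders(const BlockStore& store) { return !store.LoadingBlocks(); }
```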
diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp
index 99dc319ec0..41c0ff2118 100644
--- a/src/node/chainstate.cpp
+++ b/src/node/chainstate.cpp
@@ -44,10 +44,10 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
if (chainman.MinimumChainWork() < UintToArith256(chainman.GetConsensus().nMinimumChainWork)) {
LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainman.GetConsensus().nMinimumChainWork.GetHex());
}
- if (nPruneTarget == std::numeric_limits<uint64_t>::max()) {
+ if (chainman.m_blockman.GetPruneTarget() == std::numeric_limits<uint64_t>::max()) {
LogPrintf("Block pruning enabled. Use RPC call pruneblockchain(height) to manually prune block and undo files.\n");
- } else if (nPruneTarget) {
- LogPrintf("Prune configured to target %u MiB on disk for block and undo files.\n", nPruneTarget / 1024 / 1024);
+ } else if (chainman.m_blockman.GetPruneTarget()) {
+ LogPrintf("Prune configured to target %u MiB on disk for block and undo files.\n", chainman.m_blockman.GetPruneTarget() / 1024 / 1024);
}
LOCK(cs_main);
@@ -64,7 +64,12 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
// new CBlockTreeDB tries to delete the existing file, which
// fails if it's still open from the previous loop. Close it first:
pblocktree.reset();
- pblocktree.reset(new CBlockTreeDB(cache_sizes.block_tree_db, options.block_tree_db_in_memory, options.reindex));
+ pblocktree = std::make_unique<CBlockTreeDB>(DBParams{
+ .path = chainman.m_options.datadir / "blocks" / "index",
+ .cache_bytes = static_cast<size_t>(cache_sizes.block_tree_db),
+ .memory_only = options.block_tree_db_in_memory,
+ .wipe_data = options.reindex,
+ .options = chainman.m_options.block_tree_db});
if (options.reindex) {
pblocktree->WriteReindexing(true);
@@ -187,12 +192,23 @@ ChainstateLoadResult VerifyLoadedChainstate(ChainstateManager& chainman, const C
"Only rebuild the block database if you are sure that your computer's date and time are correct")};
}
- if (!CVerifyDB().VerifyDB(
- *chainstate, chainman.GetConsensus(), chainstate->CoinsDB(),
- options.check_level,
- options.check_blocks)) {
+ VerifyDBResult result = CVerifyDB().VerifyDB(
+ *chainstate, chainman.GetConsensus(), chainstate->CoinsDB(),
+ options.check_level,
+ options.check_blocks);
+ switch (result) {
+ case VerifyDBResult::SUCCESS:
+ case VerifyDBResult::INTERRUPTED:
+ case VerifyDBResult::SKIPPED_MISSING_BLOCKS:
+ break;
+ case VerifyDBResult::CORRUPTED_BLOCK_DB:
return {ChainstateLoadStatus::FAILURE, _("Corrupted block database detected")};
- }
+ case VerifyDBResult::SKIPPED_L3_CHECKS:
+ if (options.require_full_verification) {
+ return {ChainstateLoadStatus::FAILURE_INSUFFICIENT_DBCACHE, _("Insufficient dbcache for block verification")};
+ }
+ break;
+ } // no default case, so the compiler can warn about missing cases
}
}
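
The replacement switch deliberately omits a default case, so a newly added VerifyDBResult value triggers a -Wswitch warning at every handler that has not been updated. A minimal example of the same idiom with an invented enum and status type:

```cpp
enum class CheckResult { Success, Interrupted, SkippedChecks, Corrupted };
enum class LoadStatus { Ok, Failure, NeedsMoreCache };

LoadStatus MapResult(CheckResult r, bool require_full_checks)
{
    switch (r) {
    case CheckResult::Success:
    case CheckResult::Interrupted:
        return LoadStatus::Ok;
    case CheckResult::Corrupted:
        return LoadStatus::Failure;
    case CheckResult::SkippedChecks:
        return require_full_checks ? LoadStatus::NeedsMoreCache : LoadStatus::Ok;
    } // no default case, so the compiler can warn about missing cases
    return LoadStatus::Failure; // unreachable; silences -Wreturn-type
}
```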
diff --git a/src/node/chainstate.h b/src/node/chainstate.h
index d3c7656bf2..7838a62d0c 100644
--- a/src/node/chainstate.h
+++ b/src/node/chainstate.h
@@ -25,6 +25,7 @@ struct ChainstateLoadOptions {
bool reindex{false};
bool reindex_chainstate{false};
bool prune{false};
+ bool require_full_verification{true};
int64_t check_blocks{DEFAULT_CHECKBLOCKS};
int64_t check_level{DEFAULT_CHECKLEVEL};
std::function<bool()> check_interrupt;
@@ -35,7 +36,13 @@ struct ChainstateLoadOptions {
//! case, and treat other cases as errors. More complex applications may want to
//! try reindexing in the generic failure case, and pass an interrupt callback
//! and exit cleanly in the interrupted case.
-enum class ChainstateLoadStatus { SUCCESS, FAILURE, FAILURE_INCOMPATIBLE_DB, INTERRUPTED };
+enum class ChainstateLoadStatus {
+ SUCCESS,
+ FAILURE,
+ FAILURE_INCOMPATIBLE_DB,
+ FAILURE_INSUFFICIENT_DBCACHE,
+ INTERRUPTED,
+};
//! Chainstate load status code and optional error string.
using ChainstateLoadResult = std::tuple<ChainstateLoadStatus, bilingual_str>;
diff --git a/src/node/chainstatemanager_args.cpp b/src/node/chainstatemanager_args.cpp
index b0d929626b..9801e6e959 100644
--- a/src/node/chainstatemanager_args.cpp
+++ b/src/node/chainstatemanager_args.cpp
@@ -5,6 +5,9 @@
#include <node/chainstatemanager_args.h>
#include <arith_uint256.h>
+#include <kernel/chainstatemanager_opts.h>
+#include <node/coins_view_args.h>
+#include <node/database_args.h>
#include <tinyformat.h>
#include <uint256.h>
#include <util/strencodings.h>
@@ -34,6 +37,10 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, Chains
if (auto value{args.GetIntArg("-maxtipage")}) opts.max_tip_age = std::chrono::seconds{*value};
+ ReadDatabaseArgs(args, opts.block_tree_db);
+ ReadDatabaseArgs(args, opts.coins_db);
+ ReadCoinsViewArgs(args, opts.coins_view);
+
return std::nullopt;
}
} // namespace node
diff --git a/src/node/coins_view_args.cpp b/src/node/coins_view_args.cpp
new file mode 100644
index 0000000000..67c9b8dbac
--- /dev/null
+++ b/src/node/coins_view_args.cpp
@@ -0,0 +1,16 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <node/coins_view_args.h>
+
+#include <txdb.h>
+#include <util/system.h>
+
+namespace node {
+void ReadCoinsViewArgs(const ArgsManager& args, CoinsViewOptions& options)
+{
+ if (auto value = args.GetIntArg("-dbbatchsize")) options.batch_write_bytes = *value;
+ if (auto value = args.GetIntArg("-dbcrashratio")) options.simulate_crash_ratio = *value;
+}
+} // namespace node
diff --git a/src/node/coins_view_args.h b/src/node/coins_view_args.h
new file mode 100644
index 0000000000..71a7a671fd
--- /dev/null
+++ b/src/node/coins_view_args.h
@@ -0,0 +1,15 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_COINS_VIEW_ARGS_H
+#define BITCOIN_NODE_COINS_VIEW_ARGS_H
+
+class ArgsManager;
+struct CoinsViewOptions;
+
+namespace node {
+void ReadCoinsViewArgs(const ArgsManager& args, CoinsViewOptions& options);
+} // namespace node
+
+#endif // BITCOIN_NODE_COINS_VIEW_ARGS_H
diff --git a/src/node/database_args.cpp b/src/node/database_args.cpp
new file mode 100644
index 0000000000..2c53b4b47e
--- /dev/null
+++ b/src/node/database_args.cpp
@@ -0,0 +1,18 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <node/database_args.h>
+
+#include <dbwrapper.h>
+#include <util/system.h>
+
+namespace node {
+void ReadDatabaseArgs(const ArgsManager& args, DBOptions& options)
+{
+ // Settings here apply to all databases (chainstate, blocks, and index
+ // databases), but it'd be easy to parse database-specific options by adding
+ // a database_type string or enum parameter to this function.
+ if (auto value = args.GetBoolArg("-forcecompactdb")) options.force_compact = *value;
+}
+} // namespace node
diff --git a/src/node/database_args.h b/src/node/database_args.h
new file mode 100644
index 0000000000..001976f219
--- /dev/null
+++ b/src/node/database_args.h
@@ -0,0 +1,15 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_DATABASE_ARGS_H
+#define BITCOIN_NODE_DATABASE_ARGS_H
+
+class ArgsManager;
+struct DBOptions;
+
+namespace node {
+void ReadDatabaseArgs(const ArgsManager& args, DBOptions& options);
+} // namespace node
+
+#endif // BITCOIN_NODE_DATABASE_ARGS_H
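
Both new helpers follow the same pattern: each optional command-line flag overwrites its options field only when the user actually supplied it, leaving the compiled-in default untouched otherwise. A simplified standalone version of that pattern, where the Args class is a toy map rather than the real ArgsManager:

```cpp
#include <cstdint>
#include <map>
#include <optional>
#include <string>

struct DbOptions {
    bool force_compact{false};
    int64_t batch_write_bytes{16 << 20};
};

// Toy stand-in for ArgsManager::GetBoolArg / GetIntArg.
class Args {
public:
    std::map<std::string, std::string> values;

    std::optional<bool> GetBool(const std::string& key) const
    {
        const auto it = values.find(key);
        if (it == values.end()) return std::nullopt;
        return it->second != "0";
    }
    std::optional<int64_t> GetInt(const std::string& key) const
    {
        const auto it = values.find(key);
        if (it == values.end()) return std::nullopt;
        return std::stoll(it->second);
    }
};

// Apply only the flags the user actually set; defaults stay otherwise.
void ReadDbArgs(const Args& args, DbOptions& opts)
{
    if (auto v = args.GetBool("-forcecompactdb")) opts.force_compact = *v;
    if (auto v = args.GetInt("-dbbatchsize")) opts.batch_write_bytes = *v;
}
```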
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 4f3dc99bbf..9c2db5ff0c 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -295,8 +295,7 @@ public:
bool isInitialBlockDownload() override {
return chainman().ActiveChainstate().IsInitialBlockDownload();
}
- bool getReindex() override { return node::fReindex; }
- bool getImporting() override { return node::fImporting; }
+ bool isLoadingBlocks() override { return node::fReindex || node::fImporting; }
void setNetworkActive(bool active) override
{
if (m_context->connman) {
@@ -711,8 +710,9 @@ public:
LOCK(::cs_main);
return chainman().m_blockman.m_have_pruned;
}
- bool isReadyToBroadcast() override { return !node::fImporting && !node::fReindex && !isInitialBlockDownload(); }
- bool isInitialBlockDownload() override {
+ bool isReadyToBroadcast() override { return !chainman().m_blockman.LoadingBlocks() && !isInitialBlockDownload(); }
+ bool isInitialBlockDownload() override
+ {
return chainman().ActiveChainstate().IsInitialBlockDownload();
}
bool shutdownRequested() override { return ShutdownRequested(); }
diff --git a/src/node/miner.cpp b/src/node/miner.cpp
index c2b6fd1dc3..c7bc9a9a3d 100644
--- a/src/node/miner.cpp
+++ b/src/node/miner.cpp
@@ -56,34 +56,27 @@ void RegenerateCommitments(CBlock& block, ChainstateManager& chainman)
block.hashMerkleRoot = BlockMerkleRoot(block);
}
-BlockAssembler::Options::Options()
+static BlockAssembler::Options ClampOptions(BlockAssembler::Options options)
{
- blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE);
- nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
- test_block_validity = true;
+ // Limit weight to between 4K and DEFAULT_BLOCK_MAX_WEIGHT for sanity:
+ options.nBlockMaxWeight = std::clamp<size_t>(options.nBlockMaxWeight, 4000, DEFAULT_BLOCK_MAX_WEIGHT);
+ return options;
}
BlockAssembler::BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool, const Options& options)
- : test_block_validity{options.test_block_validity},
- chainparams{chainstate.m_chainman.GetParams()},
- m_mempool(mempool),
- m_chainstate(chainstate)
+ : chainparams{chainstate.m_chainman.GetParams()},
+ m_mempool{mempool},
+ m_chainstate{chainstate},
+ m_options{ClampOptions(options)}
{
- blockMinFeeRate = options.blockMinFeeRate;
- // Limit weight to between 4K and MAX_BLOCK_WEIGHT-4K for sanity:
- nBlockMaxWeight = std::max<size_t>(4000, std::min<size_t>(MAX_BLOCK_WEIGHT - 4000, options.nBlockMaxWeight));
}
-void ApplyArgsManOptions(const ArgsManager& gArgs, BlockAssembler::Options& options)
+void ApplyArgsManOptions(const ArgsManager& args, BlockAssembler::Options& options)
{
// Block resource limits
- // If -blockmaxweight is not given, limit to DEFAULT_BLOCK_MAX_WEIGHT
- options.nBlockMaxWeight = gArgs.GetIntArg("-blockmaxweight", DEFAULT_BLOCK_MAX_WEIGHT);
- if (gArgs.IsArgSet("-blockmintxfee")) {
- std::optional<CAmount> parsed = ParseMoney(gArgs.GetArg("-blockmintxfee", ""));
- options.blockMinFeeRate = CFeeRate{parsed.value_or(DEFAULT_BLOCK_MIN_TX_FEE)};
- } else {
- options.blockMinFeeRate = CFeeRate{DEFAULT_BLOCK_MIN_TX_FEE};
+ options.nBlockMaxWeight = args.GetIntArg("-blockmaxweight", options.nBlockMaxWeight);
+ if (const auto blockmintxfee{args.GetArg("-blockmintxfee")}) {
+ if (const auto parsed{ParseMoney(*blockmintxfee)}) options.blockMinFeeRate = CFeeRate{*parsed};
}
}
static BlockAssembler::Options ConfiguredOptions()
@@ -176,7 +169,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]);
BlockValidationState state;
- if (test_block_validity && !TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev,
+ if (m_options.test_block_validity && !TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev,
GetAdjustedTime, /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) {
throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
}
@@ -205,7 +198,7 @@ void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost) const
{
// TODO: switch to weight-based accounting for packages instead of vsize-based accounting.
- if (nBlockWeight + WITNESS_SCALE_FACTOR * packageSize >= nBlockMaxWeight) {
+ if (nBlockWeight + WITNESS_SCALE_FACTOR * packageSize >= m_options.nBlockMaxWeight) {
return false;
}
if (nBlockSigOpsCost + packageSigOpsCost >= MAX_BLOCK_SIGOPS_COST) {
@@ -377,7 +370,7 @@ void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSele
packageSigOpsCost = modit->nSigOpCostWithAncestors;
}
- if (packageFees < blockMinFeeRate.GetFee(packageSize)) {
+ if (packageFees < m_options.blockMinFeeRate.GetFee(packageSize)) {
// Everything else we might consider has a lower fee rate
return;
}
@@ -394,7 +387,7 @@ void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSele
++nConsecutiveFailed;
if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
- nBlockMaxWeight - 4000) {
+ m_options.nBlockMaxWeight - 4000) {
// Give up if we're close to full and haven't succeeded in a while
break;
}
diff --git a/src/node/miner.h b/src/node/miner.h
index ea9e470a64..f1ccffff55 100644
--- a/src/node/miner.h
+++ b/src/node/miner.h
@@ -6,6 +6,7 @@
#ifndef BITCOIN_NODE_MINER_H
#define BITCOIN_NODE_MINER_H
+#include <policy/policy.h>
#include <primitives/block.h>
#include <txmempool.h>
@@ -132,13 +133,6 @@ private:
// The constructed block template
std::unique_ptr<CBlockTemplate> pblocktemplate;
- // Configuration parameters for the block size
- unsigned int nBlockMaxWeight;
- CFeeRate blockMinFeeRate;
-
- // Whether to call TestBlockValidity() at the end of CreateNewBlock().
- const bool test_block_validity;
-
// Information on the current status of the block
uint64_t nBlockWeight;
uint64_t nBlockTx;
@@ -156,10 +150,11 @@ private:
public:
struct Options {
- Options();
- size_t nBlockMaxWeight;
- CFeeRate blockMinFeeRate;
- bool test_block_validity;
+ // Configuration parameters for the block size
+ size_t nBlockMaxWeight{DEFAULT_BLOCK_MAX_WEIGHT};
+ CFeeRate blockMinFeeRate{DEFAULT_BLOCK_MIN_TX_FEE};
+ // Whether to call TestBlockValidity() at the end of CreateNewBlock().
+ bool test_block_validity{true};
};
explicit BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool);
@@ -172,6 +167,8 @@ public:
inline static std::optional<int64_t> m_last_block_weight{};
private:
+ const Options m_options;
+
// utility functions
/** Clear the block's state and prepare for assembling a new block */
void resetBlock();
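
The Options struct now carries its defaults as in-class initializers and the BlockAssembler constructor clamps the weight once via ClampOptions, so every construction path ends up inside the same sanity bounds. A small sketch of that construct-then-clamp shape with illustrative names and bounds:

```cpp
#include <algorithm>
#include <cstddef>

struct AssemblerOptions {
    std::size_t max_weight{3'996'000}; // illustrative bound standing in for DEFAULT_BLOCK_MAX_WEIGHT
    bool test_validity{true};
};

static AssemblerOptions Clamp(AssemblerOptions o)
{
    // Keep the requested weight within a sane range, whatever the caller passed.
    o.max_weight = std::clamp<std::size_t>(o.max_weight, 4'000, 3'996'000);
    return o;
}

class Assembler {
public:
    explicit Assembler(const AssemblerOptions& options) : m_options{Clamp(options)} {}
    std::size_t MaxWeight() const { return m_options.max_weight; }

private:
    const AssemblerOptions m_options;
};
```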
diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp
index bab1b75211..cccf95e552 100644
--- a/src/node/utxo_snapshot.cpp
+++ b/src/node/utxo_snapshot.cpp
@@ -7,12 +7,17 @@
#include <fs.h>
#include <logging.h>
#include <streams.h>
+#include <sync.h>
+#include <tinyformat.h>
+#include <txdb.h>
#include <uint256.h>
#include <util/system.h>
#include <validation.h>
+#include <cassert>
#include <cstdio>
#include <optional>
+#include <string>
namespace node {
diff --git a/src/node/utxo_snapshot.h b/src/node/utxo_snapshot.h
index b5ed9ef9fe..c5c018c9e8 100644
--- a/src/node/utxo_snapshot.h
+++ b/src/node/utxo_snapshot.h
@@ -7,13 +7,16 @@
#define BITCOIN_NODE_UTXO_SNAPSHOT_H
#include <fs.h>
-#include <uint256.h>
+#include <kernel/cs_main.h>
#include <serialize.h>
-#include <validation.h>
+#include <sync.h>
+#include <uint256.h>
+#include <cstdint>
#include <optional>
+#include <string_view>
-extern RecursiveMutex cs_main;
+class Chainstate;
namespace node {
//! Metadata describing a serialized version of a UTXO set from which an
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index e4eb932e5c..d244de1bb2 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -528,7 +528,7 @@ bool CBlockPolicyEstimator::_removeTx(const uint256& hash, bool inBlock)
}
CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath)
- : m_estimation_filepath{estimation_filepath}, nBestSeenHeight{0}, firstRecordedHeight{0}, historicalFirst{0}, historicalBest{0}, trackedTxs{0}, untrackedTxs{0}
+ : m_estimation_filepath{estimation_filepath}
{
static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
size_t bucketIndex = 0;
diff --git a/src/policy/fees.h b/src/policy/fees.h
index dd4f031180..1c24b8c7c3 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -242,16 +242,16 @@ public:
private:
mutable Mutex m_cs_fee_estimator;
- unsigned int nBestSeenHeight GUARDED_BY(m_cs_fee_estimator);
- unsigned int firstRecordedHeight GUARDED_BY(m_cs_fee_estimator);
- unsigned int historicalFirst GUARDED_BY(m_cs_fee_estimator);
- unsigned int historicalBest GUARDED_BY(m_cs_fee_estimator);
+ unsigned int nBestSeenHeight GUARDED_BY(m_cs_fee_estimator){0};
+ unsigned int firstRecordedHeight GUARDED_BY(m_cs_fee_estimator){0};
+ unsigned int historicalFirst GUARDED_BY(m_cs_fee_estimator){0};
+ unsigned int historicalBest GUARDED_BY(m_cs_fee_estimator){0};
struct TxStatsInfo
{
- unsigned int blockHeight;
- unsigned int bucketIndex;
- TxStatsInfo() : blockHeight(0), bucketIndex(0) {}
+ unsigned int blockHeight{0};
+ unsigned int bucketIndex{0};
+ TxStatsInfo() {}
};
// map of txids to information about that transaction
@@ -262,8 +262,8 @@ private:
std::unique_ptr<TxConfirmStats> shortStats PT_GUARDED_BY(m_cs_fee_estimator);
std::unique_ptr<TxConfirmStats> longStats PT_GUARDED_BY(m_cs_fee_estimator);
- unsigned int trackedTxs GUARDED_BY(m_cs_fee_estimator);
- unsigned int untrackedTxs GUARDED_BY(m_cs_fee_estimator);
+ unsigned int trackedTxs GUARDED_BY(m_cs_fee_estimator){0};
+ unsigned int untrackedTxs GUARDED_BY(m_cs_fee_estimator){0};
std::vector<double> buckets GUARDED_BY(m_cs_fee_estimator); // The upper-bound of the range for the bucket (inclusive)
std::map<double, unsigned int> bucketMap GUARDED_BY(m_cs_fee_estimator); // Map of bucket upper-bound to index into all vectors by bucket
diff --git a/src/psbt.cpp b/src/psbt.cpp
index 50ccd9e2c0..fe45f2318c 100644
--- a/src/psbt.cpp
+++ b/src/psbt.cpp
@@ -132,6 +132,18 @@ void PSBTInput::FillSignatureData(SignatureData& sigdata) const
for (const auto& [pubkey, leaf_origin] : m_tap_bip32_paths) {
sigdata.taproot_misc_pubkeys.emplace(pubkey, leaf_origin);
}
+ for (const auto& [hash, preimage] : ripemd160_preimages) {
+ sigdata.ripemd160_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
+ for (const auto& [hash, preimage] : sha256_preimages) {
+ sigdata.sha256_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
+ for (const auto& [hash, preimage] : hash160_preimages) {
+ sigdata.hash160_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
+ for (const auto& [hash, preimage] : hash256_preimages) {
+ sigdata.hash256_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
}
void PSBTInput::FromSignatureData(const SignatureData& sigdata)
diff --git a/src/psbt.h b/src/psbt.h
index 40d69cd454..c497584f36 100644
--- a/src/psbt.h
+++ b/src/psbt.h
@@ -206,7 +206,7 @@ struct PSBTInput
// Taproot fields
std::vector<unsigned char> m_tap_key_sig;
std::map<std::pair<XOnlyPubKey, uint256>, std::vector<unsigned char>> m_tap_script_sigs;
- std::map<std::pair<CScript, int>, std::set<std::vector<unsigned char>, ShortestVectorFirstComparator>> m_tap_scripts;
+ std::map<std::pair<std::vector<unsigned char>, int>, std::set<std::vector<unsigned char>, ShortestVectorFirstComparator>> m_tap_scripts;
std::map<XOnlyPubKey, std::pair<std::set<uint256>, KeyOriginInfo>> m_tap_bip32_paths;
XOnlyPubKey m_tap_internal_key;
uint256 m_tap_merkle_root;
@@ -621,7 +621,7 @@ struct PSBTInput
}
uint8_t leaf_ver = script_v.back();
script_v.pop_back();
- const auto leaf_script = std::make_pair(CScript(script_v.begin(), script_v.end()), (int)leaf_ver);
+ const auto leaf_script = std::make_pair(script_v, (int)leaf_ver);
m_tap_scripts[leaf_script].insert(std::vector<unsigned char>(key.begin() + 1, key.end()));
break;
}
@@ -713,7 +713,7 @@ struct PSBTOutput
CScript witness_script;
std::map<CPubKey, KeyOriginInfo> hd_keypaths;
XOnlyPubKey m_tap_internal_key;
- std::vector<std::tuple<uint8_t, uint8_t, CScript>> m_tap_tree;
+ std::vector<std::tuple<uint8_t, uint8_t, std::vector<unsigned char>>> m_tap_tree;
std::map<XOnlyPubKey, std::pair<std::set<uint256>, KeyOriginInfo>> m_tap_bip32_paths;
std::map<std::vector<unsigned char>, std::vector<unsigned char>> unknown;
std::set<PSBTProprietary> m_proprietary;
@@ -864,7 +864,7 @@ struct PSBTOutput
while (!s_tree.empty()) {
uint8_t depth;
uint8_t leaf_ver;
- CScript script;
+ std::vector<unsigned char> script;
s_tree >> depth;
s_tree >> leaf_ver;
s_tree >> script;
@@ -889,7 +889,7 @@ struct PSBTOutput
} else if (key.size() != 33) {
throw std::ios_base::failure("Output Taproot BIP32 keypath key is not at 33 bytes");
}
- XOnlyPubKey xonly(uint256({key.begin() + 1, key.begin() + 33}));
+ XOnlyPubKey xonly(uint256(Span<uint8_t>(key).last(32)));
std::set<uint256> leaf_hashes;
uint64_t value_len = ReadCompactSize(s);
size_t before_hashes = s.size();
@@ -1164,7 +1164,7 @@ struct PartiallySignedTransaction
// Make sure that we got an unsigned tx
if (!tx) {
- throw std::ios_base::failure("No unsigned transcation was provided");
+ throw std::ios_base::failure("No unsigned transaction was provided");
}
// Read input data
diff --git a/src/qt/askpassphrasedialog.cpp b/src/qt/askpassphrasedialog.cpp
index d15aba5cdd..0a96be038b 100644
--- a/src/qt/askpassphrasedialog.cpp
+++ b/src/qt/askpassphrasedialog.cpp
@@ -89,11 +89,10 @@ void AskPassphraseDialog::accept()
oldpass.reserve(MAX_PASSPHRASE_SIZE);
newpass1.reserve(MAX_PASSPHRASE_SIZE);
newpass2.reserve(MAX_PASSPHRASE_SIZE);
- // TODO: get rid of this .c_str() by implementing SecureString::operator=(std::string)
- // Alternately, find a way to make this input mlock()'d to begin with.
- oldpass.assign(ui->passEdit1->text().toStdString().c_str());
- newpass1.assign(ui->passEdit2->text().toStdString().c_str());
- newpass2.assign(ui->passEdit3->text().toStdString().c_str());
+
+ oldpass.assign(std::string_view{ui->passEdit1->text().toStdString()});
+ newpass1.assign(std::string_view{ui->passEdit2->text().toStdString()});
+ newpass2.assign(std::string_view{ui->passEdit3->text().toStdString()});
secureClearPassFields();
@@ -154,8 +153,19 @@ void AskPassphraseDialog::accept()
case Unlock:
try {
if (!model->setWalletLocked(false, oldpass)) {
- QMessageBox::critical(this, tr("Wallet unlock failed"),
- tr("The passphrase entered for the wallet decryption was incorrect."));
+ // Check if the passphrase has a null character (see #27067 for details)
+ if (oldpass.find('\0') == std::string::npos) {
+ QMessageBox::critical(this, tr("Wallet unlock failed"),
+ tr("The passphrase entered for the wallet decryption was incorrect."));
+ } else {
+ QMessageBox::critical(this, tr("Wallet unlock failed"),
+ tr("The passphrase entered for the wallet decryption is incorrect. "
+ "It contains a null character (ie - a zero byte). "
+ "If the passphrase was set with a version of this software prior to 25.0, "
+ "please try again with only the characters up to — but not including — "
+ "the first null character. If this is successful, please set a new "
+ "passphrase to avoid this issue in the future."));
+ }
} else {
QDialog::accept(); // Success
}
@@ -174,8 +184,18 @@ void AskPassphraseDialog::accept()
}
else
{
- QMessageBox::critical(this, tr("Wallet encryption failed"),
- tr("The passphrase entered for the wallet decryption was incorrect."));
+ // Check if the old passphrase had a null character (see #27067 for details)
+ if (oldpass.find('\0') == std::string::npos) {
+ QMessageBox::critical(this, tr("Passphrase change failed"),
+ tr("The passphrase entered for the wallet decryption was incorrect."));
+ } else {
+ QMessageBox::critical(this, tr("Passphrase change failed"),
+ tr("The old passphrase entered for the wallet decryption is incorrect. "
+ "It contains a null character (ie - a zero byte). "
+ "If the passphrase was set with a version of this software prior to 25.0, "
+ "please try again with only the characters up to — but not including — "
+ "the first null character."));
+ }
}
}
else
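
The new unlock and change-passphrase error paths above direct the user to retry with only the characters before the first null byte. For illustration, a minimal standalone sketch of that truncation step (the helper name is hypothetical, not part of this diff):

#include <string>

// Keep only the characters before the first embedded NUL, which is what the
// dialog text asks the user to retype if the old passphrase contained one.
std::string TruncateAtFirstNull(const std::string& pass)
{
    const auto pos = pass.find('\0');
    return pos == std::string::npos ? pass : pass.substr(0, pos);
}
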
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index a7ffd367d7..c025d4e3aa 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -647,6 +647,8 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel, interfaces::BlockAndH
// initialize the disable state of the tray icon with the current value in the model.
trayIcon->setVisible(optionsModel->getShowTrayIcon());
}
+
+ m_mask_values_action->setChecked(_clientModel->getOptionsModel()->getOption(OptionsModel::OptionID::MaskValues).toBool());
} else {
if(trayIconMenu)
{
@@ -1071,7 +1073,7 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer
statusBar()->clearMessage();
// Acquire current block source
- enum BlockSource blockSource = clientModel->getBlockSource();
+ BlockSource blockSource{clientModel->getBlockSource()};
switch (blockSource) {
case BlockSource::NETWORK:
if (synctype == SyncType::HEADER_PRESYNC) {
@@ -1091,9 +1093,6 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer
progressBarLabel->setText(tr("Processing blocks on disk…"));
}
break;
- case BlockSource::REINDEX:
- progressBarLabel->setText(tr("Reindexing blocks on disk…"));
- break;
case BlockSource::NONE:
if (synctype != SyncType::BLOCK_SYNC) {
return;
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index c0d1a0e226..8411ec4696 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -146,15 +146,10 @@ uint256 ClientModel::getBestBlockHash()
return m_cached_tip_blocks;
}
-enum BlockSource ClientModel::getBlockSource() const
+BlockSource ClientModel::getBlockSource() const
{
- if (m_node.getReindex())
- return BlockSource::REINDEX;
- else if (m_node.getImporting())
- return BlockSource::DISK;
- else if (getNumConnections() > 0)
- return BlockSource::NETWORK;
-
+ if (m_node.isLoadingBlocks()) return BlockSource::DISK;
+ if (getNumConnections() > 0) return BlockSource::NETWORK;
return BlockSource::NONE;
}
@@ -285,7 +280,7 @@ bool ClientModel::getProxyInfo(std::string& ip_port) const
{
Proxy ipv4, ipv6;
if (m_node.getProxy((Network) 1, ipv4) && m_node.getProxy((Network) 2, ipv6)) {
- ip_port = ipv4.proxy.ToStringIPPort();
+ ip_port = ipv4.proxy.ToStringAddrPort();
return true;
}
return false;
diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h
index 9ff64fe772..493e18a07d 100644
--- a/src/qt/clientmodel.h
+++ b/src/qt/clientmodel.h
@@ -32,9 +32,8 @@ QT_END_NAMESPACE
enum class BlockSource {
NONE,
- REINDEX,
DISK,
- NETWORK
+ NETWORK,
};
enum class SyncType {
@@ -72,8 +71,8 @@ public:
int getHeaderTipHeight() const;
int64_t getHeaderTipTime() const;
- //! Returns enum BlockSource of the current importing/syncing state
- enum BlockSource getBlockSource() const;
+ //! Returns the block source of the current importing/syncing state
+ BlockSource getBlockSource() const;
//! Return warnings to be displayed in status bar
QString getStatusBarWarnings() const;
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index 53b0c3832b..6dec4b2e42 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -406,24 +406,21 @@ void OptionsDialog::updateProxyValidationState()
void OptionsDialog::updateDefaultProxyNets()
{
+ CNetAddr ui_proxy_netaddr;
+ LookupHost(ui->proxyIp->text().toStdString(), ui_proxy_netaddr, /*fAllowLookup=*/false);
+ const CService ui_proxy{ui_proxy_netaddr, ui->proxyPort->text().toUShort()};
+
Proxy proxy;
- std::string strProxy;
- QString strDefaultProxyGUI;
-
- model->node().getProxy(NET_IPV4, proxy);
- strProxy = proxy.proxy.ToStringIP() + ":" + proxy.proxy.ToStringPort();
- strDefaultProxyGUI = ui->proxyIp->text() + ":" + ui->proxyPort->text();
- (strProxy == strDefaultProxyGUI.toStdString()) ? ui->proxyReachIPv4->setChecked(true) : ui->proxyReachIPv4->setChecked(false);
-
- model->node().getProxy(NET_IPV6, proxy);
- strProxy = proxy.proxy.ToStringIP() + ":" + proxy.proxy.ToStringPort();
- strDefaultProxyGUI = ui->proxyIp->text() + ":" + ui->proxyPort->text();
- (strProxy == strDefaultProxyGUI.toStdString()) ? ui->proxyReachIPv6->setChecked(true) : ui->proxyReachIPv6->setChecked(false);
-
- model->node().getProxy(NET_ONION, proxy);
- strProxy = proxy.proxy.ToStringIP() + ":" + proxy.proxy.ToStringPort();
- strDefaultProxyGUI = ui->proxyIp->text() + ":" + ui->proxyPort->text();
- (strProxy == strDefaultProxyGUI.toStdString()) ? ui->proxyReachTor->setChecked(true) : ui->proxyReachTor->setChecked(false);
+ bool has_proxy;
+
+ has_proxy = model->node().getProxy(NET_IPV4, proxy);
+ ui->proxyReachIPv4->setChecked(has_proxy && proxy.proxy == ui_proxy);
+
+ has_proxy = model->node().getProxy(NET_IPV6, proxy);
+ ui->proxyReachIPv6->setChecked(has_proxy && proxy.proxy == ui_proxy);
+
+ has_proxy = model->node().getProxy(NET_ONION, proxy);
+ ui->proxyReachTor->setChecked(has_proxy && proxy.proxy == ui_proxy);
}
ProxyAddressValidator::ProxyAddressValidator(QObject *parent) :
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index cd0a1a19ee..bee8fafddc 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -18,6 +18,7 @@
#include <netbase.h>
#include <txdb.h> // for -dbcache defaults
#include <util/string.h>
+#include <util/system.h>
#include <validation.h> // For DEFAULT_SCRIPTCHECK_THREADS
#include <wallet/wallet.h> // For DEFAULT_SPEND_ZEROCONF_CHANGE
@@ -31,7 +32,7 @@
const char *DEFAULT_GUI_PROXY_HOST = "127.0.0.1";
-static const QString GetDefaultProxyAddress();
+static QString GetDefaultProxyAddress();
/** Map GUI option ID to node setting name. */
static const char* SettingName(OptionsModel::OptionID option)
@@ -59,7 +60,7 @@ static const char* SettingName(OptionsModel::OptionID option)
}
/** Call node.updateRwSetting() with Bitcoin 22.x workaround. */
-static void UpdateRwSetting(interfaces::Node& node, OptionsModel::OptionID option, const util::SettingsValue& value)
+static void UpdateRwSetting(interfaces::Node& node, OptionsModel::OptionID option, const std::string& suffix, const util::SettingsValue& value)
{
if (value.isNum() &&
(option == OptionsModel::DatabaseCache ||
@@ -73,9 +74,9 @@ static void UpdateRwSetting(interfaces::Node& node, OptionsModel::OptionID optio
// in later releases by https://github.com/bitcoin/bitcoin/pull/24498.
// If new numeric settings are added, they can be written as numbers
// instead of strings, because bitcoin 22.x will not try to read these.
- node.updateRwSetting(SettingName(option), value.getValStr());
+ node.updateRwSetting(SettingName(option) + suffix, value.getValStr());
} else {
- node.updateRwSetting(SettingName(option), value);
+ node.updateRwSetting(SettingName(option) + suffix, value);
}
}
@@ -131,13 +132,6 @@ void OptionsModel::addOverriddenOption(const std::string &option)
bool OptionsModel::Init(bilingual_str& error)
{
// Initialize display settings from stored settings.
- m_prune_size_gb = PruneSizeGB(node().getPersistentSetting("prune"));
- ProxySetting proxy = ParseProxyString(SettingToString(node().getPersistentSetting("proxy"), GetDefaultProxyAddress().toStdString()));
- m_proxy_ip = proxy.ip;
- m_proxy_port = proxy.port;
- ProxySetting onion = ParseProxyString(SettingToString(node().getPersistentSetting("onion"), GetDefaultProxyAddress().toStdString()));
- m_onion_ip = onion.ip;
- m_onion_port = onion.port;
language = QString::fromStdString(SettingToString(node().getPersistentSetting("lang"), ""));
checkAndMigrate();
@@ -227,6 +221,8 @@ bool OptionsModel::Init(bilingual_str& error)
m_use_embedded_monospaced_font = settings.value("UseEmbeddedMonospacedFont").toBool();
Q_EMIT useEmbeddedMonospacedFontChanged(m_use_embedded_monospaced_font);
+ m_mask_values = settings.value("mask_values", false).toBool();
+
return true;
}
@@ -308,7 +304,7 @@ static std::string ProxyString(bool is_set, QString ip, QString port)
return is_set ? QString(ip + ":" + port).toStdString() : "";
}
-static const QString GetDefaultProxyAddress()
+static QString GetDefaultProxyAddress()
{
return QString("%1:%2").arg(DEFAULT_GUI_PROXY_HOST).arg(DEFAULT_GUI_PROXY_PORT);
}
@@ -318,8 +314,6 @@ void OptionsModel::SetPruneTargetGB(int prune_target_gb)
const util::SettingsValue cur_value = node().getPersistentSetting("prune");
const util::SettingsValue new_value = PruneSetting(prune_target_gb > 0, prune_target_gb);
- m_prune_size_gb = prune_target_gb;
-
// Force setting to take effect. It is still safe to change the value at
// this point because this function is only called after the intro screen is
// shown, before the node starts.
@@ -332,7 +326,12 @@ void OptionsModel::SetPruneTargetGB(int prune_target_gb)
PruneSizeGB(cur_value) != PruneSizeGB(new_value)) {
// Call UpdateRwSetting() instead of setOption() to avoid setting
// RestartRequired flag
- UpdateRwSetting(node(), Prune, new_value);
+ UpdateRwSetting(node(), Prune, "", new_value);
+ }
+
+ // Keep previous pruning size, if pruning was disabled.
+ if (PruneEnabled(cur_value)) {
+ UpdateRwSetting(node(), Prune, "-prev", PruneEnabled(new_value) ? util::SettingsValue{} : cur_value);
}
}
@@ -360,9 +359,9 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in
return successful;
}
-QVariant OptionsModel::getOption(OptionID option) const
+QVariant OptionsModel::getOption(OptionID option, const std::string& suffix) const
{
- auto setting = [&]{ return node().getPersistentSetting(SettingName(option)); };
+ auto setting = [&]{ return node().getPersistentSetting(SettingName(option) + suffix); };
QSettings settings;
switch (option) {
@@ -389,19 +388,30 @@ QVariant OptionsModel::getOption(OptionID option) const
// default proxy
case ProxyUse:
+ case ProxyUseTor:
return ParseProxyString(SettingToString(setting(), "")).is_set;
case ProxyIP:
- return m_proxy_ip;
+ case ProxyIPTor: {
+ ProxySetting proxy = ParseProxyString(SettingToString(setting(), ""));
+ if (proxy.is_set) {
+ return proxy.ip;
+ } else if (suffix.empty()) {
+ return getOption(option, "-prev");
+ } else {
+ return ParseProxyString(GetDefaultProxyAddress().toStdString()).ip;
+ }
+ }
case ProxyPort:
- return m_proxy_port;
-
- // separate Tor proxy
- case ProxyUseTor:
- return ParseProxyString(SettingToString(setting(), "")).is_set;
- case ProxyIPTor:
- return m_onion_ip;
- case ProxyPortTor:
- return m_onion_port;
+ case ProxyPortTor: {
+ ProxySetting proxy = ParseProxyString(SettingToString(setting(), ""));
+ if (proxy.is_set) {
+ return proxy.port;
+ } else if (suffix.empty()) {
+ return getOption(option, "-prev");
+ } else {
+ return ParseProxyString(GetDefaultProxyAddress().toStdString()).port;
+ }
+ }
#ifdef ENABLE_WALLET
case SpendZeroConfChange:
@@ -426,7 +436,9 @@ QVariant OptionsModel::getOption(OptionID option) const
case Prune:
return PruneEnabled(setting());
case PruneSize:
- return m_prune_size_gb;
+ return PruneEnabled(setting()) ? PruneSizeGB(setting()) :
+ suffix.empty() ? getOption(option, "-prev") :
+ DEFAULT_PRUNE_TARGET_GB;
case DatabaseCache:
return qlonglong(SettingToInt(setting(), nDefaultDbCache));
case ThreadsScriptVerif:
@@ -435,15 +447,17 @@ QVariant OptionsModel::getOption(OptionID option) const
return SettingToBool(setting(), DEFAULT_LISTEN);
case Server:
return SettingToBool(setting(), false);
+ case MaskValues:
+ return m_mask_values;
default:
return QVariant();
}
}
-bool OptionsModel::setOption(OptionID option, const QVariant& value)
+bool OptionsModel::setOption(OptionID option, const QVariant& value, const std::string& suffix)
{
- auto changed = [&] { return value.isValid() && value != getOption(option); };
- auto update = [&](const util::SettingsValue& value) { return UpdateRwSetting(node(), option, value); };
+ auto changed = [&] { return value.isValid() && value != getOption(option, suffix); };
+ auto update = [&](const util::SettingsValue& value) { return UpdateRwSetting(node(), option, suffix, value); };
bool successful = true; /* set to false on parse error */
QSettings settings;
@@ -481,52 +495,60 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value)
// default proxy
case ProxyUse:
if (changed()) {
- update(ProxyString(value.toBool(), m_proxy_ip, m_proxy_port));
- setRestartRequired(true);
+ if (suffix.empty() && !value.toBool()) setOption(option, true, "-prev");
+ update(ProxyString(value.toBool(), getOption(ProxyIP).toString(), getOption(ProxyPort).toString()));
+ if (suffix.empty() && value.toBool()) UpdateRwSetting(node(), option, "-prev", {});
+ if (suffix.empty()) setRestartRequired(true);
}
break;
case ProxyIP:
if (changed()) {
- m_proxy_ip = value.toString();
- if (getOption(ProxyUse).toBool()) {
- update(ProxyString(true, m_proxy_ip, m_proxy_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUse).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, value.toString(), getOption(ProxyPort).toString()));
}
+ if (suffix.empty() && getOption(ProxyUse).toBool()) setRestartRequired(true);
}
break;
case ProxyPort:
if (changed()) {
- m_proxy_port = value.toString();
- if (getOption(ProxyUse).toBool()) {
- update(ProxyString(true, m_proxy_ip, m_proxy_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUse).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, getOption(ProxyIP).toString(), value.toString()));
}
+ if (suffix.empty() && getOption(ProxyUse).toBool()) setRestartRequired(true);
}
break;
// separate Tor proxy
case ProxyUseTor:
if (changed()) {
- update(ProxyString(value.toBool(), m_onion_ip, m_onion_port));
- setRestartRequired(true);
+ if (suffix.empty() && !value.toBool()) setOption(option, true, "-prev");
+ update(ProxyString(value.toBool(), getOption(ProxyIPTor).toString(), getOption(ProxyPortTor).toString()));
+ if (suffix.empty() && value.toBool()) UpdateRwSetting(node(), option, "-prev", {});
+ if (suffix.empty()) setRestartRequired(true);
}
break;
case ProxyIPTor:
if (changed()) {
- m_onion_ip = value.toString();
- if (getOption(ProxyUseTor).toBool()) {
- update(ProxyString(true, m_onion_ip, m_onion_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUseTor).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, value.toString(), getOption(ProxyPortTor).toString()));
}
+ if (suffix.empty() && getOption(ProxyUseTor).toBool()) setRestartRequired(true);
}
break;
case ProxyPortTor:
if (changed()) {
- m_onion_port = value.toString();
- if (getOption(ProxyUseTor).toBool()) {
- update(ProxyString(true, m_onion_ip, m_onion_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUseTor).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, getOption(ProxyIPTor).toString(), value.toString()));
}
+ if (suffix.empty() && getOption(ProxyUseTor).toBool()) setRestartRequired(true);
}
break;
@@ -580,17 +602,20 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value)
break;
case Prune:
if (changed()) {
- update(PruneSetting(value.toBool(), m_prune_size_gb));
- setRestartRequired(true);
+ if (suffix.empty() && !value.toBool()) setOption(option, true, "-prev");
+ update(PruneSetting(value.toBool(), getOption(PruneSize).toInt()));
+ if (suffix.empty() && value.toBool()) UpdateRwSetting(node(), option, "-prev", {});
+ if (suffix.empty()) setRestartRequired(true);
}
break;
case PruneSize:
if (changed()) {
- m_prune_size_gb = ParsePruneSizeGB(value);
- if (getOption(Prune).toBool()) {
- update(PruneSetting(true, m_prune_size_gb));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(Prune).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(PruneSetting(true, ParsePruneSizeGB(value)));
}
+ if (suffix.empty() && getOption(Prune).toBool()) setRestartRequired(true);
}
break;
case DatabaseCache:
@@ -612,6 +637,10 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value)
setRestartRequired(true);
}
break;
+ case MaskValues:
+ m_mask_values = value.toBool();
+ settings.setValue("mask_values", m_mask_values);
+ break;
default:
break;
}
@@ -640,6 +669,11 @@ bool OptionsModel::isRestartRequired() const
return settings.value("fRestartRequired", false).toBool();
}
+bool OptionsModel::hasSigner()
+{
+ return gArgs.GetArg("-signer", "") != "";
+}
+
void OptionsModel::checkAndMigrate()
{
// Migration of default values
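
The proxy and prune hunks above share one mechanism: when a setting is switched off, its current value is stashed under a "-prev"-suffixed key, and getOption() falls back to that key so the old value reappears when the setting is switched back on. A minimal standalone sketch of the pattern, using a plain map as a stand-in for the settings store (all names here are illustrative, not this codebase's API):

#include <map>
#include <string>

using SettingsMap = std::map<std::string, std::string>;

// Switch a setting off: remember its current value under "<name>-prev".
void Disable(SettingsMap& s, const std::string& name)
{
    if (auto it = s.find(name); it != s.end()) {
        s[name + "-prev"] = it->second;
        s.erase(it);
    }
}

// Read a setting: prefer the live value, then the remembered one, then a default.
std::string Get(const SettingsMap& s, const std::string& name, const std::string& def)
{
    if (auto it = s.find(name); it != s.end()) return it->second;
    if (auto it = s.find(name + "-prev"); it != s.end()) return it->second;
    return def;
}
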
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index e36fbc5b31..f28a1087ba 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -72,6 +72,7 @@ public:
Listen, // bool
Server, // bool
EnablePSBTControls, // bool
+ MaskValues, // bool
OptionIDRowCount,
};
@@ -81,8 +82,8 @@ public:
int rowCount(const QModelIndex & parent = QModelIndex()) const override;
QVariant data(const QModelIndex & index, int role = Qt::DisplayRole) const override;
bool setData(const QModelIndex & index, const QVariant & value, int role = Qt::EditRole) override;
- QVariant getOption(OptionID option) const;
- bool setOption(OptionID option, const QVariant& value);
+ QVariant getOption(OptionID option, const std::string& suffix="") const;
+ bool setOption(OptionID option, const QVariant& value, const std::string& suffix="");
/** Updates current unit in memory, settings and emits displayUnitChanged(new_unit) signal */
void setDisplayUnit(const QVariant& new_unit);
@@ -98,6 +99,9 @@ public:
bool getEnablePSBTControls() const { return m_enable_psbt_controls; }
const QString& getOverriddenByCommandLine() { return strOverriddenByCommandLine; }
+ /** Whether -signer was set or not */
+ bool hasSigner();
+
/* Explicit setters */
void SetPruneTargetGB(int prune_target_gb);
@@ -120,15 +124,7 @@ private:
bool fCoinControlFeatures;
bool m_sub_fee_from_amount;
bool m_enable_psbt_controls;
-
- //! In-memory settings for display. These are stored persistently by the
- //! bitcoin node but it's also nice to store them in memory to prevent them
- //! getting cleared when enable/disable toggles are used in the GUI.
- int m_prune_size_gb;
- QString m_proxy_ip;
- QString m_proxy_port;
- QString m_onion_ip;
- QString m_onion_port;
+ bool m_mask_values;
/* settings that were overridden by command-line */
QString strOverriddenByCommandLine;
diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp
index c9caec39cf..0f00d167f7 100644
--- a/src/qt/overviewpage.cpp
+++ b/src/qt/overviewpage.cpp
@@ -173,6 +173,7 @@ void OverviewPage::handleTransactionClicked(const QModelIndex &index)
void OverviewPage::setPrivacy(bool privacy)
{
m_privacy = privacy;
+ clientModel->getOptionsModel()->setOption(OptionsModel::OptionID::MaskValues, privacy);
const auto& balances = walletModel->getCachedBalance();
if (balances.balance != -1) {
setBalance(balances);
@@ -262,7 +263,6 @@ void OverviewPage::setWalletModel(WalletModel *model)
// Set up transaction list
filter.reset(new TransactionFilterProxy());
filter->setSourceModel(model->getTransactionTableModel());
- filter->setLimit(NUM_ITEMS);
filter->setDynamicSortFilter(true);
filter->setSortRole(Qt::EditRole);
filter->setShowInactive(false);
@@ -271,6 +271,10 @@ void OverviewPage::setWalletModel(WalletModel *model)
ui->listTransactions->setModel(filter.get());
ui->listTransactions->setModelColumn(TransactionTableModel::ToAddress);
+ connect(filter.get(), &TransactionFilterProxy::rowsInserted, this, &OverviewPage::LimitTransactionRows);
+ connect(filter.get(), &TransactionFilterProxy::rowsRemoved, this, &OverviewPage::LimitTransactionRows);
+ connect(filter.get(), &TransactionFilterProxy::rowsMoved, this, &OverviewPage::LimitTransactionRows);
+ LimitTransactionRows();
// Keep up to date with wallet
setBalance(model->getCachedBalance());
connect(model, &WalletModel::balanceChanged, this, &OverviewPage::setBalance);
@@ -299,6 +303,16 @@ void OverviewPage::changeEvent(QEvent* e)
QWidget::changeEvent(e);
}
+// Only show most recent NUM_ITEMS rows
+void OverviewPage::LimitTransactionRows()
+{
+ if (filter && ui->listTransactions && ui->listTransactions->model() && filter.get() == ui->listTransactions->model()) {
+ for (int i = 0; i < filter->rowCount(); ++i) {
+ ui->listTransactions->setRowHidden(i, i >= NUM_ITEMS);
+ }
+ }
+}
+
void OverviewPage::updateDisplayUnit()
{
if (walletModel && walletModel->getOptionsModel()) {
diff --git a/src/qt/overviewpage.h b/src/qt/overviewpage.h
index 2ca38b78dd..5c487ee116 100644
--- a/src/qt/overviewpage.h
+++ b/src/qt/overviewpage.h
@@ -60,6 +60,7 @@ private:
std::unique_ptr<TransactionFilterProxy> filter;
private Q_SLOTS:
+ void LimitTransactionRows();
void updateDisplayUnit();
void handleTransactionClicked(const QModelIndex &index);
void updateAlerts(const QString &warnings);
diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp
index 85ade624cf..52d4e45d49 100644
--- a/src/qt/recentrequeststablemodel.cpp
+++ b/src/qt/recentrequeststablemodel.cpp
@@ -175,7 +175,7 @@ void RecentRequestsTableModel::addNewRequest(const SendCoinsRecipient &recipient
newEntry.date = QDateTime::currentDateTime();
newEntry.recipient = recipient;
- CDataStream ss(SER_DISK, CLIENT_VERSION);
+ DataStream ss{};
ss << newEntry;
if (!walletModel->wallet().setAddressReceiveRequest(DecodeDestination(recipient.address.toStdString()), ToString(newEntry.id), ss.str()))
@@ -188,7 +188,7 @@ void RecentRequestsTableModel::addNewRequest(const SendCoinsRecipient &recipient
void RecentRequestsTableModel::addNewRequest(const std::string &recipient)
{
std::vector<uint8_t> data(recipient.begin(), recipient.end());
- CDataStream ss(data, SER_DISK, CLIENT_VERSION);
+ DataStream ss{data};
RecentRequestEntry entry;
ss >> entry;
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 843cd46d13..b46a3c039b 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -780,8 +780,8 @@ void RPCConsole::addWallet(WalletModel * const walletModel)
{
// use name for text and wallet model for internal data object (to allow to move to a wallet id later)
ui->WalletSelector->addItem(walletModel->getDisplayName(), QVariant::fromValue(walletModel));
- if (ui->WalletSelector->count() == 2 && !isVisible()) {
- // First wallet added, set to default so long as the window isn't presently visible (and potentially in use)
+ if (ui->WalletSelector->count() == 2) {
+ // First wallet added, set to default to match wallet RPC behavior
ui->WalletSelector->setCurrentIndex(1);
}
if (ui->WalletSelector->count() > 2) {
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 1604cad503..89dd0ada62 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -203,7 +203,7 @@ void SendCoinsDialog::setModel(WalletModel *_model)
if (model->wallet().hasExternalSigner()) {
//: "device" usually means a hardware wallet.
ui->sendButton->setText(tr("Sign on device"));
- if (gArgs.GetArg("-signer", "") != "") {
+ if (model->getOptionsModel()->hasSigner()) {
ui->sendButton->setEnabled(true);
ui->sendButton->setToolTip(tr("Connect your hardware wallet first."));
} else {
@@ -699,7 +699,7 @@ void SendCoinsDialog::setBalance(const interfaces::WalletBalances& balances)
CAmount balance = balances.balance;
if (model->wallet().hasExternalSigner()) {
ui->labelBalanceName->setText(tr("External balance:"));
- } else if (model->wallet().privateKeysDisabled()) {
+ } else if (model->wallet().isLegacy() && model->wallet().privateKeysDisabled()) {
balance = balances.watch_only_balance;
ui->labelBalanceName->setText(tr("Watch-only balance:"));
}
diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp
index 049326070e..d005e08d14 100644
--- a/src/qt/test/addressbooktests.cpp
+++ b/src/qt/test/addressbooktests.cpp
@@ -75,7 +75,7 @@ void TestAddAddressesToSendBook(interfaces::Node& node)
auto wallet_loader = interfaces::MakeWalletLoader(*test.m_node.chain, *Assert(test.m_node.args));
test.m_node.wallet_loader = wallet_loader.get();
node.setContext(&test.m_node);
- const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), "", gArgs, CreateMockWalletDatabase());
+ const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
{
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index 59a5934890..62f2019438 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -159,7 +159,7 @@ void TestGUI(interfaces::Node& node)
auto wallet_loader = interfaces::MakeWalletLoader(*test.m_node.chain, *Assert(test.m_node.args));
test.m_node.wallet_loader = wallet_loader.get();
node.setContext(&test.m_node);
- const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), "", gArgs, CreateMockWalletDatabase());
+ const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
{
@@ -289,7 +289,7 @@ void TestGUI(interfaces::Node& node)
std::vector<std::string> requests = walletModel.wallet().getAddressReceiveRequests();
QCOMPARE(requests.size(), size_t{1});
RecentRequestEntry entry;
- CDataStream{MakeUCharSpan(requests[0]), SER_DISK, CLIENT_VERSION} >> entry;
+ DataStream{MakeUCharSpan(requests[0])} >> entry;
QCOMPARE(entry.nVersion, int{1});
QCOMPARE(entry.id, int64_t{1});
QVERIFY(entry.date.isValid());
diff --git a/src/qt/transactionfilterproxy.cpp b/src/qt/transactionfilterproxy.cpp
index 3cc0cc839d..173fd326a3 100644
--- a/src/qt/transactionfilterproxy.cpp
+++ b/src/qt/transactionfilterproxy.cpp
@@ -88,25 +88,8 @@ void TransactionFilterProxy::setWatchOnlyFilter(WatchOnlyFilter filter)
invalidateFilter();
}
-void TransactionFilterProxy::setLimit(int limit)
-{
- this->limitRows = limit;
-}
-
void TransactionFilterProxy::setShowInactive(bool _showInactive)
{
this->showInactive = _showInactive;
invalidateFilter();
}
-
-int TransactionFilterProxy::rowCount(const QModelIndex &parent) const
-{
- if(limitRows != -1)
- {
- return std::min(QSortFilterProxyModel::rowCount(parent), limitRows);
- }
- else
- {
- return QSortFilterProxyModel::rowCount(parent);
- }
-}
diff --git a/src/qt/transactionfilterproxy.h b/src/qt/transactionfilterproxy.h
index 8e5f72d764..73c4f21426 100644
--- a/src/qt/transactionfilterproxy.h
+++ b/src/qt/transactionfilterproxy.h
@@ -42,14 +42,9 @@ public:
void setMinAmount(const CAmount& minimum);
void setWatchOnlyFilter(WatchOnlyFilter filter);
- /** Set maximum number of rows returned, -1 if unlimited. */
- void setLimit(int limit);
-
/** Set whether to show conflicted transactions. */
void setShowInactive(bool showInactive);
- int rowCount(const QModelIndex &parent = QModelIndex()) const override;
-
protected:
bool filterAcceptsRow(int source_row, const QModelIndex & source_parent) const override;
@@ -60,7 +55,6 @@ private:
quint32 typeFilter;
WatchOnlyFilter watchOnlyFilter{WatchOnlyFilter_All};
CAmount minAmount{0};
- int limitRows{-1};
bool showInactive{true};
};
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index 3b32137bd4..25d54bdce6 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -93,10 +93,7 @@ public:
TransactionTableModel *parent;
- /* Local cache of wallet.
- * As it is in the same order as the CWallet, by definition
- * this is sorted by sha256.
- */
+ //! Local cache of wallet sorted by transaction hash
QList<TransactionRecord> cachedWallet;
/** True when model finishes loading all wallet transactions on start */
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index cb8491e27a..3c69d46b7e 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -477,13 +477,6 @@ WalletModel::UnlockContext::~UnlockContext()
}
}
-void WalletModel::UnlockContext::CopyFrom(UnlockContext&& rhs)
-{
- // Transfer context; old object no longer relocks wallet
- *this = rhs;
- rhs.relock = false;
-}
-
bool WalletModel::bumpFee(uint256 hash, uint256& new_hash)
{
CCoinControl coin_control;
diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h
index 604a9e03c8..17a39349f3 100644
--- a/src/qt/walletmodel.h
+++ b/src/qt/walletmodel.h
@@ -111,7 +111,7 @@ public:
bool setWalletLocked(bool locked, const SecureString &passPhrase=SecureString());
bool changePassphrase(const SecureString &oldPass, const SecureString &newPass);
- // RAI object for unlocking wallet, returned by requestUnlock()
+ // RAII object for unlocking wallet, returned by requestUnlock()
class UnlockContext
{
public:
@@ -120,18 +120,16 @@ public:
bool isValid() const { return valid; }
- // Copy constructor is disabled.
+ // Disable unused copy/move constructors/assignments explicitly.
UnlockContext(const UnlockContext&) = delete;
- // Move operator and constructor transfer the context
- UnlockContext(UnlockContext&& obj) { CopyFrom(std::move(obj)); }
- UnlockContext& operator=(UnlockContext&& rhs) { CopyFrom(std::move(rhs)); return *this; }
+ UnlockContext(UnlockContext&&) = delete;
+ UnlockContext& operator=(const UnlockContext&) = delete;
+ UnlockContext& operator=(UnlockContext&&) = delete;
+
private:
WalletModel *wallet;
- bool valid;
- mutable bool relock; // mutable, as it can be set to false by copying
-
- UnlockContext& operator=(const UnlockContext&) = default;
- void CopyFrom(UnlockContext&& rhs);
+ const bool valid;
+ const bool relock;
};
UnlockContext requestUnlock();
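
UnlockContext above is reduced to a plain scope guard: copy and move are deleted, and the relock decision is fixed at construction. A standalone sketch of the same shape (the Wallet type and its lock calls below are placeholders, not this codebase's interfaces):

// Minimal RAII sketch: unlock on construction, relock on destruction,
// non-copyable and non-movable so exactly one owner controls the relock.
struct Wallet {
    void Unlock() {}
    void Lock() {}
};

class UnlockGuard
{
public:
    explicit UnlockGuard(Wallet& w) : m_wallet{w} { m_wallet.Unlock(); }
    ~UnlockGuard() { m_wallet.Lock(); }

    UnlockGuard(const UnlockGuard&) = delete;
    UnlockGuard(UnlockGuard&&) = delete;
    UnlockGuard& operator=(const UnlockGuard&) = delete;
    UnlockGuard& operator=(UnlockGuard&&) = delete;

private:
    Wallet& m_wallet;
};
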
diff --git a/src/random.cpp b/src/random.cpp
index 23ea9ba6b7..432592589a 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -8,23 +8,22 @@
#include <compat/cpuid.h>
#include <crypto/sha256.h>
#include <crypto/sha512.h>
-#include <support/cleanse.h>
-#ifdef WIN32
-#include <compat/compat.h>
-#include <wincrypt.h>
-#endif
#include <logging.h>
#include <randomenv.h>
-#include <support/allocators/secure.h>
#include <span.h>
-#include <sync.h> // for Mutex
-#include <util/time.h> // for GetTimeMicros()
+#include <support/allocators/secure.h>
+#include <support/cleanse.h>
+#include <sync.h>
+#include <util/time.h>
#include <cmath>
#include <cstdlib>
#include <thread>
-#ifndef WIN32
+#ifdef WIN32
+#include <windows.h>
+#include <wincrypt.h>
+#else
#include <fcntl.h>
#include <sys/time.h>
#endif
@@ -599,18 +598,15 @@ uint256 GetRandHash() noexcept
void FastRandomContext::RandomSeed()
{
uint256 seed = GetRandHash();
- rng.SetKey(seed.begin(), 32);
+ rng.SetKey32(seed.begin());
requires_seed = false;
}
uint256 FastRandomContext::rand256() noexcept
{
- if (bytebuf_size < 32) {
- FillByteBuffer();
- }
+ if (requires_seed) RandomSeed();
uint256 ret;
- memcpy(ret.begin(), bytebuf + 64 - bytebuf_size, 32);
- bytebuf_size -= 32;
+ rng.Keystream(ret.data(), ret.size());
return ret;
}
@@ -624,9 +620,9 @@ std::vector<unsigned char> FastRandomContext::randbytes(size_t len)
return ret;
}
-FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), bytebuf_size(0), bitbuf_size(0)
+FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), bitbuf_size(0)
{
- rng.SetKey(seed.begin(), 32);
+ rng.SetKey32(seed.begin());
}
bool Random_SanityCheck()
@@ -637,7 +633,7 @@ bool Random_SanityCheck()
* GetOSRand() overwrites all 32 bytes of the output given a maximum
* number of tries.
*/
- static const ssize_t MAX_TRIES = 1024;
+ static constexpr int MAX_TRIES{1024};
uint8_t data[NUM_OS_RANDOM_BYTES];
bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */
int num_overwritten;
@@ -675,25 +671,22 @@ bool Random_SanityCheck()
return true;
}
-FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), bytebuf_size(0), bitbuf_size(0)
+FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), bitbuf_size(0)
{
if (!fDeterministic) {
return;
}
uint256 seed;
- rng.SetKey(seed.begin(), 32);
+ rng.SetKey32(seed.begin());
}
FastRandomContext& FastRandomContext::operator=(FastRandomContext&& from) noexcept
{
requires_seed = from.requires_seed;
rng = from.rng;
- std::copy(std::begin(from.bytebuf), std::end(from.bytebuf), std::begin(bytebuf));
- bytebuf_size = from.bytebuf_size;
bitbuf = from.bitbuf;
bitbuf_size = from.bitbuf_size;
from.requires_seed = true;
- from.bytebuf_size = 0;
from.bitbuf_size = 0;
return *this;
}
diff --git a/src/random.h b/src/random.h
index e890e909c7..49c0dff5bf 100644
--- a/src/random.h
+++ b/src/random.h
@@ -15,6 +15,7 @@
#include <chrono>
#include <cstdint>
#include <limits>
+#include <vector>
/**
* Overall design of the RNG and entropy sources.
@@ -145,23 +146,11 @@ private:
bool requires_seed;
ChaCha20 rng;
- unsigned char bytebuf[64];
- int bytebuf_size;
-
uint64_t bitbuf;
int bitbuf_size;
void RandomSeed();
- void FillByteBuffer()
- {
- if (requires_seed) {
- RandomSeed();
- }
- rng.Keystream(bytebuf, sizeof(bytebuf));
- bytebuf_size = sizeof(bytebuf);
- }
-
void FillBitBuffer()
{
bitbuf = rand64();
@@ -185,10 +174,10 @@ public:
/** Generate a random 64-bit integer. */
uint64_t rand64() noexcept
{
- if (bytebuf_size < 8) FillByteBuffer();
- uint64_t ret = ReadLE64(bytebuf + 64 - bytebuf_size);
- bytebuf_size -= 8;
- return ret;
+ if (requires_seed) RandomSeed();
+ unsigned char buf[8];
+ rng.Keystream(buf, 8);
+ return ReadLE64(buf);
}
/** Generate a random (bits)-bit integer. */
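
With the 64-byte bytebuf removed, rand64() above draws 8 fresh keystream bytes per call and decodes them little-endian via ReadLE64 (provided by the codebase's crypto/common.h). A standalone, portable sketch of just that decoding step, for illustration only:

#include <cstdint>

// Assemble a uint64_t from 8 bytes in little-endian order: byte 0 is the
// least significant, matching what ReadLE64 does with the keystream output.
uint64_t ReadLE64Sketch(const unsigned char* ptr)
{
    uint64_t x = 0;
    for (int i = 7; i >= 0; --i) {
        x = (x << 8) | ptr[i];
    }
    return x;
}
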
diff --git a/src/randomenv.cpp b/src/randomenv.cpp
index 35d090c71d..3e4d5a587d 100644
--- a/src/randomenv.cpp
+++ b/src/randomenv.cpp
@@ -13,21 +13,21 @@
#include <compat/cpuid.h>
#include <crypto/sha512.h>
#include <support/cleanse.h>
-#include <util/time.h> // for GetTime()
-#ifdef WIN32
-#include <compat/compat.h>
-#endif
+#include <util/time.h>
#include <algorithm>
#include <atomic>
+#include <cstdint>
+#include <cstring>
#include <chrono>
#include <climits>
#include <thread>
#include <vector>
-#include <stdint.h>
-#include <string.h>
-#ifndef WIN32
+#ifdef WIN32
+#include <windows.h>
+#include <winreg.h>
+#else
#include <sys/types.h> // must go before a number of other headers
#include <fcntl.h>
#include <netinet/in.h>
diff --git a/src/rest.cpp b/src/rest.cpp
index add2bb73b0..a874f4eb6d 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -236,7 +236,7 @@ static bool rest_headers(const std::any& context,
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssHeader{};
for (const CBlockIndex *pindex : headers) {
ssHeader << pindex->GetBlockHeader();
}
@@ -248,7 +248,7 @@ static bool rest_headers(const std::any& context,
}
case RESTResponseFormat::HEX: {
- CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssHeader{};
for (const CBlockIndex *pindex : headers) {
ssHeader << pindex->GetBlockHeader();
}
@@ -435,7 +435,7 @@ static bool rest_filter_header(const std::any& context, HTTPRequest* req, const
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ssHeader{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssHeader{};
for (const uint256& header : filter_headers) {
ssHeader << header;
}
@@ -446,7 +446,7 @@ static bool rest_filter_header(const std::any& context, HTTPRequest* req, const
return true;
}
case RESTResponseFormat::HEX: {
- CDataStream ssHeader{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssHeader{};
for (const uint256& header : filter_headers) {
ssHeader << header;
}
@@ -534,7 +534,7 @@ static bool rest_block_filter(const std::any& context, HTTPRequest* req, const s
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ssResp{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssResp{};
ssResp << filter;
std::string binaryResp = ssResp.str();
@@ -543,7 +543,7 @@ static bool rest_block_filter(const std::any& context, HTTPRequest* req, const s
return true;
}
case RESTResponseFormat::HEX: {
- CDataStream ssResp{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream ssResp{};
ssResp << filter;
std::string strHex = HexStr(ssResp) + "\n";
@@ -793,7 +793,7 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
if (fInputParsed) //don't allow sending input over URI and HTTP RAW DATA
return RESTERR(req, HTTP_BAD_REQUEST, "Combination of URI scheme inputs and raw post data is not allowed");
- CDataStream oss(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream oss{};
oss << strRequestMutable;
oss >> fCheckMemPool;
oss >> vOutPoints;
@@ -866,7 +866,7 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
case RESTResponseFormat::BINARY: {
// serialize data
// use exact same output as mentioned in Bip64
- CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssGetUTXOResponse{};
ssGetUTXOResponse << active_height << active_hash << bitmap << outs;
std::string ssGetUTXOResponseString = ssGetUTXOResponse.str();
@@ -876,7 +876,7 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
}
case RESTResponseFormat::HEX: {
- CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssGetUTXOResponse{};
ssGetUTXOResponse << active_height << active_hash << bitmap << outs;
std::string strHex = HexStr(ssGetUTXOResponse) + "\n";
@@ -946,7 +946,7 @@ static bool rest_blockhash_by_height(const std::any& context, HTTPRequest* req,
}
switch (rf) {
case RESTResponseFormat::BINARY: {
- CDataStream ss_blockhash(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ss_blockhash{};
ss_blockhash << pblockindex->GetBlockHash();
req->WriteHeader("Content-Type", "application/octet-stream");
req->WriteReply(HTTP_OK, ss_blockhash.str());
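
Every CDataStream(SER_NETWORK, PROTOCOL_VERSION) in this file becomes the unversioned DataStream; the streaming operators are used exactly as before, only the type/version constructor arguments are gone. A minimal round-trip sketch within this codebase (assuming <streams.h> and <uint256.h>, as used in the hunks above):

#include <streams.h>
#include <uint256.h>

// Serialize a value into a DataStream and read it back out.
uint256 RoundTrip(const uint256& in)
{
    DataStream ss{};
    ss << in;
    uint256 out;
    ss >> out;
    return out;
}
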
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 2b39580043..28a619fe54 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -460,7 +460,7 @@ static RPCHelpMan getblockfrompeer()
// Fetching blocks before the node has syncing past their height can prevent block files from
// being pruned, so we avoid it if the node is in prune mode.
- if (node::fPruneMode && index->nHeight > WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()->nHeight)) {
+ if (chainman.m_blockman.IsPruneMode() && index->nHeight > WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()->nHeight)) {
throw JSONRPCError(RPC_MISC_ERROR, "In prune mode, only blocks that the node has already synced previously can be fetched from a peer");
}
@@ -565,7 +565,7 @@ static RPCHelpMan getblockheader()
if (!fVerbose)
{
- CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssBlock{};
ssBlock << pblockindex->GetBlockHeader();
std::string strHex = HexStr(ssBlock);
return strHex;
@@ -775,10 +775,11 @@ static RPCHelpMan pruneblockchain()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- if (!node::fPruneMode)
+ ChainstateManager& chainman = EnsureAnyChainman(request.context);
+ if (!chainman.m_blockman.IsPruneMode()) {
throw JSONRPCError(RPC_MISC_ERROR, "Cannot prune blocks because node is not in prune mode.");
+ }
- ChainstateManager& chainman = EnsureAnyChainman(request.context);
LOCK(cs_main);
Chainstate& active_chainstate = chainman.ActiveChainstate();
CChain& active_chain = active_chainstate.m_chain;
@@ -1109,7 +1110,7 @@ static RPCHelpMan verifychain()
{"nblocks", RPCArg::Type::NUM, RPCArg::DefaultHint{strprintf("%d, 0=all", DEFAULT_CHECKBLOCKS)}, "The number of blocks to check."},
},
RPCResult{
- RPCResult::Type::BOOL, "", "Verified or not"},
+ RPCResult::Type::BOOL, "", "Verification finished successfully. If false, check debug.log for reason."},
RPCExamples{
HelpExampleCli("verifychain", "")
+ HelpExampleRpc("verifychain", "")
@@ -1124,7 +1125,7 @@ static RPCHelpMan verifychain()
Chainstate& active_chainstate = chainman.ActiveChainstate();
return CVerifyDB().VerifyDB(
- active_chainstate, chainman.GetParams().GetConsensus(), active_chainstate.CoinsTip(), check_level, check_depth);
+ active_chainstate, chainman.GetParams().GetConsensus(), active_chainstate.CoinsTip(), check_level, check_depth) == VerifyDBResult::SUCCESS;
},
};
}
@@ -1266,15 +1267,15 @@ RPCHelpMan getblockchaininfo()
obj.pushKV("initialblockdownload", active_chainstate.IsInitialBlockDownload());
obj.pushKV("chainwork", tip.nChainWork.GetHex());
obj.pushKV("size_on_disk", chainman.m_blockman.CalculateCurrentUsage());
- obj.pushKV("pruned", node::fPruneMode);
- if (node::fPruneMode) {
+ obj.pushKV("pruned", chainman.m_blockman.IsPruneMode());
+ if (chainman.m_blockman.IsPruneMode()) {
obj.pushKV("pruneheight", chainman.m_blockman.GetFirstStoredBlock(tip)->nHeight);
// if 0, execution bypasses the whole if block.
bool automatic_pruning{args.GetIntArg("-prune", 0) != 1};
obj.pushKV("automatic_pruning", automatic_pruning);
if (automatic_pruning) {
- obj.pushKV("prune_target_size", node::nPruneTarget);
+ obj.pushKV("prune_target_size", chainman.m_blockman.GetPruneTarget());
}
}
@@ -2307,7 +2308,7 @@ static RPCHelpMan scanblocks()
RPCArg{"start_height", RPCArg::Type::NUM, RPCArg::Default{0}, "Height to start to scan from"},
RPCArg{"stop_height", RPCArg::Type::NUM, RPCArg::DefaultHint{"chain tip"}, "Height to stop to scan"},
RPCArg{"filtertype", RPCArg::Type::STR, RPCArg::Default{BlockFilterTypeName(BlockFilterType::BASIC)}, "The type name of the filter"},
- RPCArg{"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ RPCArg{"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
{"filter_false_positives", RPCArg::Type::BOOL, RPCArg::Default{false}, "Filter false positives (slower and may fail on pruned nodes). Otherwise they may occur at a rate of 1/M"},
},
diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp
index 0e202a963d..3a69e2d8a2 100644
--- a/src/rpc/mempool.cpp
+++ b/src/rpc/mempool.cpp
@@ -853,15 +853,16 @@ static RPCHelpMan submitpackage()
NONFATAL_UNREACHABLE();
}
}
+ size_t num_broadcast{0};
for (const auto& tx : txns) {
- size_t num_submitted{0};
std::string err_string;
- const auto err = BroadcastTransaction(node, tx, err_string, 0, true, true);
+ const auto err = BroadcastTransaction(node, tx, err_string, /*max_tx_fee=*/0, /*relay=*/true, /*wait_callback=*/true);
if (err != TransactionError::OK) {
throw JSONRPCTransactionError(err,
strprintf("transaction broadcast failed: %s (all transactions were submitted, %d transactions were broadcast successfully)",
- err_string, num_submitted));
+ err_string, num_broadcast));
}
+ num_broadcast++;
}
UniValue rpc_result{UniValue::VOBJ};
UniValue tx_result_map{UniValue::VOBJ};
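
The counter change above matters because a variable declared inside the loop is re-initialized on every pass and cannot accumulate across transactions, so the old error message always reported 0 successful broadcasts. A trivial standalone illustration of the difference:

#include <cstdio>

int main()
{
    int accumulated{0};
    for (int i = 0; i < 3; ++i) {
        int per_pass{0}; // re-initialized to 0 on every iteration
        ++per_pass;
        ++accumulated;
    }
    // per-pass counters never exceed 1, while accumulated == 3
    std::printf("accumulated=%d\n", accumulated);
    return 0;
}
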
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 764c4c675b..8753f845a5 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -441,7 +441,7 @@ static RPCHelpMan prioritisetransaction()
"Accepts the transaction into mined blocks at a higher (or lower) priority\n",
{
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id."},
- {"dummy", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "API-Compatibility for previous API. Must be zero or null.\n"
+ {"dummy", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "API-Compatibility for previous API. Must be zero or null.\n"
" DEPRECATED. For forward compatibility use named arguments and omit this parameter."},
{"fee_delta", RPCArg::Type::NUM, RPCArg::Optional::NO, "The fee value (in satoshis) to add (or subtract, if negative).\n"
" Note, that this value is not a fee rate. It is a value to modify absolute fee of the TX.\n"
@@ -513,8 +513,8 @@ static RPCHelpMan getblocktemplate()
{
{"template_request", RPCArg::Type::OBJ, RPCArg::Default{UniValue::VOBJ}, "Format of the template",
{
- {"mode", RPCArg::Type::STR, /* treat as named arg */ RPCArg::Optional::OMITTED_NAMED_ARG, "This must be set to \"template\", \"proposal\" (see BIP 23), or omitted"},
- {"capabilities", RPCArg::Type::ARR, /* treat as named arg */ RPCArg::Optional::OMITTED_NAMED_ARG, "A list of strings",
+ {"mode", RPCArg::Type::STR, /* treat as named arg */ RPCArg::Optional::OMITTED, "This must be set to \"template\", \"proposal\" (see BIP 23), or omitted"},
+ {"capabilities", RPCArg::Type::ARR, /* treat as named arg */ RPCArg::Optional::OMITTED, "A list of strings",
{
{"str", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "client side supported feature, 'longpoll', 'coinbasevalue', 'proposal', 'serverlist', 'workid'"},
}},
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index f0e5b90509..7ffa777ef4 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -196,7 +196,7 @@ static RPCHelpMan getpeerinfo()
obj.pushKV("id", stats.nodeid);
obj.pushKV("addr", stats.m_addr_name);
if (stats.addrBind.IsValid()) {
- obj.pushKV("addrbind", stats.addrBind.ToString());
+ obj.pushKV("addrbind", stats.addrBind.ToStringAddrPort());
}
if (!(stats.addrLocal.empty())) {
obj.pushKV("addrlocal", stats.addrLocal);
@@ -496,7 +496,7 @@ static RPCHelpMan getaddednodeinfo()
UniValue addresses(UniValue::VARR);
if (info.fConnected) {
UniValue address(UniValue::VOBJ);
- address.pushKV("address", info.resolvedAddress.ToString());
+ address.pushKV("address", info.resolvedAddress.ToStringAddrPort());
address.pushKV("connected", info.fInbound ? "inbound" : "outbound");
addresses.push_back(address);
}
@@ -571,7 +571,7 @@ static UniValue GetNetworksInfo()
obj.pushKV("name", GetNetworkName(network));
obj.pushKV("limited", !IsReachable(network));
obj.pushKV("reachable", IsReachable(network));
- obj.pushKV("proxy", proxy.IsValid() ? proxy.proxy.ToStringIPPort() : std::string());
+ obj.pushKV("proxy", proxy.IsValid() ? proxy.proxy.ToStringAddrPort() : std::string());
obj.pushKV("proxy_randomize_credentials", proxy.randomize_credentials);
networks.push_back(obj);
}
@@ -664,7 +664,7 @@ static RPCHelpMan getnetworkinfo()
for (const std::pair<const CNetAddr, LocalServiceInfo> &item : mapLocalHost)
{
UniValue rec(UniValue::VOBJ);
- rec.pushKV("address", item.first.ToString());
+ rec.pushKV("address", item.first.ToStringAddr());
rec.pushKV("port", item.second.nPort);
rec.pushKV("score", item.second.nScore);
localAddresses.push_back(rec);
@@ -702,9 +702,7 @@ static RPCHelpMan setban()
throw std::runtime_error(help.ToString());
}
NodeContext& node = EnsureAnyNodeContext(request.context);
- if (!node.banman) {
- throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
- }
+ BanMan& banman = EnsureBanman(node);
CSubNet subNet;
CNetAddr netAddr;
@@ -726,7 +724,7 @@ static RPCHelpMan setban()
if (strCommand == "add")
{
- if (isSubnet ? node.banman->IsBanned(subNet) : node.banman->IsBanned(netAddr)) {
+ if (isSubnet ? banman.IsBanned(subNet) : banman.IsBanned(netAddr)) {
throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: IP/Subnet already banned");
}
@@ -741,12 +739,12 @@ static RPCHelpMan setban()
}
if (isSubnet) {
- node.banman->Ban(subNet, banTime, absolute);
+ banman.Ban(subNet, banTime, absolute);
if (node.connman) {
node.connman->DisconnectNode(subNet);
}
} else {
- node.banman->Ban(netAddr, banTime, absolute);
+ banman.Ban(netAddr, banTime, absolute);
if (node.connman) {
node.connman->DisconnectNode(netAddr);
}
@@ -754,7 +752,7 @@ static RPCHelpMan setban()
}
else if(strCommand == "remove")
{
- if (!( isSubnet ? node.banman->Unban(subNet) : node.banman->Unban(netAddr) )) {
+ if (!( isSubnet ? banman.Unban(subNet) : banman.Unban(netAddr) )) {
throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. Requested address/subnet was not previously manually banned.");
}
}
@@ -785,13 +783,10 @@ static RPCHelpMan listbanned()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- NodeContext& node = EnsureAnyNodeContext(request.context);
- if(!node.banman) {
- throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
- }
+ BanMan& banman = EnsureAnyBanman(request.context);
banmap_t banMap;
- node.banman->GetBanned(banMap);
+ banman.GetBanned(banMap);
const int64_t current_time{GetTime()};
UniValue bannedAddresses(UniValue::VARR);
@@ -825,12 +820,9 @@ static RPCHelpMan clearbanned()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- NodeContext& node = EnsureAnyNodeContext(request.context);
- if (!node.banman) {
- throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
- }
+ BanMan& banman = EnsureAnyBanman(request.context);
- node.banman->ClearBanned();
+ banman.ClearBanned();
return UniValue::VNULL;
},
@@ -909,7 +901,7 @@ static RPCHelpMan getnodeaddresses()
UniValue obj(UniValue::VOBJ);
obj.pushKV("time", int64_t{TicksSinceEpoch<std::chrono::seconds>(addr.nTime)});
obj.pushKV("services", (uint64_t)addr.nServices);
- obj.pushKV("address", addr.ToStringIP());
+ obj.pushKV("address", addr.ToStringAddr());
obj.pushKV("port", addr.GetPort());
obj.pushKV("network", GetNetworkName(addr.GetNetClass()));
ret.push_back(obj);
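
setban, listbanned and clearbanned above drop their hand-rolled node.banman null checks in favour of EnsureBanman/EnsureAnyBanman. Those helpers are defined elsewhere and not shown in this diff; judging from the checks they replace, their shape is presumably along these lines (a sketch only, with includes omitted and the exact signature assumed rather than taken from this diff):

// Sketch: return the BanMan or throw the same error the inlined checks
// above used to throw. Not the actual definition from the codebase.
BanMan& EnsureBanman(const node::NodeContext& node)
{
    if (!node.banman) {
        throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
    }
    return *node.banman;
}
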
diff --git a/src/rpc/node.cpp b/src/rpc/node.cpp
index 79b8277968..5918bc6e38 100644
--- a/src/rpc/node.cpp
+++ b/src/rpc/node.cpp
@@ -240,11 +240,11 @@ static RPCHelpMan logging()
" - \"none\", \"0\" : even if other logging categories are specified, ignore all of them.\n"
,
{
- {"include", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The categories to add to debug logging",
+ {"include", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "The categories to add to debug logging",
{
{"include_category", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "the valid logging category"},
}},
- {"exclude", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The categories to remove from debug logging",
+ {"exclude", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "The categories to remove from debug logging",
{
{"exclude_category", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "the valid logging category"},
}},
@@ -294,16 +294,16 @@ static RPCHelpMan echo(const std::string& name)
"\nThe difference between echo and echojson is that echojson has argument conversion enabled in the client-side table in "
"bitcoin-cli and the GUI. There is no server-side difference.",
{
- {"arg0", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg1", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg2", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg3", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg4", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg5", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg6", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg7", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg8", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
- {"arg9", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg0", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg1", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg2", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg3", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg4", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg5", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg6", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg7", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg8", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
+ {"arg9", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "", RPCArgOptions{.skip_type_check = true}},
},
RPCResult{RPCResult::Type::ANY, "", "Returns whatever was passed in"},
RPCExamples{""},
@@ -376,7 +376,7 @@ static RPCHelpMan getindexinfo()
return RPCHelpMan{"getindexinfo",
"\nReturns the status of one or all available indices currently running in the node.\n",
{
- {"index_name", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Filter results for an index with a specific name."},
+ {"index_name", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Filter results for an index with a specific name."},
},
RPCResult{
RPCResult::Type::OBJ_DYN, "", "", {
diff --git a/src/rpc/output_script.cpp b/src/rpc/output_script.cpp
index 911c769e61..bb04f58424 100644
--- a/src/rpc/output_script.cpp
+++ b/src/rpc/output_script.cpp
@@ -230,7 +230,7 @@ static RPCHelpMan deriveaddresses()
"For more information on output descriptors, see the documentation in the doc/descriptors.md file.\n"},
{
{"descriptor", RPCArg::Type::STR, RPCArg::Optional::NO, "The descriptor."},
- {"range", RPCArg::Type::RANGE, RPCArg::Optional::OMITTED_NAMED_ARG, "If a ranged descriptor is used, this specifies the end or the range (in [begin,end] notation) to derive."},
+ {"range", RPCArg::Type::RANGE, RPCArg::Optional::OMITTED, "If a ranged descriptor is used, this specifies the end or the range (in [begin,end] notation) to derive."},
},
RPCResult{
RPCResult::Type::ARR, "", "",
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 981dead3b8..5ed8aee9ea 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -54,8 +54,11 @@ using node::PSBTAnalysis;
using node::ReadBlockFromDisk;
using node::UndoReadFromDisk;
-static void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry, Chainstate& active_chainstate, const CTxUndo* txundo = nullptr, TxVerbosity verbosity = TxVerbosity::SHOW_TXID)
+static void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry,
+ Chainstate& active_chainstate, const CTxUndo* txundo = nullptr,
+ TxVerbosity verbosity = TxVerbosity::SHOW_DETAILS)
{
+ CHECK_NONFATAL(verbosity >= TxVerbosity::SHOW_DETAILS);
// Call into TxToUniv() in bitcoin-common to decode the transaction hex.
//
// Blockchain contextual information (confirmations and blocktime) is not
@@ -187,7 +190,7 @@ static RPCHelpMan getrawtransaction()
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
{"verbosity|verbose", RPCArg::Type::NUM, RPCArg::Default{0}, "0 for hex-encoded data, 1 for a JSON object, and 2 for JSON object with fee and prevout",
RPCArgOptions{.skip_type_check = true}},
- {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED_NAMED_ARG, "The block in which to look for the transaction"},
+ {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "The block in which to look for the transaction"},
},
{
RPCResult{"if verbosity is not set or set to 0",
@@ -519,15 +522,17 @@ static RPCHelpMan decodescript()
if (can_wrap_P2WSH) {
UniValue sr(UniValue::VOBJ);
CScript segwitScr;
+ FlatSigningProvider provider;
if (which_type == TxoutType::PUBKEY) {
segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0])));
} else if (which_type == TxoutType::PUBKEYHASH) {
segwitScr = GetScriptForDestination(WitnessV0KeyHash(uint160{solutions_data[0]}));
} else {
// Scripts that are not fit for P2WPKH are encoded as P2WSH.
+ provider.scripts[CScriptID(script)] = script;
segwitScr = GetScriptForDestination(WitnessV0ScriptHash(script));
}
- ScriptToUniv(segwitScr, /*out=*/sr, /*include_hex=*/true, /*include_address=*/true);
+ ScriptToUniv(segwitScr, /*out=*/sr, /*include_hex=*/true, /*include_address=*/true, /*provider=*/&provider);
sr.pushKV("p2sh-segwit", EncodeDestination(ScriptHash(segwitScr)));
r.pushKV("segwit", sr);
}
@@ -639,7 +644,7 @@ static RPCHelpMan signrawtransactionwithkey()
{"privatekey", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "private key in base58-encoding"},
},
},
- {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The previous dependent transaction outputs",
+ {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "The previous dependent transaction outputs",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -1578,7 +1583,7 @@ static RPCHelpMan utxoupdatepsbt()
"\nUpdates all segwit inputs and outputs in a PSBT with data from output descriptors, the UTXO set or the mempool.\n",
{
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"},
- {"descriptors", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "An array of either strings or objects", {
+ {"descriptors", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "An array of either strings or objects", {
{"", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "An output descriptor"},
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "An object with an output descriptor and extra information", {
{"desc", RPCArg::Type::STR, RPCArg::Optional::NO, "An output descriptor"},
diff --git a/src/rpc/rawtransaction_util.cpp b/src/rpc/rawtransaction_util.cpp
index 15b8e1dcd0..3ba930f84f 100644
--- a/src/rpc/rawtransaction_util.cpp
+++ b/src/rpc/rawtransaction_util.cpp
@@ -21,12 +21,8 @@
#include <util/strencodings.h>
#include <util/translation.h>
-CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniValue& outputs_in, const UniValue& locktime, std::optional<bool> rbf)
+void AddInputs(CMutableTransaction& rawTx, const UniValue& inputs_in, std::optional<bool> rbf)
{
- if (outputs_in.isNull()) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, output argument must be non-null");
- }
-
UniValue inputs;
if (inputs_in.isNull()) {
inputs = UniValue::VARR;
@@ -34,18 +30,6 @@ CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniVal
inputs = inputs_in.get_array();
}
- const bool outputs_is_obj = outputs_in.isObject();
- UniValue outputs = outputs_is_obj ? outputs_in.get_obj() : outputs_in.get_array();
-
- CMutableTransaction rawTx;
-
- if (!locktime.isNull()) {
- int64_t nLockTime = locktime.getInt<int64_t>();
- if (nLockTime < 0 || nLockTime > LOCKTIME_MAX)
- throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, locktime out of range");
- rawTx.nLockTime = nLockTime;
- }
-
for (unsigned int idx = 0; idx < inputs.size(); idx++) {
const UniValue& input = inputs[idx];
const UniValue& o = input.get_obj();
@@ -84,6 +68,16 @@ CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniVal
rawTx.vin.push_back(in);
}
+}
+
+void AddOutputs(CMutableTransaction& rawTx, const UniValue& outputs_in)
+{
+ if (outputs_in.isNull()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, output argument must be non-null");
+ }
+
+ const bool outputs_is_obj = outputs_in.isObject();
+ UniValue outputs = outputs_is_obj ? outputs_in.get_obj() : outputs_in.get_array();
if (!outputs_is_obj) {
// Translate array of key-value pairs into dict
@@ -132,6 +126,21 @@ CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniVal
rawTx.vout.push_back(out);
}
}
+}
+
+CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniValue& outputs_in, const UniValue& locktime, std::optional<bool> rbf)
+{
+ CMutableTransaction rawTx;
+
+ if (!locktime.isNull()) {
+ int64_t nLockTime = locktime.getInt<int64_t>();
+ if (nLockTime < 0 || nLockTime > LOCKTIME_MAX)
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, locktime out of range");
+ rawTx.nLockTime = nLockTime;
+ }
+
+ AddInputs(rawTx, inputs_in, rbf);
+ AddOutputs(rawTx, outputs_in);
if (rbf.has_value() && rbf.value() && rawTx.vin.size() > 0 && !SignalsOptInRBF(CTransaction(rawTx))) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter combination: Sequence number(s) contradict replaceable option");
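
Splitting ConstructTransaction() into AddInputs()/AddOutputs() lets a caller assemble a transaction piecewise instead of in one call. A minimal sketch of such a caller (hypothetical, not part of this diff), assuming inputs_in/outputs_in are UniValue values already taken from RPC parameters:

    CMutableTransaction mtx;
    AddInputs(mtx, inputs_in, /*rbf=*/std::nullopt); // fills mtx.vin, applying the RBF sequence rules
    AddOutputs(mtx, outputs_in);                     // fills mtx.vout from the address/"data" pairs

ConstructTransaction() itself now reduces to setting nLockTime and calling these two helpers, as shown in the hunk above.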
diff --git a/src/rpc/rawtransaction_util.h b/src/rpc/rawtransaction_util.h
index 0c3823bc1e..a863432b7a 100644
--- a/src/rpc/rawtransaction_util.h
+++ b/src/rpc/rawtransaction_util.h
@@ -38,6 +38,13 @@ void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const
*/
void ParsePrevouts(const UniValue& prevTxsUnival, FillableSigningProvider* keystore, std::map<COutPoint, Coin>& coins);
+
+/** Normalize univalue-represented inputs and add them to the transaction */
+void AddInputs(CMutableTransaction& rawTx, const UniValue& inputs_in, std::optional<bool> rbf);
+
+/** Normalize univalue-represented outputs and add them to the transaction */
+void AddOutputs(CMutableTransaction& rawTx, const UniValue& outputs_in);
+
/** Create a transaction from univalue parameters */
CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniValue& outputs_in, const UniValue& locktime, std::optional<bool> rbf);
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index 0bb5533d71..6f37fe2a99 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -86,7 +86,7 @@ bool GenerateAuthCookie(std::string *cookie_out)
std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd);
/** the umask determines what permissions are used to create this file -
- * these are set to 077 in init.cpp unless overridden with -sysperms.
+ * these are set to 0077 in util/system.cpp.
*/
std::ofstream file;
fs::path filepath_tmp = GetAuthCookieFile(true);
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 9f57a56297..44d7e2676b 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -168,7 +168,7 @@ static RPCHelpMan stop()
// to the client (intended for testing)
"\nRequest a graceful shutdown of " PACKAGE_NAME ".",
{
- {"wait", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "how long to wait in ms", RPCArgOptions{.hidden=true}},
+ {"wait", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "how long to wait in ms", RPCArgOptions{.hidden=true}},
},
RPCResult{RPCResult::Type::STR, "", "A string with the content '" + RESULT + "'"},
RPCExamples{""},
diff --git a/src/rpc/server_util.cpp b/src/rpc/server_util.cpp
index 50f9ce7b3c..7a708ec813 100644
--- a/src/rpc/server_util.cpp
+++ b/src/rpc/server_util.cpp
@@ -39,6 +39,20 @@ CTxMemPool& EnsureAnyMemPool(const std::any& context)
return EnsureMemPool(EnsureAnyNodeContext(context));
}
+
+BanMan& EnsureBanman(const NodeContext& node)
+{
+ if (!node.banman) {
+ throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
+ }
+ return *node.banman;
+}
+
+BanMan& EnsureAnyBanman(const std::any& context)
+{
+ return EnsureBanman(EnsureAnyNodeContext(context));
+}
+
ArgsManager& EnsureArgsman(const NodeContext& node)
{
if (!node.args) {
diff --git a/src/rpc/server_util.h b/src/rpc/server_util.h
index fa008a8155..9af9572431 100644
--- a/src/rpc/server_util.h
+++ b/src/rpc/server_util.h
@@ -13,6 +13,7 @@ class CConnman;
class CTxMemPool;
class ChainstateManager;
class PeerManager;
+class BanMan;
namespace node {
struct NodeContext;
} // namespace node
@@ -20,6 +21,8 @@ struct NodeContext;
node::NodeContext& EnsureAnyNodeContext(const std::any& context);
CTxMemPool& EnsureMemPool(const node::NodeContext& node);
CTxMemPool& EnsureAnyMemPool(const std::any& context);
+BanMan& EnsureBanman(const node::NodeContext& node);
+BanMan& EnsureAnyBanman(const std::any& context);
ArgsManager& EnsureArgsman(const node::NodeContext& node);
ArgsManager& EnsureAnyArgsman(const std::any& context);
ChainstateManager& EnsureChainman(const node::NodeContext& node);
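
As a usage sketch, an RPC handler can now obtain the ban manager the same way it obtains the mempool or args manager; the handler below is hypothetical and only illustrates the pattern (BanMan::ClearBanned() is an existing method):

    static UniValue HypotheticalClearBanned(const JSONRPCRequest& request)
    {
        // Throws RPC_DATABASE_ERROR ("Ban database not loaded") if node.banman is unset.
        BanMan& banman = EnsureAnyBanman(request.context);
        banman.ClearBanned();
        return NullUniValue;
    }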
diff --git a/src/rpc/txoutproof.cpp b/src/rpc/txoutproof.cpp
index 8c5468634d..24b5d04115 100644
--- a/src/rpc/txoutproof.cpp
+++ b/src/rpc/txoutproof.cpp
@@ -34,7 +34,7 @@ static RPCHelpMan gettxoutproof()
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A transaction hash"},
},
},
- {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED_NAMED_ARG, "If specified, looks for txid in the block with this hash"},
+ {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "If specified, looks for txid in the block with this hash"},
},
RPCResult{
RPCResult::Type::STR, "data", "A string that is a serialized, hex-encoded data for the proof."
@@ -112,7 +112,7 @@ static RPCHelpMan gettxoutproof()
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Not all transactions found in specified or retrieved block");
}
- CDataStream ssMB(SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS);
+ DataStream ssMB{};
CMerkleBlock mb(block, setTxids);
ssMB << mb;
std::string strHex = HexStr(ssMB);
@@ -138,7 +138,7 @@ static RPCHelpMan verifytxoutproof()
RPCExamples{""},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- CDataStream ssMB(ParseHexV(request.params[0], "proof"), SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS);
+ DataStream ssMB{ParseHexV(request.params[0], "proof")};
CMerkleBlock merkleBlock;
ssMB >> merkleBlock;
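
DataStream carries no serialization type or version state, which is why the SER_NETWORK / PROTOCOL_VERSION arguments drop out here; since CMerkleBlock only serializes the block header and a partial merkle tree of txids, the removed no-witness flag was a no-op anyway. A rough round-trip sketch (assuming mb is a CMerkleBlock built as in the surrounding context):

    DataStream ss{};
    ss << mb;                           // serialize
    const std::string hex{HexStr(ss)};  // hex-encode the raw bytes

    DataStream ss2{ParseHex(hex)};      // reload from hex
    CMerkleBlock mb2;
    ss2 >> mb2;                         // deserialize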
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 9619c3df99..a1020c3b2b 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <clientversion.h>
#include <consensus/amount.h>
#include <key_io.h>
#include <outputtype.h>
@@ -30,14 +31,6 @@ std::string GetAllOutputTypes()
return Join(ret, ", ");
}
-void RPCTypeCheckArgument(const UniValue& value, const UniValueType& typeExpected)
-{
- if (!typeExpected.typeAny && value.type() != typeExpected.type) {
- throw JSONRPCError(RPC_TYPE_ERROR,
- strprintf("JSON value of type %s is not of expected type %s", uvTypeName(value.type()), uvTypeName(typeExpected.type)));
- }
-}
-
void RPCTypeCheckObj(const UniValue& o,
const std::map<std::string, UniValueType>& typesExpected,
bool fAllowNull,
@@ -563,12 +556,39 @@ UniValue RPCHelpMan::HandleRequest(const JSONRPCRequest& request) const
if (request.mode == JSONRPCRequest::GET_HELP || !IsValidNumArgs(request.params.size())) {
throw std::runtime_error(ToString());
}
+ UniValue arg_mismatch{UniValue::VOBJ};
for (size_t i{0}; i < m_args.size(); ++i) {
- m_args.at(i).MatchesType(request.params[i]);
+ const auto& arg{m_args.at(i)};
+ UniValue match{arg.MatchesType(request.params[i])};
+ if (!match.isTrue()) {
+ arg_mismatch.pushKV(strprintf("Position %s (%s)", i + 1, arg.m_names), std::move(match));
+ }
+ }
+ if (!arg_mismatch.empty()) {
+ throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Wrong type passed:\n%s", arg_mismatch.write(4)));
}
UniValue ret = m_fun(*this, request);
if (gArgs.GetBoolArg("-rpcdoccheck", DEFAULT_RPC_DOC_CHECK)) {
- CHECK_NONFATAL(std::any_of(m_results.m_results.begin(), m_results.m_results.end(), [&ret](const RPCResult& res) { return res.MatchesType(ret); }));
+ UniValue mismatch{UniValue::VARR};
+ for (const auto& res : m_results.m_results) {
+ UniValue match{res.MatchesType(ret)};
+ if (match.isTrue()) {
+ mismatch.setNull();
+ break;
+ }
+ mismatch.push_back(match);
+ }
+ if (!mismatch.isNull()) {
+ std::string explain{
+ mismatch.empty() ? "no possible results defined" :
+ mismatch.size() == 1 ? mismatch[0].write(4) :
+ mismatch.write(4)};
+ throw std::runtime_error{
+ strprintf("Internal bug detected: RPC call \"%s\" returned incorrect type:\n%s\n%s %s\nPlease report this issue here: %s\n",
+ m_name, explain,
+ PACKAGE_NAME, FormatFullVersion(),
+ PACKAGE_BUGREPORT)};
+ }
}
return ret;
}
@@ -664,42 +684,50 @@ UniValue RPCHelpMan::GetArgMap() const
return arr;
}
-void RPCArg::MatchesType(const UniValue& request) const
+static std::optional<UniValue::VType> ExpectedType(RPCArg::Type type)
{
- if (m_opts.skip_type_check) return;
- if (IsOptional() && request.isNull()) return;
- switch (m_type) {
+ using Type = RPCArg::Type;
+ switch (type) {
case Type::STR_HEX:
case Type::STR: {
- RPCTypeCheckArgument(request, UniValue::VSTR);
- return;
+ return UniValue::VSTR;
}
case Type::NUM: {
- RPCTypeCheckArgument(request, UniValue::VNUM);
- return;
+ return UniValue::VNUM;
}
case Type::AMOUNT: {
// VNUM or VSTR, checked inside AmountFromValue()
- return;
+ return std::nullopt;
}
case Type::RANGE: {
// VNUM or VARR, checked inside ParseRange()
- return;
+ return std::nullopt;
}
case Type::BOOL: {
- RPCTypeCheckArgument(request, UniValue::VBOOL);
- return;
+ return UniValue::VBOOL;
}
case Type::OBJ:
case Type::OBJ_USER_KEYS: {
- RPCTypeCheckArgument(request, UniValue::VOBJ);
- return;
+ return UniValue::VOBJ;
}
case Type::ARR: {
- RPCTypeCheckArgument(request, UniValue::VARR);
- return;
+ return UniValue::VARR;
}
} // no default case, so the compiler can warn about missing cases
+ NONFATAL_UNREACHABLE();
+}
+
+UniValue RPCArg::MatchesType(const UniValue& request) const
+{
+ if (m_opts.skip_type_check) return true;
+ if (IsOptional() && request.isNull()) return true;
+ const auto exp_type{ExpectedType(m_type)};
+ if (!exp_type) return true; // nothing to check
+
+ if (*exp_type != request.getType()) {
+ return strprintf("JSON value of type %s is not of expected type %s", uvTypeName(request.getType()), uvTypeName(*exp_type));
+ }
+ return true;
}
std::string RPCArg::GetFirstName() const
@@ -768,7 +796,6 @@ std::string RPCArg::ToDescriptionString(bool is_named_arg) const
ret += ", optional, default=" + std::get<RPCArg::Default>(m_fallback).write();
} else {
switch (std::get<RPCArg::Optional>(m_fallback)) {
- case RPCArg::Optional::OMITTED_NAMED_ARG: // Deprecated alias for OMITTED, can be removed
case RPCArg::Optional::OMITTED: {
if (is_named_arg) ret += ", optional"; // Default value is "null" in dicts. Otherwise,
// nothing to do. Element is treated as if not present and has no default value
@@ -883,53 +910,77 @@ void RPCResult::ToSections(Sections& sections, const OuterType outer_type, const
NONFATAL_UNREACHABLE();
}
-bool RPCResult::MatchesType(const UniValue& result) const
+static std::optional<UniValue::VType> ExpectedType(RPCResult::Type type)
{
- if (m_skip_type_check) {
- return true;
- }
- switch (m_type) {
+ using Type = RPCResult::Type;
+ switch (type) {
case Type::ELISION:
case Type::ANY: {
- return true;
+ return std::nullopt;
}
case Type::NONE: {
- return UniValue::VNULL == result.getType();
+ return UniValue::VNULL;
}
case Type::STR:
case Type::STR_HEX: {
- return UniValue::VSTR == result.getType();
+ return UniValue::VSTR;
}
case Type::NUM:
case Type::STR_AMOUNT:
case Type::NUM_TIME: {
- return UniValue::VNUM == result.getType();
+ return UniValue::VNUM;
}
case Type::BOOL: {
- return UniValue::VBOOL == result.getType();
+ return UniValue::VBOOL;
}
case Type::ARR_FIXED:
case Type::ARR: {
- if (UniValue::VARR != result.getType()) return false;
+ return UniValue::VARR;
+ }
+ case Type::OBJ_DYN:
+ case Type::OBJ: {
+ return UniValue::VOBJ;
+ }
+ } // no default case, so the compiler can warn about missing cases
+ NONFATAL_UNREACHABLE();
+}
+
+UniValue RPCResult::MatchesType(const UniValue& result) const
+{
+ if (m_skip_type_check) {
+ return true;
+ }
+
+ const auto exp_type = ExpectedType(m_type);
+ if (!exp_type) return true; // can be any type, so nothing to check
+
+ if (*exp_type != result.getType()) {
+ return strprintf("returned type is %s, but declared as %s in doc", uvTypeName(result.getType()), uvTypeName(*exp_type));
+ }
+
+ if (UniValue::VARR == result.getType()) {
+ UniValue errors(UniValue::VOBJ);
for (size_t i{0}; i < result.get_array().size(); ++i) {
// If there are more results than documented, re-use the last doc_inner.
const RPCResult& doc_inner{m_inner.at(std::min(m_inner.size() - 1, i))};
- if (!doc_inner.MatchesType(result.get_array()[i])) return false;
+ UniValue match{doc_inner.MatchesType(result.get_array()[i])};
+ if (!match.isTrue()) errors.pushKV(strprintf("%d", i), match);
}
- return true; // empty result array is valid
+ if (errors.empty()) return true; // empty result array is valid
+ return errors;
}
- case Type::OBJ_DYN:
- case Type::OBJ: {
- if (UniValue::VOBJ != result.getType()) return false;
+
+ if (UniValue::VOBJ == result.getType()) {
if (!m_inner.empty() && m_inner.at(0).m_type == Type::ELISION) return true;
+ UniValue errors(UniValue::VOBJ);
if (m_type == Type::OBJ_DYN) {
const RPCResult& doc_inner{m_inner.at(0)}; // Assume all types are the same, randomly pick the first
for (size_t i{0}; i < result.get_obj().size(); ++i) {
- if (!doc_inner.MatchesType(result.get_obj()[i])) {
- return false;
- }
+ UniValue match{doc_inner.MatchesType(result.get_obj()[i])};
+ if (!match.isTrue()) errors.pushKV(result.getKeys()[i], match);
}
- return true; // empty result obj is valid
+ if (errors.empty()) return true; // empty result obj is valid
+ return errors;
}
std::set<std::string> doc_keys;
for (const auto& doc_entry : m_inner) {
@@ -939,7 +990,7 @@ bool RPCResult::MatchesType(const UniValue& result) const
result.getObjMap(result_obj);
for (const auto& result_entry : result_obj) {
if (doc_keys.find(result_entry.first) == doc_keys.end()) {
- return false; // missing documentation
+ errors.pushKV(result_entry.first, "key returned that was not in doc");
}
}
@@ -947,18 +998,18 @@ bool RPCResult::MatchesType(const UniValue& result) const
const auto result_it{result_obj.find(doc_entry.m_key_name)};
if (result_it == result_obj.end()) {
if (!doc_entry.m_optional) {
- return false; // result is missing a required key
+ errors.pushKV(doc_entry.m_key_name, "key missing, despite not being optional in doc");
}
continue;
}
- if (!doc_entry.MatchesType(result_it->second)) {
- return false; // wrong type
- }
+ UniValue match{doc_entry.MatchesType(result_it->second)};
+ if (!match.isTrue()) errors.pushKV(doc_entry.m_key_name, match);
}
- return true;
+ if (errors.empty()) return true;
+ return errors;
}
- } // no default case, so the compiler can warn about missing cases
- NONFATAL_UNREACHABLE();
+
+ return true;
}
void RPCResult::CheckInnerDoc() const
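
The behavioural effect of returning a UniValue from MatchesType() is that HandleRequest() can collect every mismatched parameter and report them together in one RPC_TYPE_ERROR, instead of throwing on the first failure. An illustrative sketch (the argument definition and input value are made up):

    const RPCArg arg{"verbosity", RPCArg::Type::NUM, RPCArg::Default{0}, "verbosity level"};
    const UniValue match{arg.MatchesType(UniValue{"two"})};
    // match.isTrue() is false; match holds the string
    // "JSON value of type string is not of expected type number",
    // which HandleRequest() files under "Position N (verbosity)" in the aggregated error.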
diff --git a/src/rpc/util.h b/src/rpc/util.h
index 30c46bfdcd..e3783c8f76 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -62,11 +62,6 @@ struct UniValueType {
UniValue::VType type;
};
-/**
- * Type-check one argument; throws JSONRPCError if wrong type given.
- */
-void RPCTypeCheckArgument(const UniValue& value, const UniValueType& typeExpected);
-
/*
Check for expected keys/value types in an Object.
*/
@@ -154,18 +149,14 @@ struct RPCArg {
/** Required arg */
NO,
/**
- * The arg is optional for one of two reasons:
- *
- * Optional arg that is a named argument and has a default value of
- * `null`.
- *
- * Optional argument with default value omitted because they are
- * implicitly clear. That is, elements in an array may not
- * exist by default.
+ * Optional argument for which the default value is omitted from
+ * help text for one of two reasons:
+ * - It's a named argument and has a default value of `null`.
+ * - Its default value is implicitly clear. That is, elements in an
+ * array may not exist by default.
* When possible, the default value should be specified.
*/
OMITTED,
- OMITTED_NAMED_ARG, // Deprecated alias for OMITTED, can be removed
};
/** Hint for default value */
using DefaultHint = std::string;
@@ -214,8 +205,11 @@ struct RPCArg {
bool IsOptional() const;
- /** Check whether the request JSON type matches. */
- void MatchesType(const UniValue& request) const;
+ /**
+ * Check whether the request JSON type matches.
+ * Returns true if type matches, or object describing error(s) if not.
+ */
+ UniValue MatchesType(const UniValue& request) const;
/** Return the first of all aliases */
std::string GetFirstName() const;
@@ -324,8 +318,10 @@ struct RPCResult {
std::string ToStringObj() const;
/** Return the description string, including the result type. */
std::string ToDescriptionString() const;
- /** Check whether the result JSON type matches. */
- bool MatchesType(const UniValue& result) const;
+ /** Check whether the result JSON type matches.
+ * Returns true if type matches, or object describing error(s) if not.
+ */
+ UniValue MatchesType(const UniValue& result) const;
private:
void CheckInnerDoc() const;
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 864eb8864f..857fee1818 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -4,11 +4,13 @@
#include <script/descriptor.h>
+#include <hash.h>
#include <key_io.h>
#include <pubkey.h>
#include <script/miniscript.h>
#include <script/script.h>
#include <script/standard.h>
+#include <uint256.h>
#include <span.h>
#include <util/bip32.h>
@@ -1012,7 +1014,7 @@ public:
return false;
}
- bool IsSolvable() const override { return false; } // For now, mark these descriptors as non-solvable (as we don't have signing logic for them).
+ bool IsSolvable() const override { return true; }
bool IsSingleType() const final { return true; }
};
@@ -1618,8 +1620,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo
}
}
if (txntype == TxoutType::WITNESS_V0_SCRIPTHASH && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH)) {
- CScriptID scriptid;
- CRIPEMD160().Write(data[0].data(), data[0].size()).Finalize(scriptid.begin());
+ CScriptID scriptid{RIPEMD160(data[0])};
CScript subscript;
if (provider.GetCScript(scriptid, subscript)) {
auto sub = InferScript(subscript, ParseScriptContext::P2WSH, provider);
@@ -1643,7 +1644,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo
for (const auto& [depth, script, leaf_ver] : *tree) {
std::unique_ptr<DescriptorImpl> subdesc;
if (leaf_ver == TAPROOT_LEAF_TAPSCRIPT) {
- subdesc = InferScript(script, ParseScriptContext::P2TR, provider);
+ subdesc = InferScript(CScript(script.begin(), script.end()), ParseScriptContext::P2TR, provider);
}
if (!subdesc) {
ok = false;
@@ -1832,17 +1833,17 @@ DescriptorCache DescriptorCache::MergeAndDiff(const DescriptorCache& other)
return diff;
}
-const ExtPubKeyMap DescriptorCache::GetCachedParentExtPubKeys() const
+ExtPubKeyMap DescriptorCache::GetCachedParentExtPubKeys() const
{
return m_parent_xpubs;
}
-const std::unordered_map<uint32_t, ExtPubKeyMap> DescriptorCache::GetCachedDerivedExtPubKeys() const
+std::unordered_map<uint32_t, ExtPubKeyMap> DescriptorCache::GetCachedDerivedExtPubKeys() const
{
return m_derived_xpubs;
}
-const ExtPubKeyMap DescriptorCache::GetCachedLastHardenedExtPubKeys() const
+ExtPubKeyMap DescriptorCache::GetCachedLastHardenedExtPubKeys() const
{
return m_last_hardened_xpubs;
}
diff --git a/src/script/descriptor.h b/src/script/descriptor.h
index 16ee2f6d97..39b1a37f9a 100644
--- a/src/script/descriptor.h
+++ b/src/script/descriptor.h
@@ -35,7 +35,7 @@ public:
/** Retrieve a cached parent xpub
*
* @param[in] key_exp_pos Position of the key expression within the descriptor
- * @param[in] xpub The CExtPubKey to get from cache
+ * @param[out] xpub The CExtPubKey to get from cache
*/
bool GetCachedParentExtPubKey(uint32_t key_exp_pos, CExtPubKey& xpub) const;
/** Cache an xpub derived at an index
@@ -49,7 +49,7 @@ public:
*
* @param[in] key_exp_pos Position of the key expression within the descriptor
* @param[in] der_index Derivation index of the xpub
- * @param[in] xpub The CExtPubKey to get from cache
+ * @param[out] xpub The CExtPubKey to get from cache
*/
bool GetCachedDerivedExtPubKey(uint32_t key_exp_pos, uint32_t der_index, CExtPubKey& xpub) const;
/** Cache a last hardened xpub
@@ -61,16 +61,16 @@ public:
/** Retrieve a cached last hardened xpub
*
* @param[in] key_exp_pos Position of the key expression within the descriptor
- * @param[in] xpub The CExtPubKey to get from cache
+ * @param[out] xpub The CExtPubKey to get from cache
*/
bool GetCachedLastHardenedExtPubKey(uint32_t key_exp_pos, CExtPubKey& xpub) const;
/** Retrieve all cached parent xpubs */
- const ExtPubKeyMap GetCachedParentExtPubKeys() const;
+ ExtPubKeyMap GetCachedParentExtPubKeys() const;
/** Retrieve all cached derived xpubs */
- const std::unordered_map<uint32_t, ExtPubKeyMap> GetCachedDerivedExtPubKeys() const;
+ std::unordered_map<uint32_t, ExtPubKeyMap> GetCachedDerivedExtPubKeys() const;
/** Retrieve all cached last hardened xpubs */
- const ExtPubKeyMap GetCachedLastHardenedExtPubKeys() const;
+ ExtPubKeyMap GetCachedLastHardenedExtPubKeys() const;
/** Combine another DescriptorCache into this one.
* Returns a cache containing the items from the other cache unknown to current cache
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index a942ff349b..5f4a1aceb2 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1439,7 +1439,7 @@ void PrecomputedTransactionData::Init(const T& txTo, std::vector<CTxOut>&& spent
hashOutputs = SHA256Uint256(m_outputs_single_hash);
m_bip143_segwit_ready = true;
}
- if (uses_bip341_taproot) {
+ if (uses_bip341_taproot && m_spent_outputs_ready) {
m_spent_amounts_single_hash = GetSpentAmountsSHA256(m_spent_outputs);
m_spent_scripts_single_hash = GetSpentScriptsSHA256(m_spent_outputs);
m_bip341_taproot_ready = true;
@@ -1825,9 +1825,20 @@ static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CS
return true;
}
-uint256 ComputeTapleafHash(uint8_t leaf_version, const CScript& script)
+uint256 ComputeTapleafHash(uint8_t leaf_version, Span<const unsigned char> script)
{
- return (HashWriter{HASHER_TAPLEAF} << leaf_version << script).GetSHA256();
+ return (HashWriter{HASHER_TAPLEAF} << leaf_version << CompactSizeWriter(script.size()) << script).GetSHA256();
+}
+
+uint256 ComputeTapbranchHash(Span<const unsigned char> a, Span<const unsigned char> b)
+{
+ HashWriter ss_branch{HASHER_TAPBRANCH};
+ if (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())) {
+ ss_branch << a << b;
+ } else {
+ ss_branch << b << a;
+ }
+ return ss_branch.GetSHA256();
}
uint256 ComputeTaprootMerkleRoot(Span<const unsigned char> control, const uint256& tapleaf_hash)
@@ -1839,14 +1850,8 @@ uint256 ComputeTaprootMerkleRoot(Span<const unsigned char> control, const uint25
const int path_len = (control.size() - TAPROOT_CONTROL_BASE_SIZE) / TAPROOT_CONTROL_NODE_SIZE;
uint256 k = tapleaf_hash;
for (int i = 0; i < path_len; ++i) {
- HashWriter ss_branch{HASHER_TAPBRANCH};
Span node{Span{control}.subspan(TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * i, TAPROOT_CONTROL_NODE_SIZE)};
- if (std::lexicographical_compare(k.begin(), k.end(), node.begin(), node.end())) {
- ss_branch << k << node;
- } else {
- ss_branch << node << k;
- }
- k = ss_branch.GetSHA256();
+ k = ComputeTapbranchHash(k, node);
}
return k;
}
@@ -1917,18 +1922,18 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion,
} else {
// Script path spending (stack size is >1 after removing optional annex)
const valtype& control = SpanPopBack(stack);
- const valtype& script_bytes = SpanPopBack(stack);
- exec_script = CScript(script_bytes.begin(), script_bytes.end());
+ const valtype& script = SpanPopBack(stack);
if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) {
return set_error(serror, SCRIPT_ERR_TAPROOT_WRONG_CONTROL_SIZE);
}
- execdata.m_tapleaf_hash = ComputeTapleafHash(control[0] & TAPROOT_LEAF_MASK, exec_script);
+ execdata.m_tapleaf_hash = ComputeTapleafHash(control[0] & TAPROOT_LEAF_MASK, script);
if (!VerifyTaprootCommitment(control, program, execdata.m_tapleaf_hash)) {
return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH);
}
execdata.m_tapleaf_hash_init = true;
if ((control[0] & TAPROOT_LEAF_MASK) == TAPROOT_LEAF_TAPSCRIPT) {
// Tapscript (leaf version 0xc0)
+ exec_script = CScript(script.begin(), script.end());
execdata.m_validation_weight_left = ::GetSerializeSize(witness.stack, PROTOCOL_VERSION) + VALIDATION_WEIGHT_OFFSET;
execdata.m_validation_weight_left_init = true;
return ExecuteWitnessScript(stack, exec_script, flags, SigVersion::TAPSCRIPT, checker, execdata, serror);
diff --git a/src/script/interpreter.h b/src/script/interpreter.h
index 42282e6e5c..ac1013302d 100644
--- a/src/script/interpreter.h
+++ b/src/script/interpreter.h
@@ -333,7 +333,10 @@ public:
};
/** Compute the BIP341 tapleaf hash from leaf version & script. */
-uint256 ComputeTapleafHash(uint8_t leaf_version, const CScript& script);
+uint256 ComputeTapleafHash(uint8_t leaf_version, Span<const unsigned char> script);
+/** Compute the BIP341 tapbranch hash from two branches.
+ * Spans must be 32 bytes each. */
+uint256 ComputeTapbranchHash(Span<const unsigned char> a, Span<const unsigned char> b);
/** Compute the BIP341 taproot script tree Merkle root from control block and leaf hash.
* Requires control block to have valid length (33 + k*32, with k in {0,1,..,128}). */
uint256 ComputeTaprootMerkleRoot(Span<const unsigned char> control, const uint256& tapleaf_hash);
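
Taking the leaf script as a Span and prepending a CompactSize length keeps ComputeTapleafHash() byte-for-byte equivalent to the old "<< script" serialization of a CScript, while avoiding the CScript copy. A sketch of composing the two helpers for a two-leaf tree (leaf contents are placeholders):

    const std::vector<unsigned char> script_a{0x51}; // OP_TRUE
    const std::vector<unsigned char> script_b{0x52}; // OP_2
    const uint256 leaf_a{ComputeTapleafHash(TAPROOT_LEAF_TAPSCRIPT, script_a)};
    const uint256 leaf_b{ComputeTapleafHash(TAPROOT_LEAF_TAPSCRIPT, script_b)};
    // Lexicographic ordering of the two 32-byte children is handled inside the helper.
    const uint256 root{ComputeTapbranchHash(leaf_a, leaf_b)};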
diff --git a/src/script/miniscript.cpp b/src/script/miniscript.cpp
index 5e471cbe89..3937638cf8 100644
--- a/src/script/miniscript.cpp
+++ b/src/script/miniscript.cpp
@@ -172,8 +172,8 @@ Type ComputeType(Fragment fragment, Type x, Type y, Type z, const std::vector<Ty
(y & "B"_mst).If(x << "Bdu"_mst) | // B=B_y*B_x*d_x*u_x
(x & "o"_mst).If(y << "z"_mst) | // o=o_x*z_y
(x & y & "m"_mst).If(x << "e"_mst && (x | y) << "s"_mst) | // m=m_x*m_y*e_x*(s_x+s_y)
- (x & y & "zes"_mst) | // z=z_x*z_y, e=e_x*e_y, s=s_x*s_y
- (y & "ufd"_mst) | // u=u_y, f=f_y, d=d_y
+ (x & y & "zs"_mst) | // z=z_x*z_y, s=s_x*s_y
+ (y & "ufde"_mst) | // u=u_y, f=f_y, d=d_y, e=e_y
"x"_mst | // x
((x | y) & "ghij"_mst) | // g=g_x+g_y, h=h_x+h_y, i=i_x+i_y, j=j_x+j_y
(x & y & "k"_mst); // k=k_x*k_y
@@ -201,7 +201,7 @@ Type ComputeType(Fragment fragment, Type x, Type y, Type z, const std::vector<Ty
(y & z & "u"_mst) | // u=u_y*u_z
(z & "f"_mst).If((x << "s"_mst) || (y << "f"_mst)) | // f=(s_x+f_y)*f_z
(z & "d"_mst) | // d=d_z
- (x & z & "e"_mst).If(x << "s"_mst || y << "f"_mst) | // e=e_x*e_z*(s_x+f_y)
+ (z & "e"_mst).If(x << "s"_mst || y << "f"_mst) | // e=e_z*(s_x+f_y)
(x & y & z & "m"_mst).If(x << "e"_mst && (x | y | z) << "s"_mst) | // m=m_x*m_y*m_z*e_x*(s_x+s_y+s_z)
(z & (x | y) & "s"_mst) | // s=s_z*(s_x+s_y)
"x"_mst | // x
@@ -279,6 +279,76 @@ size_t ComputeScriptLen(Fragment fragment, Type sub0typ, size_t subsize, uint32_
assert(false);
}
+InputStack& InputStack::SetAvailable(Availability avail) {
+ available = avail;
+ if (avail == Availability::NO) {
+ stack.clear();
+ size = std::numeric_limits<size_t>::max();
+ has_sig = false;
+ malleable = false;
+ non_canon = false;
+ }
+ return *this;
+}
+
+InputStack& InputStack::SetWithSig() {
+ has_sig = true;
+ return *this;
+}
+
+InputStack& InputStack::SetNonCanon() {
+ non_canon = true;
+ return *this;
+}
+
+InputStack& InputStack::SetMalleable(bool x) {
+ malleable = x;
+ return *this;
+}
+
+InputStack operator+(InputStack a, InputStack b) {
+ a.stack = Cat(std::move(a.stack), std::move(b.stack));
+ if (a.available != Availability::NO && b.available != Availability::NO) a.size += b.size;
+ a.has_sig |= b.has_sig;
+ a.malleable |= b.malleable;
+ a.non_canon |= b.non_canon;
+ if (a.available == Availability::NO || b.available == Availability::NO) {
+ a.SetAvailable(Availability::NO);
+ } else if (a.available == Availability::MAYBE || b.available == Availability::MAYBE) {
+ a.SetAvailable(Availability::MAYBE);
+ }
+ return a;
+}
+
+InputStack operator|(InputStack a, InputStack b) {
+ // If only one is invalid, pick the other one. If both are invalid, pick an arbitrary one.
+ if (a.available == Availability::NO) return b;
+ if (b.available == Availability::NO) return a;
+ // If only one of the solutions has a signature, we must pick the other one.
+ if (!a.has_sig && b.has_sig) return a;
+ if (!b.has_sig && a.has_sig) return b;
+ if (!a.has_sig && !b.has_sig) {
+ // If neither solution requires a signature, the result is inevitably malleable.
+ a.malleable = true;
+ b.malleable = true;
+ } else {
+ // If both options require a signature, prefer the non-malleable one.
+ if (b.malleable && !a.malleable) return a;
+ if (a.malleable && !b.malleable) return b;
+ }
+ // Between two malleable or two non-malleable solutions, pick the smaller one between
+ // YESes, and the bigger ones between MAYBEs. Prefer YES over MAYBE.
+ if (a.available == Availability::YES && b.available == Availability::YES) {
+ return std::move(a.size <= b.size ? a : b);
+ } else if (a.available == Availability::MAYBE && b.available == Availability::MAYBE) {
+ return std::move(a.size >= b.size ? a : b);
+ } else if (a.available == Availability::YES) {
+ return a;
+ } else {
+ return b;
+ }
+}
+
std::optional<std::vector<Opcode>> DecomposeScript(const CScript& script)
{
std::vector<Opcode> out;
diff --git a/src/script/miniscript.h b/src/script/miniscript.h
index fa3b0350e9..c42b530c4d 100644
--- a/src/script/miniscript.h
+++ b/src/script/miniscript.h
@@ -223,6 +223,11 @@ enum class Fragment {
// WRAP_U(X) is represented as OR_I(X,0)
};
+enum class Availability {
+ NO,
+ YES,
+ MAYBE,
+};
namespace internal {
@@ -235,6 +240,62 @@ size_t ComputeScriptLen(Fragment fragment, Type sub0typ, size_t subsize, uint32_
//! A helper sanitizer/checker for the output of CalcType.
Type SanitizeType(Type x);
+//! An object representing a sequence of witness stack elements.
+struct InputStack {
+ /** Whether this stack is valid for its intended purpose (satisfaction or dissatisfaction of a Node).
+ * The MAYBE value is used for size estimation, when keys/preimages may actually be unavailable,
+ * but may be available at signing time. This makes the InputStack structure and signing logic,
+ * filled with dummy signatures/preimages usable for witness size estimation.
+ */
+ Availability available = Availability::YES;
+ //! Whether this stack contains a digital signature.
+ bool has_sig = false;
+ //! Whether this stack is malleable (can be turned into an equally valid other stack by a third party).
+ bool malleable = false;
+ //! Whether this stack is non-canonical (using a construction known to be unnecessary for satisfaction).
+ //! Note that this flag does not affect the satisfaction algorithm; it is only used for sanity checking.
+ bool non_canon = false;
+ //! Serialized witness size.
+ size_t size = 0;
+ //! Data elements.
+ std::vector<std::vector<unsigned char>> stack;
+ //! Construct an empty stack (valid).
+ InputStack() {}
+ //! Construct a valid single-element stack (with an element up to 75 bytes).
+ InputStack(std::vector<unsigned char> in) : size(in.size() + 1), stack(Vector(std::move(in))) {}
+ //! Change availability
+ InputStack& SetAvailable(Availability avail);
+ //! Mark this input stack as having a signature.
+ InputStack& SetWithSig();
+ //! Mark this input stack as non-canonical (known to not be necessary in non-malleable satisfactions).
+ InputStack& SetNonCanon();
+ //! Mark this input stack as malleable.
+ InputStack& SetMalleable(bool x = true);
+ //! Concatenate two input stacks.
+ friend InputStack operator+(InputStack a, InputStack b);
+ //! Choose between two potential input stacks.
+ friend InputStack operator|(InputStack a, InputStack b);
+};
+
+/** A stack consisting of a single zero-length element (interpreted as 0 by the script interpreter in numeric context). */
+static const auto ZERO = InputStack(std::vector<unsigned char>());
+/** A stack consisting of a single malleable 32-byte 0x0000...0000 element (for dissatisfying hash challenges). */
+static const auto ZERO32 = InputStack(std::vector<unsigned char>(32, 0)).SetMalleable();
+/** A stack consisting of a single 0x01 element (interpreted as 1 by the script interpreter in numeric context). */
+static const auto ONE = InputStack(Vector((unsigned char)1));
+/** The empty stack. */
+static const auto EMPTY = InputStack();
+/** A stack representing the lack of any (dis)satisfactions. */
+static const auto INVALID = InputStack().SetAvailable(Availability::NO);
+
+//! A pair of a satisfaction and a dissatisfaction InputStack.
+struct InputResult {
+ InputStack nsat, sat;
+
+ template<typename A, typename B>
+ InputResult(A&& in_nsat, B&& in_sat) : nsat(std::forward<A>(in_nsat)), sat(std::forward<B>(in_sat)) {}
+};
+
//! Class whose objects represent the maximum of a list of integers.
template<typename I>
struct MaxInt {
@@ -785,6 +846,226 @@ private:
assert(false);
}
+ template<typename Ctx>
+ internal::InputResult ProduceInput(const Ctx& ctx) const {
+ using namespace internal;
+
+ // Internal function which is invoked for every tree node, constructing satisfaction/dissatisfactions
+ // given those of its subnodes.
+ auto helper = [&ctx](const Node& node, Span<InputResult> subres) -> InputResult {
+ switch (node.fragment) {
+ case Fragment::PK_K: {
+ std::vector<unsigned char> sig;
+ Availability avail = ctx.Sign(node.keys[0], sig);
+ return {ZERO, InputStack(std::move(sig)).SetWithSig().SetAvailable(avail)};
+ }
+ case Fragment::PK_H: {
+ std::vector<unsigned char> key = ctx.ToPKBytes(node.keys[0]), sig;
+ Availability avail = ctx.Sign(node.keys[0], sig);
+ return {ZERO + InputStack(key), (InputStack(std::move(sig)).SetWithSig() + InputStack(key)).SetAvailable(avail)};
+ }
+ case Fragment::MULTI: {
+ // sats[j] represents the best stack containing j valid signatures (out of the first i keys).
+ // In the loop below, these stacks are built up using a dynamic programming approach.
+ // sats[0] starts off being {0}, due to the CHECKMULTISIG bug that pops off one element too many.
+ std::vector<InputStack> sats = Vector(ZERO);
+ for (size_t i = 0; i < node.keys.size(); ++i) {
+ std::vector<unsigned char> sig;
+ Availability avail = ctx.Sign(node.keys[i], sig);
+ // Compute signature stack for just the i'th key.
+ auto sat = InputStack(std::move(sig)).SetWithSig().SetAvailable(avail);
+ // Compute the next sats vector: next_sats[0] is a copy of sats[0] (no signatures). All further
+ // next_sats[j] are equal to either the existing sats[j], or sats[j-1] plus a signature for the
+ // current (i'th) key. The very last element needs all signatures filled.
+ std::vector<InputStack> next_sats;
+ next_sats.push_back(sats[0]);
+ for (size_t j = 1; j < sats.size(); ++j) next_sats.push_back(sats[j] | (std::move(sats[j - 1]) + sat));
+ next_sats.push_back(std::move(sats[sats.size() - 1]) + std::move(sat));
+ // Switch over.
+ sats = std::move(next_sats);
+ }
+ // The dissatisfaction consists of k+1 stack elements all equal to 0.
+ InputStack nsat = ZERO;
+ for (size_t i = 0; i < node.k; ++i) nsat = std::move(nsat) + ZERO;
+ assert(node.k <= sats.size());
+ return {std::move(nsat), std::move(sats[node.k])};
+ }
+ case Fragment::THRESH: {
+ // sats[k] represents the best stack that satisfies k out of the *last* i subexpressions.
+ // In the loop below, these stacks are built up using a dynamic programming approach.
+ // sats[0] starts off empty.
+ std::vector<InputStack> sats = Vector(EMPTY);
+ for (size_t i = 0; i < subres.size(); ++i) {
+ // Introduce an alias for the i'th last satisfaction/dissatisfaction.
+ auto& res = subres[subres.size() - i - 1];
+ // Compute the next sats vector: next_sats[0] is sats[0] plus res.nsat (thus containing all dissatisfactions
+                // so far). next_sats[j] is either sats[j] + res.nsat (reusing j earlier satisfactions) or sats[j-1] + res.sat
+ // (reusing j-1 earlier satisfactions plus a new one). The very last next_sats[j] is all satisfactions.
+ std::vector<InputStack> next_sats;
+ next_sats.push_back(sats[0] + res.nsat);
+ for (size_t j = 1; j < sats.size(); ++j) next_sats.push_back((sats[j] + res.nsat) | (std::move(sats[j - 1]) + res.sat));
+ next_sats.push_back(std::move(sats[sats.size() - 1]) + std::move(res.sat));
+ // Switch over.
+ sats = std::move(next_sats);
+ }
+                // At this point, sats[k] is the best satisfaction for the overall thresh() node. The best dissatisfaction
+                // is computed by gathering all sats[i] for i != k.
+ InputStack nsat = INVALID;
+ for (size_t i = 0; i < sats.size(); ++i) {
+ // i==k is the satisfaction; i==0 is the canonical dissatisfaction;
+ // the rest are non-canonical (a no-signature dissatisfaction - the i=0
+ // form - is always available) and malleable (due to overcompleteness).
+ // Marking the solutions malleable here is not strictly necessary, as they
+ // should already never be picked in non-malleable solutions due to the
+ // availability of the i=0 form.
+ if (i != 0 && i != node.k) sats[i].SetMalleable().SetNonCanon();
+ // Include all dissatisfactions (even these non-canonical ones) in nsat.
+ if (i != node.k) nsat = std::move(nsat) | std::move(sats[i]);
+ }
+ assert(node.k <= sats.size());
+ return {std::move(nsat), std::move(sats[node.k])};
+ }
+ case Fragment::OLDER: {
+ return {INVALID, ctx.CheckOlder(node.k) ? EMPTY : INVALID};
+ }
+ case Fragment::AFTER: {
+ return {INVALID, ctx.CheckAfter(node.k) ? EMPTY : INVALID};
+ }
+ case Fragment::SHA256: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatSHA256(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::RIPEMD160: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatRIPEMD160(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::HASH256: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatHASH256(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::HASH160: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatHASH160(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::AND_V: {
+ auto& x = subres[0], &y = subres[1];
+                // As the dissatisfaction here only consists of a single option, it doesn't
+ // actually need to be listed (it's not required for reasoning about malleability of
+ // other options), and is never required (no valid miniscript relies on the ability
+ // to satisfy the type V left subexpression). It's still listed here for
+ // completeness, as a hypothetical (not currently implemented) satisfier that doesn't
+ // care about malleability might in some cases prefer it still.
+ return {(y.nsat + x.sat).SetNonCanon(), y.sat + x.sat};
+ }
+ case Fragment::AND_B: {
+ auto& x = subres[0], &y = subres[1];
+ // Note that it is not strictly necessary to mark the 2nd and 3rd dissatisfaction here
+ // as malleable. While they are definitely malleable, they are also non-canonical due
+ // to the guaranteed existence of a no-signature other dissatisfaction (the 1st)
+ // option. Because of that, the 2nd and 3rd option will never be chosen, even if they
+ // weren't marked as malleable.
+ return {(y.nsat + x.nsat) | (y.sat + x.nsat).SetMalleable().SetNonCanon() | (y.nsat + x.sat).SetMalleable().SetNonCanon(), y.sat + x.sat};
+ }
+ case Fragment::OR_B: {
+ auto& x = subres[0], &z = subres[1];
+ // The (sat(Z) sat(X)) solution is overcomplete (attacker can change either into dsat).
+ return {z.nsat + x.nsat, (z.nsat + x.sat) | (z.sat + x.nsat) | (z.sat + x.sat).SetMalleable().SetNonCanon()};
+ }
+ case Fragment::OR_C: {
+ auto& x = subres[0], &z = subres[1];
+ return {INVALID, std::move(x.sat) | (z.sat + x.nsat)};
+ }
+ case Fragment::OR_D: {
+ auto& x = subres[0], &z = subres[1];
+ return {z.nsat + x.nsat, std::move(x.sat) | (z.sat + x.nsat)};
+ }
+ case Fragment::OR_I: {
+ auto& x = subres[0], &z = subres[1];
+ return {(x.nsat + ONE) | (z.nsat + ZERO), (x.sat + ONE) | (z.sat + ZERO)};
+ }
+ case Fragment::ANDOR: {
+ auto& x = subres[0], &y = subres[1], &z = subres[2];
+ return {(y.nsat + x.sat).SetNonCanon() | (z.nsat + x.nsat), (y.sat + x.sat) | (z.sat + x.nsat)};
+ }
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_N:
+ return std::move(subres[0]);
+ case Fragment::WRAP_D: {
+ auto &x = subres[0];
+ return {ZERO, x.sat + ONE};
+ }
+ case Fragment::WRAP_J: {
+ auto &x = subres[0];
+ // If a dissatisfaction with a nonzero top stack element exists, an alternative dissatisfaction exists.
+ // As the dissatisfaction logic currently doesn't keep track of this nonzeroness property, and thus even
+                // As the dissatisfaction logic currently doesn't keep track of this nonzeroness property, even
+ // nonzero top stack element exists. Make the conservative assumption that whenever the subexpression is weakly
+ // dissatisfiable, this alternative dissatisfaction exists and leads to malleability.
+ return {InputStack(ZERO).SetMalleable(x.nsat.available != Availability::NO && !x.nsat.has_sig), std::move(x.sat)};
+ }
+ case Fragment::WRAP_V: {
+ auto &x = subres[0];
+ return {INVALID, std::move(x.sat)};
+ }
+ case Fragment::JUST_0: return {EMPTY, INVALID};
+ case Fragment::JUST_1: return {INVALID, EMPTY};
+ }
+ assert(false);
+ return {INVALID, INVALID};
+ };
+
+ auto tester = [&helper](const Node& node, Span<InputResult> subres) -> InputResult {
+ auto ret = helper(node, subres);
+
+ // Do a consistency check between the satisfaction code and the type checker
+        // (the actual satisfaction code in the helper lambda above does not use GetType)
+
+ // For 'z' nodes, available satisfactions/dissatisfactions must have stack size 0.
+ if (node.GetType() << "z"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.stack.size() == 0);
+ if (node.GetType() << "z"_mst && ret.sat.available != Availability::NO) assert(ret.sat.stack.size() == 0);
+
+ // For 'o' nodes, available satisfactions/dissatisfactions must have stack size 1.
+ if (node.GetType() << "o"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.stack.size() == 1);
+ if (node.GetType() << "o"_mst && ret.sat.available != Availability::NO) assert(ret.sat.stack.size() == 1);
+
+ // For 'n' nodes, available satisfactions/dissatisfactions must have stack size 1 or larger. For satisfactions,
+ // the top element cannot be 0.
+ if (node.GetType() << "n"_mst && ret.sat.available != Availability::NO) assert(ret.sat.stack.size() >= 1);
+ if (node.GetType() << "n"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.stack.size() >= 1);
+ if (node.GetType() << "n"_mst && ret.sat.available != Availability::NO) assert(!ret.sat.stack.back().empty());
+
+ // For 'd' nodes, a dissatisfaction must exist, and they must not need a signature. If it is non-malleable,
+ // it must be canonical.
+ if (node.GetType() << "d"_mst) assert(ret.nsat.available != Availability::NO);
+ if (node.GetType() << "d"_mst) assert(!ret.nsat.has_sig);
+ if (node.GetType() << "d"_mst && !ret.nsat.malleable) assert(!ret.nsat.non_canon);
+
+ // For 'f'/'s' nodes, dissatisfactions/satisfactions must have a signature.
+ if (node.GetType() << "f"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.has_sig);
+ if (node.GetType() << "s"_mst && ret.sat.available != Availability::NO) assert(ret.sat.has_sig);
+
+ // For non-malleable 'e' nodes, a non-malleable dissatisfaction must exist.
+ if (node.GetType() << "me"_mst) assert(ret.nsat.available != Availability::NO);
+ if (node.GetType() << "me"_mst) assert(!ret.nsat.malleable);
+
+ // For 'm' nodes, if a satisfaction exists, it must be non-malleable.
+ if (node.GetType() << "m"_mst && ret.sat.available != Availability::NO) assert(!ret.sat.malleable);
+
+ // If a non-malleable satisfaction exists, it must be canonical.
+ if (ret.sat.available != Availability::NO && !ret.sat.malleable) assert(!ret.sat.non_canon);
+
+ return ret;
+ };
+
+ return TreeEval<InputResult>(tester);
+ }
+
public:
/** Update duplicate key information in this Node.
*
@@ -877,6 +1158,47 @@ public:
});
}
+ //! Determine whether a Miniscript node is satisfiable. fn(node) will be invoked for all
+ //! key, time, and hashing nodes, and should return their satisfiability.
+ template<typename F>
+ bool IsSatisfiable(F fn) const
+ {
+ // TreeEval() doesn't support bool as NodeType, so use int instead.
+ return TreeEval<int>([&fn](const Node& node, Span<int> subs) -> bool {
+ switch (node.fragment) {
+ case Fragment::JUST_0:
+ return false;
+ case Fragment::JUST_1:
+ return true;
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ case Fragment::MULTI:
+ case Fragment::AFTER:
+ case Fragment::OLDER:
+ case Fragment::HASH256:
+ case Fragment::HASH160:
+ case Fragment::SHA256:
+ case Fragment::RIPEMD160:
+ return bool{fn(node)};
+ case Fragment::ANDOR:
+ return (subs[0] && subs[1]) || subs[2];
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ return subs[0] && subs[1];
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ return subs[0] || subs[1];
+ case Fragment::THRESH:
+ return std::count(subs.begin(), subs.end(), true) >= node.k;
+ default: // wrappers
+ assert(subs.size() == 1);
+ return subs[0];
+ }
+ });
+ }
+
//! Check whether this node is valid at all.
bool IsValid() const { return !(GetType() == ""_mst) && ScriptSize() <= MAX_STANDARD_P2WSH_SCRIPT_SIZE; }
@@ -904,6 +1226,18 @@ public:
//! Check whether this node is safe as a script on its own.
bool IsSane() const { return IsValidTopLevel() && IsSaneSubexpression() && NeedsSignature(); }
+ //! Produce a witness for this script, if possible and given the information available in the context.
+ //! The non-malleable satisfaction is guaranteed to be valid if it exists, and ValidSatisfaction()
+ //! is true. If IsSane() holds, this satisfaction is guaranteed to succeed in case the node's
+    //! conditions are satisfied (private keys and hash preimages available, locktimes satisfied).
+ template<typename Ctx>
+ Availability Satisfy(const Ctx& ctx, std::vector<std::vector<unsigned char>>& stack, bool nonmalleable = true) const {
+ auto ret = ProduceInput(ctx);
+ if (nonmalleable && (ret.sat.malleable || !ret.sat.has_sig)) return Availability::NO;
+ stack = std::move(ret.sat.stack);
+ return ret.sat.available;
+ }
+
//! Equality testing.
bool operator==(const Node<Key>& arg) const { return Compare(*this, arg) == 0; }
@@ -1378,7 +1712,7 @@ inline NodeRef<Key> Parse(Span<const char> in, const Ctx& ctx)
assert(constructed.size() == 1);
assert(constructed[0]->ScriptSize() == script_size);
if (in.size() > 0) return {};
- const NodeRef<Key> tl_node = std::move(constructed.front());
+ NodeRef<Key> tl_node = std::move(constructed.front());
tl_node->DuplicateKeyCheck(ctx);
return tl_node;
}
@@ -1813,7 +2147,7 @@ inline NodeRef<Key> DecodeScript(I& in, I last, const Ctx& ctx)
}
}
if (constructed.size() != 1) return {};
- const NodeRef<Key> tl_node = std::move(constructed.front());
+ NodeRef<Key> tl_node = std::move(constructed.front());
tl_node->DuplicateKeyCheck(ctx);
// Note that due to how ComputeType works (only assign the type to the node if the
// subs' types are valid) this would fail if any node of tree is badly typed.
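
Together, IsSatisfiable() and Satisfy() form the public surface of the new satisfier: the former is a cheap structural check, the latter actually produces the witness stack. A usage sketch (node is assumed to be a sane miniscript::NodeRef, and ctx a satisfier context providing Sign()/CheckOlder()/CheckAfter()/Sat* callbacks, such as the Satisfier added to sign.cpp below):

    std::vector<std::vector<unsigned char>> stack;
    const bool maybe_satisfiable{node->IsSatisfiable([](const auto&) { return true; })};
    if (maybe_satisfiable && node->Satisfy(ctx, stack) == miniscript::Availability::YES) {
        // `stack` now holds the witness elements to place before the script itself.
    }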
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 1a8558cd9f..85589fe86b 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -10,6 +10,7 @@
#include <policy/policy.h>
#include <primitives/transaction.h>
#include <script/keyorigin.h>
+#include <script/miniscript.h>
#include <script/signingprovider.h>
#include <script/standard.h>
#include <uint256.h>
@@ -169,13 +170,14 @@ static bool CreateTaprootScriptSig(const BaseSignatureCreator& creator, Signatur
return false;
}
-static bool SignTaprootScript(const SigningProvider& provider, const BaseSignatureCreator& creator, SignatureData& sigdata, int leaf_version, const CScript& script, std::vector<valtype>& result)
+static bool SignTaprootScript(const SigningProvider& provider, const BaseSignatureCreator& creator, SignatureData& sigdata, int leaf_version, Span<const unsigned char> script_bytes, std::vector<valtype>& result)
{
// Only BIP342 tapscript signing is supported for now.
if (leaf_version != TAPROOT_LEAF_TAPSCRIPT) return false;
SigVersion sigversion = SigVersion::TAPSCRIPT;
- uint256 leaf_hash = (HashWriter{HASHER_TAPLEAF} << uint8_t(leaf_version) << script).GetSHA256();
+ uint256 leaf_hash = ComputeTapleafHash(leaf_version, script_bytes);
+ CScript script = CScript(script_bytes.begin(), script_bytes.end());
// <xonly pubkey> OP_CHECKSIG
if (script.size() == 34 && script[33] == OP_CHECKSIG && script[0] == 0x20) {
@@ -285,7 +287,6 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
std::vector<valtype>& ret, TxoutType& whichTypeRet, SigVersion sigversion, SignatureData& sigdata)
{
CScript scriptRet;
- uint160 h160;
ret.clear();
std::vector<unsigned char> sig;
@@ -314,8 +315,8 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
ret.push_back(ToByteVector(pubkey));
return true;
}
- case TxoutType::SCRIPTHASH:
- h160 = uint160(vSolutions[0]);
+ case TxoutType::SCRIPTHASH: {
+ uint160 h160{vSolutions[0]};
if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) {
ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end()));
return true;
@@ -323,7 +324,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
// Could not find redeemScript, add to missing
sigdata.missing_redeem_script = h160;
return false;
-
+ }
case TxoutType::MULTISIG: {
size_t required = vSolutions.front()[0];
ret.push_back(valtype()); // workaround CHECKMULTISIG bug
@@ -349,8 +350,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
return true;
case TxoutType::WITNESS_V0_SCRIPTHASH:
- CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(h160.begin());
- if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) {
+ if (GetCScript(provider, sigdata, CScriptID{RIPEMD160(vSolutions[0])}, scriptRet)) {
ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end()));
return true;
}
@@ -381,6 +381,92 @@ static CScript PushAll(const std::vector<valtype>& values)
return result;
}
+template<typename M, typename K, typename V>
+miniscript::Availability MsLookupHelper(const M& map, const K& key, V& value)
+{
+ auto it = map.find(key);
+ if (it != map.end()) {
+ value = it->second;
+ return miniscript::Availability::YES;
+ }
+ return miniscript::Availability::NO;
+}
+
+/**
+ * Context for solving a Miniscript.
+ * If enough material (access to keys, hash preimages, ..) is given, produces a valid satisfaction.
+ */
+struct Satisfier {
+ typedef CPubKey Key;
+
+ const SigningProvider& m_provider;
+ SignatureData& m_sig_data;
+ const BaseSignatureCreator& m_creator;
+ const CScript& m_witness_script;
+
+ explicit Satisfier(const SigningProvider& provider LIFETIMEBOUND, SignatureData& sig_data LIFETIMEBOUND,
+ const BaseSignatureCreator& creator LIFETIMEBOUND,
+ const CScript& witscript LIFETIMEBOUND) : m_provider(provider),
+ m_sig_data(sig_data),
+ m_creator(creator),
+ m_witness_script(witscript) {}
+
+ static bool KeyCompare(const Key& a, const Key& b) {
+ return a < b;
+ }
+
+ //! Conversion from a raw public key.
+ template <typename I>
+ std::optional<Key> FromPKBytes(I first, I last) const
+ {
+ Key pubkey{first, last};
+ if (pubkey.IsValid()) return pubkey;
+ return {};
+ }
+
+ //! Conversion from a raw public key hash.
+ template<typename I>
+ std::optional<Key> FromPKHBytes(I first, I last) const {
+ assert(last - first == 20);
+ Key pubkey;
+ CKeyID key_id;
+ std::copy(first, last, key_id.begin());
+ if (GetPubKey(m_provider, m_sig_data, key_id, pubkey)) return pubkey;
+ m_sig_data.missing_pubkeys.push_back(key_id);
+ return {};
+ }
+
+ //! Conversion to raw public key.
+ std::vector<unsigned char> ToPKBytes(const CPubKey& key) const { return {key.begin(), key.end()}; }
+
+ //! Satisfy a signature check.
+ miniscript::Availability Sign(const CPubKey& key, std::vector<unsigned char>& sig) const {
+ if (CreateSig(m_creator, m_sig_data, m_provider, sig, key, m_witness_script, SigVersion::WITNESS_V0)) {
+ return miniscript::Availability::YES;
+ }
+ return miniscript::Availability::NO;
+ }
+
+ //! Time lock satisfactions.
+ bool CheckAfter(uint32_t value) const { return m_creator.Checker().CheckLockTime(CScriptNum(value)); }
+ bool CheckOlder(uint32_t value) const { return m_creator.Checker().CheckSequence(CScriptNum(value)); }
+
+
+ //! Hash preimage satisfactions.
+ miniscript::Availability SatSHA256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.sha256_preimages, hash, preimage);
+ }
+ miniscript::Availability SatRIPEMD160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.ripemd160_preimages, hash, preimage);
+ }
+ miniscript::Availability SatHASH256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.hash256_preimages, hash, preimage);
+ }
+ miniscript::Availability SatHASH160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.hash160_preimages, hash, preimage);
+ }
+};
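+// (Editorial sketch, not part of the change above.) A Satisfier is built per input and handed to the
+// Miniscript machinery, as ProduceSignature() below does:
+//   Satisfier ms_satisfier{provider, sigdata, creator, witnessscript};
+//   const auto ms = miniscript::FromScript(witnessscript, ms_satisfier);
+//   if (ms) ms->Satisfy(ms_satisfier, result); // YES when enough keys/preimages are available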
+
bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreator& creator, const CScript& fromPubKey, SignatureData& sigdata)
{
if (sigdata.complete) return true;
@@ -416,9 +502,21 @@ bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreato
{
CScript witnessscript(result[0].begin(), result[0].end());
sigdata.witness_script = witnessscript;
- TxoutType subType;
+
+ TxoutType subType{TxoutType::NONSTANDARD};
solved = solved && SignStep(provider, creator, witnessscript, result, subType, SigVersion::WITNESS_V0, sigdata) && subType != TxoutType::SCRIPTHASH && subType != TxoutType::WITNESS_V0_SCRIPTHASH && subType != TxoutType::WITNESS_V0_KEYHASH;
+
+ // If we couldn't find a solution with the legacy satisfier, try satisfying the script using Miniscript.
+ // Note we need to check whether the result stack is empty beforehand, because it might be used even if the
+ // Script isn't fully solved. For instance, the CHECKMULTISIG satisfaction in SignStep() pushes partial
+ // signatures, and the extractor relies on this behaviour to combine witnesses.
+ if (!solved && result.empty()) {
+ Satisfier ms_satisfier{provider, sigdata, creator, witnessscript};
+ const auto ms = miniscript::FromScript(witnessscript, ms_satisfier);
+ solved = ms && ms->Satisfy(ms_satisfier, result) == miniscript::Availability::YES;
+ }
result.push_back(std::vector<unsigned char>(witnessscript.begin(), witnessscript.end()));
+
sigdata.scriptWitness.stack = result;
sigdata.witness = true;
result.clear();
@@ -564,26 +662,25 @@ void SignatureData::MergeSignatureData(SignatureData sigdata)
signatures.insert(std::make_move_iterator(sigdata.signatures.begin()), std::make_move_iterator(sigdata.signatures.end()));
}
-bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType)
+bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data)
{
assert(nIn < txTo.vin.size());
MutableTransactionSignatureCreator creator(txTo, nIn, amount, nHashType);
- SignatureData sigdata;
- bool ret = ProduceSignature(provider, creator, fromPubKey, sigdata);
- UpdateInput(txTo.vin.at(nIn), sigdata);
+ bool ret = ProduceSignature(provider, creator, fromPubKey, sig_data);
+ UpdateInput(txTo.vin.at(nIn), sig_data);
return ret;
}
-bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType)
+bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType, SignatureData& sig_data)
{
assert(nIn < txTo.vin.size());
const CTxIn& txin = txTo.vin[nIn];
assert(txin.prevout.n < txFrom.vout.size());
const CTxOut& txout = txFrom.vout[txin.prevout.n];
- return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType);
+ return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType, sig_data);
}
namespace {
@@ -592,8 +689,10 @@ class DummySignatureChecker final : public BaseSignatureChecker
{
public:
DummySignatureChecker() = default;
- bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return true; }
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const override { return true; }
+ bool CheckECDSASignature(const std::vector<unsigned char>& sig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return sig.size() != 0; }
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const override { return sig.size() != 0; }
+ bool CheckLockTime(const CScriptNum& nLockTime) const override { return true; }
+ bool CheckSequence(const CScriptNum& nSequence) const override { return true; }
};
}
diff --git a/src/script/sign.h b/src/script/sign.h
index b32bb55dd3..fe2354cad7 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -13,6 +13,7 @@
#include <script/interpreter.h>
#include <script/keyorigin.h>
#include <script/standard.h>
+#include <uint256.h>
class CKey;
class CKeyID;
@@ -82,6 +83,10 @@ struct SignatureData {
std::vector<CKeyID> missing_sigs; ///< KeyIDs of pubkeys for signatures which could not be found
uint160 missing_redeem_script; ///< ScriptID of the missing redeemScript (if any)
uint256 missing_witness_script; ///< SHA256 of the missing witnessScript (if any)
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> sha256_preimages; ///< Mapping from a SHA256 hash to its preimage provided to solve a Script
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> hash256_preimages; ///< Mapping from a HASH256 hash to its preimage provided to solve a Script
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> ripemd160_preimages; ///< Mapping from a RIPEMD160 hash to its preimage provided to solve a Script
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> hash160_preimages; ///< Mapping from a HASH160 hash to its preimage provided to solve a Script
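+ // (Editorial note, illustrative only; `hash_bytes`/`preimage_bytes` are placeholders.) A caller can
+ // pre-fill these maps, e.g. sig_data.sha256_preimages[hash_bytes] = preimage_bytes;, before calling
+ // ProduceSignature() so the Miniscript satisfier can solve the corresponding hash-lock checks.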
SignatureData() {}
explicit SignatureData(const CScript& script) : scriptSig(script) {}
@@ -91,9 +96,24 @@ struct SignatureData {
/** Produce a script signature using a generic signature creator. */
bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreator& creator, const CScript& scriptPubKey, SignatureData& sigdata);
-/** Produce a script signature for a transaction. */
-bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType);
-bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType);
+/**
+ * Produce a satisfying script (scriptSig or witness).
+ *
+ * @param provider Utility containing the information necessary to solve a script.
+ * @param fromPubKey The script to produce a satisfaction for.
+ * @param txTo The spending transaction.
+ * @param nIn The index of the input in `txTo` referring to the output being spent.
+ * @param amount The value of the output being spent.
+ * @param nHashType Signature hash type.
+ * @param sig_data Additional data provided to solve a script. Filled with the resulting satisfying
+ * script and whether the satisfaction is complete.
+ *
+ * @return True if the produced script entirely satisfies `fromPubKey`.
+ **/
+bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo,
+ unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data);
+bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo,
+ unsigned int nIn, int nHashType, SignatureData& sig_data);
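+// (Editorial sketch, not part of the change above; `keystore`, `prev_tx` and `mtx` are hypothetical.)
+// Callers now own the SignatureData, so partial solutions and preimages survive repeated calls:
+//   SignatureData sig_data;
+//   bool complete = SignSignature(keystore, prev_tx, mtx, /*nIn=*/0, SIGHASH_ALL, sig_data);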
/** Extract signature data from a transaction input, and insert it. */
SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nIn, const CTxOut& txout);
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 27b9a5c741..7c4a05b6e6 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -370,12 +370,7 @@ bool IsValidDestination(const CTxDestination& dest) {
leaf.merkle_branch.push_back(a.hash);
ret.leaves.emplace_back(std::move(leaf));
}
- /* Lexicographically sort a and b's hash, and compute parent hash. */
- if (a.hash < b.hash) {
- ret.hash = (HashWriter{HASHER_TAPBRANCH} << a.hash << b.hash).GetSHA256();
- } else {
- ret.hash = (HashWriter{HASHER_TAPBRANCH} << b.hash << a.hash).GetSHA256();
- }
+ ret.hash = ComputeTapbranchHash(a.hash, b.hash);
return ret;
}
@@ -443,14 +438,14 @@ void TaprootBuilder::Insert(TaprootBuilder::NodeInfo&& node, int depth)
return branch.size() == 0 || (branch.size() == 1 && branch[0]);
}
-TaprootBuilder& TaprootBuilder::Add(int depth, const CScript& script, int leaf_version, bool track)
+TaprootBuilder& TaprootBuilder::Add(int depth, Span<const unsigned char> script, int leaf_version, bool track)
{
assert((leaf_version & ~TAPROOT_LEAF_MASK) == 0);
if (!IsValid()) return *this;
/* Construct NodeInfo object with leaf hash and (if track is true) also leaf information. */
NodeInfo node;
- node.hash = (HashWriter{HASHER_TAPLEAF} << uint8_t(leaf_version) << script).GetSHA256();
- if (track) node.leaves.emplace_back(LeafInfo{script, leaf_version, {}});
+ node.hash = ComputeTapleafHash(leaf_version, script);
+ if (track) node.leaves.emplace_back(LeafInfo{std::vector<unsigned char>(script.begin(), script.end()), leaf_version, {}});
/* Insert into the branch. */
Insert(std::move(node), depth);
return *this;
@@ -506,13 +501,13 @@ TaprootSpendData TaprootBuilder::GetSpendData() const
return spd;
}
-std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const TaprootSpendData& spenddata, const XOnlyPubKey& output)
+std::optional<std::vector<std::tuple<int, std::vector<unsigned char>, int>>> InferTaprootTree(const TaprootSpendData& spenddata, const XOnlyPubKey& output)
{
// Verify that the output matches the assumed Merkle root and internal key.
auto tweak = spenddata.internal_key.CreateTapTweak(spenddata.merkle_root.IsNull() ? nullptr : &spenddata.merkle_root);
if (!tweak || tweak->first != output) return std::nullopt;
// If the Merkle root is 0, the tree is empty, and we're done.
- std::vector<std::tuple<int, CScript, int>> ret;
+ std::vector<std::tuple<int, std::vector<unsigned char>, int>> ret;
if (spenddata.merkle_root.IsNull()) return ret;
/** Data structure to represent the nodes of the tree we're going to build. */
@@ -523,7 +518,7 @@ std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const
std::unique_ptr<TreeNode> sub[2];
/** If this is known to be a leaf node, a pointer to the (script, leaf_ver) pair.
* nullptr otherwise. */
- const std::pair<CScript, int>* leaf = nullptr;
+ const std::pair<std::vector<unsigned char>, int>* leaf = nullptr;
/** Whether or not this node has been explored (is known to be a leaf, or known to have children). */
bool explored = false;
/** Whether or not this node is an inner node (unknown until explored = true). */
@@ -607,7 +602,7 @@ std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const
node.done = true;
stack.pop_back();
} else if (node.sub[0]->done && !node.sub[1]->done && !node.sub[1]->explored && !node.sub[1]->hash.IsNull() &&
- (HashWriter{HASHER_TAPBRANCH} << node.sub[1]->hash << node.sub[1]->hash).GetSHA256() == node.hash) {
+ ComputeTapbranchHash(node.sub[1]->hash, node.sub[1]->hash) == node.hash) {
// Whenever there are nodes with two identical subtrees under it, we run into a problem:
// the control blocks for the leaves underneath those will be identical as well, and thus
// they will all be matched to the same path in the tree. The result is that at the location
@@ -641,10 +636,10 @@ std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const
return ret;
}
-std::vector<std::tuple<uint8_t, uint8_t, CScript>> TaprootBuilder::GetTreeTuples() const
+std::vector<std::tuple<uint8_t, uint8_t, std::vector<unsigned char>>> TaprootBuilder::GetTreeTuples() const
{
assert(IsComplete());
- std::vector<std::tuple<uint8_t, uint8_t, CScript>> tuples;
+ std::vector<std::tuple<uint8_t, uint8_t, std::vector<unsigned char>>> tuples;
if (m_branch.size()) {
const auto& leaves = m_branch[0]->leaves;
for (const auto& leaf : leaves) {
diff --git a/src/script/standard.h b/src/script/standard.h
index f08258af4f..18cf5c8c88 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -217,7 +217,7 @@ struct TaprootSpendData
* inference can reconstruct the full tree. Within each set, the control
* blocks are sorted by size, so that the signing logic can easily
* prefer the cheapest one. */
- std::map<std::pair<CScript, int>, std::set<std::vector<unsigned char>, ShortestVectorFirstComparator>> scripts;
+ std::map<std::pair<std::vector<unsigned char>, int>, std::set<std::vector<unsigned char>, ShortestVectorFirstComparator>> scripts;
/** Merge other TaprootSpendData (for the same scriptPubKey) into this. */
void Merge(TaprootSpendData other);
};
@@ -229,7 +229,7 @@ private:
/** Information about a tracked leaf in the Merkle tree. */
struct LeafInfo
{
- CScript script; //!< The script.
+ std::vector<unsigned char> script; //!< The script.
int leaf_version; //!< The leaf version for that script.
std::vector<uint256> merkle_branch; //!< The hashing partners above this leaf.
};
@@ -296,7 +296,7 @@ public:
/** Add a new script at a certain depth in the tree. Add() operations must be called
* in depth-first traversal order of binary tree. If track is true, it will be included in
* the GetSpendData() output. */
- TaprootBuilder& Add(int depth, const CScript& script, int leaf_version, bool track = true);
+ TaprootBuilder& Add(int depth, Span<const unsigned char> script, int leaf_version, bool track = true);
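+ // (Editorial sketch, not part of the change above; script_a, script_b and internal_key are
+ // hypothetical.) Depth-first insertion for a two-leaf tree with both leaves under the root:
+ //   TaprootBuilder builder;
+ //   builder.Add(/*depth=*/1, script_a, TAPROOT_LEAF_TAPSCRIPT);
+ //   builder.Add(/*depth=*/1, script_b, TAPROOT_LEAF_TAPSCRIPT);
+ //   builder.Finalize(internal_key);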
/** Like Add(), but for a Merkle node with a given hash to the tree. */
TaprootBuilder& AddOmitted(int depth, const uint256& hash);
/** Finalize the construction. Can only be called when IsComplete() is true.
@@ -314,7 +314,7 @@ public:
/** Compute spending data (after Finalize()). */
TaprootSpendData GetSpendData() const;
/** Returns a vector of tuples representing the depth, leaf version, and script */
- std::vector<std::tuple<uint8_t, uint8_t, CScript>> GetTreeTuples() const;
+ std::vector<std::tuple<uint8_t, uint8_t, std::vector<unsigned char>>> GetTreeTuples() const;
/** Returns true if there are any tapscripts */
bool HasScripts() const { return !m_branch.empty(); }
};
@@ -325,6 +325,6 @@ public:
* std::nullopt is returned. Otherwise, a vector of (depth, script, leaf_ver) tuples is
* returned, corresponding to a depth-first traversal of the script tree.
*/
-std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const TaprootSpendData& spenddata, const XOnlyPubKey& output);
+std::optional<std::vector<std::tuple<int, std::vector<unsigned char>, int>>> InferTaprootTree(const TaprootSpendData& spenddata, const XOnlyPubKey& output);
#endif // BITCOIN_SCRIPT_STANDARD_H
diff --git a/src/serialize.h b/src/serialize.h
index f1edc54031..7bc7b10779 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1005,11 +1005,11 @@ struct CSerActionUnserialize
class CSizeComputer
{
protected:
- size_t nSize;
+ size_t nSize{0};
const int nVersion;
public:
- explicit CSizeComputer(int nVersionIn) : nSize(0), nVersion(nVersionIn) {}
+ explicit CSizeComputer(int nVersionIn) : nVersion(nVersionIn) {}
void write(Span<const std::byte> src)
{
diff --git a/src/span.h b/src/span.h
index 4d00bbc244..4692eca7fb 100644
--- a/src/span.h
+++ b/src/span.h
@@ -96,7 +96,7 @@ template<typename C>
class Span
{
C* m_data;
- std::size_t m_size;
+ std::size_t m_size{0};
template <class T>
struct is_Span_int : public std::false_type {};
@@ -107,7 +107,7 @@ class Span
public:
- constexpr Span() noexcept : m_data(nullptr), m_size(0) {}
+ constexpr Span() noexcept : m_data(nullptr) {}
/** Construct a span from a begin pointer and a size.
*
diff --git a/src/streams.h b/src/streams.h
index 4f2c3ffe76..8788343809 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -182,16 +182,13 @@ public:
* >> and << read and write unformatted data using the above serialization templates.
* Fills with data in linear time; some stringstream implementations take N^2 time.
*/
-class CDataStream
+class DataStream
{
protected:
using vector_type = SerializeData;
vector_type vch;
vector_type::size_type m_read_pos{0};
- int nType;
- int nVersion;
-
public:
typedef vector_type::allocator_type allocator_type;
typedef vector_type::size_type size_type;
@@ -203,23 +200,9 @@ public:
typedef vector_type::const_iterator const_iterator;
typedef vector_type::reverse_iterator reverse_iterator;
- explicit CDataStream(int nTypeIn, int nVersionIn)
- : nType{nTypeIn},
- nVersion{nVersionIn} {}
-
- explicit CDataStream(Span<const uint8_t> sp, int type, int version) : CDataStream{AsBytes(sp), type, version} {}
- explicit CDataStream(Span<const value_type> sp, int nTypeIn, int nVersionIn)
- : vch(sp.data(), sp.data() + sp.size()),
- nType{nTypeIn},
- nVersion{nVersionIn} {}
-
- template <typename... Args>
- CDataStream(int nTypeIn, int nVersionIn, Args&&... args)
- : nType{nTypeIn},
- nVersion{nVersionIn}
- {
- ::SerializeMany(*this, std::forward<Args>(args)...);
- }
+ explicit DataStream() {}
+ explicit DataStream(Span<const uint8_t> sp) : DataStream{AsBytes(sp)} {}
+ explicit DataStream(Span<const value_type> sp) : vch(sp.data(), sp.data() + sp.size()) {}
std::string str() const
{
@@ -271,11 +254,6 @@ public:
bool eof() const { return size() == 0; }
int in_avail() const { return size(); }
- void SetType(int n) { nType = n; }
- int GetType() const { return nType; }
- void SetVersion(int n) { nVersion = n; }
- int GetVersion() const { return nVersion; }
-
void read(Span<value_type> dst)
{
if (dst.size() == 0) return;
@@ -283,7 +261,7 @@ public:
// Read from the beginning of the buffer
auto next_read_pos{CheckedAdd(m_read_pos, dst.size())};
if (!next_read_pos.has_value() || next_read_pos.value() > vch.size()) {
- throw std::ios_base::failure("CDataStream::read(): end of data");
+ throw std::ios_base::failure("DataStream::read(): end of data");
}
memcpy(dst.data(), &vch[m_read_pos], dst.size());
if (next_read_pos.value() == vch.size()) {
@@ -299,7 +277,7 @@ public:
// Ignore from the beginning of the buffer
auto next_read_pos{CheckedAdd(m_read_pos, num_ignore)};
if (!next_read_pos.has_value() || next_read_pos.value() > vch.size()) {
- throw std::ios_base::failure("CDataStream::ignore(): end of data");
+ throw std::ios_base::failure("DataStream::ignore(): end of data");
}
if (next_read_pos.value() == vch.size()) {
m_read_pos = 0;
@@ -324,7 +302,7 @@ public:
}
template<typename T>
- CDataStream& operator<<(const T& obj)
+ DataStream& operator<<(const T& obj)
{
// Serialize to this stream
::Serialize(*this, obj);
@@ -332,7 +310,7 @@ public:
}
template<typename T>
- CDataStream& operator>>(T&& obj)
+ DataStream& operator>>(T&& obj)
{
// Unserialize from this stream
::Unserialize(*this, obj);
@@ -363,6 +341,42 @@ public:
}
};
+class CDataStream : public DataStream
+{
+private:
+ int nType;
+ int nVersion;
+
+public:
+ explicit CDataStream(int nTypeIn, int nVersionIn)
+ : nType{nTypeIn},
+ nVersion{nVersionIn} {}
+
+ explicit CDataStream(Span<const uint8_t> sp, int type, int version) : CDataStream{AsBytes(sp), type, version} {}
+ explicit CDataStream(Span<const value_type> sp, int nTypeIn, int nVersionIn)
+ : DataStream{sp},
+ nType{nTypeIn},
+ nVersion{nVersionIn} {}
+
+ int GetType() const { return nType; }
+ void SetVersion(int n) { nVersion = n; }
+ int GetVersion() const { return nVersion; }
+
+ template <typename T>
+ CDataStream& operator<<(const T& obj)
+ {
+ ::Serialize(*this, obj);
+ return *this;
+ }
+
+ template <typename T>
+ CDataStream& operator>>(T&& obj)
+ {
+ ::Unserialize(*this, obj);
+ return *this;
+ }
+};
+
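+// (Editorial sketch, not part of the change above.) DataStream drops the type/version state, so plain
+// serialization round-trips no longer need SER_*/PROTOCOL_VERSION arguments, as the test changes below show:
+//   DataStream ss{};
+//   ss << obj;  // serialize any object providing Serialize()
+//   ss >> obj2; // unserialize it back
+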
template <typename IStream>
class BitStreamReader
{
@@ -606,8 +620,8 @@ private:
const int nVersion;
FILE *src; //!< source file
- uint64_t nSrcPos; //!< how many bytes have been read from source
- uint64_t m_read_pos; //!< how many bytes have been read from this
+ uint64_t nSrcPos{0}; //!< how many bytes have been read from source
+ uint64_t m_read_pos{0}; //!< how many bytes have been read from this
uint64_t nReadLimit; //!< up to which position we're allowed to read
uint64_t nRewind; //!< how many bytes we guarantee to rewind
std::vector<std::byte> vchBuf; //!< the buffer
@@ -653,7 +667,7 @@ private:
public:
CBufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn)
- : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), m_read_pos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0})
+ : nType(nTypeIn), nVersion(nVersionIn), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0})
{
if (nRewindIn >= nBufSize)
throw std::ios_base::failure("Rewind limit must be less than buffer size");
diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h
index c6bd685189..a0918bf463 100644
--- a/src/support/allocators/secure.h
+++ b/src/support/allocators/secure.h
@@ -56,6 +56,7 @@ struct secure_allocator : public std::allocator<T> {
};
// This is exactly like std::string, but with a custom allocator.
+// TODO: Consider finding a way to make incoming RPC request.params[i] mlock()ed as well
typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
#endif // BITCOIN_SUPPORT_ALLOCATORS_SECURE_H
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index fb59324f7a..24ae4bdd1e 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -19,6 +19,9 @@
#endif
#include <algorithm>
+#include <limits>
+#include <stdexcept>
+#include <utility>
#ifdef ARENA_DEBUG
#include <iomanip>
#include <iostream>
@@ -277,8 +280,8 @@ size_t PosixLockedPageAllocator::GetLimit()
/*******************************************************************************/
// Implementation: LockedPool
-LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
- allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
+LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in)
+ : allocator(std::move(allocator_in)), lf_cb(lf_cb_in)
{
}
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
index 03e4e371a3..1bba459377 100644
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -5,11 +5,11 @@
#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H
-#include <stdint.h>
+#include <cstddef>
#include <list>
#include <map>
-#include <mutex>
#include <memory>
+#include <mutex>
#include <unordered_map>
/**
@@ -198,7 +198,7 @@ private:
std::list<LockedPageArena> arenas;
LockingFailed_Callback lf_cb;
- size_t cumulative_bytes_locked;
+ size_t cumulative_bytes_locked{0};
/** Mutex protects access to this pool's data structures, including arenas.
*/
mutable std::mutex mutex;
diff --git a/src/sync.h b/src/sync.h
index 8ce2e7b124..7242a793ab 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -11,7 +11,7 @@
#include <logging/timer.h>
#endif
-#include <threadsafety.h>
+#include <threadsafety.h> // IWYU pragma: export
#include <util/macros.h>
#include <condition_variable>
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index b15df43e8c..758691cfde 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -67,22 +67,22 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
CNetAddr source = ResolveIP("252.2.2.2");
// Test: Does Addrman respond correctly when empty.
- BOOST_CHECK_EQUAL(addrman->size(), 0U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 0U);
auto addr_null = addrman->Select().first;
- BOOST_CHECK_EQUAL(addr_null.ToString(), "[::]:0");
+ BOOST_CHECK_EQUAL(addr_null.ToStringAddrPort(), "[::]:0");
// Test: Does Addrman::Add work as expected.
CService addr1 = ResolveService("250.1.1.1", 8333);
BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
auto addr_ret1 = addrman->Select().first;
- BOOST_CHECK_EQUAL(addr_ret1.ToString(), "250.1.1.1:8333");
+ BOOST_CHECK_EQUAL(addr_ret1.ToStringAddrPort(), "250.1.1.1:8333");
// Test: Does IP address deduplication work correctly.
// Expected dup IP should not be added.
CService addr1_dup = ResolveService("250.1.1.1", 8333);
BOOST_CHECK(!addrman->Add({CAddress(addr1_dup, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
// Test: New table has one addr and we add a diff addr we should
@@ -93,7 +93,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
CService addr2 = ResolveService("250.1.1.2", 8333);
BOOST_CHECK(addrman->Add({CAddress(addr2, NODE_NONE)}, source));
- BOOST_CHECK(addrman->size() >= 1);
+ BOOST_CHECK(addrman->Size() >= 1);
// Test: reset addrman and test AddrMan::Add multiple addresses works as expected
addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node));
@@ -101,7 +101,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE));
vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE));
BOOST_CHECK(addrman->Add(vAddr, source));
- BOOST_CHECK(addrman->size() >= 1);
+ BOOST_CHECK(addrman->Size() >= 1);
}
BOOST_AUTO_TEST_CASE(addrman_ports)
@@ -110,26 +110,26 @@ BOOST_AUTO_TEST_CASE(addrman_ports)
CNetAddr source = ResolveIP("252.2.2.2");
- BOOST_CHECK_EQUAL(addrman->size(), 0U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 0U);
// Test 7; Addr with same IP but diff port does not replace existing addr.
CService addr1 = ResolveService("250.1.1.1", 8333);
BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
CService addr1_port = ResolveService("250.1.1.1", 8334);
BOOST_CHECK(addrman->Add({CAddress(addr1_port, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman->size(), 2U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 2U);
auto addr_ret2 = addrman->Select().first;
- BOOST_CHECK(addr_ret2.ToString() == "250.1.1.1:8333" || addr_ret2.ToString() == "250.1.1.1:8334");
+ BOOST_CHECK(addr_ret2.ToStringAddrPort() == "250.1.1.1:8333" || addr_ret2.ToStringAddrPort() == "250.1.1.1:8334");
// Test: Add same IP but diff port to tried table; this converts the entry with
// the specified port to tried, but not the other.
addrman->Good(CAddress(addr1_port, NODE_NONE));
- BOOST_CHECK_EQUAL(addrman->size(), 2U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 2U);
bool newOnly = true;
auto addr_ret3 = addrman->Select(newOnly).first;
- BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333");
+ BOOST_CHECK_EQUAL(addr_ret3.ToStringAddrPort(), "250.1.1.1:8333");
}
@@ -142,22 +142,22 @@ BOOST_AUTO_TEST_CASE(addrman_select)
// Test: Select from new with 1 addr in new.
CService addr1 = ResolveService("250.1.1.1", 8333);
BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
bool newOnly = true;
auto addr_ret1 = addrman->Select(newOnly).first;
- BOOST_CHECK_EQUAL(addr_ret1.ToString(), "250.1.1.1:8333");
+ BOOST_CHECK_EQUAL(addr_ret1.ToStringAddrPort(), "250.1.1.1:8333");
// Test: move addr to tried, select from new expected nothing returned.
BOOST_CHECK(addrman->Good(CAddress(addr1, NODE_NONE)));
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
auto addr_ret2 = addrman->Select(newOnly).first;
- BOOST_CHECK_EQUAL(addr_ret2.ToString(), "[::]:0");
+ BOOST_CHECK_EQUAL(addr_ret2.ToStringAddrPort(), "[::]:0");
auto addr_ret3 = addrman->Select().first;
- BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333");
+ BOOST_CHECK_EQUAL(addr_ret3.ToStringAddrPort(), "250.1.1.1:8333");
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
// Add three addresses to new table.
@@ -182,7 +182,7 @@ BOOST_AUTO_TEST_CASE(addrman_select)
BOOST_CHECK(addrman->Good(CAddress(addr7, NODE_NONE)));
// Test: 6 addrs + 1 addr from last test = 7.
- BOOST_CHECK_EQUAL(addrman->size(), 7U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 7U);
// Test: Select pulls from new and tried regardless of port number.
std::set<uint16_t> ports;
@@ -200,25 +200,25 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
uint32_t num_addrs{0};
- BOOST_CHECK_EQUAL(addrman->size(), num_addrs);
+ BOOST_CHECK_EQUAL(addrman->Size(), num_addrs);
while (num_addrs < 22) { // Magic number! 250.1.1.1 - 250.1.1.22 do not collide with deterministic key = 1
CService addr = ResolveService("250.1.1." + ToString(++num_addrs));
BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source));
// Test: No collision in new table yet.
- BOOST_CHECK_EQUAL(addrman->size(), num_addrs);
+ BOOST_CHECK_EQUAL(addrman->Size(), num_addrs);
}
// Test: new table collision!
CService addr1 = ResolveService("250.1.1." + ToString(++num_addrs));
uint32_t collisions{1};
BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman->size(), num_addrs - collisions);
+ BOOST_CHECK_EQUAL(addrman->Size(), num_addrs - collisions);
CService addr2 = ResolveService("250.1.1." + ToString(++num_addrs));
BOOST_CHECK(addrman->Add({CAddress(addr2, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman->size(), num_addrs - collisions);
+ BOOST_CHECK_EQUAL(addrman->Size(), num_addrs - collisions);
}
BOOST_AUTO_TEST_CASE(addrman_new_multiplicity)
@@ -236,7 +236,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_multiplicity)
}
AddressPosition addr_pos = addrman->FindAddressEntry(addr).value();
BOOST_CHECK_EQUAL(addr_pos.multiplicity, 1U);
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
// if nTime increases, an addr can occur in up to 8 buckets
// The acceptance probability decreases exponentially with existing multiplicity -
@@ -250,7 +250,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_multiplicity)
AddressPosition addr_pos_multi = addrman->FindAddressEntry(addr).value();
BOOST_CHECK_EQUAL(addr_pos_multi.multiplicity, 8U);
// multiplicity doesn't affect size
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
}
BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
@@ -261,7 +261,7 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
uint32_t num_addrs{0};
- BOOST_CHECK_EQUAL(addrman->size(), num_addrs);
+ BOOST_CHECK_EQUAL(addrman->Size(), num_addrs);
while (num_addrs < 35) { // Magic number! 250.1.1.1 - 250.1.1.35 do not collide in tried with deterministic key = 1
CService addr = ResolveService("250.1.1." + ToString(++num_addrs));
@@ -290,7 +290,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
// Test: Sanity check, GetAddr should never return anything if addrman
// is empty.
- BOOST_CHECK_EQUAL(addrman->size(), 0U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 0U);
std::vector<CAddress> vAddr1 = addrman->GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt);
BOOST_CHECK_EQUAL(vAddr1.size(), 0U);
@@ -336,11 +336,11 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
}
std::vector<CAddress> vAddr = addrman->GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt);
- size_t percent23 = (addrman->size() * 23) / 100;
+ size_t percent23 = (addrman->Size() * 23) / 100;
BOOST_CHECK_EQUAL(vAddr.size(), percent23);
BOOST_CHECK_EQUAL(vAddr.size(), 461U);
- // (Addrman.size() < number of addresses added) due to address collisions.
- BOOST_CHECK_EQUAL(addrman->size(), 2006U);
+ // (addrman.Size() < number of addresses added) due to address collisions.
+ BOOST_CHECK_EQUAL(addrman->Size(), 2006U);
}
@@ -681,7 +681,7 @@ BOOST_AUTO_TEST_CASE(remove_invalid)
addrman->Add({new1, tried1, new2, tried2}, CNetAddr{});
addrman->Good(tried1);
addrman->Good(tried2);
- BOOST_REQUIRE_EQUAL(addrman->size(), 4);
+ BOOST_REQUIRE_EQUAL(addrman->Size(), 4);
stream << *addrman;
@@ -704,17 +704,17 @@ BOOST_AUTO_TEST_CASE(remove_invalid)
addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node));
stream >> *addrman;
- BOOST_CHECK_EQUAL(addrman->size(), 2);
+ BOOST_CHECK_EQUAL(addrman->Size(), 2);
}
BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
{
auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node));
- BOOST_CHECK(addrman->size() == 0);
+ BOOST_CHECK(addrman->Size() == 0);
// Empty addrman should return blank addrman info.
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
// Add twenty two addresses.
CNetAddr source = ResolveIP("252.2.2.2");
@@ -724,7 +724,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
// No collisions in tried.
BOOST_CHECK(addrman->Good(addr));
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
}
// Ensure Good handles duplicates well.
@@ -736,7 +736,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
BOOST_CHECK(!addrman->Good(addr));
// Verify duplicate address not marked as a collision.
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
}
}
@@ -758,13 +758,13 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
CService addr36 = ResolveService("250.1.1.36");
BOOST_CHECK(addrman->Add({CAddress(addr36, NODE_NONE)}, source));
BOOST_CHECK(!addrman->Good(addr36));
- BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToString(), "250.1.1.19:0");
+ BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToStringAddrPort(), "250.1.1.19:0");
// 36 should be discarded and 19 not evicted.
// This means we keep 19 in the tried table and
// 36 stays in the new table.
addrman->ResolveCollisions();
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
// Lets create two collisions.
for (unsigned int i = 37; i < 59; i++) {
@@ -778,28 +778,28 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
BOOST_CHECK(addrman->Add({CAddress(addr59, NODE_NONE)}, source));
BOOST_CHECK(!addrman->Good(addr59));
- BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToString(), "250.1.1.10:0");
+ BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToStringAddrPort(), "250.1.1.10:0");
// Cause a second collision in the new table.
BOOST_CHECK(!addrman->Add({CAddress(addr36, NODE_NONE)}, source));
// 36 still cannot be moved from new to tried due to colliding with 19
BOOST_CHECK(!addrman->Good(addr36));
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() != "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() != "[::]:0");
// Resolve all collisions.
addrman->ResolveCollisions();
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
}
BOOST_AUTO_TEST_CASE(addrman_evictionworks)
{
auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node));
- BOOST_CHECK(addrman->size() == 0);
+ BOOST_CHECK(addrman->Size() == 0);
// Empty addrman should return blank addrman info.
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
// Add 35 addresses
CNetAddr source = ResolveIP("252.2.2.2");
@@ -817,7 +817,7 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
BOOST_CHECK(!addrman->Good(addr));
auto info = addrman->SelectTriedCollision().first;
- BOOST_CHECK_EQUAL(info.ToString(), "250.1.1.19:0");
+ BOOST_CHECK_EQUAL(info.ToStringAddrPort(), "250.1.1.19:0");
// Ensure test of address fails, so that it is evicted.
// Update entry in tried by setting last good connection in the deep past.
@@ -826,7 +826,7 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
// Should swap 36 for 19.
addrman->ResolveCollisions();
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
AddressPosition addr_pos{addrman->FindAddressEntry(CAddress(addr, NODE_NONE)).value()};
BOOST_CHECK(addr_pos.tried);
@@ -835,18 +835,18 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
// We check this by verifying Good() returns false and also verifying that
// we have no collisions.
BOOST_CHECK(!addrman->Good(addr));
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
// 19 should fail as a collision (not a duplicate) if we now attempt to move
// it to the tried table.
CService addr19 = ResolveService("250.1.1.19");
BOOST_CHECK(!addrman->Good(addr19));
- BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToString(), "250.1.1.36:0");
+ BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToStringAddrPort(), "250.1.1.36:0");
// Eviction is also successful if too much time has passed since last try
SetMockTime(GetTime() + 4 * 60 *60);
addrman->ResolveCollisions();
- BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToStringAddrPort() == "[::]:0");
//Now 19 is in tried again, and 36 back to new
AddressPosition addr_pos19{addrman->FindAddressEntry(CAddress(addr19, NODE_NONE)).value()};
BOOST_CHECK(addr_pos19.tried);
@@ -878,14 +878,14 @@ BOOST_AUTO_TEST_CASE(load_addrman)
BOOST_CHECK(Lookup("252.5.1.1", source, 8333, false));
std::vector<CAddress> addresses{CAddress(addr1, NODE_NONE), CAddress(addr2, NODE_NONE), CAddress(addr3, NODE_NONE)};
BOOST_CHECK(addrman.Add(addresses, source));
- BOOST_CHECK(addrman.size() == 3);
+ BOOST_CHECK(addrman.Size() == 3);
// Test that the de-serialization does not throw an exception.
CDataStream ssPeers1 = AddrmanToStream(addrman);
bool exceptionThrown = false;
AddrMan addrman1{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)};
- BOOST_CHECK(addrman1.size() == 0);
+ BOOST_CHECK(addrman1.Size() == 0);
try {
unsigned char pchMsgTmp[4];
ssPeers1 >> pchMsgTmp;
@@ -894,16 +894,16 @@ BOOST_AUTO_TEST_CASE(load_addrman)
exceptionThrown = true;
}
- BOOST_CHECK(addrman1.size() == 3);
+ BOOST_CHECK(addrman1.Size() == 3);
BOOST_CHECK(exceptionThrown == false);
// Test that ReadFromStream creates an addrman with the correct number of addrs.
CDataStream ssPeers2 = AddrmanToStream(addrman);
AddrMan addrman2{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)};
- BOOST_CHECK(addrman2.size() == 0);
+ BOOST_CHECK(addrman2.Size() == 0);
ReadFromStream(addrman2, ssPeers2);
- BOOST_CHECK(addrman2.size() == 3);
+ BOOST_CHECK(addrman2.Size() == 3);
}
// Produce a corrupt peers.dat that claims 20 addrs when it only has one addr.
@@ -939,7 +939,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
CDataStream ssPeers1 = MakeCorruptPeersDat();
bool exceptionThrown = false;
AddrMan addrman1{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)};
- BOOST_CHECK(addrman1.size() == 0);
+ BOOST_CHECK(addrman1.Size() == 0);
try {
unsigned char pchMsgTmp[4];
ssPeers1 >> pchMsgTmp;
@@ -947,15 +947,13 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
} catch (const std::exception&) {
exceptionThrown = true;
}
- // Even though de-serialization failed addrman is not left in a clean state.
- BOOST_CHECK(addrman1.size() == 1);
BOOST_CHECK(exceptionThrown);
// Test that ReadFromStream fails if peers.dat is corrupt
CDataStream ssPeers2 = MakeCorruptPeersDat();
AddrMan addrman2{EMPTY_NETGROUPMAN, !DETERMINISTIC, GetCheckRatio(m_node)};
- BOOST_CHECK(addrman2.size() == 0);
+ BOOST_CHECK(addrman2.Size() == 0);
BOOST_CHECK_THROW(ReadFromStream(addrman2, ssPeers2), std::ios_base::failure);
}
@@ -969,7 +967,7 @@ BOOST_AUTO_TEST_CASE(addrman_update_address)
const auto start_time{Now<NodeSeconds>() - 10000s};
addr.nTime = start_time;
BOOST_CHECK(addrman->Add({addr}, source));
- BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(), 1U);
// Updating an addrman entry with a different port doesn't change it
CAddress addr_diff_port{CAddress(ResolveService("250.1.1.1", 8334), NODE_NONE)};
@@ -990,4 +988,42 @@ BOOST_AUTO_TEST_CASE(addrman_update_address)
BOOST_CHECK_EQUAL(vAddr2.at(0).nServices, NODE_NETWORK_LIMITED);
}
+BOOST_AUTO_TEST_CASE(addrman_size)
+{
+ auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node));
+ const CNetAddr source = ResolveIP("252.2.2.2");
+
+ // empty addrman
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/std::nullopt), 0U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/std::nullopt), 0U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/true), 0U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/false), 0U);
+
+ // add two ipv4 addresses, one to tried and new
+ const CAddress addr1{ResolveService("250.1.1.1", 8333), NODE_NONE};
+ BOOST_CHECK(addrman->Add({addr1}, source));
+ BOOST_CHECK(addrman->Good(addr1));
+ const CAddress addr2{ResolveService("250.1.1.2", 8333), NODE_NONE};
+ BOOST_CHECK(addrman->Add({addr2}, source));
+
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/std::nullopt), 2U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/std::nullopt), 2U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/true), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/false), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/true), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/false), 1U);
+
+ // add one i2p address to new
+ CService i2p_addr;
+ i2p_addr.SetSpecial("UDHDrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.I2P");
+ const CAddress addr3{i2p_addr, NODE_NONE};
+ BOOST_CHECK(addrman->Add({addr3}, source));
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/std::nullopt), 3U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_IPV4, /*in_new=*/std::nullopt), 2U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_I2P, /*in_new=*/std::nullopt), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/NET_I2P, /*in_new=*/true), 1U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/true), 2U);
+ BOOST_CHECK_EQUAL(addrman->Size(/*net=*/std::nullopt, /*in_new=*/false), 1U);
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp
index 0101bcc372..7f3ca6bf93 100644
--- a/src/test/base58_tests.cpp
+++ b/src/test/base58_tests.cpp
@@ -5,6 +5,8 @@
#include <test/data/base58_encode_decode.json.h>
#include <base58.h>
+#include <test/util/json.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/strencodings.h>
#include <util/vector.h>
@@ -16,8 +18,6 @@
using namespace std::literals;
-UniValue read_json(const std::string& jsondata);
-
BOOST_FIXTURE_TEST_SUITE(base58_tests, BasicTestingSetup)
// Goal: test low-level base58 encoding functionality
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
index e1dafc6bac..4348a20886 100644
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -7,6 +7,7 @@
#include <consensus/merkle.h>
#include <pow.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/txmempool.h>
#include <test/util/setup_common.h>
@@ -310,7 +311,7 @@ BOOST_AUTO_TEST_CASE(TransactionsRequestSerializationTest) {
req1.indexes[2] = 3;
req1.indexes[3] = 4;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << req1;
BlockTransactionsRequest req2;
@@ -330,7 +331,7 @@ BOOST_AUTO_TEST_CASE(TransactionsRequestDeserializationMaxTest) {
req0.blockhash = InsecureRand256();
req0.indexes.resize(1);
req0.indexes[0] = 0xffff;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << req0;
BlockTransactionsRequest req1;
@@ -350,7 +351,7 @@ BOOST_AUTO_TEST_CASE(TransactionsRequestDeserializationOverflowTest) {
req0.indexes[0] = 0x7000;
req0.indexes[1] = 0x10000 - 0x7000 - 2;
req0.indexes[2] = 0;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << req0.blockhash;
WriteCompactSize(stream, req0.indexes.size());
WriteCompactSize(stream, req0.indexes[0]);
diff --git a/src/test/blockfilter_tests.cpp b/src/test/blockfilter_tests.cpp
index 43dca57217..9388b4c96a 100644
--- a/src/test/blockfilter_tests.cpp
+++ b/src/test/blockfilter_tests.cpp
@@ -110,7 +110,7 @@ BOOST_AUTO_TEST_CASE(blockfilter_basic_test)
// Test serialization/unserialization.
BlockFilter block_filter2;
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << block_filter;
stream >> block_filter2;
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 3d6e103c9f..5d4c5eea0e 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -12,6 +12,7 @@
#include <random.h>
#include <serialize.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <uint256.h>
#include <util/strencodings.h>
@@ -39,7 +40,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << filter;
std::vector<uint8_t> expected = ParseHex("03614e9b050000000000000001");
@@ -66,7 +67,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << filter;
std::vector<uint8_t> expected = ParseHex("03ce4299050000000100008001");
@@ -87,7 +88,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
uint160 hash = pubkey.GetID();
filter.insert(hash);
- CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream stream{};
stream << filter;
std::vector<unsigned char> expected = ParseHex("038fc16b080000000000000001");
@@ -340,7 +341,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize)
for (unsigned int i = 0; i < vMatched.size(); i++)
BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second);
- CDataStream merkleStream(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream merkleStream{};
merkleStream << merkleBlock;
std::vector<uint8_t> expected = ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101");
diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp
index 53fbc26e15..135f107159 100644
--- a/src/test/checkqueue_tests.cpp
+++ b/src/test/checkqueue_tests.cpp
@@ -4,6 +4,7 @@
#include <checkqueue.h>
#include <sync.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/system.h>
#include <util/time.h>
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index b5f961a239..e082800fc3 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -6,6 +6,7 @@
#include <coins.h>
#include <script/standard.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <txdb.h>
#include <uint256.h>
@@ -53,9 +54,9 @@ public:
uint256 GetBestBlock() const override { return hashBestBlock_; }
- bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock) override
+ bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock, bool erase = true) override
{
- for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); ) {
+ for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); it = erase ? mapCoins.erase(it) : std::next(it)) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) {
// Same optimization used in CCoinsViewDB is to only write dirty entries.
map_[it->first] = it->second.coin;
@@ -64,7 +65,6 @@ public:
map_.erase(it->first);
}
}
- mapCoins.erase(it++);
}
if (!hashBlock.IsNull())
hashBestBlock_ = hashBlock;
@@ -126,13 +126,14 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
bool found_an_entry = false;
bool missed_an_entry = false;
bool uncached_an_entry = false;
+ bool flushed_without_erase = false;
// A simple map to track what we expect the cache stack to represent.
std::map<COutPoint, Coin> result;
// The cache stack.
- std::vector<CCoinsViewCacheTest*> stack; // A stack of CCoinsViewCaches on top.
- stack.push_back(new CCoinsViewCacheTest(base)); // Start with one cache.
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>> stack; // A stack of CCoinsViewCaches on top.
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(base)); // Start with one cache.
// Use a limited set of random transaction ids, so we do test overwriting entries.
std::vector<uint256> txids;
@@ -154,9 +155,16 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
bool test_havecoin_after = InsecureRandBits(2) == 0;
bool result_havecoin = test_havecoin_before ? stack.back()->HaveCoin(COutPoint(txid, 0)) : false;
- const Coin& entry = (InsecureRandRange(500) == 0) ? AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0));
+
+ // Infrequently, test usage of AccessByTxid instead of AccessCoin - the
+ // former just delegates to the latter and returns the first unspent in a txn.
+ const Coin& entry = (InsecureRandRange(500) == 0) ?
+ AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0));
BOOST_CHECK(coin == entry);
- BOOST_CHECK(!test_havecoin_before || result_havecoin == !entry.IsSpent());
+
+ if (test_havecoin_before) {
+ BOOST_CHECK(result_havecoin == !entry.IsSpent());
+ }
if (test_havecoin_after) {
bool ret = stack.back()->HaveCoin(COutPoint(txid, 0));
@@ -165,26 +173,31 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
if (InsecureRandRange(5) == 0 || coin.IsSpent()) {
Coin newcoin;
- newcoin.out.nValue = InsecureRand32();
+ newcoin.out.nValue = InsecureRandMoneyAmount();
newcoin.nHeight = 1;
+
+ // Infrequently test adding unspendable coins.
if (InsecureRandRange(16) == 0 && coin.IsSpent()) {
newcoin.out.scriptPubKey.assign(1 + InsecureRandBits(6), OP_RETURN);
BOOST_CHECK(newcoin.out.scriptPubKey.IsUnspendable());
added_an_unspendable_entry = true;
} else {
- newcoin.out.scriptPubKey.assign(InsecureRandBits(6), 0); // Random sizes so we can test memory usage accounting
+ // Random sizes so we can test memory usage accounting
+ newcoin.out.scriptPubKey.assign(InsecureRandBits(6), 0);
(coin.IsSpent() ? added_an_entry : updated_an_entry) = true;
coin = newcoin;
}
- stack.back()->AddCoin(COutPoint(txid, 0), std::move(newcoin), !coin.IsSpent() || InsecureRand32() & 1);
+ bool is_overwrite = !coin.IsSpent() || InsecureRand32() & 1;
+ stack.back()->AddCoin(COutPoint(txid, 0), std::move(newcoin), is_overwrite);
} else {
+ // Spend the coin.
removed_an_entry = true;
coin.Clear();
BOOST_CHECK(stack.back()->SpendCoin(COutPoint(txid, 0)));
}
}
- // One every 10 iterations, remove a random entry from the cache
+ // Once every 10 iterations, remove a random entry from the cache
if (InsecureRandRange(10) == 0) {
COutPoint out(txids[InsecureRand32() % txids.size()], 0);
int cacheid = InsecureRand32() % stack.size();
@@ -206,7 +219,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
found_an_entry = true;
}
}
- for (const CCoinsViewCacheTest *test : stack) {
+ for (const auto& test : stack) {
test->SelfTest();
}
}
@@ -216,7 +229,9 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
if (stack.size() > 1 && InsecureRandBool() == 0) {
unsigned int flushIndex = InsecureRandRange(stack.size() - 1);
if (fake_best_block) stack[flushIndex]->SetBestBlock(InsecureRand256());
- BOOST_CHECK(stack[flushIndex]->Flush());
+ bool should_erase = InsecureRandRange(4) < 3;
+ BOOST_CHECK(should_erase ? stack[flushIndex]->Flush() : stack[flushIndex]->Sync());
+ flushed_without_erase |= !should_erase;
}
}
if (InsecureRandRange(100) == 0) {
@@ -224,19 +239,20 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
if (stack.size() > 0 && InsecureRandBool() == 0) {
//Remove the top cache
if (fake_best_block) stack.back()->SetBestBlock(InsecureRand256());
- BOOST_CHECK(stack.back()->Flush());
- delete stack.back();
+ bool should_erase = InsecureRandRange(4) < 3;
+ BOOST_CHECK(should_erase ? stack.back()->Flush() : stack.back()->Sync());
+ flushed_without_erase |= !should_erase;
stack.pop_back();
}
if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) {
//Add a new cache
CCoinsView* tip = base;
if (stack.size() > 0) {
- tip = stack.back();
+ tip = stack.back().get();
} else {
removed_all_caches = true;
}
- stack.push_back(new CCoinsViewCacheTest(tip));
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(tip));
if (stack.size() == 4) {
reached_4_caches = true;
}
@@ -244,12 +260,6 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
}
}
- // Clean up the stack.
- while (stack.size() > 0) {
- delete stack.back();
- stack.pop_back();
- }
-
// Verify coverage.
BOOST_CHECK(removed_all_caches);
BOOST_CHECK(reached_4_caches);
@@ -260,6 +270,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
BOOST_CHECK(found_an_entry);
BOOST_CHECK(missed_an_entry);
BOOST_CHECK(uncached_an_entry);
+ BOOST_CHECK(flushed_without_erase);
}
// Run the above simulation for multiple base types.
@@ -268,7 +279,7 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
CCoinsViewTest base;
SimulationTest(&base, false);
- CCoinsViewDB db_base{"test", /*nCacheSize=*/1 << 23, /*fMemory=*/true, /*fWipe=*/false};
+ CCoinsViewDB db_base{{.path = "test", .cache_bytes = 1 << 23, .memory_only = true}, {}};
SimulationTest(&db_base, true);
}
@@ -304,8 +315,8 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
// The cache stack.
CCoinsViewTest base; // A CCoinsViewTest at the bottom.
- std::vector<CCoinsViewCacheTest*> stack; // A stack of CCoinsViewCaches on top.
- stack.push_back(new CCoinsViewCacheTest(&base)); // Start with one cache.
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>> stack; // A stack of CCoinsViewCaches on top.
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(&base)); // Start with one cache.
// Track the txids we've used in various sets
std::set<COutPoint> coinbase_coins;
@@ -470,25 +481,18 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
// Every 100 iterations, change the cache stack.
if (stack.size() > 0 && InsecureRandBool() == 0) {
BOOST_CHECK(stack.back()->Flush());
- delete stack.back();
stack.pop_back();
}
if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) {
CCoinsView* tip = &base;
if (stack.size() > 0) {
- tip = stack.back();
+ tip = stack.back().get();
}
- stack.push_back(new CCoinsViewCacheTest(tip));
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(tip));
}
}
}
- // Clean up the stack.
- while (stack.size() > 0) {
- delete stack.back();
- stack.pop_back();
- }
-
// Verify coverage.
BOOST_CHECK(spent_a_duplicate_coinbase);
@@ -498,7 +502,7 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
BOOST_AUTO_TEST_CASE(ccoins_serialization)
{
// Good example
- CDataStream ss1(ParseHex("97f23c835800816115944e077fe7c803cfa57f29b36bf87c1d35"), SER_DISK, CLIENT_VERSION);
+ DataStream ss1{ParseHex("97f23c835800816115944e077fe7c803cfa57f29b36bf87c1d35")};
Coin cc1;
ss1 >> cc1;
BOOST_CHECK_EQUAL(cc1.fCoinBase, false);
@@ -507,7 +511,7 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
BOOST_CHECK_EQUAL(HexStr(cc1.out.scriptPubKey), HexStr(GetScriptForDestination(PKHash(uint160(ParseHex("816115944e077fe7c803cfa57f29b36bf87c1d35"))))));
// Good example
- CDataStream ss2(ParseHex("8ddf77bbd123008c988f1a4a4de2161e0f50aac7f17e7f9555caa4"), SER_DISK, CLIENT_VERSION);
+ DataStream ss2{ParseHex("8ddf77bbd123008c988f1a4a4de2161e0f50aac7f17e7f9555caa4")};
Coin cc2;
ss2 >> cc2;
BOOST_CHECK_EQUAL(cc2.fCoinBase, true);
@@ -516,7 +520,7 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
BOOST_CHECK_EQUAL(HexStr(cc2.out.scriptPubKey), HexStr(GetScriptForDestination(PKHash(uint160(ParseHex("8c988f1a4a4de2161e0f50aac7f17e7f9555caa4"))))));
// Smallest possible example
- CDataStream ss3(ParseHex("000006"), SER_DISK, CLIENT_VERSION);
+ DataStream ss3{ParseHex("000006")};
Coin cc3;
ss3 >> cc3;
BOOST_CHECK_EQUAL(cc3.fCoinBase, false);
@@ -525,7 +529,7 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
BOOST_CHECK_EQUAL(cc3.out.scriptPubKey.size(), 0U);
// scriptPubKey that ends beyond the end of the stream
- CDataStream ss4(ParseHex("000007"), SER_DISK, CLIENT_VERSION);
+ DataStream ss4{ParseHex("000007")};
try {
Coin cc4;
ss4 >> cc4;
@@ -534,11 +538,11 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
}
// Very large scriptPubKey (3*10^9 bytes) past the end of the stream
- CDataStream tmp(SER_DISK, CLIENT_VERSION);
+ DataStream tmp{};
uint64_t x = 3000000000ULL;
tmp << VARINT(x);
BOOST_CHECK_EQUAL(HexStr(tmp), "8a95c0bb00");
- CDataStream ss5(ParseHex("00008a95c0bb00"), SER_DISK, CLIENT_VERSION);
+ DataStream ss5{ParseHex("00008a95c0bb00")};
try {
Coin cc5;
ss5 >> cc5;
@@ -589,9 +593,9 @@ static size_t InsertCoinsMapEntry(CCoinsMap& map, CAmount value, char flags)
return inserted.first->second.coin.DynamicMemoryUsage();
}
-void GetCoinsMapEntry(const CCoinsMap& map, CAmount& value, char& flags)
+void GetCoinsMapEntry(const CCoinsMap& map, CAmount& value, char& flags, const COutPoint& outp = OUTPOINT)
{
- auto it = map.find(OUTPOINT);
+ auto it = map.find(outp);
if (it == map.end()) {
value = ABSENT;
flags = NO_ENTRY;
@@ -877,4 +881,199 @@ BOOST_AUTO_TEST_CASE(ccoins_write)
CheckWriteCoins(parent_value, child_value, parent_value, parent_flags, child_flags, parent_flags);
}
+
+Coin MakeCoin()
+{
+ Coin coin;
+ coin.out.nValue = InsecureRand32();
+ coin.nHeight = InsecureRandRange(4096);
+ coin.fCoinBase = 0;
+ return coin;
+}
+
+
+//! For CCoinsViewCache instances backed by either another cache instance or
+//! leveldb, test cache behavior and flag state (DIRTY/FRESH) by
+//!
+//! 1. Adding a random coin to the child-most cache,
+//! 2. Flushing all caches (without erasing),
+//! 3. Ensuring the entry still exists in the cache and has been written to the parent,
+//! 4. (if `do_erasing_flush`) Flushing the caches again (with erasing),
+//! 5. (if `do_erasing_flush`) Ensuring the entry has been written to the parent and is no longer in the cache,
+//! 6. Spending the coin and ensuring it no longer exists in the parent.
+//!
+void TestFlushBehavior(
+ CCoinsViewCacheTest* view,
+ CCoinsViewDB& base,
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>>& all_caches,
+ bool do_erasing_flush)
+{
+ CAmount value;
+ char flags;
+ size_t cache_usage;
+
+ auto flush_all = [&all_caches](bool erase) {
+ // Flush in reverse order to ensure that flushes happen from children up.
+ for (auto i = all_caches.rbegin(); i != all_caches.rend(); ++i) {
+ auto& cache = *i;
+ // hashBlock must be filled before flushing to disk; value is
+ // unimportant here. This is normally done when connecting or disconnecting blocks.
+ cache->SetBestBlock(InsecureRand256());
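+ // Flush() writes dirty entries to the parent and wipes this cache; Sync() writes them too,
+ // but keeps the (now clean) entries cached (spent FRESH entries are simply dropped).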
+ erase ? cache->Flush() : cache->Sync();
+ }
+ };
+
+ uint256 txid = InsecureRand256();
+ COutPoint outp = COutPoint(txid, 0);
+ Coin coin = MakeCoin();
+ // Ensure the coins views haven't seen this coin before.
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!view->HaveCoin(outp));
+
+ // --- 1. Adding a random coin to the child cache
+ //
+ view->AddCoin(outp, Coin(coin), false);
+
+ cache_usage = view->DynamicMemoryUsage();
+ // `base` shouldn't have the coin (no flush yet) but `view` should have cached it.
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(view->HaveCoin(outp));
+
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin.out.nValue);
+ BOOST_CHECK_EQUAL(flags, DIRTY|FRESH);
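+ // DIRTY: the entry differs from the parent's version; FRESH: the parent is not known to hold an unspent version of this coin.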
+
+ // --- 2. Flushing all caches (without erasing)
+ //
+ flush_all(/*erase=*/ false);
+
+ // CoinsMap usage should be unchanged since we didn't erase anything.
+ BOOST_CHECK_EQUAL(cache_usage, view->DynamicMemoryUsage());
+
+ // --- 3. Ensuring the entry still exists in the cache and has been written to the parent
+ //
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin.out.nValue);
+ BOOST_CHECK_EQUAL(flags, 0); // Flags should have been wiped.
+
+ // Both views should now have the coin.
+ BOOST_CHECK(base.HaveCoin(outp));
+ BOOST_CHECK(view->HaveCoin(outp));
+
+ if (do_erasing_flush) {
+ // --- 4. Flushing the caches again (with erasing)
+ //
+ flush_all(/*erase=*/ true);
+
+ // Memory usage should have gone down.
+ BOOST_CHECK(view->DynamicMemoryUsage() < cache_usage);
+
+ // --- 5. Ensuring the entry is no longer in the cache
+ //
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, ABSENT);
+ BOOST_CHECK_EQUAL(flags, NO_ENTRY);
+
+ view->AccessCoin(outp);
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin.out.nValue);
+ BOOST_CHECK_EQUAL(flags, 0);
+ }
+
+ // Can't overwrite an entry without specifying that an overwrite is
+ // expected.
+ BOOST_CHECK_THROW(
+ view->AddCoin(outp, Coin(coin), /*possible_overwrite=*/ false),
+ std::logic_error);
+
+ // --- 6. Spending the coin
+ //
+ BOOST_CHECK(view->SpendCoin(outp));
+
+ // The coin should be in the cache, but spent and marked dirty.
+ GetCoinsMapEntry(view->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, SPENT);
+ BOOST_CHECK_EQUAL(flags, DIRTY);
+ BOOST_CHECK(!view->HaveCoin(outp)); // Coin should be considered spent in `view`.
+ BOOST_CHECK(base.HaveCoin(outp)); // But coin should still be unspent in `base`.
+
+ flush_all(/*erase=*/ false);
+
+ // Coin should be considered spent in both views.
+ BOOST_CHECK(!view->HaveCoin(outp));
+ BOOST_CHECK(!base.HaveCoin(outp));
+
+ // Spent coin should not be spendable.
+ BOOST_CHECK(!view->SpendCoin(outp));
+
+ // --- Bonus check: ensure that a coin added to the base view via one cache
+ // can be spent by another cache which has never seen it.
+ //
+ txid = InsecureRand256();
+ outp = COutPoint(txid, 0);
+ coin = MakeCoin();
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoin(outp));
+
+ all_caches[0]->AddCoin(outp, std::move(coin), false);
+ all_caches[0]->Sync();
+ BOOST_CHECK(base.HaveCoin(outp));
+ BOOST_CHECK(all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoinInCache(outp));
+
+ BOOST_CHECK(all_caches[1]->SpendCoin(outp));
+ flush_all(/*erase=*/ false);
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoin(outp));
+
+ flush_all(/*erase=*/ true); // Erase all cache content.
+
+ // --- Bonus check 2: ensure that a FRESH, spent coin is deleted by Sync()
+ //
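+ // (A FRESH entry was never written to the parent, so once it is spent there is nothing to propagate and Sync() can simply drop it.)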
+ txid = InsecureRand256();
+ outp = COutPoint(txid, 0);
+ coin = MakeCoin();
+ CAmount coin_val = coin.out.nValue;
+ BOOST_CHECK(!base.HaveCoin(outp));
+ BOOST_CHECK(!all_caches[0]->HaveCoin(outp));
+ BOOST_CHECK(!all_caches[1]->HaveCoin(outp));
+
+ // Add and spend from same cache without flushing.
+ all_caches[0]->AddCoin(outp, std::move(coin), false);
+
+ // Coin should be FRESH in the cache.
+ GetCoinsMapEntry(all_caches[0]->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, coin_val);
+ BOOST_CHECK_EQUAL(flags, DIRTY|FRESH);
+
+ // Base shouldn't have seen the coin.
+ BOOST_CHECK(!base.HaveCoin(outp));
+
+ BOOST_CHECK(all_caches[0]->SpendCoin(outp));
+ all_caches[0]->Sync();
+
+ // Ensure there is no sign of the coin after spend/flush.
+ GetCoinsMapEntry(all_caches[0]->map(), value, flags, outp);
+ BOOST_CHECK_EQUAL(value, ABSENT);
+ BOOST_CHECK_EQUAL(flags, NO_ENTRY);
+ BOOST_CHECK(!all_caches[0]->HaveCoinInCache(outp));
+ BOOST_CHECK(!base.HaveCoin(outp));
+}
+
+BOOST_AUTO_TEST_CASE(ccoins_flush_behavior)
+{
+ // Create two in-memory caches atop a leveldb view.
+ CCoinsViewDB base{{.path = "test", .cache_bytes = 1 << 23, .memory_only = true}, {}};
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>> caches;
+ caches.push_back(std::make_unique<CCoinsViewCacheTest>(&base));
+ caches.push_back(std::make_unique<CCoinsViewCacheTest>(caches.back().get()));
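+ // caches[1] is stacked on caches[0], which is stacked on the leveldb-backed base, so flushes propagate down through the stack.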
+
+ for (const auto& view : caches) {
+ TestFlushBehavior(view.get(), base, caches, /*do_erasing_flush=*/false);
+ TestFlushBehavior(view.get(), base, caches, /*do_erasing_flush=*/true);
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index 9b369a5c50..e4e8596a5d 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -17,6 +17,7 @@
#include <crypto/muhash.h>
#include <random.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/strencodings.h>
@@ -133,14 +134,14 @@ static void TestAES256CBC(const std::string &hexkey, const std::string &hexiv, b
static void TestChaCha20(const std::string &hex_message, const std::string &hexkey, uint64_t nonce, uint64_t seek, const std::string& hexout)
{
std::vector<unsigned char> key = ParseHex(hexkey);
+ assert(key.size() == 32);
std::vector<unsigned char> m = ParseHex(hex_message);
- ChaCha20 rng(key.data(), key.size());
+ ChaCha20 rng(key.data());
rng.SetIV(nonce);
- rng.Seek(seek);
- std::vector<unsigned char> out = ParseHex(hexout);
+ rng.Seek64(seek);
std::vector<unsigned char> outres;
- outres.resize(out.size());
- assert(hex_message.empty() || m.size() == out.size());
+ outres.resize(hexout.size() / 2);
+ assert(hex_message.empty() || m.size() * 2 == hexout.size());
// Perform the ChaCha20 round(s); if a message is provided, this outputs the encrypted ciphertext, otherwise the keystream
if (!hex_message.empty()) {
@@ -148,17 +149,38 @@ static void TestChaCha20(const std::string &hex_message, const std::string &hexk
} else {
rng.Keystream(outres.data(), outres.size());
}
- BOOST_CHECK(out == outres);
+ BOOST_CHECK_EQUAL(hexout, HexStr(outres));
if (!hex_message.empty()) {
// Manually XOR with the keystream and compare the output
rng.SetIV(nonce);
- rng.Seek(seek);
+ rng.Seek64(seek);
std::vector<unsigned char> only_keystream(outres.size());
rng.Keystream(only_keystream.data(), only_keystream.size());
for (size_t i = 0; i != m.size(); i++) {
outres[i] = m[i] ^ only_keystream[i];
}
- BOOST_CHECK(out == outres);
+ BOOST_CHECK_EQUAL(hexout, HexStr(outres));
+ }
+
+ // Repeat 10x, but fragmented into 3 chunks, to exercise the ChaCha20 class's caching.
+ for (int i = 0; i < 10; ++i) {
+ size_t lens[3];
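+ // Pick three chunk lengths that together cover the entire expected output.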
+ lens[0] = InsecureRandRange(hexout.size() / 2U + 1U);
+ lens[1] = InsecureRandRange(hexout.size() / 2U + 1U - lens[0]);
+ lens[2] = hexout.size() / 2U - lens[0] - lens[1];
+
+ rng.Seek64(seek);
+ outres.assign(hexout.size() / 2U, 0);
+ size_t pos = 0;
+ for (int j = 0; j < 3; ++j) {
+ if (!hex_message.empty()) {
+ rng.Crypt(m.data() + pos, outres.data() + pos, lens[j]);
+ } else {
+ rng.Keystream(outres.data() + pos, lens[j]);
+ }
+ pos += lens[j];
+ }
+ BOOST_CHECK_EQUAL(hexout, HexStr(outres));
}
}
@@ -460,7 +482,88 @@ BOOST_AUTO_TEST_CASE(aes_cbc_testvectors) {
BOOST_AUTO_TEST_CASE(chacha20_testvector)
{
- // Test vector from RFC 7539
+ // RFC 7539/8439 A.1 Test Vector #1:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, 0,
+ "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7"
+ "da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586");
+
+ // RFC 7539/8439 A.1 Test Vector #2:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, 1,
+ "9f07e7be5551387a98ba977c732d080dcb0f29a048e3656912c6533e32ee7aed"
+ "29b721769ce64e43d57133b074d839d531ed1f28510afb45ace10a1f4b794d6f");
+
+ // RFC 7539/8439 A.1 Test Vector #3:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ 0, 1,
+ "3aeb5224ecf849929b9d828db1ced4dd832025e8018b8160b82284f3c949aa5a"
+ "8eca00bbb4a73bdad192b5c42f73f2fd4e273644c8b36125a64addeb006c13a0");
+
+ // RFC 7539/8439 A.1 Test Vector #4:
+ TestChaCha20("",
+ "00ff000000000000000000000000000000000000000000000000000000000000",
+ 0, 2,
+ "72d54dfbf12ec44b362692df94137f328fea8da73990265ec1bbbea1ae9af0ca"
+ "13b25aa26cb4a648cb9b9d1be65b2c0924a66c54d545ec1b7374f4872e99f096");
+
+ // RFC 7539/8439 A.1 Test Vector #5:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0x200000000000000, 0,
+ "c2c64d378cd536374ae204b9ef933fcd1a8b2288b3dfa49672ab765b54ee27c7"
+ "8a970e0e955c14f3a88e741b97c286f75f8fc299e8148362fa198a39531bed6d");
+
+ // RFC 7539/8439 A.2 Test Vector #1:
+ TestChaCha20("0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, 0,
+ "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7"
+ "da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586");
+
+ // RFC 7539/8439 A.2 Test Vector #2:
+ TestChaCha20("416e79207375626d697373696f6e20746f20746865204945544620696e74656e"
+ "6465642062792074686520436f6e7472696275746f7220666f72207075626c69"
+ "636174696f6e20617320616c6c206f722070617274206f6620616e2049455446"
+ "20496e7465726e65742d4472616674206f722052464320616e6420616e792073"
+ "746174656d656e74206d6164652077697468696e2074686520636f6e74657874"
+ "206f6620616e204945544620616374697669747920697320636f6e7369646572"
+ "656420616e20224945544620436f6e747269627574696f6e222e205375636820"
+ "73746174656d656e747320696e636c756465206f72616c2073746174656d656e"
+ "747320696e20494554462073657373696f6e732c2061732077656c6c20617320"
+ "7772697474656e20616e6420656c656374726f6e696320636f6d6d756e696361"
+ "74696f6e73206d61646520617420616e792074696d65206f7220706c6163652c"
+ "207768696368206172652061646472657373656420746f",
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ 0x200000000000000, 1,
+ "a3fbf07df3fa2fde4f376ca23e82737041605d9f4f4f57bd8cff2c1d4b7955ec"
+ "2a97948bd3722915c8f3d337f7d370050e9e96d647b7c39f56e031ca5eb6250d"
+ "4042e02785ececfa4b4bb5e8ead0440e20b6e8db09d881a7c6132f420e527950"
+ "42bdfa7773d8a9051447b3291ce1411c680465552aa6c405b7764d5e87bea85a"
+ "d00f8449ed8f72d0d662ab052691ca66424bc86d2df80ea41f43abf937d3259d"
+ "c4b2d0dfb48a6c9139ddd7f76966e928e635553ba76c5c879d7b35d49eb2e62b"
+ "0871cdac638939e25e8a1e0ef9d5280fa8ca328b351c3c765989cbcf3daa8b6c"
+ "cc3aaf9f3979c92b3720fc88dc95ed84a1be059c6499b9fda236e7e818b04b0b"
+ "c39c1e876b193bfe5569753f88128cc08aaa9b63d1a16f80ef2554d7189c411f"
+ "5869ca52c5b83fa36ff216b9c1d30062bebcfd2dc5bce0911934fda79a86f6e6"
+ "98ced759c3ff9b6477338f3da4f9cd8514ea9982ccafb341b2384dd902f3d1ab"
+ "7ac61dd29c6f21ba5b862f3730e37cfdc4fd806c22f221");
+
+ // RFC 7539/8439 A.2 Test Vector #3:
+ TestChaCha20("2754776173206272696c6c69672c20616e642074686520736c6974687920746f"
+ "7665730a446964206779726520616e642067696d626c6520696e207468652077"
+ "6162653a0a416c6c206d696d737920776572652074686520626f726f676f7665"
+ "732c0a416e6420746865206d6f6d65207261746873206f757467726162652e",
+ "1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cbc207075c0",
+ 0x200000000000000, 42,
+ "62e6347f95ed87a45ffae7426f27a1df5fb69110044c0d73118effa95b01e5cf"
+ "166d3df2d721caf9b21e5fb14c616871fd84c54f9d65b283196c7fe4f60553eb"
+ "f39c6402c42234e32a356b3e764312a61a5532055716ead6962568f87d3f3f77"
+ "04c6a8d1bcd1bf4d50d6154b6da731b187b58dfd728afa36757a797ac188d1");
// test encryption
TestChaCha20("4c616469657320616e642047656e746c656d656e206f662074686520636c617373206f66202739393a204966204920636f756"
@@ -477,27 +580,24 @@ BOOST_AUTO_TEST_CASE(chacha20_testvector)
"224f51f3401bd9e12fde276fb8631ded8c131f823d2c06e27e4fcaec9ef3cf788a3b0aa372600a92b57974cded2b9334794cb"
"a40c63e34cdea212c4cf07d41b769a6749f3f630f4122cafe28ec4dc47e26d4346d70b98c73f3e9c53ac40c5945398b6eda1a"
"832c89c167eacd901d7e2bf363");
+}
- // Test vectors from https://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04#section-7
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000000", 0, 0,
- "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b"
- "8f41518a11cc387b669b2ee6586");
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000001", 0, 0,
- "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d79"
- "2b1c43fea817e9ad275ae546963");
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000000", 0x0100000000000000ULL, 0,
- "de9cba7bf3d69ef5e786dc63973f653a0b49e015adbff7134fcb7df137821031e85a050278a7084527214f73efc7fa5b52770"
- "62eb7a0433e445f41e3");
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000000", 1, 0,
- "ef3fdfd6c61578fbf5cf35bd3dd33b8009631634d21e42ac33960bd138e50d32111e4caf237ee53ca8ad6426194a88545ddc4"
- "97a0b466e7d6bbdb0041b2f586b");
- TestChaCha20("", "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", 0x0706050403020100ULL, 0,
- "f798a189f195e66982105ffb640bb7757f579da31602fc93ec01ac56f85ac3c134a4547b733b46413042c9440049176905d3b"
- "e59ea1c53f15916155c2be8241a38008b9a26bc35941e2444177c8ade6689de95264986d95889fb60e84629c9bd9a5acb1cc1"
- "18be563eb9b3a4a472f82e09a7e778492b562ef7130e88dfe031c79db9d4f7c7a899151b9a475032b63fc385245fe054e3dd5"
- "a97a5f576fe064025d3ce042c566ab2c507b138db853e3d6959660996546cc9c4a6eafdc777c040d70eaf46f76dad3979e5c5"
- "360c3317166a1c894c94a371876a94df7628fe4eaaf2ccb27d5aaae0ad7ad0f9d4b6ad3b54098746d4524d38407a6deb3ab78"
- "fab78c9");
+BOOST_AUTO_TEST_CASE(chacha20_midblock)
+{
+ auto key = ParseHex("0000000000000000000000000000000000000000000000000000000000000000");
+ ChaCha20 c20{key.data()};
+ // get one block of keystream
+ unsigned char block[64];
+ c20.Keystream(block, 64);
+ unsigned char b1[5], b2[7], b3[52];
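+ // Regenerate the same keystream in 5-, 7- and 52-byte pieces; their concatenation must match the single 64-byte block above.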
+ c20 = ChaCha20{key.data()};
+ c20.Keystream(b1, 5);
+ c20.Keystream(b2, 7);
+ c20.Keystream(b3, 52);
+
+ BOOST_CHECK_EQUAL(0, memcmp(b1, block, 5));
+ BOOST_CHECK_EQUAL(0, memcmp(b2, block + 5, 7));
+ BOOST_CHECK_EQUAL(0, memcmp(b3, block + 12, 52));
}
BOOST_AUTO_TEST_CASE(poly1305_testvector)
@@ -617,7 +717,7 @@ static void TestChaCha20Poly1305AEAD(bool must_succeed, unsigned int expected_aa
ChaCha20Poly1305AEAD aead(aead_K_1.data(), aead_K_1.size(), aead_K_2.data(), aead_K_2.size());
// create a chacha20 instance to compare against
- ChaCha20 cmp_ctx(aead_K_1.data(), 32);
+ ChaCha20 cmp_ctx(aead_K_1.data());
// encipher
bool res = aead.Crypt(seqnr_payload, seqnr_aad, aad_pos, ciphertext_buf.data(), ciphertext_buf.size(), plaintext_buf.data(), plaintext_buf.size(), true);
@@ -631,7 +731,7 @@ static void TestChaCha20Poly1305AEAD(bool must_succeed, unsigned int expected_aa
// manually construct the AAD keystream
cmp_ctx.SetIV(seqnr_aad);
- cmp_ctx.Seek(0);
+ cmp_ctx.Seek64(0);
cmp_ctx.Keystream(cmp_ctx_buffer.data(), 64);
BOOST_CHECK(memcmp(expected_aad_keystream.data(), cmp_ctx_buffer.data(), expected_aad_keystream.size()) == 0);
// crypt the 3 length bytes and compare the length
@@ -659,7 +759,7 @@ static void TestChaCha20Poly1305AEAD(bool must_succeed, unsigned int expected_aa
}
// set nonce and block counter, output the keystream
cmp_ctx.SetIV(seqnr_aad);
- cmp_ctx.Seek(0);
+ cmp_ctx.Seek64(0);
cmp_ctx.Keystream(cmp_ctx_buffer.data(), 64);
// crypt the 3 length bytes and compare the length
@@ -925,7 +1025,7 @@ BOOST_AUTO_TEST_CASE(muhash_tests)
// Test MuHash3072 serialization
MuHash3072 serchk = FromInt(1); serchk *= FromInt(2);
std::string ser_exp = "1fa093295ea30a6a3acdc7b3f770fa538eff537528e990e2910e40bbcfd7f6696b1256901929094694b56316de342f593303dd12ac43e06dce1be1ff8301c845beb15468fff0ef002dbf80c29f26e6452bccc91b5cb9437ad410d2a67ea847887fa3c6a6553309946880fe20db2c73fe0641adbd4e86edfee0d9f8cd0ee1230898873dc13ed8ddcaf045c80faa082774279007a2253f8922ee3ef361d378a6af3ddaf180b190ac97e556888c36b3d1fb1c85aab9ccd46e3deaeb7b7cf5db067a7e9ff86b658cf3acd6662bbcce37232daa753c48b794356c020090c831a8304416e2aa7ad633c0ddb2f11be1be316a81be7f7e472071c042cb68faef549c221ebff209273638b741aba5a81675c45a5fa92fea4ca821d7a324cb1e1a2ccd3b76c4228ec8066dad2a5df6e1bd0de45c7dd5de8070bdb46db6c554cf9aefc9b7b2bbf9f75b1864d9f95005314593905c0109b71f703d49944ae94477b51dac10a816bb6d1c700bafabc8bd86fac8df24be519a2f2836b16392e18036cb13e48c5c010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000";
- CDataStream ss_chk(SER_DISK, PROTOCOL_VERSION);
+ DataStream ss_chk{};
ss_chk << serchk;
BOOST_CHECK_EQUAL(ser_exp, HexStr(ss_chk.str()));
@@ -938,7 +1038,7 @@ BOOST_AUTO_TEST_CASE(muhash_tests)
BOOST_CHECK_EQUAL(HexStr(out), HexStr(out3));
// Test MuHash3072 overflow, meaning the internal data is larger than the modulus.
- CDataStream ss_max(ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), SER_DISK, PROTOCOL_VERSION);
+ DataStream ss_max{ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")};
MuHash3072 overflowchk;
ss_max >> overflowchk;
diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp
index c7c34cc8c9..eafbcf5681 100644
--- a/src/test/cuckoocache_tests.cpp
+++ b/src/test/cuckoocache_tests.cpp
@@ -1,9 +1,11 @@
// Copyright (c) 2012-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <cuckoocache.h>
#include <random.h>
#include <script/sigcache.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp
index 2447c882ae..723a1ceee3 100644
--- a/src/test/dbwrapper_tests.cpp
+++ b/src/test/dbwrapper_tests.cpp
@@ -3,8 +3,10 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <dbwrapper.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <uint256.h>
+#include <util/string.h>
#include <memory>
@@ -27,7 +29,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper)
// Perform tests both obfuscated and non-obfuscated.
for (const bool obfuscate : {false, true}) {
fs::path ph = m_args.GetDataDirBase() / (obfuscate ? "dbwrapper_obfuscate_true" : "dbwrapper_obfuscate_false");
- CDBWrapper dbw(ph, (1 << 20), true, false, obfuscate);
+ CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = true, .wipe_data = false, .obfuscate = obfuscate});
uint8_t key{'k'};
uint256 in = InsecureRand256();
uint256 res;
@@ -46,7 +48,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data)
// Perform tests both obfuscated and non-obfuscated.
for (bool obfuscate : {false, true}) {
fs::path ph = m_args.GetDataDirBase() / (obfuscate ? "dbwrapper_1_obfuscate_true" : "dbwrapper_1_obfuscate_false");
- CDBWrapper dbw(ph, (1 << 20), false, true, obfuscate);
+ CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = false, .wipe_data = true, .obfuscate = obfuscate});
uint256 res;
uint32_t res_uint_32;
@@ -127,7 +129,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_batch)
// Perform tests both obfuscated and non-obfuscated.
for (const bool obfuscate : {false, true}) {
fs::path ph = m_args.GetDataDirBase() / (obfuscate ? "dbwrapper_batch_obfuscate_true" : "dbwrapper_batch_obfuscate_false");
- CDBWrapper dbw(ph, (1 << 20), true, false, obfuscate);
+ CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = true, .wipe_data = false, .obfuscate = obfuscate});
uint8_t key{'i'};
uint256 in = InsecureRand256();
@@ -163,7 +165,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_iterator)
// Perform tests both obfuscated and non-obfuscated.
for (const bool obfuscate : {false, true}) {
fs::path ph = m_args.GetDataDirBase() / (obfuscate ? "dbwrapper_iterator_obfuscate_true" : "dbwrapper_iterator_obfuscate_false");
- CDBWrapper dbw(ph, (1 << 20), true, false, obfuscate);
+ CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = true, .wipe_data = false, .obfuscate = obfuscate});
// The two keys are intentionally chosen for ordering
uint8_t key{'j'};
@@ -206,7 +208,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate)
fs::create_directories(ph);
// Set up a non-obfuscated wrapper to write some initial data.
- std::unique_ptr<CDBWrapper> dbw = std::make_unique<CDBWrapper>(ph, (1 << 10), false, false, false);
+ std::unique_ptr<CDBWrapper> dbw = std::make_unique<CDBWrapper>(DBParams{.path = ph, .cache_bytes = 1 << 10, .memory_only = false, .wipe_data = false, .obfuscate = false});
uint8_t key{'k'};
uint256 in = InsecureRand256();
uint256 res;
@@ -219,7 +221,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate)
dbw.reset();
// Now, set up another wrapper that wants to obfuscate the same directory
- CDBWrapper odbw(ph, (1 << 10), false, false, true);
+ CDBWrapper odbw({.path = ph, .cache_bytes = 1 << 10, .memory_only = false, .wipe_data = false, .obfuscate = true});
// Check that the key/val we wrote with unobfuscated wrapper exists and
// is readable.
@@ -247,7 +249,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex)
fs::create_directories(ph);
// Set up a non-obfuscated wrapper to write some initial data.
- std::unique_ptr<CDBWrapper> dbw = std::make_unique<CDBWrapper>(ph, (1 << 10), false, false, false);
+ std::unique_ptr<CDBWrapper> dbw = std::make_unique<CDBWrapper>(DBParams{.path = ph, .cache_bytes = 1 << 10, .memory_only = false, .wipe_data = false, .obfuscate = false});
uint8_t key{'k'};
uint256 in = InsecureRand256();
uint256 res;
@@ -260,7 +262,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex)
dbw.reset();
// Simulate a -reindex by wiping the existing data store
- CDBWrapper odbw(ph, (1 << 10), false, true, true);
+ CDBWrapper odbw({.path = ph, .cache_bytes = 1 << 10, .memory_only = false, .wipe_data = true, .obfuscate = true});
// Check that the key/val we wrote with unobfuscated wrapper doesn't exist
uint256 res2;
@@ -279,7 +281,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex)
BOOST_AUTO_TEST_CASE(iterator_ordering)
{
fs::path ph = m_args.GetDataDirBase() / "iterator_ordering";
- CDBWrapper dbw(ph, (1 << 20), true, false, false);
+ CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = true, .wipe_data = false, .obfuscate = false});
for (int x=0x00; x<256; ++x) {
uint8_t key = x;
uint32_t value = x*x;
@@ -324,12 +326,6 @@ struct StringContentsSerializer {
StringContentsSerializer() = default;
explicit StringContentsSerializer(const std::string& inp) : str(inp) {}
- StringContentsSerializer& operator+=(const std::string& s) {
- str += s;
- return *this;
- }
- StringContentsSerializer& operator+=(const StringContentsSerializer& s) { return *this += s.str; }
-
template<typename Stream>
void Serialize(Stream& s) const
{
@@ -343,44 +339,34 @@ struct StringContentsSerializer {
{
str.clear();
uint8_t c{0};
- while (true) {
- try {
- s >> c;
- str.push_back(c);
- } catch (const std::ios_base::failure&) {
- break;
- }
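+ // Read bytes until the stream is exhausted, instead of relying on an exception to end the loop.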
+ while (!s.eof()) {
+ s >> c;
+ str.push_back(c);
}
}
};
BOOST_AUTO_TEST_CASE(iterator_string_ordering)
{
- char buf[10];
-
fs::path ph = m_args.GetDataDirBase() / "iterator_string_ordering";
- CDBWrapper dbw(ph, (1 << 20), true, false, false);
- for (int x=0x00; x<10; ++x) {
- for (int y = 0; y < 10; y++) {
- snprintf(buf, sizeof(buf), "%d", x);
- StringContentsSerializer key(buf);
- for (int z = 0; z < y; z++)
+ CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = true, .wipe_data = false, .obfuscate = false});
+ for (int x = 0; x < 10; ++x) {
+ for (int y = 0; y < 10; ++y) {
+ std::string key{ToString(x)};
+ for (int z = 0; z < y; ++z)
key += key;
uint32_t value = x*x;
- BOOST_CHECK(dbw.Write(key, value));
+ BOOST_CHECK(dbw.Write(StringContentsSerializer{key}, value));
}
}
std::unique_ptr<CDBIterator> it(const_cast<CDBWrapper&>(dbw).NewIterator());
for (const int seek_start : {0, 5}) {
- snprintf(buf, sizeof(buf), "%d", seek_start);
- StringContentsSerializer seek_key(buf);
- it->Seek(seek_key);
- for (unsigned int x=seek_start; x<10; ++x) {
- for (int y = 0; y < 10; y++) {
- snprintf(buf, sizeof(buf), "%d", x);
- std::string exp_key(buf);
- for (int z = 0; z < y; z++)
+ it->Seek(StringContentsSerializer{ToString(seek_start)});
+ for (unsigned int x = seek_start; x < 10; ++x) {
+ for (int y = 0; y < 10; ++y) {
+ std::string exp_key{ToString(x)};
+ for (int z = 0; z < y; ++z)
exp_key += exp_key;
StringContentsSerializer key;
uint32_t value;
@@ -405,7 +391,7 @@ BOOST_AUTO_TEST_CASE(unicodepath)
// the ANSI CreateDirectoryA call and the code page isn't UTF8.
// It will succeed if created with CreateDirectoryW.
fs::path ph = m_args.GetDataDirBase() / "test_runner_₿_🏃_20191128_104644";
- CDBWrapper dbw(ph, (1 << 20));
+ CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20});
fs::path lockPath = ph / "LOCK";
BOOST_CHECK(fs::exists(lockPath));
diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp
index 6e4f6cdbab..c4b2b4c63b 100644
--- a/src/test/descriptor_tests.cpp
+++ b/src/test/descriptor_tests.cpp
@@ -45,6 +45,7 @@ constexpr int DERIVE_HARDENED = 16; // The final derivation is hardened, i.e. en
constexpr int MIXED_PUBKEYS = 32;
constexpr int XONLY_KEYS = 64; // X-only pubkeys are in use (and thus inferring/caching may swap parity of pubkeys/keyids)
constexpr int MISSING_PRIVKEYS = 128; // Not all private keys are available, so ToPrivateString will fail.
+constexpr int SIGNABLE_FAILS = 256; // We can sign with this descriptor, but actually trying to sign will fail
/** Compare two descriptors. If only one of them has a checksum, the checksum is ignored. */
bool EqualDescriptor(std::string a, std::string b)
@@ -126,8 +127,11 @@ std::set<std::pair<CPubKey, KeyOriginInfo>> GetKeyOriginData(const FlatSigningPr
return ret;
}
-void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY,
- bool replace_apostrophe_with_h_in_prv=false, bool replace_apostrophe_with_h_in_pub=false)
+void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags,
+ const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type,
+ const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, bool replace_apostrophe_with_h_in_prv=false,
+ bool replace_apostrophe_with_h_in_pub=false, uint32_t spender_nlocktime=0, uint32_t spender_nsequence=CTxIn::SEQUENCE_FINAL,
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> preimages={})
{
FlatSigningProvider keys_priv, keys_pub;
std::set<std::vector<uint32_t>> left_paths = paths;
@@ -303,16 +307,24 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string&
for (size_t n = 0; n < spks.size(); ++n) {
BOOST_CHECK_EQUAL(ref[n], HexStr(spks[n]));
- if (flags & SIGNABLE) {
+ if (flags & (SIGNABLE | SIGNABLE_FAILS)) {
CMutableTransaction spend;
+ spend.nLockTime = spender_nlocktime;
spend.vin.resize(1);
+ spend.vin[0].nSequence = spender_nsequence;
spend.vout.resize(1);
std::vector<CTxOut> utxos(1);
PrecomputedTransactionData txdata;
txdata.Init(spend, std::move(utxos), /*force=*/true);
MutableTransactionSignatureCreator creator{spend, 0, CAmount{0}, &txdata, SIGHASH_DEFAULT};
SignatureData sigdata;
- BOOST_CHECK_MESSAGE(ProduceSignature(FlatSigningProvider{keys_priv}.Merge(FlatSigningProvider{script_provider}), creator, spks[n], sigdata), prv);
+ // We assume there is no collision between the hashes (e.g. h1=SHA256(SHA256(x)) and h2=SHA256(x))
+ sigdata.sha256_preimages = preimages;
+ sigdata.hash256_preimages = preimages;
+ sigdata.ripemd160_preimages = preimages;
+ sigdata.hash160_preimages = preimages;
+ const auto prod_sig_res = ProduceSignature(FlatSigningProvider{keys_priv}.Merge(FlatSigningProvider{script_provider}), creator, spks[n], sigdata);
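+ // Signing must succeed exactly when SIGNABLE_FAILS is not set.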
+ BOOST_CHECK_MESSAGE(prod_sig_res == !(flags & SIGNABLE_FAILS), prv);
}
/* Infer a descriptor from the generated script, and verify its solvability and that it roundtrips. */
@@ -340,29 +352,40 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string&
BOOST_CHECK_MESSAGE(left_paths.empty(), "Not all expected key paths found: " + prv);
}
-void Check(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY)
+void Check(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags,
+ const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type,
+ const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, uint32_t spender_nlocktime=0,
+ uint32_t spender_nsequence=CTxIn::SEQUENCE_FINAL, std::map<std::vector<uint8_t>, std::vector<uint8_t>> preimages={})
{
bool found_apostrophes_in_prv = false;
bool found_apostrophes_in_pub = false;
// Do not replace apostrophes with 'h' in prv and pub
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/false,
+ /*replace_apostrophe_with_h_in_pub=*/false, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
// Replace apostrophes with 'h' in prv but not in pub, if apostrophes are found in prv
if (prv.find('\'') != std::string::npos) {
found_apostrophes_in_prv = true;
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/true,
+ /*replace_apostrophe_with_h_in_pub=*/false, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
}
// Replace apostrophes with 'h' in pub but not in prv, if apostrophes are found in pub
if (pub.find('\'') != std::string::npos) {
found_apostrophes_in_pub = true;
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/false,
+ /*replace_apostrophe_with_h_in_pub=*/true, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
}
// Replace apostrophes with 'h' both in prv and in pub, if apostrophes are found in both
if (found_apostrophes_in_prv && found_apostrophes_in_pub) {
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/true,
+ /*replace_apostrophe_with_h_in_pub=*/true, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
}
}
@@ -528,9 +551,31 @@ BOOST_AUTO_TEST_CASE(descriptor_test)
CheckUnparsable("wsh(and_b(and_b(older(1),a:older(100000000)),s:pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd)))", "wsh(and_b(and_b(older(1),a:older(100000000)),s:pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204)))", "and_b(older(1),a:older(100000000)) is not sane: contains mixes of timelocks expressed in blocks and seconds");
CheckUnparsable("wsh(and_b(or_b(pkh(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),s:pk(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn)),s:pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd)))", "wsh(and_b(or_b(pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),s:pk(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0)),s:pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204)))", "and_b(or_b(pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),s:pk(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0)),s:pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204)) is not sane: contains duplicate public keys");
// Valid with extended keys.
- Check("wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", UNSOLVABLE, {{"0020acf425291b98a1d7e0d4690139442abc289175be32ef1f75945e339924246d73"}}, OutputType::BECH32, {{},{0}});
- // Valid under sh(wsh()) and with a mix of xpubs and raw keys
- Check("sh(wsh(thresh(1,pkh(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", UNSOLVABLE | MIXED_PUBKEYS, {{"a914767e9119ff3b3ac0cb6dcfe21de1842ccf85f1c487"}}, OutputType::P2SH_SEGWIT, {{},{0}});
+ Check("wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", DEFAULT, {{"0020acf425291b98a1d7e0d4690139442abc289175be32ef1f75945e339924246d73"}}, OutputType::BECH32, {{},{0}});
+ // Valid under sh(wsh()) and with a mix of xpubs and raw keys.
+ Check("sh(wsh(thresh(1,pkh(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE | MIXED_PUBKEYS, {{"a914767e9119ff3b3ac0cb6dcfe21de1842ccf85f1c487"}}, OutputType::P2SH_SEGWIT, {{},{0}});
+ // An exotic multisig; we can sign for both branches
+ Check("wsh(thresh(1,pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc),a:pkh(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)))", "wsh(thresh(1,pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL),a:pkh(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", "wsh(thresh(1,pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL),a:pkh(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", SIGNABLE, {{"00204a4528fbc0947e02e921b54bd476fc8cc2ebb5c6ae2ccf10ed29fe2937fb6892"}}, OutputType::BECH32, {{},{0}});
+ // We can sign for a script requiring the two kinds of timelock.
+ // But if we don't set a sequence high enough, we'll fail.
+ Check("sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE_FAILS, {{"a914099f400961f930d4c16c3b33c0e2a58ef53ac38f87"}}, OutputType::P2SH_SEGWIT, {{},{0}}, /*spender_nlocktime=*/1000, /*spender_nsequence=*/1);
+ // And same for the nLockTime.
+ Check("sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE_FAILS, {{"a914099f400961f930d4c16c3b33c0e2a58ef53ac38f87"}}, OutputType::P2SH_SEGWIT, {{},{0}}, /*spender_nlocktime=*/999, /*spender_nsequence=*/2);
+ // But if both are set to (at least) the required value, we'll succeed.
+ Check("sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE, {{"a914099f400961f930d4c16c3b33c0e2a58ef53ac38f87"}}, OutputType::P2SH_SEGWIT, {{},{0}}, /*spender_nlocktime=*/1000, /*spender_nsequence=*/2);
+ // We can't sign for a script requiring a ripemd160 preimage without providing it.
+ Check("wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"002001549deda34cbc4a5982263191380f522695a2ddc2f99fc3a65c736264bd6cab"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ // But if we provide it, we can.
+ Check("wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"002001549deda34cbc4a5982263191380f522695a2ddc2f99fc3a65c736264bd6cab"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("ff9aa1829c90d26e73301383f549e1497b7d6325"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
+ // Same for sha256
+ Check("wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"002071f7283dbbb9a55ed43a54cda16ba0efd0f16dc48fe200f299e57bb5d7be8dd4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ Check("wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"002071f7283dbbb9a55ed43a54cda16ba0efd0f16dc48fe200f299e57bb5d7be8dd4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
+ // Same for hash160
+ Check("wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"00209b9d5b45735d0e15df5b41d6594602d3de472262f7b75edc6cf5f3e3fa4e3ae4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ Check("wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"00209b9d5b45735d0e15df5b41d6594602d3de472262f7b75edc6cf5f3e3fa4e3ae4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("292e2df59e3a22109200beed0cdc84b12e66793e"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
+ // Same for hash256
+ Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp
index 2953cf149d..a59e41dbb5 100644
--- a/src/test/fuzz/addrman.cpp
+++ b/src/test/fuzz/addrman.cpp
@@ -117,7 +117,7 @@ void FillAddrman(AddrMan& addrman, FuzzedDataProvider& fuzzed_data_provider)
const std::chrono::seconds time_penalty{fast_random_context.randrange(100000001)};
addrman.Add({addr}, source, time_penalty);
- if (n > 0 && addrman.size() % n == 0) {
+ if (n > 0 && addrman.Size() % n == 0) {
addrman.Good(addr, Now<NodeSeconds>());
}
@@ -304,7 +304,7 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman)
/*max_pct=*/fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096),
/*network=*/std::nullopt);
(void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool());
- (void)const_addr_man.size();
+ (void)const_addr_man.Size();
CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION);
data_stream << const_addr_man;
}
diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp
index 46026d8df3..e80c772aa4 100644
--- a/src/test/fuzz/coins_view.cpp
+++ b/src/test/fuzz/coins_view.cpp
@@ -46,7 +46,7 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
CCoinsView backend_coins_view;
- CCoinsViewCache coins_view_cache{&backend_coins_view};
+ CCoinsViewCache coins_view_cache{&backend_coins_view, /*deterministic=*/true};
COutPoint random_out_point;
Coin random_coin;
CMutableTransaction random_mutable_transaction;
@@ -75,6 +75,9 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view)
(void)coins_view_cache.Flush();
},
[&] {
+ (void)coins_view_cache.Sync();
+ },
+ [&] {
coins_view_cache.SetBestBlock(ConsumeUInt256(fuzzed_data_provider));
},
[&] {
diff --git a/src/test/fuzz/coinscache_sim.cpp b/src/test/fuzz/coinscache_sim.cpp
new file mode 100644
index 0000000000..f350c9d032
--- /dev/null
+++ b/src/test/fuzz/coinscache_sim.cpp
@@ -0,0 +1,478 @@
+// Copyright (c) 2023 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <coins.h>
+#include <crypto/sha256.h>
+#include <primitives/transaction.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/util.h>
+
+#include <assert.h>
+#include <optional>
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+namespace {
+
+/** Number of distinct COutPoint values used in this test. */
+constexpr uint32_t NUM_OUTPOINTS = 256;
+/** Number of distinct Coin values used in this test (ignoring nHeight). */
+constexpr uint32_t NUM_COINS = 256;
+/** Maximum number of CCoinsViewCache objects used in this test. */
+constexpr uint32_t MAX_CACHES = 4;
+/** Data type large enough to hold NUM_COINS-1. */
+using coinidx_type = uint8_t;
+
+struct PrecomputedData
+{
+ //! Randomly generated COutPoint values.
+ COutPoint outpoints[NUM_OUTPOINTS];
+
+ //! Randomly generated Coin values.
+ Coin coins[NUM_COINS];
+
+ PrecomputedData()
+ {
+ static const uint8_t PREFIX_O[1] = {'o'}; /** Hash prefix for outpoint hashes. */
+ static const uint8_t PREFIX_S[1] = {'s'}; /** Hash prefix for coins scriptPubKeys. */
+ static const uint8_t PREFIX_M[1] = {'m'}; /** Hash prefix for coins nValue/fCoinBase. */
+
+ for (uint32_t i = 0; i < NUM_OUTPOINTS; ++i) {
+ uint32_t idx = (i * 1200U) >> 12; /* Map 3 or 4 entries to same txid. */
+ const uint8_t ser[4] = {uint8_t(idx), uint8_t(idx >> 8), uint8_t(idx >> 16), uint8_t(idx >> 24)};
+ CSHA256().Write(PREFIX_O, 1).Write(ser, sizeof(ser)).Finalize(outpoints[i].hash.begin());
+ outpoints[i].n = i;
+ }
+
+ for (uint32_t i = 0; i < NUM_COINS; ++i) {
+ const uint8_t ser[4] = {uint8_t(i), uint8_t(i >> 8), uint8_t(i >> 16), uint8_t(i >> 24)};
+ uint256 hash;
+ CSHA256().Write(PREFIX_S, 1).Write(ser, sizeof(ser)).Finalize(hash.begin());
+ /* Convert hash to scriptPubkeys (of different lengths, so SanityCheck's cached memory
+ * usage check has a chance to detect mismatches). */
+ switch (i % 5U) {
+ case 0: /* P2PKH */
+ coins[i].out.scriptPubKey.resize(25);
+ coins[i].out.scriptPubKey[0] = OP_DUP;
+ coins[i].out.scriptPubKey[1] = OP_HASH160;
+ coins[i].out.scriptPubKey[2] = 20;
+ std::copy(hash.begin(), hash.begin() + 20, coins[i].out.scriptPubKey.begin() + 3);
+ coins[i].out.scriptPubKey[23] = OP_EQUALVERIFY;
+ coins[i].out.scriptPubKey[24] = OP_CHECKSIG;
+ break;
+ case 1: /* P2SH */
+ coins[i].out.scriptPubKey.resize(23);
+ coins[i].out.scriptPubKey[0] = OP_HASH160;
+ coins[i].out.scriptPubKey[1] = 20;
+ std::copy(hash.begin(), hash.begin() + 20, coins[i].out.scriptPubKey.begin() + 2);
+ coins[i].out.scriptPubKey[22] = OP_EQUAL;
+ break;
+ case 2: /* P2WPKH */
+ coins[i].out.scriptPubKey.resize(22);
+ coins[i].out.scriptPubKey[0] = OP_0;
+ coins[i].out.scriptPubKey[1] = 20;
+ std::copy(hash.begin(), hash.begin() + 20, coins[i].out.scriptPubKey.begin() + 2);
+ break;
+ case 3: /* P2WSH */
+ coins[i].out.scriptPubKey.resize(34);
+ coins[i].out.scriptPubKey[0] = OP_0;
+ coins[i].out.scriptPubKey[1] = 32;
+ std::copy(hash.begin(), hash.begin() + 32, coins[i].out.scriptPubKey.begin() + 2);
+ break;
+ case 4: /* P2TR */
+ coins[i].out.scriptPubKey.resize(34);
+ coins[i].out.scriptPubKey[0] = OP_1;
+ coins[i].out.scriptPubKey[1] = 32;
+ std::copy(hash.begin(), hash.begin() + 32, coins[i].out.scriptPubKey.begin() + 2);
+ break;
+ }
+ /* Hash again to construct nValue and fCoinBase. */
+ CSHA256().Write(PREFIX_M, 1).Write(ser, sizeof(ser)).Finalize(hash.begin());
+ coins[i].out.nValue = CAmount(hash.GetUint64(0) % MAX_MONEY);
+ coins[i].fCoinBase = (hash.GetUint64(1) & 7) == 0;
+ coins[i].nHeight = 0; /* Real nHeight used in simulation is set dynamically. */
+ }
+ }
+};
+
+enum class EntryType : uint8_t
+{
+ /* This entry in the cache does not exist (so we'd have to look in the parent cache). */
+ NONE,
+
+ /* This entry in the cache corresponds to an unspent coin. */
+ UNSPENT,
+
+ /* This entry in the cache corresponds to a spent coin. */
+ SPENT,
+};
+
+struct CacheEntry
+{
+ /* Type of entry. */
+ EntryType entrytype;
+
+ /* Index in the coins array this entry corresponds to (only if entrytype == UNSPENT). */
+ coinidx_type coinidx;
+
+ /* nHeight value for this entry (so the coins[coinidx].nHeight value is ignored; only if entrytype == UNSPENT). */
+ uint32_t height;
+};
+
+struct CacheLevel
+{
+ CacheEntry entry[NUM_OUTPOINTS];
+
+ void Wipe() {
+ for (uint32_t i = 0; i < NUM_OUTPOINTS; ++i) {
+ entry[i].entrytype = EntryType::NONE;
+ }
+ }
+};
+
+/** Class for the base of the hierarchy (roughly simulating a memory-backed CCoinsViewDB).
+ *
+ * The initial state consists of the empty UTXO set, though coins whose output index
+ * is 3 (mod 5) always have GetCoin() succeed (but returning an IsSpent() coin unless a UTXO
+ * exists). Coins whose output index is 4 (mod 5) have GetCoin() always succeed after being spent.
+ * This exercises code paths with spent, non-DIRTY cache entries.
+ */
+class CoinsViewBottom final : public CCoinsView
+{
+ std::map<COutPoint, Coin> m_data;
+
+public:
+ bool GetCoin(const COutPoint& outpoint, Coin& coin) const final
+ {
+ auto it = m_data.find(outpoint);
+ if (it == m_data.end()) {
+ if ((outpoint.n % 5) == 3) {
+ coin.Clear();
+ return true;
+ }
+ return false;
+ } else {
+ coin = it->second;
+ return true;
+ }
+ }
+
+ bool HaveCoin(const COutPoint& outpoint) const final
+ {
+ return m_data.count(outpoint);
+ }
+
+ uint256 GetBestBlock() const final { return {}; }
+ std::vector<uint256> GetHeadBlocks() const final { return {}; }
+ std::unique_ptr<CCoinsViewCursor> Cursor() const final { return {}; }
+ size_t EstimateSize() const final { return m_data.size(); }
+
+ bool BatchWrite(CCoinsMap& data, const uint256&, bool erase) final
+ {
+ for (auto it = data.begin(); it != data.end(); it = erase ? data.erase(it) : std::next(it)) {
+ if (it->second.flags & CCoinsCacheEntry::DIRTY) {
+ if (it->second.coin.IsSpent() && (it->first.n % 5) != 4) {
+ m_data.erase(it->first);
+ } else if (erase) {
+ m_data[it->first] = std::move(it->second.coin);
+ } else {
+ m_data[it->first] = it->second.coin;
+ }
+ } else {
+ /* For non-dirty entries being written, compare them with what we have. */
+ auto it2 = m_data.find(it->first);
+ if (it->second.coin.IsSpent()) {
+ assert(it2 == m_data.end() || it2->second.IsSpent());
+ } else {
+ assert(it2 != m_data.end());
+ assert(it->second.coin.out == it2->second.out);
+ assert(it->second.coin.fCoinBase == it2->second.fCoinBase);
+ assert(it->second.coin.nHeight == it2->second.nHeight);
+ }
+ }
+ }
+ return true;
+ }
+};
+
+} // namespace
+
+FUZZ_TARGET(coinscache_sim)
+{
+ /** Precomputed COutPoint and CCoins values. */
+ static const PrecomputedData data;
+
+ /** Dummy coinsview instance (base of the hierarchy). */
+ CoinsViewBottom bottom;
+ /** Real CCoinsViewCache objects. */
+ std::vector<std::unique_ptr<CCoinsViewCache>> caches;
+ /** Simulated cache data (sim_caches[0] matches bottom, sim_caches[i+1] matches caches[i]). */
+ CacheLevel sim_caches[MAX_CACHES + 1];
+ /** Current height in the simulation. */
+ uint32_t current_height = 1U;
+
+ // Initialize bottom simulated cache.
+ sim_caches[0].Wipe();
+
+ /** Helper lookup function in the simulated cache stack. */
+ auto lookup = [&](uint32_t outpointidx, int sim_idx = -1) -> std::optional<std::pair<coinidx_type, uint32_t>> {
+ uint32_t cache_idx = sim_idx == -1 ? caches.size() : sim_idx;
+ while (true) {
+ const auto& entry = sim_caches[cache_idx].entry[outpointidx];
+ if (entry.entrytype == EntryType::UNSPENT) {
+ return {{entry.coinidx, entry.height}};
+ } else if (entry.entrytype == EntryType::SPENT) {
+ return std::nullopt;
+ };
+ if (cache_idx == 0) break;
+ --cache_idx;
+ }
+ return std::nullopt;
+ };
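+ // (A NONE entry defers to the next level down, mirroring how a real CCoinsViewCache
+ // falls back to its base view on a cache miss.)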
+
+ /** Flush changes in top cache to the one below. */
+ auto flush = [&]() {
+ assert(caches.size() >= 1);
+ auto& cache = sim_caches[caches.size()];
+ auto& prev_cache = sim_caches[caches.size() - 1];
+ for (uint32_t outpointidx = 0; outpointidx < NUM_OUTPOINTS; ++outpointidx) {
+ if (cache.entry[outpointidx].entrytype != EntryType::NONE) {
+ prev_cache.entry[outpointidx] = cache.entry[outpointidx];
+ cache.entry[outpointidx].entrytype = EntryType::NONE;
+ }
+ }
+ };
+
+ // Main simulation loop: read commands from the fuzzer input, and apply them
+ // to both the real cache stack and the simulation.
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
+ LIMITED_WHILE(provider.remaining_bytes(), 10000) {
+ // Every operation (except "Change height") moves current height forward,
+ // so it functions as a kind of epoch, making ~all UTXOs unique.
+ ++current_height;
+ // Make sure there is always at least one CCoinsViewCache.
+ if (caches.empty()) {
+ caches.emplace_back(new CCoinsViewCache(&bottom, /*deterministic=*/true));
+ sim_caches[caches.size()].Wipe();
+ }
+
+ // Execute command.
+ CallOneOf(
+ provider,
+
+ [&]() { // GetCoin
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data.
+ auto sim = lookup(outpointidx);
+ // Look up in real caches.
+ Coin realcoin;
+ auto real = caches.back()->GetCoin(data.outpoints[outpointidx], realcoin);
+ // Compare results.
+ if (!sim.has_value()) {
+ assert(!real || realcoin.IsSpent());
+ } else {
+ assert(real && !realcoin.IsSpent());
+ const auto& simcoin = data.coins[sim->first];
+ assert(realcoin.out == simcoin.out);
+ assert(realcoin.fCoinBase == simcoin.fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ },
+
+ [&]() { // HaveCoin
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data.
+ auto sim = lookup(outpointidx);
+ // Look up in real caches.
+ auto real = caches.back()->HaveCoin(data.outpoints[outpointidx]);
+ // Compare results.
+ assert(sim.has_value() == real);
+ },
+
+ [&]() { // HaveCoinInCache
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Invoke on real cache (there is no equivalent in simulation, so nothing to compare result with).
+ (void)caches.back()->HaveCoinInCache(data.outpoints[outpointidx]);
+ },
+
+ [&]() { // AccessCoin
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data.
+ auto sim = lookup(outpointidx);
+ // Look up in real caches.
+ const auto& realcoin = caches.back()->AccessCoin(data.outpoints[outpointidx]);
+ // Compare results.
+ if (!sim.has_value()) {
+ assert(realcoin.IsSpent());
+ } else {
+ assert(!realcoin.IsSpent());
+ const auto& simcoin = data.coins[sim->first];
+ assert(simcoin.out == realcoin.out);
+ assert(simcoin.fCoinBase == realcoin.fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ },
+
+ [&]() { // AddCoin (only possible_overwrite if necessary)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ uint32_t coinidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_COINS - 1);
+ // Look up in simulation data (to know whether we must set possible_overwrite or not).
+ auto sim = lookup(outpointidx);
+ // Invoke on real caches.
+ Coin coin = data.coins[coinidx];
+ coin.nHeight = current_height;
+ caches.back()->AddCoin(data.outpoints[outpointidx], std::move(coin), sim.has_value());
+ // Apply to simulation data.
+ auto& entry = sim_caches[caches.size()].entry[outpointidx];
+ entry.entrytype = EntryType::UNSPENT;
+ entry.coinidx = coinidx;
+ entry.height = current_height;
+ },
+
+ [&]() { // AddCoin (always possible_overwrite)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ uint32_t coinidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_COINS - 1);
+ // Invoke on real caches.
+ Coin coin = data.coins[coinidx];
+ coin.nHeight = current_height;
+ caches.back()->AddCoin(data.outpoints[outpointidx], std::move(coin), true);
+ // Apply to simulation data.
+ auto& entry = sim_caches[caches.size()].entry[outpointidx];
+ entry.entrytype = EntryType::UNSPENT;
+ entry.coinidx = coinidx;
+ entry.height = current_height;
+ },
+
+ [&]() { // SpendCoin (moveto = nullptr)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Invoke on real caches.
+ caches.back()->SpendCoin(data.outpoints[outpointidx], nullptr);
+ // Apply to simulation data.
+ sim_caches[caches.size()].entry[outpointidx].entrytype = EntryType::SPENT;
+ },
+
+ [&]() { // SpendCoin (with moveto)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data (to compare the returned *moveto with).
+ auto sim = lookup(outpointidx);
+ // Invoke on real caches.
+ Coin realcoin;
+ caches.back()->SpendCoin(data.outpoints[outpointidx], &realcoin);
+ // Apply to simulation data.
+ sim_caches[caches.size()].entry[outpointidx].entrytype = EntryType::SPENT;
+ // Compare *moveto with the value expected based on simulation data.
+ if (!sim.has_value()) {
+ assert(realcoin.IsSpent());
+ } else {
+ assert(!realcoin.IsSpent());
+ const auto& simcoin = data.coins[sim->first];
+ assert(simcoin.out == realcoin.out);
+ assert(simcoin.fCoinBase == realcoin.fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ },
+
+ [&]() { // Uncache
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Apply to real caches (there is no equivalent in our simulation).
+ caches.back()->Uncache(data.outpoints[outpointidx]);
+ },
+
+ [&]() { // Add a cache level (if not already at the max).
+ if (caches.size() != MAX_CACHES) {
+ // Apply to real caches.
+ caches.emplace_back(new CCoinsViewCache(&*caches.back(), /*deterministic=*/true));
+ // Apply to simulation data.
+ sim_caches[caches.size()].Wipe();
+ }
+ },
+
+ [&]() { // Remove a cache level.
+ // Apply to real caches (this reduces caches.size(), implicitly doing the same on the simulation data).
+ caches.back()->SanityCheck();
+ caches.pop_back();
+ },
+
+ [&]() { // Flush.
+ // Apply to simulation data.
+ flush();
+ // Apply to real caches.
+ caches.back()->Flush();
+ },
+
+ [&]() { // Sync.
+ // Apply to simulation data (note that in our simulation, syncing and flushing is the same thing).
+ flush();
+ // Apply to real caches.
+ caches.back()->Sync();
+ },
+
+ [&]() { // Flush + ReallocateCache.
+ // Apply to simulation data.
+ flush();
+ // Apply to real caches.
+ caches.back()->Flush();
+ caches.back()->ReallocateCache();
+ },
+
+ [&]() { // GetCacheSize
+ (void)caches.back()->GetCacheSize();
+ },
+
+ [&]() { // DynamicMemoryUsage
+ (void)caches.back()->DynamicMemoryUsage();
+ },
+
+ [&]() { // Change height
+ current_height = provider.ConsumeIntegralInRange<uint32_t>(1, current_height - 1);
+ }
+ );
+ }
+
+ // Sanity check all the remaining caches
+ for (const auto& cache : caches) {
+ cache->SanityCheck();
+ }
+
+ // Full comparison between caches and simulation data, from bottom to top,
+ // as AccessCoin on a higher cache may affect caches below it.
+ for (unsigned sim_idx = 1; sim_idx <= caches.size(); ++sim_idx) {
+ auto& cache = *caches[sim_idx - 1];
+ size_t cache_size = 0;
+
+ for (uint32_t outpointidx = 0; outpointidx < NUM_OUTPOINTS; ++outpointidx) {
+ cache_size += cache.HaveCoinInCache(data.outpoints[outpointidx]);
+ const auto& real = cache.AccessCoin(data.outpoints[outpointidx]);
+ auto sim = lookup(outpointidx, sim_idx);
+ if (!sim.has_value()) {
+ assert(real.IsSpent());
+ } else {
+ assert(!real.IsSpent());
+ assert(real.out == data.coins[sim->first].out);
+ assert(real.fCoinBase == data.coins[sim->first].fCoinBase);
+ assert(real.nHeight == sim->second);
+ }
+ }
+
+ // HaveCoinInCache ignores spent coins, so GetCacheSize() may exceed it.
+ assert(cache.GetCacheSize() >= cache_size);
+ }
+
+ // Compare the bottom coinsview (not a CCoinsViewCache) with sim_cache[0].
+ for (uint32_t outpointidx = 0; outpointidx < NUM_OUTPOINTS; ++outpointidx) {
+ Coin realcoin;
+ bool real = bottom.GetCoin(data.outpoints[outpointidx], realcoin);
+ auto sim = lookup(outpointidx, 0);
+ if (!sim.has_value()) {
+ assert(!real || realcoin.IsSpent());
+ } else {
+ assert(real && !realcoin.IsSpent());
+ assert(realcoin.out == data.coins[sim->first].out);
+ assert(realcoin.fCoinBase == data.coins[sim->first].fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ }
+}
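The harness above is a differential test: every operation is applied both to the real CCoinsViewCache stack and to a deliberately trivial model (flat arrays of entries), and the two are compared after every query and at the end. A minimal, self-contained sketch of that pattern, assuming only the standard library (ToyCache and its members are hypothetical stand-ins, not Bitcoin Core APIs):

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <optional>
    #include <random>

    // A toy write-back cache over a backing map; the "model" is a single flat map.
    struct ToyCache {
        std::map<int, int>& base;
        std::map<int, std::optional<int>> dirty; // nullopt = pending erase
        explicit ToyCache(std::map<int, int>& b) : base(b) {}
        std::optional<int> Get(int k) const {
            if (auto it = dirty.find(k); it != dirty.end()) return it->second;
            if (auto it = base.find(k); it != base.end()) return it->second;
            return std::nullopt;
        }
        void Put(int k, int v) { dirty[k] = v; }
        void Erase(int k) { dirty[k] = std::nullopt; }
        void Flush() {
            for (const auto& [k, v] : dirty) {
                if (v) base[k] = *v; else base.erase(k);
            }
            dirty.clear();
        }
    };

    int main()
    {
        std::map<int, int> backing;
        std::map<int, int> model; // the simulation: no cache levels at all
        ToyCache cache(backing);
        std::mt19937 rng(42);
        for (int step = 0; step < 10000; ++step) {
            const int key = rng() % 16;
            switch (rng() % 4) {
            case 0: cache.Put(key, step); model[key] = step; break;
            case 1: cache.Erase(key); model.erase(key); break;
            case 2: cache.Flush(); break; // no model equivalent needed
            default: {
                const auto real = cache.Get(key);
                const auto it = model.find(key);
                assert(real.has_value() == (it != model.end()));
                if (real) assert(*real == it->second);
            }
            }
        }
    }

The real target layers up to four caches and additionally models spentness, heights and the quirks of the bottom view, but the shape is the same: a simple model, fuzzer-driven operations, and equality assertions.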
diff --git a/src/test/fuzz/crypto_chacha20.cpp b/src/test/fuzz/crypto_chacha20.cpp
index 3f552a8cda..3fa445096a 100644
--- a/src/test/fuzz/crypto_chacha20.cpp
+++ b/src/test/fuzz/crypto_chacha20.cpp
@@ -6,6 +6,7 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <test/util/xoroshiro128plusplus.h>
#include <cstdint>
#include <vector>
@@ -16,21 +17,21 @@ FUZZ_TARGET(crypto_chacha20)
ChaCha20 chacha20;
if (fuzzed_data_provider.ConsumeBool()) {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20 = ChaCha20{key.data(), key.size()};
+ const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20 = ChaCha20{key.data()};
}
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) {
CallOneOf(
fuzzed_data_provider,
[&] {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20.SetKey(key.data(), key.size());
+ std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20.SetKey32(key.data());
},
[&] {
chacha20.SetIV(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
},
[&] {
- chacha20.Seek(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
+ chacha20.Seek64(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
},
[&] {
std::vector<uint8_t> output(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
@@ -43,3 +44,110 @@ FUZZ_TARGET(crypto_chacha20)
});
}
}
+
+namespace
+{
+
+/** Fuzzer that invokes ChaCha20::Crypt() or ChaCha20::Keystream multiple times:
+ once for a large block at once, and then the same data in chunks, comparing
+ the outcome.
+
+ If UseCrypt, seeded Xoroshiro128++ output is used as input to Crypt().
+ If not, Keystream() is used directly, or sequences of 0x00 are encrypted.
+*/
+template<bool UseCrypt>
+void ChaCha20SplitFuzz(FuzzedDataProvider& provider)
+{
+ // Determine key, iv, start position, length.
+ unsigned char key[32] = {0};
+ auto key_bytes = provider.ConsumeBytes<unsigned char>(32);
+ std::copy(key_bytes.begin(), key_bytes.end(), key);
+ uint64_t iv = provider.ConsumeIntegral<uint64_t>();
+ uint64_t total_bytes = provider.ConsumeIntegralInRange<uint64_t>(0, 1000000);
+ /* ~x = 2^64 - 1 - x, so ~(total_bytes >> 6) is the maximal seek position. */
+ uint64_t seek = provider.ConsumeIntegralInRange<uint64_t>(0, ~(total_bytes >> 6));
+
+ // Initialize two ChaCha20 ciphers, with the same key/iv/position.
+ ChaCha20 crypt1(key);
+ ChaCha20 crypt2(key);
+ crypt1.SetIV(iv);
+ crypt1.Seek64(seek);
+ crypt2.SetIV(iv);
+ crypt2.Seek64(seek);
+
+ // Construct vectors with data.
+ std::vector<unsigned char> data1, data2;
+ data1.resize(total_bytes);
+ data2.resize(total_bytes);
+
+ // If using Crypt(), initialize data1 and data2 with the same Xoroshiro128++ based
+ // stream.
+ if constexpr (UseCrypt) {
+ uint64_t seed = provider.ConsumeIntegral<uint64_t>();
+ XoRoShiRo128PlusPlus rng(seed);
+ uint64_t bytes = 0;
+ while (bytes < (total_bytes & ~uint64_t{7})) {
+ uint64_t val = rng();
+ WriteLE64(data1.data() + bytes, val);
+ WriteLE64(data2.data() + bytes, val);
+ bytes += 8;
+ }
+ if (bytes < total_bytes) {
+ unsigned char valbytes[8];
+ uint64_t val = rng();
+ WriteLE64(valbytes, val);
+ std::copy(valbytes, valbytes + (total_bytes - bytes), data1.data() + bytes);
+ std::copy(valbytes, valbytes + (total_bytes - bytes), data2.data() + bytes);
+ }
+ }
+
+ // Whether UseCrypt is used or not, the two byte arrays must match.
+ assert(data1 == data2);
+
+ // Encrypt data1, the whole array at once.
+ if constexpr (UseCrypt) {
+ crypt1.Crypt(data1.data(), data1.data(), total_bytes);
+ } else {
+ crypt1.Keystream(data1.data(), total_bytes);
+ }
+
+ // Encrypt data2, in at most 256 chunks.
+ uint64_t bytes2 = 0;
+ int iter = 0;
+ while (true) {
+ bool is_last = (iter == 255) || (bytes2 == total_bytes) || provider.ConsumeBool();
+ ++iter;
+ // Determine how many bytes to encrypt in this chunk: a fuzzer-determined
+ // amount for all but the last chunk (which processes all remaining bytes).
+ uint64_t now = is_last ? total_bytes - bytes2 :
+ provider.ConsumeIntegralInRange<uint64_t>(0, total_bytes - bytes2);
+ // For each chunk, consider using Crypt() even when UseCrypt is false.
+ // This tests that Keystream() has the same behavior as Crypt() applied
+ // to 0x00 input bytes.
+ if (UseCrypt || provider.ConsumeBool()) {
+ crypt2.Crypt(data2.data() + bytes2, data2.data() + bytes2, now);
+ } else {
+ crypt2.Keystream(data2.data() + bytes2, now);
+ }
+ bytes2 += now;
+ if (is_last) break;
+ }
+ // We should have processed everything now.
+ assert(bytes2 == total_bytes);
+ // And the result should match.
+ assert(data1 == data2);
+}
+
+} // namespace
+
+FUZZ_TARGET(chacha20_split_crypt)
+{
+ FuzzedDataProvider provider{buffer.data(), buffer.size()};
+ ChaCha20SplitFuzz<true>(provider);
+}
+
+FUZZ_TARGET(chacha20_split_keystream)
+{
+ FuzzedDataProvider provider{buffer.data(), buffer.size()};
+ ChaCha20SplitFuzz<false>(provider);
+}
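ChaCha20SplitFuzz above checks that producing a keystream (or ciphertext) in one call and producing the same bytes in arbitrary chunks give identical output, which exercises ChaCha20's internal 64-byte block buffering. A minimal sketch of the same comparison shape, assuming only the standard library and a hypothetical ToyKeystreamByte() in place of the cipher:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <random>
    #include <vector>

    // Toy byte-addressable keystream: byte i is a cheap integer hash of i (a stand-in for ChaCha20).
    static uint8_t ToyKeystreamByte(uint64_t i)
    {
        uint64_t x = i * 0x9E3779B97F4A7C15ULL;
        x ^= x >> 29; x *= 0xBF58476D1CE4E5B9ULL; x ^= x >> 32;
        return static_cast<uint8_t>(x);
    }

    int main()
    {
        std::mt19937_64 rng(1);
        const uint64_t total = 100000;
        std::vector<uint8_t> whole(total), chunked(total);

        // Produce the stream in one pass...
        for (uint64_t i = 0; i < total; ++i) whole[i] = ToyKeystreamByte(i);

        // ...and again in randomly sized chunks, as the fuzz target does for ChaCha20.
        uint64_t done = 0;
        while (done < total) {
            const uint64_t now = std::min<uint64_t>(total - done, 1 + rng() % 1024);
            for (uint64_t i = 0; i < now; ++i) chunked[done + i] = ToyKeystreamByte(done + i);
            done += now;
        }
        assert(whole == chunked);
    }

This sketch passes trivially because the toy keystream is stateless per byte; the value of the real target is that ChaCha20 carries state across calls, so chunk boundaries can expose bugs in the buffering and position logic.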
diff --git a/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp b/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp
index 1b89d55773..78fee48de6 100644
--- a/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp
+++ b/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp
@@ -267,32 +267,33 @@ void ECRYPT_keystream_bytes(ECRYPT_ctx* x, u8* stream, u32 bytes)
FUZZ_TARGET(crypto_diff_fuzz_chacha20)
{
+ static const unsigned char ZEROKEY[32] = {0};
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
ChaCha20 chacha20;
ECRYPT_ctx ctx;
- // D. J. Bernstein doesn't initialise ctx to 0 while Bitcoin Core initialises chacha20 to 0 in the constructor
- for (int i = 0; i < 16; i++) {
- ctx.input[i] = 0;
- }
if (fuzzed_data_provider.ConsumeBool()) {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20 = ChaCha20{key.data(), key.size()};
+ const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20 = ChaCha20{key.data()};
ECRYPT_keysetup(&ctx, key.data(), key.size() * 8, 0);
- // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey() does
- uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
- ECRYPT_ivsetup(&ctx, iv);
+ } else {
+ // The default ChaCha20 constructor is equivalent to using the all-0 key.
+ ECRYPT_keysetup(&ctx, ZEROKEY, 256, 0);
}
+ // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey32() does
+ static const uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ ECRYPT_ivsetup(&ctx, iv);
+
LIMITED_WHILE (fuzzed_data_provider.ConsumeBool(), 3000) {
CallOneOf(
fuzzed_data_provider,
[&] {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20.SetKey(key.data(), key.size());
+ const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20.SetKey32(key.data());
ECRYPT_keysetup(&ctx, key.data(), key.size() * 8, 0);
- // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey() does
+ // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey32() does
uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
ECRYPT_ivsetup(&ctx, iv);
},
@@ -304,26 +305,32 @@ FUZZ_TARGET(crypto_diff_fuzz_chacha20)
},
[&] {
uint64_t counter = fuzzed_data_provider.ConsumeIntegral<uint64_t>();
- chacha20.Seek(counter);
+ chacha20.Seek64(counter);
ctx.input[12] = counter;
ctx.input[13] = counter >> 32;
},
[&] {
uint32_t integralInRange = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096);
+ // DJB's version seeks forward to a multiple of 64 bytes after every operation. Correct for that.
+ uint64_t pos = ctx.input[12] + (((uint64_t)ctx.input[13]) << 32) + ((integralInRange + 63) >> 6);
std::vector<uint8_t> output(integralInRange);
chacha20.Keystream(output.data(), output.size());
std::vector<uint8_t> djb_output(integralInRange);
ECRYPT_keystream_bytes(&ctx, djb_output.data(), djb_output.size());
assert(output == djb_output);
+ chacha20.Seek64(pos);
},
[&] {
uint32_t integralInRange = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096);
+ // DJB's version seeks forward to a multiple of 64 bytes after every operation. Correct for that.
+ uint64_t pos = ctx.input[12] + (((uint64_t)ctx.input[13]) << 32) + ((integralInRange + 63) >> 6);
std::vector<uint8_t> output(integralInRange);
const std::vector<uint8_t> input = ConsumeFixedLengthByteVector(fuzzed_data_provider, output.size());
chacha20.Crypt(input.data(), output.data(), input.size());
std::vector<uint8_t> djb_output(integralInRange);
ECRYPT_encrypt_bytes(&ctx, input.data(), djb_output.data(), input.size());
assert(output == djb_output);
+ chacha20.Seek64(pos);
});
}
}
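The Seek64() calls added above keep the two implementations aligned: per the hunk's own comment, the reference code advances its position to the next 64-byte block after every operation, so the Bitcoin Core instance is re-seeked to the old block counter plus ceil(bytes/64). A small standalone check of that arithmetic (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // ceil(bytes / 64), written as in the fuzz target above.
        const auto blocks_consumed = [](uint64_t bytes) { return (bytes + 63) >> 6; };
        assert(blocks_consumed(0) == 0);
        assert(blocks_consumed(1) == 1);
        assert(blocks_consumed(64) == 1);
        assert(blocks_consumed(65) == 2);
        assert(blocks_consumed(100) == 2);
        assert(blocks_consumed(4096) == 64);
    }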
diff --git a/src/test/fuzz/http_request.cpp b/src/test/fuzz/http_request.cpp
index 66a1ff945f..9928c4a1ab 100644
--- a/src/test/fuzz/http_request.cpp
+++ b/src/test/fuzz/http_request.cpp
@@ -59,7 +59,7 @@ FUZZ_TARGET(http_request)
const std::string body = http_request.ReadBody();
assert(body.empty());
const CService service = http_request.GetPeer();
- assert(service.ToString() == "[::]:0");
+ assert(service.ToStringAddrPort() == "[::]:0");
evbuffer_free(evbuf);
evhttp_request_free(evreq);
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 7965f90dc7..c0aefe6067 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -152,7 +152,7 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
const CScriptID script_id{u160};
{
- CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream stream{};
uint256 deserialized_u256;
stream << u256;
@@ -217,7 +217,7 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
}
{
- CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream stream{};
ser_writedata64(stream, u64);
const uint64_t deserialized_u64 = ser_readdata64(stream);
@@ -245,7 +245,7 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
}
{
- CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream stream{};
WriteCompactSize(stream, u64);
try {
diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp
index e83606f032..ea6883c08d 100644
--- a/src/test/fuzz/key.cpp
+++ b/src/test/fuzz/key.cpp
@@ -111,7 +111,7 @@ FUZZ_TARGET_INIT(key, initialize_key)
}
{
- CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream data_stream{};
pubkey.Serialize(data_stream);
CPubKey pubkey_deserialized;
diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp
index 1d6a8d89e4..73096cd5ca 100644
--- a/src/test/fuzz/miniscript.cpp
+++ b/src/test/fuzz/miniscript.cpp
@@ -14,14 +14,25 @@
namespace {
-//! Some pre-computed data for more efficient string roundtrips.
+//! Some pre-computed data for more efficient string roundtrips and to simulate challenges.
struct TestData {
typedef CPubKey Key;
- // Precomputed public keys.
+ // Precomputed public keys, and a dummy signature for each of them.
std::vector<Key> dummy_keys;
std::map<Key, int> dummy_key_idx_map;
std::map<CKeyID, Key> dummy_keys_map;
+ std::map<Key, std::pair<std::vector<unsigned char>, bool>> dummy_sigs;
+
+ // Precomputed hashes of each kind.
+ std::vector<std::vector<unsigned char>> sha256;
+ std::vector<std::vector<unsigned char>> ripemd160;
+ std::vector<std::vector<unsigned char>> hash256;
+ std::vector<std::vector<unsigned char>> hash160;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> sha256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> ripemd160_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash160_preimages;
//! Set the precomputed data.
void Init() {
@@ -35,6 +46,28 @@ struct TestData {
dummy_keys.push_back(pubkey);
dummy_key_idx_map.emplace(pubkey, i);
dummy_keys_map.insert({pubkey.GetID(), pubkey});
+
+ std::vector<unsigned char> sig;
+ privkey.Sign(uint256S(""), sig);
+ sig.push_back(1); // SIGHASH_ALL
+ dummy_sigs.insert({pubkey, {sig, i & 1}});
+
+ std::vector<unsigned char> hash;
+ hash.resize(32);
+ CSHA256().Write(keydata, 32).Finalize(hash.data());
+ sha256.push_back(hash);
+ if (i & 1) sha256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ CHash256().Write(keydata).Finalize(hash);
+ hash256.push_back(hash);
+ if (i & 1) hash256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ hash.resize(20);
+ CRIPEMD160().Write(keydata, 32).Finalize(hash.data());
+ assert(hash.size() == 20);
+ ripemd160.push_back(hash);
+ if (i & 1) ripemd160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ CHash160().Write(keydata).Finalize(hash);
+ hash160.push_back(hash);
+ if (i & 1) hash160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
}
}
} TEST_DATA;
@@ -59,6 +92,17 @@ struct ParserContext {
return HexStr(Span{&idx, 1});
}
+ std::vector<unsigned char> ToPKBytes(const Key& key) const
+ {
+ return {key.begin(), key.end()};
+ }
+
+ std::vector<unsigned char> ToPKHBytes(const Key& key) const
+ {
+ const auto h = Hash160(key);
+ return {h.begin(), h.end()};
+ }
+
template<typename I>
std::optional<Key> FromString(I first, I last) const {
if (last - first != 2) return {};
@@ -69,7 +113,7 @@ struct ParserContext {
template<typename I>
std::optional<Key> FromPKBytes(I first, I last) const {
- Key key;
+ CPubKey key;
key.Set(first, last);
if (!key.IsValid()) return {};
return key;
@@ -104,7 +148,7 @@ struct ScriptParserContext {
return key.data;
}
- const std::vector<unsigned char> ToPKHBytes(const Key& key) const
+ std::vector<unsigned char> ToPKHBytes(const Key& key) const
{
if (key.is_hash) return key.data;
const auto h = Hash160(key.data);
@@ -130,6 +174,735 @@ struct ScriptParserContext {
}
} SCRIPT_PARSER_CONTEXT;
+//! Context to produce a satisfaction for a Miniscript node using the pre-computed data.
+struct SatisfierContext: ParserContext {
+ // Timelock challenges satisfaction. Make the value (deterministically) vary to explore different
+ // paths.
+ bool CheckAfter(uint32_t value) const { return value % 2; }
+ bool CheckOlder(uint32_t value) const { return value % 2; }
+
+ // Signature challenges fulfilled with a dummy signature, if it was one of our dummy keys.
+ miniscript::Availability Sign(const CPubKey& key, std::vector<unsigned char>& sig) const {
+ const auto it = TEST_DATA.dummy_sigs.find(key);
+ if (it == TEST_DATA.dummy_sigs.end()) return miniscript::Availability::NO;
+ if (it->second.second) {
+ // Key is "available"
+ sig = it->second.first;
+ return miniscript::Availability::YES;
+ } else {
+ return miniscript::Availability::NO;
+ }
+ }
+
+ //! Lookup generalization for all the hash satisfactions below
+ miniscript::Availability LookupHash(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage,
+ const std::map<std::vector<unsigned char>, std::vector<unsigned char>>& map) const
+ {
+ const auto it = map.find(hash);
+ if (it == map.end()) return miniscript::Availability::NO;
+ preimage = it->second;
+ return miniscript::Availability::YES;
+ }
+ miniscript::Availability SatSHA256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.sha256_preimages);
+ }
+ miniscript::Availability SatRIPEMD160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.ripemd160_preimages);
+ }
+ miniscript::Availability SatHASH256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.hash256_preimages);
+ }
+ miniscript::Availability SatHASH160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.hash160_preimages);
+ }
+} SATISFIER_CTX;
+
+//! Context to check a satisfaction against the pre-computed data.
+struct CheckerContext: BaseSignatureChecker {
+ TestData *test_data;
+
+ // Signature checker methods. Checks the right dummy signature is used.
+ bool CheckECDSASignature(const std::vector<unsigned char>& sig, const std::vector<unsigned char>& vchPubKey,
+ const CScript& scriptCode, SigVersion sigversion) const override
+ {
+ const CPubKey key{vchPubKey};
+ const auto it = TEST_DATA.dummy_sigs.find(key);
+ if (it == TEST_DATA.dummy_sigs.end()) return false;
+ return it->second.first == sig;
+ }
+ bool CheckLockTime(const CScriptNum& nLockTime) const override { return nLockTime.GetInt64() & 1; }
+ bool CheckSequence(const CScriptNum& nSequence) const override { return nSequence.GetInt64() & 1; }
+} CHECKER_CTX;
+
+//! Context to check for duplicates when instancing a Node.
+struct KeyComparator {
+ bool KeyCompare(const CPubKey& a, const CPubKey& b) const {
+ return a < b;
+ }
+} KEY_COMP;
+
+// A dummy scriptsig to pass to VerifyScript (we always use Segwit v0).
+const CScript DUMMY_SCRIPTSIG;
+
+using Fragment = miniscript::Fragment;
+using NodeRef = miniscript::NodeRef<CPubKey>;
+using Node = miniscript::Node<CPubKey>;
+using Type = miniscript::Type;
+// https://github.com/llvm/llvm-project/issues/53444
+// NOLINTNEXTLINE(misc-unused-using-decls)
+using miniscript::operator"" _mst;
+
+//! Construct a miniscript node as a shared_ptr.
+template<typename... Args> NodeRef MakeNodeRef(Args&&... args) {
+ return miniscript::MakeNodeRef<CPubKey>(miniscript::internal::NoDupCheck{}, std::forward<Args>(args)...);
+}
+
+/** Information about a yet to be constructed Miniscript node. */
+struct NodeInfo {
+ //! The type of this node
+ Fragment fragment;
+ //! Number of subs of this node
+ uint8_t n_subs;
+ //! The timelock value for older() and after(), the threshold value for multi() and thresh()
+ uint32_t k;
+ //! Keys for this node, if it has some
+ std::vector<CPubKey> keys;
+ //! The hash value for this node, if it has one
+ std::vector<unsigned char> hash;
+ //! The type requirements for the children of this node.
+ std::vector<Type> subtypes;
+
+ NodeInfo(Fragment frag): fragment(frag), n_subs(0), k(0) {}
+ NodeInfo(Fragment frag, CPubKey key): fragment(frag), n_subs(0), k(0), keys({key}) {}
+ NodeInfo(Fragment frag, uint32_t _k): fragment(frag), n_subs(0), k(_k) {}
+ NodeInfo(Fragment frag, std::vector<unsigned char> h): fragment(frag), n_subs(0), k(0), hash(std::move(h)) {}
+ NodeInfo(uint8_t subs, Fragment frag): fragment(frag), n_subs(subs), k(0), subtypes(subs, ""_mst) {}
+ NodeInfo(uint8_t subs, Fragment frag, uint32_t _k): fragment(frag), n_subs(subs), k(_k), subtypes(subs, ""_mst) {}
+ NodeInfo(std::vector<Type> subt, Fragment frag): fragment(frag), n_subs(subt.size()), k(0), subtypes(std::move(subt)) {}
+ NodeInfo(std::vector<Type> subt, Fragment frag, uint32_t _k): fragment(frag), n_subs(subt.size()), k(_k), subtypes(std::move(subt)) {}
+ NodeInfo(Fragment frag, uint32_t _k, std::vector<CPubKey> _keys): fragment(frag), n_subs(0), k(_k), keys(std::move(_keys)) {}
+};
+
+/** Pick an index in a collection from a single byte in the fuzzer's output. */
+template<typename T, typename A>
+T ConsumeIndex(FuzzedDataProvider& provider, A& col) {
+ const uint8_t i = provider.ConsumeIntegral<uint8_t>();
+ return col[i];
+}
+
+CPubKey ConsumePubKey(FuzzedDataProvider& provider) {
+ return ConsumeIndex<CPubKey>(provider, TEST_DATA.dummy_keys);
+}
+
+std::vector<unsigned char> ConsumeSha256(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.sha256);
+}
+
+std::vector<unsigned char> ConsumeHash256(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.hash256);
+}
+
+std::vector<unsigned char> ConsumeRipemd160(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.ripemd160);
+}
+
+std::vector<unsigned char> ConsumeHash160(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.hash160);
+}
+
+std::optional<uint32_t> ConsumeTimeLock(FuzzedDataProvider& provider) {
+ const uint32_t k = provider.ConsumeIntegral<uint32_t>();
+ if (k == 0 || k >= 0x80000000) return {};
+ return k;
+}
+
+/**
+ * Consume a Miniscript node from the fuzzer's output.
+ *
+ * This version is intended to have a fixed, stable, encoding for Miniscript nodes:
+ * - The first byte sets the type of the fragment. 0, 1 and all non-leaf fragments but thresh() are a
+ * single byte.
+ * - For the other leaf fragments, the following bytes depend on their type.
+ * - For older() and after(), the next 4 bytes define the timelock value.
+ * - For pk_k(), pk_h(), and all hashes, the next byte defines the index of the value in the test data.
+ * - For multi(), the next 2 bytes define respectively the threshold and the number of keys. Then as many
+ * bytes as the number of keys define the index of each key in the test data.
+ * - For thresh(), the next byte defines the threshold value and the following one the number of subs.
+ */
+std::optional<NodeInfo> ConsumeNodeStable(FuzzedDataProvider& provider) {
+ switch (provider.ConsumeIntegral<uint8_t>()) {
+ case 0: return {{Fragment::JUST_0}};
+ case 1: return {{Fragment::JUST_1}};
+ case 2: return {{Fragment::PK_K, ConsumePubKey(provider)}};
+ case 3: return {{Fragment::PK_H, ConsumePubKey(provider)}};
+ case 4: {
+ const auto k = ConsumeTimeLock(provider);
+ if (!k) return {};
+ return {{Fragment::OLDER, *k}};
+ }
+ case 5: {
+ const auto k = ConsumeTimeLock(provider);
+ if (!k) return {};
+ return {{Fragment::AFTER, *k}};
+ }
+ case 6: return {{Fragment::SHA256, ConsumeSha256(provider)}};
+ case 7: return {{Fragment::HASH256, ConsumeHash256(provider)}};
+ case 8: return {{Fragment::RIPEMD160, ConsumeRipemd160(provider)}};
+ case 9: return {{Fragment::HASH160, ConsumeHash160(provider)}};
+ case 10: {
+ const auto k = provider.ConsumeIntegral<uint8_t>();
+ const auto n_keys = provider.ConsumeIntegral<uint8_t>();
+ if (n_keys > 20 || k == 0 || k > n_keys) return {};
+ std::vector<CPubKey> keys{n_keys};
+ for (auto& key: keys) key = ConsumePubKey(provider);
+ return {{Fragment::MULTI, k, std::move(keys)}};
+ }
+ case 11: return {{3, Fragment::ANDOR}};
+ case 12: return {{2, Fragment::AND_V}};
+ case 13: return {{2, Fragment::AND_B}};
+ case 15: return {{2, Fragment::OR_B}};
+ case 16: return {{2, Fragment::OR_C}};
+ case 17: return {{2, Fragment::OR_D}};
+ case 18: return {{2, Fragment::OR_I}};
+ case 19: {
+ auto k = provider.ConsumeIntegral<uint8_t>();
+ auto n_subs = provider.ConsumeIntegral<uint8_t>();
+ if (k == 0 || k > n_subs) return {};
+ return {{n_subs, Fragment::THRESH, k}};
+ }
+ case 20: return {{1, Fragment::WRAP_A}};
+ case 21: return {{1, Fragment::WRAP_S}};
+ case 22: return {{1, Fragment::WRAP_C}};
+ case 23: return {{1, Fragment::WRAP_D}};
+ case 24: return {{1, Fragment::WRAP_V}};
+ case 25: return {{1, Fragment::WRAP_J}};
+ case 26: return {{1, Fragment::WRAP_N}};
+ default:
+ break;
+ }
+ return {};
+}
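+
+// Example of the stable encoding above (bytes listed in consumption order; i1..i3 are hypothetical
+// indices into the test data): consuming 10, 2, 3, i1, i2, i3 yields multi(2, dummy_keys[i1],
+// dummy_keys[i2], dummy_keys[i3]); consuming 4 followed by a zero timelock yields no node, as
+// older() requires 1 <= k < 0x80000000.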
+
+/* This structure contains a table which for each "target" Type a list of recipes
+ * to construct it, automatically inferred from the behavior of ComputeType.
+ * Note that the Types here are not the final types of the constructed Nodes, but
+ * just the subset that are required. For example, a recipe for the "Bo" type
+ * might construct a "Bondu" sha256() NodeInfo, but cannot construct a "Bz" older().
+ * Each recipe is a Fragment together with a list of required types for its subnodes.
+ */
+struct SmartInfo
+{
+ using recipe = std::pair<Fragment, std::vector<Type>>;
+ std::map<Type, std::vector<recipe>> table;
+
+ void Init()
+ {
+ /* Construct a set of interesting type requirements to reason with (sections of BKVWzondu). */
+ std::vector<Type> types;
+ for (int base = 0; base < 4; ++base) { /* select from B,K,V,W */
+ Type type_base = base == 0 ? "B"_mst : base == 1 ? "K"_mst : base == 2 ? "V"_mst : "W"_mst;
+ for (int zo = 0; zo < 3; ++zo) { /* select from z,o,(none) */
+ Type type_zo = zo == 0 ? "z"_mst : zo == 1 ? "o"_mst : ""_mst;
+ for (int n = 0; n < 2; ++n) { /* select from (none),n */
+ if (zo == 0 && n == 1) continue; /* z conflicts with n */
+ if (base == 3 && n == 1) continue; /* W conflicts with n */
+ Type type_n = n == 0 ? ""_mst : "n"_mst;
+ for (int d = 0; d < 2; ++d) { /* select from (none),d */
+ if (base == 2 && d == 1) continue; /* V conflicts with d */
+ Type type_d = d == 0 ? ""_mst : "d"_mst;
+ for (int u = 0; u < 2; ++u) { /* select from (none),u */
+ if (base == 2 && u == 1) continue; /* V conflicts with u */
+ Type type_u = u == 0 ? ""_mst : "u"_mst;
+ Type type = type_base | type_zo | type_n | type_d | type_u;
+ types.push_back(type);
+ }
+ }
+ }
+ }
+ }
+
+ /* We define a recipe a to be a super-recipe of recipe b if they use the same
+ * fragment, the same number of subexpressions, and each of a's subexpression
+ * types is a supertype of the corresponding subexpression type of b.
+ * Within the set of recipes for the construction of a given type requirement,
+ * no recipe should be a super-recipe of another (as the super-recipe is
+ * applicable in every place the sub-recipe is, the sub-recipe is redundant). */
+ auto is_super_of = [](const recipe& a, const recipe& b) {
+ if (a.first != b.first) return false;
+ if (a.second.size() != b.second.size()) return false;
+ for (size_t i = 0; i < a.second.size(); ++i) {
+ if (!(b.second[i] << a.second[i])) return false;
+ }
+ return true;
+ };
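+ // (For instance, (AND_V, {"V", "B"}) is a super-recipe of (AND_V, {"V", "Bd"}): any "Bd"
+ // subexpression is also a "B", so once the former is in a type's recipe list the latter
+ // is redundant and never added.)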
+
+ /* Sort the type requirements. Subtypes will always sort later (e.g. Bondu will
+ * sort after Bo or Bu). As we'll be constructing recipes using these types, in
+ * order, in what follows, we'll construct super-recipes before sub-recipes.
+ * That means we never need to go back and delete a sub-recipe because a
+ * super-recipe got added. */
+ std::sort(types.begin(), types.end());
+
+ // Iterate over all possible fragments.
+ for (int fragidx = 0; fragidx <= int(Fragment::MULTI); ++fragidx) {
+ int sub_count = 0; //!< The minimum number of child nodes this recipe has.
+ int sub_range = 1; //!< The maximum number of child nodes for this recipe is sub_count+sub_range-1.
+ size_t data_size = 0;
+ size_t n_keys = 0;
+ uint32_t k = 0;
+ Fragment frag{fragidx};
+
+ // Based on the fragment, determine #subs/data/k/keys to pass to ComputeType.
+ switch (frag) {
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ n_keys = 1;
+ break;
+ case Fragment::MULTI:
+ n_keys = 1;
+ k = 1;
+ break;
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ k = 1;
+ break;
+ case Fragment::SHA256:
+ case Fragment::HASH256:
+ data_size = 32;
+ break;
+ case Fragment::RIPEMD160:
+ case Fragment::HASH160:
+ data_size = 20;
+ break;
+ case Fragment::JUST_0:
+ case Fragment::JUST_1:
+ break;
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_D:
+ case Fragment::WRAP_V:
+ case Fragment::WRAP_J:
+ case Fragment::WRAP_N:
+ sub_count = 1;
+ break;
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ sub_count = 2;
+ break;
+ case Fragment::ANDOR:
+ sub_count = 3;
+ break;
+ case Fragment::THRESH:
+ // Thresh logic is executed for 1 and 2 arguments. Larger numbers use ad-hoc code to extend.
+ sub_count = 1;
+ sub_range = 2;
+ k = 1;
+ break;
+ }
+
+ // Iterate over the number of subnodes (sub_count...sub_count+sub_range-1).
+ std::vector<Type> subt;
+ for (int subs = sub_count; subs < sub_count + sub_range; ++subs) {
+ // Iterate over the possible subnode types (at most 3).
+ for (Type x : types) {
+ for (Type y : types) {
+ for (Type z : types) {
+ // Compute the resulting type of a node with the selected fragment / subnode types.
+ subt.clear();
+ if (subs > 0) subt.push_back(x);
+ if (subs > 1) subt.push_back(y);
+ if (subs > 2) subt.push_back(z);
+ Type res = miniscript::internal::ComputeType(frag, x, y, z, subt, k, data_size, subs, n_keys);
+ // Continue if the result is not a valid node.
+ if ((res << "K"_mst) + (res << "V"_mst) + (res << "B"_mst) + (res << "W"_mst) != 1) continue;
+
+ recipe entry{frag, subt};
+ auto super_of_entry = [&](const recipe& rec) { return is_super_of(rec, entry); };
+ // Iterate over all supertypes of res (because if e.g. our selected fragment/subnodes result
+ // in a Bondu, they can form a recipe that is also applicable for constructing a B, Bou, Bdu, ...).
+ for (Type s : types) {
+ if ((res & "BKVWzondu"_mst) << s) {
+ auto& recipes = table[s];
+ // If we don't already have a super-recipe to the new one, add it.
+ if (!std::any_of(recipes.begin(), recipes.end(), super_of_entry)) {
+ recipes.push_back(entry);
+ }
+ }
+ }
+
+ if (subs <= 2) break;
+ }
+ if (subs <= 1) break;
+ }
+ if (subs <= 0) break;
+ }
+ }
+ }
+
+ /* Find which types are useful. The fuzzer logic only cares about constructing
+ * B,V,K,W nodes, so any type that isn't needed in any recipe (directly or
+ * indirectly) for the construction of those is uninteresting. */
+ std::set<Type> useful_types{"B"_mst, "V"_mst, "K"_mst, "W"_mst};
+ // Find the transitive closure by adding types until the set of types does not change.
+ while (true) {
+ size_t set_size = useful_types.size();
+ for (const auto& [type, recipes] : table) {
+ if (useful_types.count(type) != 0) {
+ for (const auto& [_, subtypes] : recipes) {
+ for (auto subtype : subtypes) useful_types.insert(subtype);
+ }
+ }
+ }
+ if (useful_types.size() == set_size) break;
+ }
+ // Remove all rules that construct uninteresting types.
+ for (auto type_it = table.begin(); type_it != table.end();) {
+ if (useful_types.count(type_it->first) == 0) {
+ type_it = table.erase(type_it);
+ } else {
+ ++type_it;
+ }
+ }
+
+ /* Find which types are constructible. A type is constructible if there is a leaf
+ * node recipe for constructing it, or a recipe whose subnodes are all constructible.
+ * Types can be non-constructible because they have no recipes to begin with,
+ * because they can only be constructed using recipes that involve otherwise
+ * non-constructible types, or because they require infinite recursion. */
+ std::set<Type> constructible_types{};
+ auto known_constructible = [&](Type type) { return constructible_types.count(type) != 0; };
+ // Find the transitive closure by adding types until the set of types does not change.
+ while (true) {
+ size_t set_size = constructible_types.size();
+ // Iterate over all types we have recipes for.
+ for (const auto& [type, recipes] : table) {
+ if (!known_constructible(type)) {
+ // For not (yet known to be) constructible types, iterate over their recipes.
+ for (const auto& [_, subt] : recipes) {
+ // If any recipe involves only (already known to be) constructible types,
+ // add the recipe's type to the set.
+ if (std::all_of(subt.begin(), subt.end(), known_constructible)) {
+ constructible_types.insert(type);
+ break;
+ }
+ }
+ }
+ }
+ if (constructible_types.size() == set_size) break;
+ }
+ for (auto type_it = table.begin(); type_it != table.end();) {
+ // Remove all recipes which involve non-constructible types.
+ type_it->second.erase(std::remove_if(type_it->second.begin(), type_it->second.end(),
+ [&](const recipe& rec) {
+ return !std::all_of(rec.second.begin(), rec.second.end(), known_constructible);
+ }), type_it->second.end());
+ // Delete types entirely which have no recipes left.
+ if (type_it->second.empty()) {
+ type_it = table.erase(type_it);
+ } else {
+ ++type_it;
+ }
+ }
+
+ for (auto& [type, recipes] : table) {
+ // Sort recipes for determinism, and place those using fewer subnodes first.
+ // This avoids runaway expansion (when reaching the end of the fuzz input,
+ // all zeroes are read, resulting in the first available recipe being picked).
+ std::sort(recipes.begin(), recipes.end(),
+ [](const recipe& a, const recipe& b) {
+ if (a.second.size() < b.second.size()) return true;
+ if (a.second.size() > b.second.size()) return false;
+ return a < b;
+ }
+ );
+ }
+ }
+} SMARTINFO;
+
+/**
+ * Consume a Miniscript node from the fuzzer's output.
+ *
+ * This is similar to ConsumeNodeStable, but uses a precomputed table with permitted
+ * fragments/subnode type for each required type. It is intended to more quickly explore
+ * interesting miniscripts, at the cost of higher implementation complexity (which could
+ * cause it to miss things if incorrect), and with less regard for stability of the seeds
+ * (as improvements to the tables or changes to the typing rules could invalidate
+ * everything).
+ */
+std::optional<NodeInfo> ConsumeNodeSmart(FuzzedDataProvider& provider, Type type_needed) {
+ /** Table entry for the requested type. */
+ auto recipes_it = SMARTINFO.table.find(type_needed);
+ assert(recipes_it != SMARTINFO.table.end());
+ /** Pick one recipe from the available ones for that type. */
+ const auto& [frag, subt] = PickValue(provider, recipes_it->second);
+
+ // Based on the fragment the recipe uses, fill in other data (k, keys, data).
+ switch (frag) {
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ return {{frag, ConsumePubKey(provider)}};
+ case Fragment::MULTI: {
+ const auto n_keys = provider.ConsumeIntegralInRange<uint8_t>(1, 20);
+ const auto k = provider.ConsumeIntegralInRange<uint8_t>(1, n_keys);
+ std::vector<CPubKey> keys{n_keys};
+ for (auto& key: keys) key = ConsumePubKey(provider);
+ return {{frag, k, std::move(keys)}};
+ }
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ return {{frag, provider.ConsumeIntegralInRange<uint32_t>(1, 0x7FFFFFF)}};
+ case Fragment::SHA256:
+ return {{frag, PickValue(provider, TEST_DATA.sha256)}};
+ case Fragment::HASH256:
+ return {{frag, PickValue(provider, TEST_DATA.hash256)}};
+ case Fragment::RIPEMD160:
+ return {{frag, PickValue(provider, TEST_DATA.ripemd160)}};
+ case Fragment::HASH160:
+ return {{frag, PickValue(provider, TEST_DATA.hash160)}};
+ case Fragment::JUST_0:
+ case Fragment::JUST_1:
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_D:
+ case Fragment::WRAP_V:
+ case Fragment::WRAP_J:
+ case Fragment::WRAP_N:
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ case Fragment::ANDOR:
+ return {{subt, frag}};
+ case Fragment::THRESH: {
+ uint32_t children;
+ if (subt.size() < 2) {
+ children = subt.size();
+ } else {
+ // If we hit a thresh with 2 subnodes, artificially extend it to any number
+ // (2 or larger) by replicating the type of the last subnode.
+ children = provider.ConsumeIntegralInRange<uint32_t>(2, MAX_OPS_PER_SCRIPT / 2);
+ }
+ auto k = provider.ConsumeIntegralInRange<uint32_t>(1, children);
+ std::vector<Type> subs = subt;
+ while (subs.size() < children) subs.push_back(subs.back());
+ return {{std::move(subs), frag, k}};
+ }
+ }
+
+ assert(false);
+}
+
+/**
+ * Generate a Miniscript node based on the fuzzer's input.
+ *
+ * - ConsumeNode is a function object taking a Type, and returning an std::optional<NodeInfo>.
+ * - root_type is the required type properties of the constructed NodeRef.
+ * - strict_valid sets whether ConsumeNode is expected to guarantee a NodeInfo that results in
+ * a NodeRef whose Type() matches the type fed to ConsumeNode.
+ */
+template<typename F>
+NodeRef GenNode(F ConsumeNode, Type root_type = ""_mst, bool strict_valid = false) {
+ /** A stack of miniscript Nodes being built up. */
+ std::vector<NodeRef> stack;
+ /** The queue of instructions. */
+ std::vector<std::pair<Type, std::optional<NodeInfo>>> todo{{root_type, {}}};
+
+ while (!todo.empty()) {
+ // The expected type we have to construct.
+ auto type_needed = todo.back().first;
+ if (!todo.back().second) {
+ // Fragment/children have not been decided yet. Decide them.
+ auto node_info = ConsumeNode(type_needed);
+ if (!node_info) return {};
+ auto subtypes = node_info->subtypes;
+ todo.back().second = std::move(node_info);
+ todo.reserve(todo.size() + subtypes.size());
+ // As elements on the todo stack are processed back to front, construct
+ // them in reverse order (so that the first subnode is generated first).
+ for (size_t i = 0; i < subtypes.size(); ++i) {
+ todo.emplace_back(*(subtypes.rbegin() + i), std::nullopt);
+ }
+ } else {
+ // The back of todo has fragment and number of children decided, and
+ // those children have been constructed at the back of stack. Pop
+ // that entry off todo, and use it to construct a new NodeRef on
+ // stack.
+ NodeInfo& info = *todo.back().second;
+ // Gather children from the back of stack.
+ std::vector<NodeRef> sub;
+ sub.reserve(info.n_subs);
+ for (size_t i = 0; i < info.n_subs; ++i) {
+ sub.push_back(std::move(*(stack.end() - info.n_subs + i)));
+ }
+ stack.erase(stack.end() - info.n_subs, stack.end());
+ // Construct new NodeRef.
+ NodeRef node;
+ if (info.keys.empty()) {
+ node = MakeNodeRef(info.fragment, std::move(sub), std::move(info.hash), info.k);
+ } else {
+ assert(sub.empty());
+ assert(info.hash.empty());
+ node = MakeNodeRef(info.fragment, std::move(info.keys), info.k);
+ }
+ // Verify acceptability.
+ if (!node || !(node->GetType() << type_needed)) {
+ assert(!strict_valid);
+ return {};
+ }
+ if (!node->IsValid()) return {};
+ // Move it to the stack.
+ stack.push_back(std::move(node));
+ todo.pop_back();
+ }
+ }
+ assert(stack.size() == 1);
+ stack[0]->DuplicateKeyCheck(KEY_COMP);
+ return std::move(stack[0]);
+}
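+
+// Example: to build a "B"-typed node from an and_v recipe with subtypes {V, B}, the children are
+// pushed onto `todo` in reverse order (B, then V), so the V-typed first child is generated first;
+// once both children have been moved onto `stack`, the and_v NodeRef is assembled from them.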
+
+/** Perform various applicable tests on a miniscript Node. */
+void TestNode(const NodeRef& node, FuzzedDataProvider& provider)
+{
+ if (!node) return;
+
+ // Check that it roundtrips to text representation
+ std::optional<std::string> str{node->ToString(PARSER_CTX)};
+ assert(str);
+ auto parsed = miniscript::FromString(*str, PARSER_CTX);
+ assert(parsed);
+ assert(*parsed == *node);
+
+ // Check consistency between script size estimation and real size.
+ auto script = node->ToScript(PARSER_CTX);
+ assert(node->ScriptSize() == script.size());
+
+ // Check consistency of "x" property with the script (type K is excluded, because it can end
+ // with a push of a key, which could match these opcodes).
+ if (!(node->GetType() << "K"_mst)) {
+ bool ends_in_verify = !(node->GetType() << "x"_mst);
+ assert(ends_in_verify == (script.back() == OP_CHECKSIG || script.back() == OP_CHECKMULTISIG || script.back() == OP_EQUAL));
+ }
+
+ // The rest of the checks only apply when testing a valid top-level script.
+ if (!node->IsValidTopLevel()) return;
+
+ // Check roundtrip to script
+ auto decoded = miniscript::FromScript(script, PARSER_CTX);
+ assert(decoded);
+ // Note we can't use *decoded == *node because the miniscript representation may differ, so we check that:
+ // - The script corresponding to that decoded form matches exactly
+ // - The type matches exactly
+ assert(decoded->ToScript(PARSER_CTX) == script);
+ assert(decoded->GetType() == node->GetType());
+
+ if (provider.ConsumeBool() && node->GetOps() < MAX_OPS_PER_SCRIPT && node->ScriptSize() < MAX_STANDARD_P2WSH_SCRIPT_SIZE) {
+ // Optionally pad the script with OP_NOPs to max out the ops limit of the constructed script.
+ // This makes the script obviously not actually miniscript-compatible anymore, but the
+ // signatures constructed in this test don't commit to the script anyway, so the same
+ // miniscript satisfier will work. This increases the sensitivity of the test to the ops
+ // counting logic being too low, especially for simple scripts.
+ // Do this optionally because we're not solely interested in cases where the number of ops is
+ // maximal.
+ // Do not pad more than what would cause MAX_STANDARD_P2WSH_SCRIPT_SIZE to be reached, however,
+ // as that also invalidates scripts.
+ int add = std::min<int>(
+ MAX_OPS_PER_SCRIPT - node->GetOps(),
+ MAX_STANDARD_P2WSH_SCRIPT_SIZE - node->ScriptSize());
+ for (int i = 0; i < add; ++i) script.push_back(OP_NOP);
+ }
+
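+ // Satisfactions are tested against a P2WSH-wrapped version of the script, with the witness script itself appended to the witness stack.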
+ // Run malleable satisfaction algorithm.
+ const CScript script_pubkey = CScript() << OP_0 << WitnessV0ScriptHash(script);
+ CScriptWitness witness_mal;
+ const bool mal_success = node->Satisfy(SATISFIER_CTX, witness_mal.stack, false) == miniscript::Availability::YES;
+ witness_mal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ // Run non-malleable satisfaction algorithm.
+ CScriptWitness witness_nonmal;
+ const bool nonmal_success = node->Satisfy(SATISFIER_CTX, witness_nonmal.stack, true) == miniscript::Availability::YES;
+ witness_nonmal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ if (nonmal_success) {
+ // Non-malleable satisfactions are bounded by GetStackSize().
+ assert(witness_nonmal.stack.size() <= node->GetStackSize());
+ // If a non-malleable satisfaction exists, the malleable one must also exist, and be identical to it.
+ assert(mal_success);
+ assert(witness_nonmal.stack == witness_mal.stack);
+
+ // Test non-malleable satisfaction.
+ ScriptError serror;
+ bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror);
+ // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions().
+ if (node->ValidSatisfactions()) assert(res);
+ // More precisely: non-malleable satisfactions must be valid, or may fail with an ops count error (if CheckOpsLimit failed),
+ // or with a stack size error (if CheckStackSize failed).
+ assert(res ||
+ (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) ||
+ (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE));
+ }
+
+ if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) {
+ // Test malleable satisfaction only if it's different from the non-malleable one.
+ ScriptError serror;
+ bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror);
+ // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only
+ // fail due to stack or ops limits.
+ assert(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE);
+ }
+
+ if (node->IsSane()) {
+ // For sane nodes, the two algorithms behave identically.
+ assert(mal_success == nonmal_success);
+ }
+
+ // Verify that if a node is policy-satisfiable, the malleable satisfaction
+ // algorithm succeeds. Given that under IsSane() both satisfactions
+ // are identical, this implies that for such nodes, the non-malleable
+ // satisfaction will also match the expected policy.
+ bool satisfiable = node->IsSatisfiable([](const Node& node) -> bool {
+ switch (node.fragment) {
+ case Fragment::PK_K:
+ case Fragment::PK_H: {
+ auto it = TEST_DATA.dummy_sigs.find(node.keys[0]);
+ assert(it != TEST_DATA.dummy_sigs.end());
+ return it->second.second;
+ }
+ case Fragment::MULTI: {
+ size_t sats = 0;
+ for (const auto& key : node.keys) {
+ auto it = TEST_DATA.dummy_sigs.find(key);
+ assert(it != TEST_DATA.dummy_sigs.end());
+ sats += it->second.second;
+ }
+ return sats >= node.k;
+ }
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ return node.k & 1;
+ case Fragment::SHA256:
+ return TEST_DATA.sha256_preimages.count(node.data);
+ case Fragment::HASH256:
+ return TEST_DATA.hash256_preimages.count(node.data);
+ case Fragment::RIPEMD160:
+ return TEST_DATA.ripemd160_preimages.count(node.data);
+ case Fragment::HASH160:
+ return TEST_DATA.hash160_preimages.count(node.data);
+ default:
+ assert(false);
+ }
+ return false;
+ });
+ assert(mal_success == satisfiable);
+}
+
} // namespace
void FuzzInit()
@@ -138,6 +911,33 @@ void FuzzInit()
TEST_DATA.Init();
}
+void FuzzInitSmart()
+{
+ FuzzInit();
+ SMARTINFO.Init();
+}
+
+/** Fuzz target that runs TestNode on nodes generated using ConsumeNodeStable. */
+FUZZ_TARGET_INIT(miniscript_stable, FuzzInit)
+{
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
+ TestNode(GenNode([&](Type) {
+ return ConsumeNodeStable(provider);
+ }), provider);
+}
+
+/** Fuzz target that runs TestNode on nodes generated using ConsumeNodeSmart. */
+FUZZ_TARGET_INIT(miniscript_smart, FuzzInitSmart)
+{
+ /** The set of types we aim to construct nodes for. Together they cover all. */
+ static constexpr std::array<Type, 4> BASE_TYPES{"B"_mst, "V"_mst, "K"_mst, "W"_mst};
+
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
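+ // ConsumeNodeSmart is expected to return correctly typed nodes, so strict_valid is set below.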
+ TestNode(GenNode([&](Type needed_type) {
+ return ConsumeNodeSmart(provider, needed_type);
+ }, PickValue(provider, BASE_TYPES), true), provider);
+}
+
/* Fuzz tests that test parsing from a string, and roundtripping via string. */
FUZZ_TARGET_INIT(miniscript_string, FuzzInit)
{
diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp
index d61aef6d81..049ae02f4d 100644
--- a/src/test/fuzz/netaddress.cpp
+++ b/src/test/fuzz/netaddress.cpp
@@ -70,8 +70,7 @@ FUZZ_TARGET(netaddress)
assert(net_addr.GetNetwork() == Network::NET_ONION);
}
(void)net_addr.IsValid();
- (void)net_addr.ToString();
- (void)net_addr.ToStringIP();
+ (void)net_addr.ToStringAddr();
const CSubNet sub_net{net_addr, fuzzed_data_provider.ConsumeIntegral<uint8_t>()};
(void)sub_net.IsValid();
@@ -80,9 +79,7 @@ FUZZ_TARGET(netaddress)
const CService service{net_addr, fuzzed_data_provider.ConsumeIntegral<uint16_t>()};
(void)service.GetKey();
(void)service.GetPort();
- (void)service.ToString();
- (void)service.ToStringIPPort();
- (void)service.ToStringPort();
+ (void)service.ToStringAddrPort();
(void)CServiceHash()(service);
(void)CServiceHash(0, 0)(service);
diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp
new file mode 100644
index 0000000000..f8ba4f08d9
--- /dev/null
+++ b/src/test/fuzz/partially_downloaded_block.cpp
@@ -0,0 +1,142 @@
+#include <blockencodings.h>
+#include <consensus/merkle.h>
+#include <consensus/validation.h>
+#include <primitives/block.h>
+#include <primitives/transaction.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <test/fuzz/util/mempool.h>
+#include <test/util/setup_common.h>
+#include <test/util/txmempool.h>
+#include <txmempool.h>
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <set>
+#include <vector>
+
+namespace {
+const TestingSetup* g_setup;
+} // namespace
+
+void initialize_pdb()
+{
+ static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>();
+ g_setup = testing_setup.get();
+}
+
+PartiallyDownloadedBlock::CheckBlockFn FuzzedCheckBlock(std::optional<BlockValidationResult> result)
+{
+ return [result](const CBlock&, BlockValidationState& state, const Consensus::Params&, bool, bool) {
+ if (result) {
+ return state.Invalid(*result);
+ }
+
+ return true;
+ };
+}
+
+FUZZ_TARGET_INIT(partially_downloaded_block, initialize_pdb)
+{
+ FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
+
+ auto block{ConsumeDeserializable<CBlock>(fuzzed_data_provider)};
+ if (!block || block->vtx.size() == 0 ||
+ block->vtx.size() >= std::numeric_limits<uint16_t>::max()) {
+ return;
+ }
+
+ CBlockHeaderAndShortTxIDs cmpctblock{*block};
+
+ CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node)};
+ PartiallyDownloadedBlock pdb{&pool};
+
+ // Set of available transactions (mempool or extra_txn)
+ std::set<uint16_t> available;
+ // The coinbase is always available
+ available.insert(0);
+
+ std::vector<std::pair<uint256, CTransactionRef>> extra_txn;
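+ // Make each non-coinbase transaction randomly available via extra_txn, the mempool, both, or neither.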
+ for (size_t i = 1; i < block->vtx.size(); ++i) {
+ auto tx{block->vtx[i]};
+
+ bool add_to_extra_txn{fuzzed_data_provider.ConsumeBool()};
+ bool add_to_mempool{fuzzed_data_provider.ConsumeBool()};
+
+ if (add_to_extra_txn) {
+ extra_txn.emplace_back(tx->GetWitnessHash(), tx);
+ available.insert(i);
+ }
+
+ if (add_to_mempool) {
+ LOCK2(cs_main, pool.cs);
+ pool.addUnchecked(ConsumeTxMemPoolEntry(fuzzed_data_provider, *tx));
+ available.insert(i);
+ }
+ }
+
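+ // Initialize the PartiallyDownloadedBlock from the compact block, filling in transactions from the mempool and extra_txn.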
+ auto init_status{pdb.InitData(cmpctblock, extra_txn)};
+
+ std::vector<CTransactionRef> missing;
+ // Whether we skipped a transaction that should be included in `missing`.
+ // FillBlock should never return READ_STATUS_OK if that is the case.
+ bool skipped_missing{false};
+ for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
+ // If init_status == READ_STATUS_OK then an available transaction in the
+ // compact block (i.e. IsTxAvailable(i) == true) implies that we marked
+ // that transaction as available above (i.e. available.count(i) > 0).
+ // The reverse is not true, due to possible compact block short id
+ // collisions (i.e. available.count(i) > 0 does not imply
+ // IsTxAvailable(i) == true).
+ if (init_status == READ_STATUS_OK) {
+ assert(!pdb.IsTxAvailable(i) || available.count(i) > 0);
+ }
+
+ bool skip{fuzzed_data_provider.ConsumeBool()};
+ if (!pdb.IsTxAvailable(i) && !skip) {
+ missing.push_back(block->vtx[i]);
+ }
+
+ skipped_missing |= (!pdb.IsTxAvailable(i) && skip);
+ }
+
+ // Mock CheckBlock
+ bool fail_check_block{fuzzed_data_provider.ConsumeBool()};
+ auto validation_result =
+ fuzzed_data_provider.PickValueInArray(
+ {BlockValidationResult::BLOCK_RESULT_UNSET,
+ BlockValidationResult::BLOCK_CONSENSUS,
+ BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE,
+ BlockValidationResult::BLOCK_CACHED_INVALID,
+ BlockValidationResult::BLOCK_INVALID_HEADER,
+ BlockValidationResult::BLOCK_MUTATED,
+ BlockValidationResult::BLOCK_MISSING_PREV,
+ BlockValidationResult::BLOCK_INVALID_PREV,
+ BlockValidationResult::BLOCK_TIME_FUTURE,
+ BlockValidationResult::BLOCK_CHECKPOINT,
+ BlockValidationResult::BLOCK_HEADER_LOW_WORK});
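+ // Install the mocked CheckBlock: it fails with the chosen result when fail_check_block is set, and succeeds otherwise.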
+ pdb.m_check_block_mock = FuzzedCheckBlock(
+ fail_check_block ?
+ std::optional<BlockValidationResult>{validation_result} :
+ std::nullopt);
+
+ CBlock reconstructed_block;
+ auto fill_status{pdb.FillBlock(reconstructed_block, missing)};
+ switch (fill_status) {
+ case READ_STATUS_OK:
+ assert(!skipped_missing);
+ assert(!fail_check_block);
+ assert(block->GetHash() == reconstructed_block.GetHash());
+ break;
+ case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]];
+ case READ_STATUS_FAILED:
+ assert(fail_check_block);
+ break;
+ case READ_STATUS_INVALID:
+ break;
+ }
+}
diff --git a/src/test/fuzz/prevector.cpp b/src/test/fuzz/prevector.cpp
index c8fd9aca30..9cea32e304 100644
--- a/src/test/fuzz/prevector.cpp
+++ b/src/test/fuzz/prevector.cpp
@@ -59,8 +59,8 @@ public:
--pos;
assert(v == real_vector[pos]);
}
- CDataStream ss1(SER_DISK, 0);
- CDataStream ss2(SER_DISK, 0);
+ DataStream ss1{};
+ DataStream ss2{};
ss1 << real_vector;
ss2 << pre_vector;
assert(ss1.size() == ss2.size());
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
index 731e0d22e0..0a7924f226 100644
--- a/src/test/fuzz/process_message.cpp
+++ b/src/test/fuzz/process_message.cpp
@@ -56,7 +56,9 @@ void initialize_process_message()
{
Assert(GetNumMsgTypes() == getAllNetMessageTypes().size()); // If this fails, add or remove the message type below
- static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>();
+ static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(
+ /*chain_name=*/CBaseChainParams::REGTEST,
+ /*extra_args=*/{"-txreconciliation"});
g_setup = testing_setup.get();
for (int i = 0; i < 2 * COINBASE_MATURITY; i++) {
MineBlock(g_setup->m_node, CScript() << OP_TRUE);
diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp
index 465184d57d..96339743ba 100644
--- a/src/test/fuzz/process_messages.cpp
+++ b/src/test/fuzz/process_messages.cpp
@@ -23,7 +23,9 @@ const TestingSetup* g_setup;
void initialize_process_messages()
{
- static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>();
+ static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(
+ /*chain_name=*/CBaseChainParams::REGTEST,
+ /*extra_args=*/{"-txreconciliation"});
g_setup = testing_setup.get();
for (int i = 0; i < 2 * COINBASE_MATURITY; i++) {
MineBlock(g_setup->m_node, CScript() << OP_TRUE);
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index 361cfa6cb6..2578137471 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -253,7 +253,7 @@ std::string ConsumeScalarRPCArgument(FuzzedDataProvider& fuzzed_data_provider)
if (!opt_block_header) {
return;
}
- CDataStream data_stream{SER_NETWORK, PROTOCOL_VERSION};
+ DataStream data_stream{};
data_stream << *opt_block_header;
r = HexStr(data_stream);
},
diff --git a/src/test/fuzz/script_sign.cpp b/src/test/fuzz/script_sign.cpp
index 3cef81c251..c78c22e6cc 100644
--- a/src/test/fuzz/script_sign.cpp
+++ b/src/test/fuzz/script_sign.cpp
@@ -108,10 +108,12 @@ FUZZ_TARGET_INIT(script_sign, initialize_script_sign)
CMutableTransaction script_tx_to = tx_to;
CMutableTransaction sign_transaction_tx_to = tx_to;
if (n_in < tx_to.vin.size() && tx_to.vin[n_in].prevout.n < tx_from.vout.size()) {
- (void)SignSignature(provider, tx_from, tx_to, n_in, fuzzed_data_provider.ConsumeIntegral<int>());
+ SignatureData empty;
+ (void)SignSignature(provider, tx_from, tx_to, n_in, fuzzed_data_provider.ConsumeIntegral<int>(), empty);
}
if (n_in < script_tx_to.vin.size()) {
- (void)SignSignature(provider, ConsumeScript(fuzzed_data_provider), script_tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>());
+ SignatureData empty;
+ (void)SignSignature(provider, ConsumeScript(fuzzed_data_provider), script_tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>(), empty);
MutableTransactionSignatureCreator signature_creator{tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>()};
std::vector<unsigned char> vch_sig;
CKeyID address;
diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp
index 3c427b9bef..9890e4c0e5 100644
--- a/src/test/fuzz/string.cpp
+++ b/src/test/fuzz/string.cpp
@@ -196,7 +196,7 @@ FUZZ_TARGET(string)
}
{
- CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream data_stream{};
std::string s;
auto limited_string = LIMITED_STRING(s, 10);
data_stream << random_string_1;
@@ -212,7 +212,7 @@ FUZZ_TARGET(string)
}
}
{
- CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream data_stream{};
const auto limited_string = LIMITED_STRING(random_string_1, 10);
data_stream << limited_string;
std::string deserialized_string;
diff --git a/src/test/fuzz/tx_in.cpp b/src/test/fuzz/tx_in.cpp
index f8247c1fa4..fc16f80cde 100644
--- a/src/test/fuzz/tx_in.cpp
+++ b/src/test/fuzz/tx_in.cpp
@@ -14,12 +14,9 @@
FUZZ_TARGET(tx_in)
{
- CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream ds{buffer};
CTxIn tx_in;
try {
- int version;
- ds >> version;
- ds.SetVersion(version);
ds >> tx_in;
} catch (const std::ios_base::failure&) {
return;
diff --git a/src/test/fuzz/tx_out.cpp b/src/test/fuzz/tx_out.cpp
index 337b8e2771..806216fbf5 100644
--- a/src/test/fuzz/tx_out.cpp
+++ b/src/test/fuzz/tx_out.cpp
@@ -13,12 +13,9 @@
FUZZ_TARGET(tx_out)
{
- CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream ds{buffer};
CTxOut tx_out;
try {
- int version;
- ds >> version;
- ds.SetVersion(version);
ds >> tx_out;
} catch (const std::ios_base::failure&) {
return;
diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp
index e933167341..0cabaf323b 100644
--- a/src/test/fuzz/tx_pool.cpp
+++ b/src/test/fuzz/tx_pool.cpp
@@ -311,7 +311,7 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
const auto& node = g_setup->m_node;
- auto& chainstate = node.chainman->ActiveChainstate();
+ auto& chainstate{static_cast<DummyChainState&>(node.chainman->ActiveChainstate())};
MockTime(fuzzed_data_provider, chainstate);
@@ -329,6 +329,8 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
CTxMemPool tx_pool_{MakeMempool(fuzzed_data_provider, node)};
MockedTxPool& tx_pool = *static_cast<MockedTxPool*>(&tx_pool_);
+ chainstate.SetMempool(&tx_pool);
+
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
{
const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids);
diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp
index dafe8249c0..ed55e3fad5 100644
--- a/src/test/fuzz/txorphan.cpp
+++ b/src/test/fuzz/txorphan.cpp
@@ -85,16 +85,12 @@ FUZZ_TARGET_INIT(txorphan, initialize_orphanage)
CallOneOf(
fuzzed_data_provider,
[&] {
- orphanage.AddChildrenToWorkSet(*tx, peer_id);
+ orphanage.AddChildrenToWorkSet(*tx);
},
[&] {
{
- NodeId originator;
- bool more = true;
- CTransactionRef ref = orphanage.GetTxToReconsider(peer_id, originator, more);
- if (!ref) {
- Assert(!more);
- } else {
+ CTransactionRef ref = orphanage.GetTxToReconsider(peer_id);
+ if (ref) {
bool have_tx = orphanage.HaveTx(GenTxid::Txid(ref->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(ref->GetHash()));
Assert(have_tx);
}
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index af1d65cd38..c14f633029 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -47,7 +47,7 @@ size_t CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... callable
template <typename Collection>
auto& PickValue(FuzzedDataProvider& fuzzed_data_provider, Collection& col)
{
- const auto sz = col.size();
+ auto sz{col.size()};
assert(sz >= 1);
auto it = col.begin();
std::advance(it, fuzzed_data_provider.ConsumeIntegralInRange<decltype(sz)>(0, sz - 1));
diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp
index 5b5158884a..f1f435591b 100644
--- a/src/test/hash_tests.cpp
+++ b/src/test/hash_tests.cpp
@@ -5,6 +5,7 @@
#include <clientversion.h>
#include <crypto/siphash.h>
#include <hash.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/strencodings.h>
diff --git a/src/test/key_io_tests.cpp b/src/test/key_io_tests.cpp
index fb0a07934d..a400afee71 100644
--- a/src/test/key_io_tests.cpp
+++ b/src/test/key_io_tests.cpp
@@ -8,6 +8,7 @@
#include <key.h>
#include <key_io.h>
#include <script/script.h>
+#include <test/util/json.h>
#include <test/util/setup_common.h>
#include <util/strencodings.h>
@@ -15,8 +16,6 @@
#include <univalue.h>
-UniValue read_json(const std::string& jsondata);
-
BOOST_FIXTURE_TEST_SUITE(key_io_tests, BasicTestingSetup)
// Goal: check that parsed keys match test payload
diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp
index 21ed2f1080..ea5b94f3a5 100644
--- a/src/test/key_tests.cpp
+++ b/src/test/key_tests.cpp
@@ -6,6 +6,7 @@
#include <key_io.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <uint256.h>
#include <util/strencodings.h>
@@ -205,8 +206,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation)
unsigned char rnd[8];
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd);
- uint256 hash;
- CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
+ uint256 hash{Hash(str, rnd)};
// import the static test key
CKey key = DecodeSecret(strSecret1C);
@@ -233,7 +233,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation)
static CPubKey UnserializePubkey(const std::vector<uint8_t>& data)
{
- CDataStream stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream stream{};
stream << data;
CPubKey pubkey;
stream >> pubkey;
@@ -251,7 +251,7 @@ static unsigned int GetLen(unsigned char chHeader)
static void CmpSerializationPubkey(const CPubKey& pubkey)
{
- CDataStream stream{SER_NETWORK, INIT_PROTO_VERSION};
+ DataStream stream{};
stream << pubkey;
CPubKey pubkey2;
stream >> pubkey2;
diff --git a/src/test/logging_tests.cpp b/src/test/logging_tests.cpp
index 022e33f99d..beb9398c74 100644
--- a/src/test/logging_tests.cpp
+++ b/src/test/logging_tests.cpp
@@ -75,20 +75,9 @@ struct LogSetup : public BasicTestingSetup {
BOOST_AUTO_TEST_CASE(logging_timer)
{
- SetMockTime(1);
auto micro_timer = BCLog::Timer<std::chrono::microseconds>("tests", "end_msg");
- SetMockTime(2);
- BOOST_CHECK_EQUAL(micro_timer.LogMsg("test micros"), "tests: test micros (1000000μs)");
-
- SetMockTime(1);
- auto ms_timer = BCLog::Timer<std::chrono::milliseconds>("tests", "end_msg");
- SetMockTime(2);
- BOOST_CHECK_EQUAL(ms_timer.LogMsg("test ms"), "tests: test ms (1000.00ms)");
-
- SetMockTime(1);
- auto sec_timer = BCLog::Timer<std::chrono::seconds>("tests", "end_msg");
- SetMockTime(2);
- BOOST_CHECK_EQUAL(sec_timer.LogMsg("test secs"), "tests: test secs (1.00s)");
+ const std::string_view result_prefix{"tests: msg ("};
+ BOOST_CHECK_EQUAL(micro_timer.LogMsg("msg").substr(0, result_prefix.size()), result_prefix);
}
BOOST_FIXTURE_TEST_CASE(logging_LogPrintf_, LogSetup)
diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp
index bba103d1b0..66f7be3c4e 100644
--- a/src/test/merkle_tests.cpp
+++ b/src/test/merkle_tests.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <consensus/merkle.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
@@ -60,7 +61,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
}
}
mutated |= (inner[level] == h);
- CHash256().Write(inner[level]).Write(h).Finalize(h);
+ h = Hash(inner[level], h);
}
// Store the resulting hash at inner position level.
inner[level] = h;
@@ -86,7 +87,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
if (pbranch && matchh) {
pbranch->push_back(h);
}
- CHash256().Write(h).Write(h).Finalize(h);
+ h = Hash(h, h);
// Increment count to the value it would have if two entries at this
// level had existed.
count += ((uint32_t{1}) << level);
@@ -101,7 +102,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
matchh = true;
}
}
- CHash256().Write(inner[level]).Write(h).Finalize(h);
+ h = Hash(inner[level], h);
level++;
}
}
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index e766a55673..9e484f919e 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -9,6 +9,7 @@
#include <node/miner.h>
#include <policy/policy.h>
#include <script/standard.h>
+#include <test/util/random.h>
#include <test/util/txmempool.h>
#include <timedata.h>
#include <txmempool.h>
diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp
index 3181c9cf28..655d6d7828 100644
--- a/src/test/miniscript_tests.cpp
+++ b/src/test/miniscript_tests.cpp
@@ -2,18 +2,23 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
+#include <stdint.h>
#include <string>
+#include <vector>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
+#include <core_io.h>
#include <hash.h>
#include <pubkey.h>
#include <uint256.h>
#include <crypto/ripemd160.h>
#include <crypto/sha256.h>
+#include <script/interpreter.h>
#include <script/miniscript.h>
+#include <script/standard.h>
+#include <script/script_error.h>
namespace {
@@ -24,15 +29,22 @@ struct TestData {
//! A map from the public keys to their CKeyIDs (faster than hashing every time).
std::map<CPubKey, CKeyID> pkhashes;
std::map<CKeyID, CPubKey> pkmap;
+ std::map<CPubKey, std::vector<unsigned char>> signatures;
// Various precomputed hashes
std::vector<std::vector<unsigned char>> sha256;
std::vector<std::vector<unsigned char>> ripemd160;
std::vector<std::vector<unsigned char>> hash256;
std::vector<std::vector<unsigned char>> hash160;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> sha256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> ripemd160_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash160_preimages;
TestData()
{
+ // All our signatures sign (and are required to sign) this constant message.
+ auto const MESSAGE_HASH = uint256S("f5cd94e18b6fe77dd7aca9e35c2b0c9cbd86356c80a71065");
// We generate 255 public keys and 255 hashes of each type.
for (int i = 1; i <= 255; ++i) {
// This 32-byte array functions as both private key data and hash preimage (31 zero bytes plus any nonzero byte).
@@ -48,18 +60,28 @@ struct TestData {
pkhashes.emplace(pubkey, keyid);
pkmap.emplace(keyid, pubkey);
+ // Compute ECDSA signatures on MESSAGE_HASH with the private keys.
+ std::vector<unsigned char> sig;
+ BOOST_CHECK(key.Sign(MESSAGE_HASH, sig));
+ sig.push_back(1); // sighash byte
+ signatures.emplace(pubkey, sig);
+
// Compute various hashes
std::vector<unsigned char> hash;
hash.resize(32);
CSHA256().Write(keydata, 32).Finalize(hash.data());
sha256.push_back(hash);
+ sha256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
CHash256().Write(keydata).Finalize(hash);
hash256.push_back(hash);
+ hash256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
hash.resize(20);
CRIPEMD160().Write(keydata, 32).Finalize(hash.data());
ripemd160.push_back(hash);
+ ripemd160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
CHash160().Write(keydata).Finalize(hash);
hash160.push_back(hash);
+ hash160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
}
}
};
@@ -67,7 +89,27 @@ struct TestData {
//! Global TestData object
std::unique_ptr<const TestData> g_testdata;
-/** A class encapsulating conversion routing for CPubKey. */
+//! A classification of leaf conditions in miniscripts (excluding true/false).
+enum class ChallengeType {
+ SHA256,
+ RIPEMD160,
+ HASH256,
+ HASH160,
+ OLDER,
+ AFTER,
+ PK
+};
+
+/* With each leaf condition we associate a challenge number.
+ * For hashes it's just the first 4 bytes of the hash. For pubkeys, it's the last 4 bytes.
+ */
+uint32_t ChallengeNumber(const CPubKey& pubkey) { return ReadLE32(pubkey.data() + 29); }
+uint32_t ChallengeNumber(const std::vector<unsigned char>& hash) { return ReadLE32(hash.data()); }
+
+//! A Challenge is a combination of type of leaf condition and its challenge number.
+typedef std::pair<ChallengeType, uint32_t> Challenge;
+
+/** A class encapsulating conversion routines for CPubKey. */
struct KeyConverter {
typedef CPubKey Key;
@@ -117,12 +159,197 @@ struct KeyConverter {
}
};
+/** A class that encapsulates all signing/hash revealing operations. */
+struct Satisfier : public KeyConverter {
+ //! Which keys/timelocks/hash preimages are available.
+ std::set<Challenge> supported;
+
+ //! Implement simplified CLTV logic: stack value must exactly match an entry in `supported`.
+ bool CheckAfter(uint32_t value) const {
+ return supported.count(Challenge(ChallengeType::AFTER, value));
+ }
+
+ //! Implement simplified CSV logic: stack value must exactly match an entry in `supported`.
+ bool CheckOlder(uint32_t value) const {
+ return supported.count(Challenge(ChallengeType::OLDER, value));
+ }
+
+ //! Produce a signature for the given key.
+ miniscript::Availability Sign(const CPubKey& key, std::vector<unsigned char>& sig) const {
+ if (supported.count(Challenge(ChallengeType::PK, ChallengeNumber(key)))) {
+ auto it = g_testdata->signatures.find(key);
+ if (it == g_testdata->signatures.end()) return miniscript::Availability::NO;
+ sig = it->second;
+ return miniscript::Availability::YES;
+ }
+ return miniscript::Availability::NO;
+ }
+
+ //! Helper function for the various hash based satisfactions.
+ miniscript::Availability SatHash(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage, ChallengeType chtype) const {
+ if (!supported.count(Challenge(chtype, ChallengeNumber(hash)))) return miniscript::Availability::NO;
+ const auto& m =
+ chtype == ChallengeType::SHA256 ? g_testdata->sha256_preimages :
+ chtype == ChallengeType::HASH256 ? g_testdata->hash256_preimages :
+ chtype == ChallengeType::RIPEMD160 ? g_testdata->ripemd160_preimages :
+ g_testdata->hash160_preimages;
+ auto it = m.find(hash);
+ if (it == m.end()) return miniscript::Availability::NO;
+ preimage = it->second;
+ return miniscript::Availability::YES;
+ }
+
+ // Functions that produce the preimage for hashes of various types.
+ miniscript::Availability SatSHA256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::SHA256); }
+ miniscript::Availability SatRIPEMD160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::RIPEMD160); }
+ miniscript::Availability SatHASH256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::HASH256); }
+ miniscript::Availability SatHASH160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::HASH160); }
+};
+
+/** Mocking signature/timelock checker.
+ *
+ * It holds a reference to a Satisfier object, to determine which timelocks are supposed to be available.
+ */
+class TestSignatureChecker : public BaseSignatureChecker {
+ const Satisfier& ctx;
+
+public:
+ TestSignatureChecker(const Satisfier& in_ctx LIFETIMEBOUND) : ctx(in_ctx) {}
+
+ bool CheckECDSASignature(const std::vector<unsigned char>& sig, const std::vector<unsigned char>& pubkey, const CScript& scriptcode, SigVersion sigversion) const override {
+ CPubKey pk(pubkey);
+ if (!pk.IsValid()) return false;
+ // Instead of actually running signature validation, check if the signature matches the precomputed one for this key.
+ auto it = g_testdata->signatures.find(pk);
+ if (it == g_testdata->signatures.end()) return false;
+ return sig == it->second;
+ }
+
+ bool CheckLockTime(const CScriptNum& locktime) const override {
+ // Delegate to Satisfier.
+ return ctx.CheckAfter(locktime.GetInt64());
+ }
+
+ bool CheckSequence(const CScriptNum& sequence) const override {
+ // Delegate to Satisfier.
+ return ctx.CheckOlder(sequence.GetInt64());
+ }
+};
+
//! Singleton instance of KeyConverter.
const KeyConverter CONVERTER{};
+using Fragment = miniscript::Fragment;
+using NodeRef = miniscript::NodeRef<CPubKey>;
// https://github.com/llvm/llvm-project/issues/53444
// NOLINTNEXTLINE(misc-unused-using-decls)
using miniscript::operator"" _mst;
+using Node = miniscript::Node<CPubKey>;
+
+/** Compute all challenges (pubkeys, hashes, timelocks) that occur in a given Miniscript. */
+std::set<Challenge> FindChallenges(const NodeRef& ref) {
+ std::set<Challenge> chal;
+ for (const auto& key : ref->keys) {
+ chal.emplace(ChallengeType::PK, ChallengeNumber(key));
+ }
+ if (ref->fragment == miniscript::Fragment::OLDER) {
+ chal.emplace(ChallengeType::OLDER, ref->k);
+ } else if (ref->fragment == miniscript::Fragment::AFTER) {
+ chal.emplace(ChallengeType::AFTER, ref->k);
+ } else if (ref->fragment == miniscript::Fragment::SHA256) {
+ chal.emplace(ChallengeType::SHA256, ChallengeNumber(ref->data));
+ } else if (ref->fragment == miniscript::Fragment::RIPEMD160) {
+ chal.emplace(ChallengeType::RIPEMD160, ChallengeNumber(ref->data));
+ } else if (ref->fragment == miniscript::Fragment::HASH256) {
+ chal.emplace(ChallengeType::HASH256, ChallengeNumber(ref->data));
+ } else if (ref->fragment == miniscript::Fragment::HASH160) {
+ chal.emplace(ChallengeType::HASH160, ChallengeNumber(ref->data));
+ }
+ for (const auto& sub : ref->subs) {
+ auto sub_chal = FindChallenges(sub);
+ chal.insert(sub_chal.begin(), sub_chal.end());
+ }
+ return chal;
+}
+
+/** Run random satisfaction tests. */
+void TestSatisfy(const std::string& testcase, const NodeRef& node) {
+ auto script = node->ToScript(CONVERTER);
+ auto challenges = FindChallenges(node); // Find all challenges in the generated miniscript.
+ std::vector<Challenge> challist(challenges.begin(), challenges.end());
+ for (int iter = 0; iter < 3; ++iter) {
+ Shuffle(challist.begin(), challist.end(), g_insecure_rand_ctx);
+ Satisfier satisfier;
+ TestSignatureChecker checker(satisfier);
+ bool prev_mal_success = false, prev_nonmal_success = false;
+ // Go over all challenges involved in this miniscript in random order.
+ for (int add = -1; add < (int)challist.size(); ++add) {
+ if (add >= 0) satisfier.supported.insert(challist[add]); // The first iteration does not add anything
+
+ // Run malleable satisfaction algorithm.
+ const CScript script_pubkey = CScript() << OP_0 << WitnessV0ScriptHash(script);
+ CScriptWitness witness_mal;
+ const bool mal_success = node->Satisfy(satisfier, witness_mal.stack, false) == miniscript::Availability::YES;
+ witness_mal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ // Run non-malleable satisfaction algorithm.
+ CScriptWitness witness_nonmal;
+ const bool nonmal_success = node->Satisfy(satisfier, witness_nonmal.stack, true) == miniscript::Availability::YES;
+ witness_nonmal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ if (nonmal_success) {
+ // Non-malleable satisfactions are bounded by GetStackSize().
+ BOOST_CHECK(witness_nonmal.stack.size() <= node->GetStackSize());
+ // If a non-malleable satisfaction exists, the malleable one must also exist, and be identical to it.
+ BOOST_CHECK(mal_success);
+ BOOST_CHECK(witness_nonmal.stack == witness_mal.stack);
+
+ // Test non-malleable satisfaction.
+ ScriptError serror;
+ bool res = VerifyScript(CScript(), script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror);
+ // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions().
+ if (node->ValidSatisfactions()) BOOST_CHECK(res);
+ // More precisely: non-malleable satisfactions must be valid, or may fail with an ops count error (if CheckOpsLimit failed),
+ // or with a stack size error (if CheckStackSize failed).
+ BOOST_CHECK(res ||
+ (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) ||
+ (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE));
+ }
+
+ if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) {
+ // Test malleable satisfaction only if it's different from the non-malleable one.
+ ScriptError serror;
+ bool res = VerifyScript(CScript(), script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror);
+ // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only
+ // fail due to stack or ops limits.
+ BOOST_CHECK(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE);
+ }
+
+ if (node->IsSane()) {
+ // For sane nodes, the two algorithms behave identically.
+ BOOST_CHECK_EQUAL(mal_success, nonmal_success);
+ }
+
+ // Adding more satisfied conditions can never remove our ability to produce a satisfaction.
+ BOOST_CHECK(mal_success >= prev_mal_success);
+ // For nonmalleable solutions this is only true if the added condition is PK;
+ // for other conditions, adding one may make a valid satisfaction become malleable. If the script
+ // is sane, however, this cannot happen.
+ if (node->IsSane() || add < 0 || challist[add].first == ChallengeType::PK) {
+ BOOST_CHECK(nonmal_success >= prev_nonmal_success);
+ }
+ // Remember results for the next added challenge.
+ prev_mal_success = mal_success;
+ prev_nonmal_success = nonmal_success;
+ }
+
+ bool satisfiable = node->IsSatisfiable([](const Node&) { return true; });
+ // If the miniscript was satisfiable at all, a satisfaction must be found after all conditions are added.
+ BOOST_CHECK_EQUAL(prev_mal_success, satisfiable);
+ // If the miniscript is sane and satisfiable, a nonmalleable satisfaction must eventually be found.
+ if (node->IsSane()) BOOST_CHECK_EQUAL(prev_nonmal_success, satisfiable);
+ }
+}
enum TestMode : int {
TESTMODE_INVALID = 0,
@@ -152,6 +379,7 @@ void Test(const std::string& ms, const std::string& hexscript, int mode, int ops
BOOST_CHECK_MESSAGE(inferred_miniscript->ToScript(CONVERTER) == computed_script, "Roundtrip failure: miniscript->script != miniscript->script->miniscript->script: " + ms);
if (opslimit != -1) BOOST_CHECK_MESSAGE((int)node->GetOps() == opslimit, "Ops limit mismatch: " << ms << " (" << node->GetOps() << " vs " << opslimit << ")");
if (stacklimit != -1) BOOST_CHECK_MESSAGE((int)node->GetStackSize() == stacklimit, "Stack limit mismatch: " << ms << " (" << node->GetStackSize() << " vs " << stacklimit << ")");
+ TestSatisfy(ms, node);
}
}
} // namespace
diff --git a/src/test/minisketch_tests.cpp b/src/test/minisketch_tests.cpp
index 59c0aab053..10506da783 100644
--- a/src/test/minisketch_tests.cpp
+++ b/src/test/minisketch_tests.cpp
@@ -5,6 +5,7 @@
#include <minisketch.h>
#include <node/minisketchwrapper.h>
#include <random.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp
index 1e1a9932ad..7a3e8e3a47 100644
--- a/src/test/multisig_tests.cpp
+++ b/src/test/multisig_tests.cpp
@@ -217,7 +217,8 @@ BOOST_AUTO_TEST_CASE(multisig_Sign)
for (int i = 0; i < 3; i++)
{
- BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL), strprintf("SignSignature %d", i));
+ SignatureData empty;
+ BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL, empty), strprintf("SignSignature %d", i));
}
}
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index 5a97e9429a..4fbd9b3a6e 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(addr.IsBindAny());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "0.0.0.0");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "0.0.0.0");
// IPv4, INADDR_NONE
BOOST_REQUIRE(LookupHost("255.255.255.255", addr, false));
@@ -150,7 +150,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(!addr.IsBindAny());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "255.255.255.255");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "255.255.255.255");
// IPv4, casual
BOOST_REQUIRE(LookupHost("12.34.56.78", addr, false));
@@ -159,7 +159,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(!addr.IsBindAny());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "12.34.56.78");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "12.34.56.78");
// IPv6, in6addr_any
BOOST_REQUIRE(LookupHost("::", addr, false));
@@ -168,7 +168,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(addr.IsBindAny());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "::");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "::");
// IPv6, casual
BOOST_REQUIRE(LookupHost("1122:3344:5566:7788:9900:aabb:ccdd:eeff", addr, false));
@@ -177,7 +177,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(!addr.IsBindAny());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "1122:3344:5566:7788:9900:aabb:ccdd:eeff");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "1122:3344:5566:7788:9900:aabb:ccdd:eeff");
// IPv6, scoped/link-local. See https://tools.ietf.org/html/rfc4007
// We support non-negative decimal integers (uint32_t) as zone id indices.
@@ -190,14 +190,14 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_REQUIRE(addr.IsValid());
BOOST_REQUIRE(addr.IsIPv6());
BOOST_CHECK(!addr.IsBindAny());
- BOOST_CHECK_EQUAL(addr.ToString(), scoped_addr);
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), scoped_addr);
// Test that the delimiter "%" and default zone id of 0 can be omitted for the default scope.
BOOST_REQUIRE(LookupHost(link_local + "%0", addr, false));
BOOST_REQUIRE(addr.IsValid());
BOOST_REQUIRE(addr.IsIPv6());
BOOST_CHECK(!addr.IsBindAny());
- BOOST_CHECK_EQUAL(addr.ToString(), link_local);
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), link_local);
// TORv2, no longer supported
BOOST_CHECK(!addr.SetSpecial("6hzph5hv6337r6p2.onion"));
@@ -211,7 +211,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(!addr.IsI2P());
BOOST_CHECK(!addr.IsBindAny());
BOOST_CHECK(!addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), torv3_addr);
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), torv3_addr);
// TORv3, broken, with wrong checksum
BOOST_CHECK(!addr.SetSpecial("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscsad.onion"));
@@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(!addr.IsTor());
BOOST_CHECK(!addr.IsBindAny());
BOOST_CHECK(!addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), ToLower(i2p_addr));
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), ToLower(i2p_addr));
// I2P, correct length, but decodes to less than the expected number of bytes.
BOOST_CHECK(!addr.SetSpecial("udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jn=.b32.i2p"));
@@ -265,7 +265,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_basic)
BOOST_CHECK(!addr.IsBindAny());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "esffpvrt3wpeaygy.internal");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "esffpvrt3wpeaygy.internal");
// Totally bogus
BOOST_CHECK(!addr.SetSpecial("totally bogus"));
@@ -321,7 +321,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_tostring_canonical_ipv6)
CNetAddr net_addr;
BOOST_REQUIRE(LookupHost(input_address, net_addr, false));
BOOST_REQUIRE(net_addr.IsIPv6());
- BOOST_CHECK_EQUAL(net_addr.ToString(), expected_canonical_representation_output);
+ BOOST_CHECK_EQUAL(net_addr.ToStringAddr(), expected_canonical_representation_output);
}
}
@@ -410,7 +410,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2)
BOOST_CHECK(addr.IsValid());
BOOST_CHECK(addr.IsIPv4());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "1.2.3.4");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "1.2.3.4");
BOOST_REQUIRE(s.empty());
// Invalid IPv4, valid length but address itself is shorter.
@@ -447,7 +447,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2)
BOOST_CHECK(addr.IsValid());
BOOST_CHECK(addr.IsIPv6());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "102:304:506:708:90a:b0c:d0e:f10");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "102:304:506:708:90a:b0c:d0e:f10");
BOOST_REQUIRE(s.empty());
// Valid IPv6, contains embedded "internal".
@@ -459,7 +459,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2)
s >> addr;
BOOST_CHECK(addr.IsInternal());
BOOST_CHECK(addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "zklycewkdo64v6wc.internal");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "zklycewkdo64v6wc.internal");
BOOST_REQUIRE(s.empty());
// Invalid IPv6, with bogus length.
@@ -505,7 +505,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2)
BOOST_CHECK(addr.IsValid());
BOOST_CHECK(addr.IsTor());
BOOST_CHECK(!addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(),
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(),
"pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion");
BOOST_REQUIRE(s.empty());
@@ -528,7 +528,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2)
BOOST_CHECK(addr.IsValid());
BOOST_CHECK(addr.IsI2P());
BOOST_CHECK(!addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(),
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(),
"ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p");
BOOST_REQUIRE(s.empty());
@@ -551,7 +551,7 @@ BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2)
BOOST_CHECK(addr.IsValid());
BOOST_CHECK(addr.IsCJDNS());
BOOST_CHECK(!addr.IsAddrV1Compatible());
- BOOST_CHECK_EQUAL(addr.ToString(), "fc00:1:2:3:4:5:6:7");
+ BOOST_CHECK_EQUAL(addr.ToStringAddr(), "fc00:1:2:3:4:5:6:7");
BOOST_REQUIRE(s.empty());
// Invalid CJDNS, wrong prefix.
diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp
index cef42b7dd8..7e91819ddc 100644
--- a/src/test/netbase_tests.cpp
+++ b/src/test/netbase_tests.cpp
@@ -131,7 +131,7 @@ BOOST_AUTO_TEST_CASE(netbase_splithost)
bool static TestParse(std::string src, std::string canon)
{
CService addr(LookupNumeric(src, 65535));
- return canon == addr.ToString();
+ return canon == addr.ToStringAddrPort();
}
BOOST_AUTO_TEST_CASE(netbase_lookupnumeric)
@@ -155,7 +155,7 @@ BOOST_AUTO_TEST_CASE(embedded_test)
CNetAddr addr1(ResolveIP("1.2.3.4"));
CNetAddr addr2(ResolveIP("::FFFF:0102:0304"));
BOOST_CHECK(addr2.IsIPv4());
- BOOST_CHECK_EQUAL(addr1.ToString(), addr2.ToString());
+ BOOST_CHECK_EQUAL(addr1.ToStringAddr(), addr2.ToStringAddr());
}
BOOST_AUTO_TEST_CASE(subnet_test)
@@ -240,7 +240,7 @@ BOOST_AUTO_TEST_CASE(subnet_test)
subnet = CSubNet(tor_addr);
BOOST_CHECK(subnet.IsValid());
- BOOST_CHECK_EQUAL(subnet.ToString(), tor_addr.ToString());
+ BOOST_CHECK_EQUAL(subnet.ToString(), tor_addr.ToStringAddr());
BOOST_CHECK(subnet.Match(tor_addr));
BOOST_CHECK(
!subnet.Match(ResolveIP("kpgvmscirrdqpekbqjsvw5teanhatztpp2gl6eee4zkowvwfxwenqaid.onion")));
diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp
index a55b0bbcd0..a2c4774338 100644
--- a/src/test/orphanage_tests.cpp
+++ b/src/test/orphanage_tests.cpp
@@ -7,6 +7,7 @@
#include <script/sign.h>
#include <script/signingprovider.h>
#include <script/standard.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <txorphanage.h>
@@ -88,7 +89,8 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
tx.vout.resize(1);
tx.vout[0].nValue = 1*CENT;
tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));
- BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL));
+ SignatureData empty;
+ BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL, empty));
orphanage.AddTx(MakeTransactionRef(tx), i);
}
@@ -108,7 +110,8 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
tx.vin[j].prevout.n = j;
tx.vin[j].prevout.hash = txPrev->GetHash();
}
- BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL));
+ SignatureData empty;
+ BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL, empty));
// Re-use same signature for other inputs
// (they don't have to be valid for this test)
for (unsigned int j = 1; j < tx.vin.size(); j++)
diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp
index d6aee472a8..a1e672d174 100644
--- a/src/test/pmt_tests.cpp
+++ b/src/test/pmt_tests.cpp
@@ -6,6 +6,7 @@
#include <merkleblock.h>
#include <serialize.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <uint256.h>
#include <version.h>
@@ -69,7 +70,7 @@ BOOST_AUTO_TEST_CASE(pmt_test1)
CPartialMerkleTree pmt1(vTxid, vMatch);
// serialize
- CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ss{};
ss << pmt1;
// verify CPartialMerkleTree's size guarantees
diff --git a/src/test/pow_tests.cpp b/src/test/pow_tests.cpp
index 7cd12ede0a..addc925bab 100644
--- a/src/test/pow_tests.cpp
+++ b/src/test/pow_tests.cpp
@@ -5,6 +5,7 @@
#include <chain.h>
#include <chainparams.h>
#include <pow.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp
index 4068775cfa..1559011fcd 100644
--- a/src/test/prevector_tests.cpp
+++ b/src/test/prevector_tests.cpp
@@ -9,6 +9,7 @@
#include <serialize.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
@@ -66,8 +67,8 @@ class prevector_tester {
for (const T& v : reverse_iterate(const_pre_vector)) {
local_check(v == real_vector[--pos]);
}
- CDataStream ss1(SER_DISK, 0);
- CDataStream ss2(SER_DISK, 0);
+ DataStream ss1{};
+ DataStream ss2{};
ss1 << real_vector;
ss2 << pre_vector;
local_check_equal(ss1.size(), ss2.size());
diff --git a/src/test/script_p2sh_tests.cpp b/src/test/script_p2sh_tests.cpp
index e439ff3519..c9f002b324 100644
--- a/src/test/script_p2sh_tests.cpp
+++ b/src/test/script_p2sh_tests.cpp
@@ -102,7 +102,8 @@ BOOST_AUTO_TEST_CASE(sign)
}
for (int i = 0; i < 8; i++)
{
- BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL), strprintf("SignSignature %d", i));
+ SignatureData empty;
+ BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL, empty), strprintf("SignSignature %d", i));
}
// All of the above should be OK, and the txTos have valid signatures
// Check to make sure signature verification fails if we use the wrong ScriptSig:
@@ -197,7 +198,8 @@ BOOST_AUTO_TEST_CASE(set)
}
for (int i = 0; i < 4; i++)
{
- BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL), strprintf("SignSignature %d", i));
+ SignatureData empty;
+ BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL, empty), strprintf("SignSignature %d", i));
BOOST_CHECK_MESSAGE(IsStandardTx(CTransaction(txTo[i]), reason), strprintf("txTo[%d].IsStandard", i));
}
}
@@ -334,9 +336,12 @@ BOOST_AUTO_TEST_CASE(AreInputsStandard)
txTo.vin[i].prevout.n = i;
txTo.vin[i].prevout.hash = txFrom.GetHash();
}
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 1, SIGHASH_ALL));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 2, SIGHASH_ALL));
+ SignatureData empty;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, empty));
+ SignatureData empty_b;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 1, SIGHASH_ALL, empty_b));
+ SignatureData empty_c;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 2, SIGHASH_ALL, empty_c));
// SignSignature doesn't know how to sign these. We're
// not testing validating signatures, so just create
// dummy signatures that DO include the correct P2SH scripts:
diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp
index 88df34ffe6..7bebadf224 100644
--- a/src/test/script_standard_tests.cpp
+++ b/src/test/script_standard_tests.cpp
@@ -400,12 +400,11 @@ BOOST_AUTO_TEST_CASE(bip341_spk_test_vectors)
for (const auto& vec : vectors.getValues()) {
TaprootBuilder spktest;
- std::map<std::pair<CScript, int>, int> scriptposes;
+ std::map<std::pair<std::vector<unsigned char>, int>, int> scriptposes;
std::function<void (const UniValue&, int)> parse_tree = [&](const UniValue& node, int depth) {
if (node.isNull()) return;
if (node.isObject()) {
- auto script_bytes = ParseHex(node["script"].get_str());
- CScript script(script_bytes.begin(), script_bytes.end());
+ auto script = ParseHex(node["script"].get_str());
int idx = node["id"].getInt<int>();
int leaf_version = node["leafVersion"].getInt<int>();
scriptposes[{script, leaf_version}] = idx;
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 472cba2aac..45d9f2cf29 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -15,6 +15,8 @@
#include <script/sign.h>
#include <script/signingprovider.h>
#include <streams.h>
+#include <test/util/json.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <test/util/transaction_utils.h>
#include <util/strencodings.h>
@@ -41,18 +43,6 @@ static const unsigned int gFlags = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC;
unsigned int ParseScriptFlags(std::string strFlags);
std::string FormatScriptFlags(unsigned int flags);
-UniValue read_json(const std::string& jsondata)
-{
- UniValue v;
-
- if (!v.read(jsondata) || !v.isArray())
- {
- BOOST_ERROR("Parse error.");
- return UniValue(UniValue::VARR);
- }
- return v.get_array();
-}
-
struct ScriptErrorDesc
{
ScriptError_t err;
@@ -1191,7 +1181,8 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
BOOST_CHECK(combined.scriptSig.empty());
// Single signature case:
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL)); // changes scriptSig
+ SignatureData dummy;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy)); // changes scriptSig
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
@@ -1199,7 +1190,8 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
SignatureData scriptSigCopy = scriptSig;
// Signing again will give a different, valid signature:
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_b;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_b));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSigCopy, scriptSig);
BOOST_CHECK(combined.scriptSig == scriptSigCopy.scriptSig || combined.scriptSig == scriptSig.scriptSig);
@@ -1208,14 +1200,16 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
CScript pkSingle; pkSingle << ToByteVector(keys[0].GetPubKey()) << OP_CHECKSIG;
BOOST_CHECK(keystore.AddCScript(pkSingle));
scriptPubKey = GetScriptForDestination(ScriptHash(pkSingle));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_c;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_c));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
combined = CombineSignatures(txFrom.vout[0], txTo, empty, scriptSig);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
scriptSigCopy = scriptSig;
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_d;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_d));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSigCopy, scriptSig);
BOOST_CHECK(combined.scriptSig == scriptSigCopy.scriptSig || combined.scriptSig == scriptSig.scriptSig);
@@ -1223,7 +1217,8 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
// Hardest case: Multisig 2-of-3
scriptPubKey = GetScriptForMultisig(2, pubkeys);
BOOST_CHECK(keystore.AddCScript(scriptPubKey));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_e;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_e));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
@@ -1817,7 +1812,24 @@ BOOST_AUTO_TEST_CASE(bip341_keypath_test_vectors)
}
}
+}
+
+BOOST_AUTO_TEST_CASE(compute_tapbranch)
+{
+ uint256 hash1 = uint256S("8ad69ec7cf41c2a4001fd1f738bf1e505ce2277acdcaa63fe4765192497f47a7");
+ uint256 hash2 = uint256S("f224a923cd0021ab202ab139cc56802ddb92dcfc172b9212261a539df79a112a");
+ uint256 result = uint256S("a64c5b7b943315f9b805d7a7296bedfcfd08919270a1f7a1466e98f8693d8cd9");
+ BOOST_CHECK_EQUAL(ComputeTapbranchHash(hash1, hash2), result);
+}
+
+BOOST_AUTO_TEST_CASE(compute_tapleaf)
+{
+ const uint8_t script[6] = {'f','o','o','b','a','r'};
+ uint256 tlc0 = uint256S("edbc10c272a1215dcdcc11d605b9027b5ad6ed97cd45521203f136767b5b9c06");
+ uint256 tlc2 = uint256S("8b5c4f90ae6bf76e259dbef5d8a59df06359c391b59263741b25eca76451b27a");
+ BOOST_CHECK_EQUAL(ComputeTapleafHash(0xc0, Span(script)), tlc0);
+ BOOST_CHECK_EQUAL(ComputeTapleafHash(0xc2, Span(script)), tlc2);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/serfloat_tests.cpp b/src/test/serfloat_tests.cpp
index ed1f081913..b36bdc02ca 100644
--- a/src/test/serfloat_tests.cpp
+++ b/src/test/serfloat_tests.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <hash.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/serfloat.h>
#include <serialize.h>
@@ -111,7 +112,7 @@ Python code to generate the below hashes:
*/
BOOST_AUTO_TEST_CASE(doubles)
{
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
// encode
for (int i = 0; i < 1000; i++) {
ss << EncodeDouble(i);
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index c90ae38ae8..09f77d2b61 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -90,8 +90,8 @@ BOOST_AUTO_TEST_CASE(varints)
{
// encode
- CDataStream ss(SER_DISK, 0);
- CDataStream::size_type size = 0;
+ DataStream ss{};
+ DataStream::size_type size = 0;
for (int i = 0; i < 100000; i++) {
ss << VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED);
size += ::GetSerializeSize(VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED), 0);
@@ -120,7 +120,7 @@ BOOST_AUTO_TEST_CASE(varints)
BOOST_AUTO_TEST_CASE(varints_bitpatterns)
{
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
ss << VARINT_MODE(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear();
ss << VARINT_MODE(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
ss << VARINT_MODE(int8_t{0x7f}, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(varints_bitpatterns)
BOOST_AUTO_TEST_CASE(compactsize)
{
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
std::vector<char>::size_type i, j;
for (i = 1; i <= MAX_SIZE; i *= 2)
@@ -182,7 +182,7 @@ BOOST_AUTO_TEST_CASE(noncanonical)
{
// Write some non-canonical CompactSize encodings, and
// make sure an exception is thrown when read back.
- CDataStream ss(SER_DISK, 0);
+ DataStream ss{};
std::vector<char>::size_type n;
// zero encoded with three bytes:
@@ -237,7 +237,8 @@ BOOST_AUTO_TEST_CASE(class_methods)
BOOST_CHECK(methodtest2 == methodtest3);
BOOST_CHECK(methodtest3 == methodtest4);
- CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, charstrval, txval);
+ CDataStream ss2{SER_DISK, PROTOCOL_VERSION};
+ ss2 << intval << boolval << stringval << charstrval << txval;
ss2 >> methodtest3;
BOOST_CHECK(methodtest3 == methodtest4);
}
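(Aside: what the CDataStream -> DataStream churn above buys. DataStream is a version-free byte stream, so tests that never consulted a serialization type or version can drop the (SER_DISK, 0) boilerplate; only class_methods keeps CDataStream because it still serializes with an explicit protocol version. A minimal sketch, assuming plain value (de)serialization:)

    DataStream ss{};
    ss << uint32_t{300};   // serialize
    uint32_t n{0};
    ss >> n;               // deserialize; n == 300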
diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp
index 1ce694b8c6..e2d11afa6a 100644
--- a/src/test/sighash_tests.cpp
+++ b/src/test/sighash_tests.cpp
@@ -10,6 +10,8 @@
#include <serialize.h>
#include <streams.h>
#include <test/data/sighash.json.h>
+#include <test/util/json.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/strencodings.h>
#include <util/system.h>
@@ -21,8 +23,6 @@
#include <univalue.h>
-UniValue read_json(const std::string& jsondata);
-
// Old script.cpp SignatureHash function
uint256 static SignatureHashOld(CScript scriptCode, const CTransaction& txTo, unsigned int nIn, int nHashType)
{
@@ -110,7 +110,7 @@ void static RandomTransaction(CMutableTransaction& tx, bool fSingle)
for (int out = 0; out < outs; out++) {
tx.vout.push_back(CTxOut());
CTxOut &txout = tx.vout.back();
- txout.nValue = InsecureRandRange(100000000);
+ txout.nValue = InsecureRandMoneyAmount();
RandomScript(txout.scriptPubKey);
}
}
diff --git a/src/test/skiplist_tests.cpp b/src/test/skiplist_tests.cpp
index ae9021df58..050033e43a 100644
--- a/src/test/skiplist_tests.cpp
+++ b/src/test/skiplist_tests.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <chain.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <vector>
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index dce230ac10..a9b5251ad3 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -4,6 +4,7 @@
#include <fs.h>
#include <streams.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
@@ -128,9 +129,9 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader_rvalue)
BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
{
- CDataStream data(SER_NETWORK, INIT_PROTO_VERSION);
+ DataStream data{};
- BitStreamWriter<CDataStream> bit_writer(data);
+ BitStreamWriter bit_writer{data};
bit_writer.Write(0, 1);
bit_writer.Write(2, 2);
bit_writer.Write(6, 3);
@@ -141,7 +142,7 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
bit_writer.Write(30497, 16);
bit_writer.Flush();
- CDataStream data_copy(data);
+ DataStream data_copy{data};
uint32_t serialized_int1;
data >> serialized_int1;
BOOST_CHECK_EQUAL(serialized_int1, uint32_t{0x7700C35A}); // NOTE: Serialized as LE
@@ -149,7 +150,7 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
data >> serialized_int2;
BOOST_CHECK_EQUAL(serialized_int2, uint16_t{0x1072}); // NOTE: Serialized as LE
- BitStreamReader<CDataStream> bit_reader(data_copy);
+ BitStreamReader bit_reader{data_copy};
BOOST_CHECK_EQUAL(bit_reader.Read(1), 0U);
BOOST_CHECK_EQUAL(bit_reader.Read(2), 2U);
BOOST_CHECK_EQUAL(bit_reader.Read(3), 6U);
@@ -167,7 +168,7 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
// Degenerate case
{
- CDataStream ds{in, 0, 0};
+ DataStream ds{in};
ds.Xor({0x00, 0x00});
BOOST_CHECK_EQUAL(""s, ds.str());
}
@@ -177,7 +178,7 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
// Single character key
{
- CDataStream ds{in, 0, 0};
+ DataStream ds{in};
ds.Xor({0xff});
BOOST_CHECK_EQUAL("\xf0\x0f"s, ds.str());
}
@@ -189,7 +190,7 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
in.push_back(std::byte{0x0f});
{
- CDataStream ds{in, 0, 0};
+ DataStream ds{in};
ds.Xor({0xff, 0x0f});
BOOST_CHECK_EQUAL("\x0f\x00"s, ds.str());
}
@@ -500,4 +501,18 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
fs::remove(streams_test_filename);
}
+BOOST_AUTO_TEST_CASE(streams_hashed)
+{
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ HashedSourceWriter hash_writer{stream};
+ const std::string data{"bitcoin"};
+ hash_writer << data;
+
+ CHashVerifier hash_verifier{&stream};
+ std::string result;
+ hash_verifier >> result;
+ BOOST_CHECK_EQUAL(data, result);
+ BOOST_CHECK_EQUAL(hash_writer.GetHash(), hash_verifier.GetHash());
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 69b03e07bf..11efb6a5c3 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -21,6 +21,8 @@
#include <script/signingprovider.h>
#include <script/standard.h>
#include <streams.h>
+#include <test/util/json.h>
+#include <test/util/random.h>
#include <test/util/script.h>
#include <test/util/transaction_utils.h>
#include <util/strencodings.h>
@@ -37,9 +39,6 @@
typedef std::vector<unsigned char> valtype;
-// In script_tests.cpp
-UniValue read_json(const std::string& jsondata);
-
static CFeeRate g_dust{DUST_RELAY_TX_FEE};
static bool g_bare_multi{DEFAULT_PERMIT_BAREMULTISIG};
@@ -435,7 +434,8 @@ static void CreateCreditAndSpend(const FillableSigningProvider& keystore, const
inputm.vout.resize(1);
inputm.vout[0].nValue = 1;
inputm.vout[0].scriptPubKey = CScript();
- bool ret = SignSignature(keystore, *output, inputm, 0, SIGHASH_ALL);
+ SignatureData empty;
+ bool ret = SignSignature(keystore, *output, inputm, 0, SIGHASH_ALL, empty);
assert(ret == success);
CDataStream ssin(SER_NETWORK, PROTOCOL_VERSION);
ssin << inputm;
@@ -519,7 +519,8 @@ BOOST_AUTO_TEST_CASE(test_big_witness_transaction)
// sign all inputs
for(uint32_t i = 0; i < mtx.vin.size(); i++) {
- bool hashSigned = SignSignature(keystore, scriptPubKey, mtx, i, 1000, sigHashes.at(i % sigHashes.size()));
+ SignatureData empty;
+ bool hashSigned = SignSignature(keystore, scriptPubKey, mtx, i, 1000, sigHashes.at(i % sigHashes.size()), empty);
assert(hashSigned);
}
@@ -937,23 +938,58 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
CheckIsNotStandard(t, "bare-multisig");
g_bare_multi = DEFAULT_PERMIT_BAREMULTISIG;
+ // Check compressed P2PK outputs dust threshold (must have leading 02 or 03)
+ t.vout[0].scriptPubKey = CScript() << std::vector<unsigned char>(33, 0x02) << OP_CHECKSIG;
+ t.vout[0].nValue = 576;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 575;
+ CheckIsNotStandard(t, "dust");
+
+ // Check uncompressed P2PK outputs dust threshold (must have leading 04/06/07)
+ t.vout[0].scriptPubKey = CScript() << std::vector<unsigned char>(65, 0x04) << OP_CHECKSIG;
+ t.vout[0].nValue = 672;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 671;
+ CheckIsNotStandard(t, "dust");
+
+ // Check P2PKH outputs dust threshold
+ t.vout[0].scriptPubKey = CScript() << OP_DUP << OP_HASH160 << std::vector<unsigned char>(20, 0) << OP_EQUALVERIFY << OP_CHECKSIG;
+ t.vout[0].nValue = 546;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 545;
+ CheckIsNotStandard(t, "dust");
+
+ // Check P2SH outputs dust threshold
+ t.vout[0].scriptPubKey = CScript() << OP_HASH160 << std::vector<unsigned char>(20, 0) << OP_EQUAL;
+ t.vout[0].nValue = 540;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 539;
+ CheckIsNotStandard(t, "dust");
+
// Check P2WPKH outputs dust threshold
- t.vout[0].scriptPubKey = CScript() << OP_0 << ParseHex("ffffffffffffffffffffffffffffffffffffffff");
+ t.vout[0].scriptPubKey = CScript() << OP_0 << std::vector<unsigned char>(20, 0);
t.vout[0].nValue = 294;
CheckIsStandard(t);
t.vout[0].nValue = 293;
CheckIsNotStandard(t, "dust");
// Check P2WSH outputs dust threshold
- t.vout[0].scriptPubKey = CScript() << OP_0 << ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
+ t.vout[0].scriptPubKey = CScript() << OP_0 << std::vector<unsigned char>(32, 0);
+ t.vout[0].nValue = 330;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 329;
+ CheckIsNotStandard(t, "dust");
+
+ // Check P2TR outputs dust threshold (Invalid xonly key ok!)
+ t.vout[0].scriptPubKey = CScript() << OP_1 << std::vector<unsigned char>(32, 0);
t.vout[0].nValue = 330;
CheckIsStandard(t);
t.vout[0].nValue = 329;
CheckIsNotStandard(t, "dust");
- // Check future Witness Program versions dust threshold
- for (int op = OP_2; op <= OP_16; op += 1) {
- t.vout[0].scriptPubKey = CScript() << (opcodetype)op << ParseHex("ffff");
+ // Check future Witness Program versions dust threshold (non-32-byte pushes are undefined for version 1)
+ for (int op = OP_1; op <= OP_16; op += 1) {
+ t.vout[0].scriptPubKey = CScript() << (opcodetype)op << std::vector<unsigned char>(2, 0);
t.vout[0].nValue = 240;
CheckIsStandard(t);
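(Aside: where the nValue constants in the new dust checks come from. With the default dust relay rate of 3000 sat/kvB, the threshold is (serialized output size + size of a spending input) * 3 sat/vB, with segwit inputs counted at witness-discounted size:)

    P2PKH:             34 + 148 = 182 vB  ->  546 sat
    P2SH:              32 + 148 = 180 vB  ->  540 sat
    compressed P2PK:   44 + 148 = 192 vB  ->  576 sat
    uncompressed P2PK: 76 + 148 = 224 vB  ->  672 sat
    P2WPKH:            31 +  67 =  98 vB  ->  294 sat
    P2WSH / P2TR:      43 +  67 = 110 vB  ->  330 sat

    so 545, 539, 575, 671, 293 and 329 each sit one satoshi below their threshold.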
diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp
index e438867d15..024526497c 100644
--- a/src/test/txpackage_tests.cpp
+++ b/src/test/txpackage_tests.cpp
@@ -9,6 +9,7 @@
#include <primitives/transaction.h>
#include <script/script.h>
#include <script/standard.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <validation.h>
diff --git a/src/test/txrequest_tests.cpp b/src/test/txrequest_tests.cpp
index a4ed1e8b3a..17a55d5ab5 100644
--- a/src/test/txrequest_tests.cpp
+++ b/src/test/txrequest_tests.cpp
@@ -6,6 +6,7 @@
#include <txrequest.h>
#include <uint256.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <algorithm>
diff --git a/src/test/uint256_tests.cpp b/src/test/uint256_tests.cpp
index bc206fc945..9caefe43e2 100644
--- a/src/test/uint256_tests.cpp
+++ b/src/test/uint256_tests.cpp
@@ -187,7 +187,7 @@ BOOST_AUTO_TEST_CASE( methods ) // GetHex SetHex begin() end() size() GetLow64 G
BOOST_CHECK(GetSerializeSize(R1L, PROTOCOL_VERSION) == 32);
BOOST_CHECK(GetSerializeSize(ZeroL, PROTOCOL_VERSION) == 32);
- CDataStream ss(0, PROTOCOL_VERSION);
+ DataStream ss{};
ss << R1L;
BOOST_CHECK(ss.str() == std::string(R1Array,R1Array+32));
ss >> TmpL;
diff --git a/src/test/util/blockfilter.cpp b/src/test/util/blockfilter.cpp
index 3ae22921b9..ec703c6a7b 100644
--- a/src/test/util/blockfilter.cpp
+++ b/src/test/util/blockfilter.cpp
@@ -28,4 +28,3 @@ bool ComputeFilter(BlockFilterType filter_type, const CBlockIndex* block_index,
filter = BlockFilter(filter_type, block, block_undo);
return true;
}
-
diff --git a/src/test/util/coins.cpp b/src/test/util/coins.cpp
new file mode 100644
index 0000000000..9b6c5535c5
--- /dev/null
+++ b/src/test/util/coins.cpp
@@ -0,0 +1,27 @@
+// Copyright (c) 2023 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/util/coins.h>
+
+#include <coins.h>
+#include <primitives/transaction.h>
+#include <script/script.h>
+#include <test/util/random.h>
+#include <uint256.h>
+
+#include <stdint.h>
+#include <utility>
+
+COutPoint AddTestCoin(CCoinsViewCache& coins_view)
+{
+ Coin new_coin;
+ const uint256 txid{InsecureRand256()};
+ COutPoint outpoint{txid, /*nIn=*/0};
+ new_coin.nHeight = 1;
+ new_coin.out.nValue = InsecureRandMoneyAmount();
+ new_coin.out.scriptPubKey.assign(uint32_t{56}, 1);
+ coins_view.AddCoin(outpoint, std::move(new_coin), /*possible_overwrite=*/false);
+
+ return outpoint;
+}
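(There is no insert here; see the usage sketch after the coins.h hunk below.)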
diff --git a/src/test/util/coins.h b/src/test/util/coins.h
new file mode 100644
index 0000000000..5e6f4293ae
--- /dev/null
+++ b/src/test/util/coins.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2023 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_UTIL_COINS_H
+#define BITCOIN_TEST_UTIL_COINS_H
+
+#include <primitives/transaction.h>
+
+class CCoinsViewCache;
+
+/**
+ * Create a Coin with DynamicMemoryUsage of 80 bytes and add it to the given view.
+ * @param[in,out] coins_view The coins view cache to add the new coin to.
+ * @returns the COutPoint of the created coin.
+ */
+COutPoint AddTestCoin(CCoinsViewCache& coins_view);
+
+#endif // BITCOIN_TEST_UTIL_COINS_H
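(Aside: a minimal usage sketch for the new helper, assuming a test fixture that exposes a chainstate, as the validation tests further down in this diff do; the chainstate name is illustrative only:)

    LOCK(::cs_main);
    CCoinsViewCache& view{chainstate.CoinsTip()};
    const COutPoint outpoint{AddTestCoin(view)};
    BOOST_CHECK(view.HaveCoin(outpoint));
    // per the doc comment, each added coin costs 80 bytes of DynamicMemoryUsage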
diff --git a/src/test/util/json.cpp b/src/test/util/json.cpp
new file mode 100644
index 0000000000..ad3c346c84
--- /dev/null
+++ b/src/test/util/json.cpp
@@ -0,0 +1,17 @@
+// Copyright (c) 2023 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/util/json.h>
+
+#include <string>
+#include <util/check.h>
+
+#include <univalue.h>
+
+UniValue read_json(const std::string& jsondata)
+{
+ UniValue v;
+ Assert(v.read(jsondata) && v.isArray());
+ return v.get_array();
+}
diff --git a/src/test/util/json.h b/src/test/util/json.h
new file mode 100644
index 0000000000..5b1026762e
--- /dev/null
+++ b/src/test/util/json.h
@@ -0,0 +1,14 @@
+// Copyright (c) 2023 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_UTIL_JSON_H
+#define BITCOIN_TEST_UTIL_JSON_H
+
+#include <string>
+
+#include <univalue.h>
+
+UniValue read_json(const std::string& jsondata);
+
+#endif // BITCOIN_TEST_UTIL_JSON_H
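(Aside: this helper replaces the duplicated forward declarations removed from sighash_tests.cpp and transaction_tests.cpp above. A sketch of the call pattern those tests use, where the raw JSON comes from a generated header under test/data/ — the array name here is assumed for illustration:)

    UniValue tests = read_json(std::string(json_tests::tx_valid, json_tests::tx_valid + sizeof(json_tests::tx_valid)));
    for (unsigned int i = 0; i < tests.size(); ++i) {
        const UniValue& test{tests[i]};
        // each entry is itself a JSON array of test-vector fields
    }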
diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp
index 975aff13c0..ac5dfe9e73 100644
--- a/src/test/util/net.cpp
+++ b/src/test/util/net.cpp
@@ -67,15 +67,14 @@ void ConnmanTestMsg::NodeReceiveMsgBytes(CNode& node, Span<const uint8_t> msg_by
assert(node.ReceiveMsgBytes(msg_bytes, complete));
if (complete) {
size_t nSizeAdded = 0;
- auto it(node.vRecvMsg.begin());
- for (; it != node.vRecvMsg.end(); ++it) {
+ for (const auto& msg : node.vRecvMsg) {
// vRecvMsg contains only completed CNetMessage
// the only partially deserialized message, if any, is held by TransportDeserializer
- nSizeAdded += it->m_raw_message_size;
+ nSizeAdded += msg.m_raw_message_size;
}
{
LOCK(node.cs_vProcessMsg);
- node.vProcessMsg.splice(node.vProcessMsg.end(), node.vRecvMsg, node.vRecvMsg.begin(), it);
+ node.vProcessMsg.splice(node.vProcessMsg.end(), node.vRecvMsg);
node.nProcessQueueSize += nSizeAdded;
node.fPauseRecv = node.nProcessQueueSize > nReceiveFloodSize;
}
diff --git a/src/test/util/net.h b/src/test/util/net.h
index 90c606306f..e6506b0d08 100644
--- a/src/test/util/net.h
+++ b/src/test/util/net.h
@@ -103,7 +103,7 @@ constexpr auto ALL_NETWORKS = std::array{
class StaticContentsSock : public Sock
{
public:
- explicit StaticContentsSock(const std::string& contents) : m_contents{contents}, m_consumed{0}
+ explicit StaticContentsSock(const std::string& contents) : m_contents{contents}
{
// Just a dummy number that is not INVALID_SOCKET.
m_socket = INVALID_SOCKET - 1;
@@ -191,7 +191,7 @@ public:
private:
const std::string m_contents;
- mutable size_t m_consumed;
+ mutable size_t m_consumed{0};
};
std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candidates, FastRandomContext& random_context);
diff --git a/src/test/util/random.h b/src/test/util/random.h
new file mode 100644
index 0000000000..7997e8a346
--- /dev/null
+++ b/src/test/util/random.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2023 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_UTIL_RANDOM_H
+#define BITCOIN_TEST_UTIL_RANDOM_H
+
+#include <consensus/amount.h>
+#include <random.h>
+#include <test/util/setup_common.h>
+#include <uint256.h>
+
+#include <cstdint>
+
+static inline uint32_t InsecureRand32()
+{
+ return g_insecure_rand_ctx.rand32();
+}
+
+static inline uint256 InsecureRand256()
+{
+ return g_insecure_rand_ctx.rand256();
+}
+
+static inline uint64_t InsecureRandBits(int bits)
+{
+ return g_insecure_rand_ctx.randbits(bits);
+}
+
+static inline uint64_t InsecureRandRange(uint64_t range)
+{
+ return g_insecure_rand_ctx.randrange(range);
+}
+
+static inline bool InsecureRandBool()
+{
+ return g_insecure_rand_ctx.randbool();
+}
+
+static inline CAmount InsecureRandMoneyAmount()
+{
+ return static_cast<CAmount>(InsecureRandRange(MAX_MONEY + 1));
+}
+
+#endif // BITCOIN_TEST_UTIL_RANDOM_H
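(Aside: these wrappers still draw from g_insecure_rand_ctx, which the test fixture seeds, so a fixed seed keeps them reproducible. InsecureRandMoneyAmount() is the new piece, returning a uniform value in [0, MAX_MONEY] — which is why sighash_tests switched to it above. Sketch:)

    BOOST_FIXTURE_TEST_CASE(example, BasicTestingSetup)
    {
        const CAmount value{InsecureRandMoneyAmount()};  // 0..MAX_MONEY inclusive
        const uint256 id{InsecureRand256()};
        BOOST_CHECK(MoneyRange(value));
    }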
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 1b28e5f2c0..4e0000cb3d 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -180,11 +180,15 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve
const ChainstateManager::Options chainman_opts{
.chainparams = chainparams,
+ .datadir = m_args.GetDataDirNet(),
.adjusted_time_callback = GetAdjustedTime,
.check_block_index = true,
};
m_node.chainman = std::make_unique<ChainstateManager>(chainman_opts);
- m_node.chainman->m_blockman.m_block_tree_db = std::make_unique<CBlockTreeDB>(m_cache_sizes.block_tree_db, true);
+ m_node.chainman->m_blockman.m_block_tree_db = std::make_unique<CBlockTreeDB>(DBParams{
+ .path = m_args.GetDataDirNet() / "blocks" / "index",
+ .cache_bytes = static_cast<size_t>(m_cache_sizes.block_tree_db),
+ .memory_only = true});
constexpr int script_check_threads = 2;
StartScriptCheckWorkerThreads(script_check_threads);
@@ -208,23 +212,25 @@ ChainTestingSetup::~ChainTestingSetup()
void TestingSetup::LoadVerifyActivateChainstate()
{
+ auto& chainman{*Assert(m_node.chainman)};
node::ChainstateLoadOptions options;
options.mempool = Assert(m_node.mempool.get());
options.block_tree_db_in_memory = m_block_tree_db_in_memory;
options.coins_db_in_memory = m_coins_db_in_memory;
options.reindex = node::fReindex;
options.reindex_chainstate = m_args.GetBoolArg("-reindex-chainstate", false);
- options.prune = node::fPruneMode;
+ options.prune = chainman.m_blockman.IsPruneMode();
options.check_blocks = m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
options.check_level = m_args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
- auto [status, error] = LoadChainstate(*Assert(m_node.chainman), m_cache_sizes, options);
+ options.require_full_verification = m_args.IsArgSet("-checkblocks") || m_args.IsArgSet("-checklevel");
+ auto [status, error] = LoadChainstate(chainman, m_cache_sizes, options);
assert(status == node::ChainstateLoadStatus::SUCCESS);
- std::tie(status, error) = VerifyLoadedChainstate(*Assert(m_node.chainman), options);
+ std::tie(status, error) = VerifyLoadedChainstate(chainman, options);
assert(status == node::ChainstateLoadStatus::SUCCESS);
BlockValidationState state;
- if (!m_node.chainman->ActiveChainstate().ActivateBestChain(state)) {
+ if (!chainman.ActiveChainstate().ActivateBestChain(state)) {
throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
}
}
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index 5f653d83ae..8874db7e75 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -71,12 +71,6 @@ static inline void SeedInsecureRand(SeedRand seed = SeedRand::SEED)
}
}
-static inline uint32_t InsecureRand32() { return g_insecure_rand_ctx.rand32(); }
-static inline uint256 InsecureRand256() { return g_insecure_rand_ctx.rand256(); }
-static inline uint64_t InsecureRandBits(int bits) { return g_insecure_rand_ctx.randbits(bits); }
-static inline uint64_t InsecureRandRange(uint64_t range) { return g_insecure_rand_ctx.randrange(range); }
-static inline bool InsecureRandBool() { return g_insecure_rand_ctx.randbool(); }
-
static constexpr CAmount CENT{1000000};
/** Basic testing setup.
diff --git a/src/test/util/xoroshiro128plusplus.h b/src/test/util/xoroshiro128plusplus.h
new file mode 100644
index 0000000000..ac9f59b3f5
--- /dev/null
+++ b/src/test/util/xoroshiro128plusplus.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H
+#define BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H
+
+#include <cstdint>
+#include <limits>
+
+/** xoroshiro128++ PRNG. Extremely fast, not appropriate for cryptographic purposes.
+ *
+ * Memory footprint is 128 bits; the period is 2^128 - 1.
+ * This class is not thread-safe.
+ *
+ * Reference implementation available at https://prng.di.unimi.it/xoroshiro128plusplus.c
+ * See https://prng.di.unimi.it/
+ */
+class XoRoShiRo128PlusPlus
+{
+ uint64_t m_s0;
+ uint64_t m_s1;
+
+ [[nodiscard]] constexpr static uint64_t rotl(uint64_t x, int n)
+ {
+ return (x << n) | (x >> (64 - n));
+ }
+
+ [[nodiscard]] constexpr static uint64_t SplitMix64(uint64_t& seedval) noexcept
+ {
+ uint64_t z = (seedval += UINT64_C(0x9e3779b97f4a7c15));
+ z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9);
+ z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb);
+ return z ^ (z >> 31U);
+ }
+
+public:
+ using result_type = uint64_t;
+
+ constexpr explicit XoRoShiRo128PlusPlus(uint64_t seedval) noexcept
+ : m_s0(SplitMix64(seedval)), m_s1(SplitMix64(seedval))
+ {
+ }
+
+ // no copy - that is dangerous, we don't want to accidentally copy the RNG and then have two
+ // streams with exactly the same results. If another stream is needed, construct a new instance with a fresh seed.
+ XoRoShiRo128PlusPlus(const XoRoShiRo128PlusPlus&) = delete;
+ XoRoShiRo128PlusPlus& operator=(const XoRoShiRo128PlusPlus&) = delete;
+
+ // allow moves
+ XoRoShiRo128PlusPlus(XoRoShiRo128PlusPlus&&) = default;
+ XoRoShiRo128PlusPlus& operator=(XoRoShiRo128PlusPlus&&) = default;
+
+ ~XoRoShiRo128PlusPlus() = default;
+
+ constexpr result_type operator()() noexcept
+ {
+ uint64_t s0 = m_s0, s1 = m_s1;
+ const uint64_t result = rotl(s0 + s1, 17) + s0;
+ s1 ^= s0;
+ m_s0 = rotl(s0, 49) ^ s1 ^ (s1 << 21);
+ m_s1 = rotl(s1, 28);
+ return result;
+ }
+
+ static constexpr result_type min() noexcept { return std::numeric_limits<result_type>::min(); }
+ static constexpr result_type max() noexcept { return std::numeric_limits<result_type>::max(); }
+ static constexpr double entropy() noexcept { return 0.0; }
+};
+
+#endif // BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H
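(Aside: the result_type/min()/max()/operator() surface means the class satisfies the standard UniformRandomBitGenerator requirements, so it can drive <algorithm> helpers deterministically — a sketch:)

    #include <algorithm>
    #include <vector>

    std::vector<int> v{1, 2, 3, 4, 5};
    XoRoShiRo128PlusPlus rng{42};            // fixed seed -> reproducible order
    std::shuffle(v.begin(), v.end(), rng);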
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 6b6bb18523..f0dcee7a9b 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -9,6 +9,7 @@
#include <hash.h> // For Hash()
#include <key.h> // For CKey
#include <sync.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <uint256.h>
#include <util/getuniquepath.h>
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index 823c9877ac..4c8687ce69 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -11,6 +11,7 @@
#include <pow.h>
#include <random.h>
#include <script/standard.h>
+#include <test/util/random.h>
#include <test/util/script.h>
#include <test/util/setup_common.h>
#include <util/time.h>
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
index c40481a95c..2078fcd8f8 100644
--- a/src/test/validation_chainstate_tests.cpp
+++ b/src/test/validation_chainstate_tests.cpp
@@ -8,6 +8,8 @@
#include <rpc/blockchain.h>
#include <sync.h>
#include <test/util/chainstate.h>
+#include <test/util/coins.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <uint256.h>
#include <validation.h>
@@ -24,20 +26,6 @@ BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
{
ChainstateManager& manager = *Assert(m_node.chainman);
CTxMemPool& mempool = *Assert(m_node.mempool);
-
- //! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view.
- auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint {
- Coin newcoin;
- uint256 txid = InsecureRand256();
- COutPoint outp{txid, 0};
- newcoin.nHeight = 1;
- newcoin.out.nValue = InsecureRand32();
- newcoin.out.scriptPubKey.assign(uint32_t{56}, 1);
- coins_view.AddCoin(outp, std::move(newcoin), false);
-
- return outp;
- };
-
Chainstate& c1 = WITH_LOCK(cs_main, return manager.InitializeChainstate(&mempool));
c1.InitCoinsDB(
/*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false);
@@ -47,7 +35,7 @@ BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
// Add a coin to the in-memory cache, upsize once, then downsize.
{
LOCK(::cs_main);
- auto outpoint = add_coin(c1.CoinsTip());
+ const auto outpoint = AddTestCoin(c1.CoinsTip());
// Set a meaningless bestblock value in the coinsview cache - otherwise we won't
// flush during ResizecoinsCaches() and will subsequently hit an assertion.
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 56867a584b..78301c7c14 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -9,6 +9,7 @@
#include <rpc/blockchain.h>
#include <sync.h>
#include <test/util/chainstate.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <timedata.h>
#include <uint256.h>
@@ -374,6 +375,7 @@ struct SnapshotTestSetup : TestChain100Setup {
BOOST_CHECK_EQUAL(chainman.GetAll().size(), 0);
const ChainstateManager::Options chainman_opts{
.chainparams = ::Params(),
+ .datadir = m_args.GetDataDirNet(),
.adjusted_time_callback = GetAdjustedTime,
};
// For robustness, ensure the old manager is destroyed before creating a
diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp
index f2ff570ca6..26c48eb0e0 100644
--- a/src/test/validation_flush_tests.cpp
+++ b/src/test/validation_flush_tests.cpp
@@ -3,6 +3,8 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//
#include <sync.h>
+#include <test/util/coins.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <validation.h>
@@ -24,19 +26,6 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
LOCK(::cs_main);
auto& view = chainstate.CoinsTip();
- //! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view.
- auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint {
- Coin newcoin;
- uint256 txid = InsecureRand256();
- COutPoint outp{txid, 0};
- newcoin.nHeight = 1;
- newcoin.out.nValue = InsecureRand32();
- newcoin.out.scriptPubKey.assign(uint32_t{56}, 1);
- coins_view.AddCoin(outp, std::move(newcoin), false);
-
- return outp;
- };
-
// The number of bytes consumed by coin's heap data, i.e. CScript
// (prevector<28, unsigned char>) when assigned 56 bytes of data per above.
//
@@ -61,7 +50,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// Add a bunch of coins to see that we at least flip over to CRITICAL.
for (int i{0}; i < 1000; ++i) {
- COutPoint res = add_coin(view);
+ const COutPoint res = AddTestCoin(view);
BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE);
}
@@ -83,7 +72,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
constexpr int COINS_UNTIL_CRITICAL{3};
for (int i{0}; i < COINS_UNTIL_CRITICAL; ++i) {
- COutPoint res = add_coin(view);
+ const COutPoint res = AddTestCoin(view);
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE);
BOOST_CHECK_EQUAL(
@@ -93,7 +82,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// Adding some additional coins will push us over the edge to CRITICAL.
for (int i{0}; i < 4; ++i) {
- add_coin(view);
+ AddTestCoin(view);
print_view_mem_usage(view);
if (chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes=*/0) ==
CoinsCacheSizeState::CRITICAL) {
@@ -111,7 +100,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
CoinsCacheSizeState::OK);
for (int i{0}; i < 3; ++i) {
- add_coin(view);
+ AddTestCoin(view);
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(
chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes=*/1 << 10),
@@ -120,7 +109,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// Adding another coin with the additional mempool room will put us >90%
// but not yet critical.
- add_coin(view);
+ AddTestCoin(view);
print_view_mem_usage(view);
// Only perform these checks on 64 bit hosts; I haven't done the math for 32.
@@ -136,7 +125,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// Using the default max_* values permits way more coins to be added.
for (int i{0}; i < 1000; ++i) {
- add_coin(view);
+ AddTestCoin(view);
BOOST_CHECK_EQUAL(
chainstate.GetCoinsCacheSizeState(),
CoinsCacheSizeState::OK);
diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp
index 4a42cec4af..80c00036e7 100644
--- a/src/test/versionbits_tests.cpp
+++ b/src/test/versionbits_tests.cpp
@@ -5,6 +5,7 @@
#include <chain.h>
#include <chainparams.h>
#include <consensus/params.h>
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <versionbits.h>
@@ -13,7 +14,7 @@
/* Define a virtual block time, one block per 10 minutes after Nov 14 2014, 0:55:36am */
static int32_t TestTime(int nHeight) { return 1415926536 + 600 * nHeight; }
-static const std::string StateName(ThresholdState state)
+static std::string StateName(ThresholdState state)
{
switch (state) {
case ThresholdState::DEFINED: return "DEFINED";
diff --git a/src/test/xoroshiro128plusplus_tests.cpp b/src/test/xoroshiro128plusplus_tests.cpp
new file mode 100644
index 0000000000..ea1b3e355f
--- /dev/null
+++ b/src/test/xoroshiro128plusplus_tests.cpp
@@ -0,0 +1,29 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/util/setup_common.h>
+#include <test/util/xoroshiro128plusplus.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(xoroshiro128plusplus_tests, BasicTestingSetup)
+
+BOOST_AUTO_TEST_CASE(reference_values)
+{
+ // numbers generated from reference implementation
+ XoRoShiRo128PlusPlus rng(0);
+ BOOST_TEST(0x6f68e1e7e2646ee1 == rng());
+ BOOST_TEST(0xbf971b7f454094ad == rng());
+ BOOST_TEST(0x48f2de556f30de38 == rng());
+ BOOST_TEST(0x6ea7c59f89bbfc75 == rng());
+
+ // seed with a random number
+ rng = XoRoShiRo128PlusPlus(0x1a26f3fa8546b47a);
+ BOOST_TEST(0xc8dc5e08d844ac7d == rng());
+ BOOST_TEST(0x5b5f1f6d499dad1b == rng());
+ BOOST_TEST(0xbeb0031f93313d6f == rng());
+ BOOST_TEST(0xbfbcf4f43a264497 == rng());
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/tinyformat.h b/src/tinyformat.h
index 8eded00add..3ec385bc95 100644
--- a/src/tinyformat.h
+++ b/src/tinyformat.h
@@ -508,9 +508,6 @@ class FormatArg
{
public:
FormatArg()
- : m_value(nullptr),
- m_formatImpl(nullptr),
- m_toIntImpl(nullptr)
{ }
template<typename T>
@@ -549,10 +546,10 @@ class FormatArg
return convertToInt<T>::invoke(*static_cast<const T*>(value));
}
- const void* m_value;
+ const void* m_value{nullptr};
void (*m_formatImpl)(std::ostream& out, const char* fmtBegin,
- const char* fmtEnd, int ntrunc, const void* value);
- int (*m_toIntImpl)(const void* value);
+ const char* fmtEnd, int ntrunc, const void* value){nullptr};
+ int (*m_toIntImpl)(const void* value){nullptr};
};
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index b5f1fa7138..d4daeacd3e 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -53,8 +53,8 @@ static const uint16_t DEFAULT_TOR_SOCKS_PORT = 9050;
/****** Low-level TorControlConnection ********/
-TorControlConnection::TorControlConnection(struct event_base *_base):
- base(_base), b_conn(nullptr)
+TorControlConnection::TorControlConnection(struct event_base* _base)
+ : base(_base)
{
}
@@ -380,7 +380,7 @@ void TorController::get_socks_cb(TorControlConnection& _conn, const TorControlRe
}
Assume(resolved.IsValid());
- LogPrint(BCLog::TOR, "Configuring onion proxy for %s\n", resolved.ToStringIPPort());
+ LogPrint(BCLog::TOR, "Configuring onion proxy for %s\n", resolved.ToStringAddrPort());
Proxy addrOnion = Proxy(resolved, true);
SetProxy(NET_ONION, addrOnion);
@@ -421,7 +421,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
return;
}
service = LookupNumeric(std::string(service_id+".onion"), Params().GetDefaultPort());
- LogPrintfCategory(BCLog::TOR, "Got service ID %s, advertising service %s\n", service_id, service.ToString());
+ LogPrintfCategory(BCLog::TOR, "Got service ID %s, advertising service %s\n", service_id, service.ToStringAddrPort());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
LogPrint(BCLog::TOR, "Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
} else {
@@ -453,7 +453,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply&
}
// Request onion service, redirect port.
// Note that the 'virtual' port is always the default port to avoid decloaking nodes using other ports.
- _conn.Command(strprintf("ADD_ONION %s Port=%i,%s", private_key, Params().GetDefaultPort(), m_target.ToStringIPPort()),
+ _conn.Command(strprintf("ADD_ONION %s Port=%i,%s", private_key, Params().GetDefaultPort(), m_target.ToStringAddrPort()),
std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2));
} else {
LogPrintf("tor: Authentication failed\n");
diff --git a/src/torcontrol.h b/src/torcontrol.h
index 81475aee74..6563a2ef42 100644
--- a/src/torcontrol.h
+++ b/src/torcontrol.h
@@ -93,7 +93,7 @@ private:
/** Libevent event base */
struct event_base *base;
/** Connection to control socket */
- struct bufferevent *b_conn;
+ struct bufferevent* b_conn{nullptr};
/** Message being received */
TorControlReply message;
/** Response handlers */
diff --git a/src/txdb.cpp b/src/txdb.cpp
index f04a4e9800..7257fb4959 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -70,21 +70,22 @@ struct CoinEntry {
} // namespace
-CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe) :
- m_db(std::make_unique<CDBWrapper>(ldb_path, nCacheSize, fMemory, fWipe, true)),
- m_ldb_path(ldb_path),
- m_is_memory(fMemory) { }
+CCoinsViewDB::CCoinsViewDB(DBParams db_params, CoinsViewOptions options) :
+ m_db_params{std::move(db_params)},
+ m_options{std::move(options)},
+ m_db{std::make_unique<CDBWrapper>(m_db_params)} { }
void CCoinsViewDB::ResizeCache(size_t new_cache_size)
{
// We can't do this operation with an in-memory DB since we'll lose all the coins upon
// reset.
- if (!m_is_memory) {
+ if (!m_db_params.memory_only) {
// Have to do a reset first to get the original `m_db` state to release its
// filesystem lock.
m_db.reset();
- m_db = std::make_unique<CDBWrapper>(
- m_ldb_path, new_cache_size, m_is_memory, /*fWipe=*/false, /*obfuscate=*/true);
+ m_db_params.cache_bytes = new_cache_size;
+ m_db_params.wipe_data = false;
+ m_db = std::make_unique<CDBWrapper>(m_db_params);
}
}
@@ -111,12 +112,10 @@ std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
return vhashHeadBlocks;
}
-bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
+bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase) {
CDBBatch batch(*m_db);
size_t count = 0;
size_t changed = 0;
- size_t batch_size = (size_t)gArgs.GetIntArg("-dbbatchsize", nDefaultDbBatchSize);
- int crash_simulate = gArgs.GetIntArg("-dbcrashratio", 0);
assert(!hashBlock.IsNull());
uint256 old_tip = GetBestBlock();
@@ -146,15 +145,14 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
changed++;
}
count++;
- CCoinsMap::iterator itOld = it++;
- mapCoins.erase(itOld);
- if (batch.SizeEstimate() > batch_size) {
+ it = erase ? mapCoins.erase(it) : std::next(it);
+ if (batch.SizeEstimate() > m_options.batch_write_bytes) {
LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
m_db->WriteBatch(batch);
batch.Clear();
- if (crash_simulate) {
+ if (m_options.simulate_crash_ratio) {
static FastRandomContext rng;
- if (rng.randrange(crash_simulate) == 0) {
+ if (rng.randrange(m_options.simulate_crash_ratio) == 0) {
LogPrintf("Simulating a crash. Goodbye.\n");
_Exit(0);
}
@@ -177,9 +175,6 @@ size_t CCoinsViewDB::EstimateSize() const
return m_db->EstimateSize(DB_COIN, uint8_t(DB_COIN + 1));
}
-CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(gArgs.GetDataDirNet() / "blocks" / "index", nCacheSize, fMemory, fWipe) {
-}
-
bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) {
return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
}
diff --git a/src/txdb.h b/src/txdb.h
index 5a409d7dcc..8a876349fb 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -45,24 +45,30 @@ static const int64_t max_filter_index_cache = 1024;
//! Max memory allocated to coin DB specific cache (MiB)
static const int64_t nMaxCoinsDBCache = 8;
+//! User-controlled performance and debug options.
+struct CoinsViewOptions {
+ //! Maximum database write batch size in bytes.
+ size_t batch_write_bytes = nDefaultDbBatchSize;
+ //! If non-zero, randomly exit when the database is flushed with (1/ratio)
+ //! probability.
+ int simulate_crash_ratio = 0;
+};
+
/** CCoinsView backed by the coin database (chainstate/) */
class CCoinsViewDB final : public CCoinsView
{
protected:
+ DBParams m_db_params;
+ CoinsViewOptions m_options;
std::unique_ptr<CDBWrapper> m_db;
- fs::path m_ldb_path;
- bool m_is_memory;
public:
- /**
- * @param[in] ldb_path Location in the filesystem where leveldb data will be stored.
- */
- explicit CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe);
+ explicit CCoinsViewDB(DBParams db_params, CoinsViewOptions options);
bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
std::vector<uint256> GetHeadBlocks() const override;
- bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
+ bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, bool erase = true) override;
std::unique_ptr<CCoinsViewCursor> Cursor() const override;
//! Whether an unsupported database format is used.
@@ -80,8 +86,7 @@ public:
class CBlockTreeDB : public CDBWrapper
{
public:
- explicit CBlockTreeDB(size_t nCacheSize, bool fMemory = false, bool fWipe = false);
-
+ using CDBWrapper::CDBWrapper;
bool WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo);
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &info);
bool ReadLastBlockFile(int &nFile);
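(Aside: the shape of the new CCoinsViewDB construction, hedged — field names and order follow the DBParams usage in the setup_common hunk above; CoinsViewOptions can be default-constructed to keep the old -dbbatchsize/-dbcrashratio defaults:)

    CCoinsViewDB view{
        DBParams{
            .path = gArgs.GetDataDirNet() / "chainstate",
            .cache_bytes = size_t{1} << 22,
            .memory_only = false},
        CoinsViewOptions{}};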
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index aa04f8a4d0..378123ce0f 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -1128,7 +1128,7 @@ void CTxMemPool::SetLoadTried(bool load_tried)
}
-const std::string RemovalReasonToString(const MemPoolRemovalReason& r) noexcept
+std::string RemovalReasonToString(const MemPoolRemovalReason& r) noexcept
{
switch (r) {
case MemPoolRemovalReason::EXPIRY: return "expiry";
diff --git a/src/txmempool.h b/src/txmempool.h
index 51b8af3286..2c3cb7e9db 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -237,7 +237,7 @@ enum class MemPoolRemovalReason {
REPLACED, //!< Removed for replacement
};
-const std::string RemovalReasonToString(const MemPoolRemovalReason& r) noexcept;
+std::string RemovalReasonToString(const MemPoolRemovalReason& r) noexcept;
/**
* CTxMemPool stores valid-according-to-the-current-best-chain transactions
diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp
index 94f64abca7..19f9fae998 100644
--- a/src/txorphanage.cpp
+++ b/src/txorphanage.cpp
@@ -145,17 +145,19 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans)
if (nEvicted > 0) LogPrint(BCLog::MEMPOOL, "orphanage overflow, removed %u tx\n", nEvicted);
}
-void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx, NodeId peer)
+void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx)
{
LOCK(m_mutex);
- // Get this peer's work set, emplacing an empty set it didn't exist
- std::set<uint256>& orphan_work_set = m_peer_work_set.try_emplace(peer).first->second;
for (unsigned int i = 0; i < tx.vout.size(); i++) {
const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i));
if (it_by_prev != m_outpoint_to_orphan_it.end()) {
for (const auto& elem : it_by_prev->second) {
+ // Get this source peer's work set, emplacing an empty set if it didn't exist
+ // (note: if this peer wasn't still connected, we would have removed the orphan tx already)
+ std::set<uint256>& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second;
+ // Add this tx to the work set
orphan_work_set.insert(elem->first);
}
}
@@ -172,7 +174,7 @@ bool TxOrphanage::HaveTx(const GenTxid& gtxid) const
}
}
-CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer, NodeId& originator, bool& more)
+CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer)
{
LOCK(m_mutex);
@@ -185,16 +187,25 @@ CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer, NodeId& originator,
const auto orphan_it = m_orphans.find(txid);
if (orphan_it != m_orphans.end()) {
- more = !work_set.empty();
- originator = orphan_it->second.fromPeer;
return orphan_it->second.tx;
}
}
}
- more = false;
return nullptr;
}
+bool TxOrphanage::HaveTxToReconsider(NodeId peer)
+{
+ LOCK(m_mutex);
+
+ auto work_set_it = m_peer_work_set.find(peer);
+ if (work_set_it != m_peer_work_set.end()) {
+ auto& work_set = work_set_it->second;
+ return !work_set.empty();
+ }
+ return false;
+}
+
void TxOrphanage::EraseForBlock(const CBlock& block)
{
LOCK(m_mutex);
diff --git a/src/txorphanage.h b/src/txorphanage.h
index cd7587fab5..45276c6c98 100644
--- a/src/txorphanage.h
+++ b/src/txorphanage.h
@@ -27,13 +27,11 @@ public:
bool HaveTx(const GenTxid& gtxid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
/** Extract a transaction from a peer's work set
- * Returns nullptr and sets more to false if there are no transactions
- * to work on. Otherwise returns the transaction reference, removes
- * the transaction from the work set, and populates its arguments with
- * the originating peer, and whether there are more orphans for this peer
- * to work on after this tx.
+ * Returns nullptr if there are no transactions to work on.
+ * Otherwise returns the transaction reference, and removes
+ * it from the work set.
*/
- CTransactionRef GetTxToReconsider(NodeId peer, NodeId& originator, bool& more) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+ CTransactionRef GetTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
/** Erase an orphan by txid */
int EraseTx(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
@@ -47,8 +45,11 @@ public:
/** Limit the orphanage to the given maximum */
void LimitOrphans(unsigned int max_orphans) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
- /** Add any orphans that list a particular tx as a parent into a peer's work set */
- void AddChildrenToWorkSet(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+ /** Add any orphans that list a particular tx as a parent into the work set of the peer that provided them */
+ void AddChildrenToWorkSet(const CTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+
+ /** Does this peer have any work to do? */
+ bool HaveTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
/** Return how many entries exist in the orphange */
size_t Size() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
@@ -72,7 +73,7 @@ protected:
* -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */
std::map<uint256, OrphanTx> m_orphans GUARDED_BY(m_mutex);
- /** Which peer provided a parent tx of orphans that need to be reconsidered */
+ /** Which peer provided the orphans that need to be reconsidered */
std::map<NodeId, std::set<uint256>> m_peer_work_set GUARDED_BY(m_mutex);
using OrphanMap = decltype(m_orphans);
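(Aside: the consumer-side pattern implied by the API change — the originator/more out-parameters are gone, so a caller processes one orphan per pass and asks HaveTxToReconsider() whether to schedule another; a sketch, not net_processing's actual code:)

    if (CTransactionRef tx = orphanage.GetTxToReconsider(peer_id)) {
        // validate and relay tx, then erase it on success or failure
    }
    return orphanage.HaveTxToReconsider(peer_id);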
diff --git a/src/uint256.cpp b/src/uint256.cpp
index cd9cbb566a..7f81c3c448 100644
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -7,15 +7,6 @@
#include <util/strencodings.h>
-#include <string.h>
-
-template <unsigned int BITS>
-base_blob<BITS>::base_blob(const std::vector<unsigned char>& vch)
-{
- assert(vch.size() == sizeof(m_data));
- memcpy(m_data, vch.data(), sizeof(m_data));
-}
-
template <unsigned int BITS>
std::string base_blob<BITS>::GetHex() const
{
@@ -29,7 +20,7 @@ std::string base_blob<BITS>::GetHex() const
template <unsigned int BITS>
void base_blob<BITS>::SetHex(const char* psz)
{
- memset(m_data, 0, sizeof(m_data));
+ std::fill(m_data.begin(), m_data.end(), 0);
// skip leading spaces
while (IsSpace(*psz))
@@ -43,7 +34,7 @@ void base_blob<BITS>::SetHex(const char* psz)
size_t digits = 0;
while (::HexDigit(psz[digits]) != -1)
digits++;
- unsigned char* p1 = (unsigned char*)m_data;
+ unsigned char* p1 = m_data.data();
unsigned char* pend = p1 + WIDTH;
while (digits > 0 && p1 < pend) {
*p1 = ::HexDigit(psz[--digits]);
@@ -67,14 +58,12 @@ std::string base_blob<BITS>::ToString() const
}
// Explicit instantiations for base_blob<160>
-template base_blob<160>::base_blob(const std::vector<unsigned char>&);
template std::string base_blob<160>::GetHex() const;
template std::string base_blob<160>::ToString() const;
template void base_blob<160>::SetHex(const char*);
template void base_blob<160>::SetHex(const std::string&);
// Explicit instantiations for base_blob<256>
-template base_blob<256>::base_blob(const std::vector<unsigned char>&);
template std::string base_blob<256>::GetHex() const;
template std::string base_blob<256>::ToString() const;
template void base_blob<256>::SetHex(const char*);
diff --git a/src/uint256.h b/src/uint256.h
index 58e595c4ca..1cc3721487 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -9,11 +9,12 @@
#include <crypto/common.h>
#include <span.h>
-#include <assert.h>
+#include <algorithm>
+#include <array>
+#include <cassert>
#include <cstring>
#include <stdint.h>
#include <string>
-#include <vector>
/** Template base class for fixed-sized opaque blobs. */
template<unsigned int BITS>
@@ -21,7 +22,9 @@ class base_blob
{
protected:
static constexpr int WIDTH = BITS / 8;
- uint8_t m_data[WIDTH];
+ std::array<uint8_t, WIDTH> m_data;
+ static_assert(WIDTH == sizeof(m_data), "Sanity check");
+
public:
/* construct 0 value by default */
constexpr base_blob() : m_data() {}
@@ -29,64 +32,47 @@ public:
/* constructor for constants between 1 and 255 */
constexpr explicit base_blob(uint8_t v) : m_data{v} {}
- explicit base_blob(const std::vector<unsigned char>& vch);
+ constexpr explicit base_blob(Span<const unsigned char> vch)
+ {
+ assert(vch.size() == WIDTH);
+ std::copy(vch.begin(), vch.end(), m_data.begin());
+ }
- bool IsNull() const
+ constexpr bool IsNull() const
{
- for (int i = 0; i < WIDTH; i++)
- if (m_data[i] != 0)
- return false;
- return true;
+ return std::all_of(m_data.begin(), m_data.end(), [](uint8_t val) {
+ return val == 0;
+ });
}
- void SetNull()
+ constexpr void SetNull()
{
- memset(m_data, 0, sizeof(m_data));
+ std::fill(m_data.begin(), m_data.end(), 0);
}
- inline int Compare(const base_blob& other) const { return memcmp(m_data, other.m_data, sizeof(m_data)); }
+ constexpr int Compare(const base_blob& other) const { return std::memcmp(m_data.data(), other.m_data.data(), WIDTH); }
- friend inline bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; }
- friend inline bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; }
- friend inline bool operator<(const base_blob& a, const base_blob& b) { return a.Compare(b) < 0; }
+ friend constexpr bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; }
+ friend constexpr bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; }
+ friend constexpr bool operator<(const base_blob& a, const base_blob& b) { return a.Compare(b) < 0; }
std::string GetHex() const;
void SetHex(const char* psz);
void SetHex(const std::string& str);
std::string ToString() const;
- const unsigned char* data() const { return m_data; }
- unsigned char* data() { return m_data; }
+ constexpr const unsigned char* data() const { return m_data.data(); }
+ constexpr unsigned char* data() { return m_data.data(); }
- unsigned char* begin()
- {
- return &m_data[0];
- }
+ constexpr unsigned char* begin() { return m_data.data(); }
+ constexpr unsigned char* end() { return m_data.data() + WIDTH; }
- unsigned char* end()
- {
- return &m_data[WIDTH];
- }
+ constexpr const unsigned char* begin() const { return m_data.data(); }
+ constexpr const unsigned char* end() const { return m_data.data() + WIDTH; }
- const unsigned char* begin() const
- {
- return &m_data[0];
- }
+ static constexpr unsigned int size() { return WIDTH; }
- const unsigned char* end() const
- {
- return &m_data[WIDTH];
- }
-
- static constexpr unsigned int size()
- {
- return sizeof(m_data);
- }
-
- uint64_t GetUint64(int pos) const
- {
- return ReadLE64(m_data + pos * 8);
- }
+ constexpr uint64_t GetUint64(int pos) const { return ReadLE64(m_data.data() + pos * 8); }
template<typename Stream>
void Serialize(Stream& s) const
@@ -107,8 +93,8 @@ public:
*/
class uint160 : public base_blob<160> {
public:
- constexpr uint160() {}
- explicit uint160(const std::vector<unsigned char>& vch) : base_blob<160>(vch) {}
+ constexpr uint160() = default;
+ constexpr explicit uint160(Span<const unsigned char> vch) : base_blob<160>(vch) {}
};
/** 256-bit opaque blob.
@@ -118,9 +104,9 @@ public:
*/
class uint256 : public base_blob<256> {
public:
- constexpr uint256() {}
+ constexpr uint256() = default;
constexpr explicit uint256(uint8_t v) : base_blob<256>(v) {}
- explicit uint256(const std::vector<unsigned char>& vch) : base_blob<256>(vch) {}
+ constexpr explicit uint256(Span<const unsigned char> vch) : base_blob<256>(vch) {}
static const uint256 ZERO;
static const uint256 ONE;
};
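(Aside: what the Span-taking constructors enable — construction from any 20/32-byte contiguous buffer rather than only std::vector; the length is still asserted to match the blob width. Sketch:)

    const std::array<unsigned char, 32> raw{};  // any 32-byte contiguous range
    const uint256 id{raw};                      // binds via Span<const unsigned char>
    assert(id.IsNull());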
diff --git a/src/univalue/include/univalue_utffilter.h b/src/univalue/include/univalue_utffilter.h
index f688eaaa30..41d8e6bb05 100644
--- a/src/univalue/include/univalue_utffilter.h
+++ b/src/univalue/include/univalue_utffilter.h
@@ -13,8 +13,8 @@
class JSONUTF8StringFilter
{
public:
- explicit JSONUTF8StringFilter(std::string &s):
- str(s), is_valid(true), codepoint(0), state(0), surpair(0)
+ explicit JSONUTF8StringFilter(std::string& s)
+ : str(s)
{
}
// Write single 8-bit char (may be part of UTF-8 sequence)
@@ -79,10 +79,10 @@ public:
}
private:
std::string &str;
- bool is_valid;
+ bool is_valid{true};
// Current UTF-8 decoding state
- unsigned int codepoint;
- int state; // Top bit to be filled in for next UTF-8 byte, or 0
+ unsigned int codepoint{0};
+ int state{0}; // Top bit to be filled in for next UTF-8 byte, or 0
// Keep track of the following state to handle the following section of
// RFC4627:
@@ -94,7 +94,7 @@ private:
// "\uD834\uDD1E".
//
// Two subsequent \u.... may have to be replaced with one actual codepoint.
- unsigned int surpair; // First half of open UTF-16 surrogate pair, or 0
+ unsigned int surpair{0}; // First half of open UTF-16 surrogate pair, or 0
void append_codepoint(unsigned int codepoint_)
{
diff --git a/src/util/check.cpp b/src/util/check.cpp
index 34b9d376a7..795dce7124 100644
--- a/src/util/check.cpp
+++ b/src/util/check.cpp
@@ -14,8 +14,9 @@
#include <cstdio>
#include <cstdlib>
#include <string>
+#include <string_view>
-std::string StrFormatInternalBug(const char* msg, const char* file, int line, const char* func)
+std::string StrFormatInternalBug(std::string_view msg, std::string_view file, int line, std::string_view func)
{
return strprintf("Internal bug detected: \"%s\"\n%s:%d (%s)\n"
"%s %s\n"
@@ -23,12 +24,12 @@ std::string StrFormatInternalBug(const char* msg, const char* file, int line, co
msg, file, line, func, PACKAGE_NAME, FormatFullVersion(), PACKAGE_BUGREPORT);
}
-NonFatalCheckError::NonFatalCheckError(const char* msg, const char* file, int line, const char* func)
+NonFatalCheckError::NonFatalCheckError(std::string_view msg, std::string_view file, int line, std::string_view func)
: std::runtime_error{StrFormatInternalBug(msg, file, line, func)}
{
}
-void assertion_fail(const char* file, int line, const char* func, const char* assertion)
+void assertion_fail(std::string_view file, int line, std::string_view func, std::string_view assertion)
{
auto str = strprintf("%s:%s %s: Assertion `%s' failed.\n", file, line, func, assertion);
fwrite(str.data(), 1, str.size(), stderr);
diff --git a/src/util/check.h b/src/util/check.h
index 96cd905d47..7ddcebf506 100644
--- a/src/util/check.h
+++ b/src/util/check.h
@@ -8,14 +8,16 @@
#include <attributes.h>
#include <stdexcept>
+#include <string>
+#include <string_view>
#include <utility>
-std::string StrFormatInternalBug(const char* msg, const char* file, int line, const char* func);
+std::string StrFormatInternalBug(std::string_view msg, std::string_view file, int line, std::string_view func);
class NonFatalCheckError : public std::runtime_error
{
public:
- NonFatalCheckError(const char* msg, const char* file, int line, const char* func);
+ NonFatalCheckError(std::string_view msg, std::string_view file, int line, std::string_view func);
};
#define STR_INTERNAL_BUG(msg) StrFormatInternalBug((msg), __FILE__, __LINE__, __func__)
@@ -49,7 +51,7 @@ T&& inline_check_non_fatal(LIFETIMEBOUND T&& val, const char* file, int line, co
#endif
/** Helper for Assert() */
-void assertion_fail(const char* file, int line, const char* func, const char* assertion);
+void assertion_fail(std::string_view file, int line, std::string_view func, std::string_view assertion);
/** Helper for Assert()/Assume() */
template <bool IS_ASSERT, typename T>
diff --git a/src/util/fees.cpp b/src/util/fees.cpp
index cbefe18dbb..8ada02ce54 100644
--- a/src/util/fees.cpp
+++ b/src/util/fees.cpp
@@ -49,7 +49,7 @@ std::string FeeModes(const std::string& delimiter)
return Join(FeeModeMap(), delimiter, [&](const std::pair<std::string, FeeEstimateMode>& i) { return i.first; });
}
-const std::string InvalidEstimateModeErrorMessage()
+std::string InvalidEstimateModeErrorMessage()
{
return "Invalid estimate_mode parameter, must be one of: \"" + FeeModes("\", \"") + "\"";
}
diff --git a/src/util/fees.h b/src/util/fees.h
index 9ef2389d3e..10ba1e4f85 100644
--- a/src/util/fees.h
+++ b/src/util/fees.h
@@ -13,6 +13,6 @@ enum class FeeReason;
bool FeeModeFromString(const std::string& mode_string, FeeEstimateMode& fee_estimate_mode);
std::string StringForFeeReason(FeeReason reason);
std::string FeeModes(const std::string& delimiter);
-const std::string InvalidEstimateModeErrorMessage();
+std::string InvalidEstimateModeErrorMessage();
#endif // BITCOIN_UTIL_FEES_H
diff --git a/src/util/hasher.cpp b/src/util/hasher.cpp
index a3a3f7a429..81e9b990e1 100644
--- a/src/util/hasher.cpp
+++ b/src/util/hasher.cpp
@@ -9,7 +9,10 @@
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand<uint64_t>()), k1(GetRand<uint64_t>()) {}
-SaltedOutpointHasher::SaltedOutpointHasher() : k0(GetRand<uint64_t>()), k1(GetRand<uint64_t>()) {}
+SaltedOutpointHasher::SaltedOutpointHasher(bool deterministic) :
+ k0(deterministic ? 0x8e819f2607a18de6 : GetRand<uint64_t>()),
+ k1(deterministic ? 0xf4020d2e3983b0eb : GetRand<uint64_t>())
+{}
SaltedSipHasher::SaltedSipHasher() : m_k0(GetRand<uint64_t>()), m_k1(GetRand<uint64_t>()) {}
diff --git a/src/util/hasher.h b/src/util/hasher.h
index 82d278b086..506ae9415d 100644
--- a/src/util/hasher.h
+++ b/src/util/hasher.h
@@ -36,7 +36,7 @@ private:
const uint64_t k0, k1;
public:
- SaltedOutpointHasher();
+ SaltedOutpointHasher(bool deterministic = false);
/**
* Having the hash noexcept allows libstdc++'s unordered_map to recalculate
diff --git a/src/util/sock.h b/src/util/sock.h
index adcca377e3..6bac2dfd34 100644
--- a/src/util/sock.h
+++ b/src/util/sock.h
@@ -181,9 +181,9 @@ public:
* Auxiliary requested/occurred events to wait for in `WaitMany()`.
*/
struct Events {
- explicit Events(Event req) : requested{req}, occurred{0} {}
+ explicit Events(Event req) : requested{req} {}
Event requested;
- Event occurred;
+ Event occurred{0};
};
struct HashSharedPtrSock {
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 309595ff5b..e72c970157 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -242,7 +242,7 @@ static std::optional<util::SettingsValue> InterpretValue(const KeyInfo& key, con
ArgsManager::ArgsManager() = default;
ArgsManager::~ArgsManager() = default;
-const std::set<std::string> ArgsManager::GetUnsuitableSectionOnlyArgs() const
+std::set<std::string> ArgsManager::GetUnsuitableSectionOnlyArgs() const
{
std::set<std::string> unsuitables;
@@ -262,7 +262,7 @@ const std::set<std::string> ArgsManager::GetUnsuitableSectionOnlyArgs() const
return unsuitables;
}
-const std::list<SectionInfo> ArgsManager::GetUnrecognizedSections() const
+std::list<SectionInfo> ArgsManager::GetUnrecognizedSections() const
{
// Section names to be recognized in the config file.
static const std::set<std::string> available_sections{
@@ -1360,6 +1360,11 @@ void SetupEnvironment()
SetConsoleCP(CP_UTF8);
SetConsoleOutputCP(CP_UTF8);
#endif
+
+#ifndef WIN32
+ constexpr mode_t private_umask = 0077;
+ umask(private_umask);
+#endif
}
bool SetupNetworking()
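// Illustrative sketch, not part of the patch: on POSIX systems umask(0077)
// strips group/other permission bits from every file the process creates, so
// a file requested with mode 0666 is actually created as 0600 (owner-only).
#ifndef _WIN32
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

int main()
{
    constexpr mode_t private_umask = 0077;
    umask(private_umask);

    int fd = open("example_private_file", O_CREAT | O_WRONLY, 0666);
    if (fd == -1) { std::perror("open"); return 1; }
    close(fd);

    struct stat st{};
    if (stat("example_private_file", &st) == 0) {
        std::printf("created with mode %o\n", st.st_mode & 0777); // prints 600, not 666
    }
    return 0;
}
#endif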
diff --git a/src/util/system.h b/src/util/system.h
index 671491f2ff..c053adf8c3 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -250,12 +250,12 @@ protected:
* on the command line or in a network-specific section in the
* config file.
*/
- const std::set<std::string> GetUnsuitableSectionOnlyArgs() const;
+ std::set<std::string> GetUnsuitableSectionOnlyArgs() const;
/**
* Log warnings for unrecognized section names in the config file.
*/
- const std::list<SectionInfo> GetUnrecognizedSections() const;
+ std::list<SectionInfo> GetUnrecognizedSections() const;
struct Command {
/** The command (if one has been registered with AddCommand), or empty */
diff --git a/src/validation.cpp b/src/validation.cpp
index b42b398619..1357de3c01 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -76,8 +76,6 @@ using node::BlockManager;
using node::BlockMap;
using node::CBlockIndexHeightOnlyComparator;
using node::CBlockIndexWorkComparator;
-using node::fImporting;
-using node::fPruneMode;
using node::fReindex;
using node::ReadBlockFromDisk;
using node::SnapshotMetadata;
@@ -1513,13 +1511,9 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
return nSubsidy;
}
-CoinsViews::CoinsViews(
- fs::path ldb_name,
- size_t cache_size_bytes,
- bool in_memory,
- bool should_wipe) : m_dbview(
- gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory, should_wipe),
- m_catcherview(&m_dbview) {}
+CoinsViews::CoinsViews(DBParams db_params, CoinsViewOptions options)
+ : m_dbview{std::move(db_params), std::move(options)},
+ m_catcherview(&m_dbview) {}
void CoinsViews::InitCache()
{
@@ -1548,7 +1542,14 @@ void Chainstate::InitCoinsDB(
}
m_coins_views = std::make_unique<CoinsViews>(
- leveldb_name, cache_size_bytes, in_memory, should_wipe);
+ DBParams{
+ .path = m_chainman.m_options.datadir / leveldb_name,
+ .cache_bytes = cache_size_bytes,
+ .memory_only = in_memory,
+ .wipe_data = should_wipe,
+ .obfuscate = true,
+ .options = m_chainman.m_options.coins_db},
+ m_chainman.m_options.coins_view);
}
void Chainstate::InitCoinsCache(size_t cache_size_bytes)
@@ -1573,8 +1574,9 @@ bool Chainstate::IsInitialBlockDownload() const
LOCK(cs_main);
if (m_cached_finished_ibd.load(std::memory_order_relaxed))
return false;
- if (fImporting || fReindex)
+ if (m_chainman.m_blockman.LoadingBlocks()) {
return true;
+ }
if (m_chain.Tip() == nullptr)
return true;
if (m_chain.Tip()->nChainWork < m_chainman.MinimumChainWork()) {
@@ -2411,7 +2413,7 @@ bool Chainstate::FlushStateToDisk(
CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
LOCK(m_blockman.cs_LastBlockFile);
- if (fPruneMode && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
+ if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
// make sure we don't prune above any of the prune locks bestblocks
// pruning is height-based
int last_prune{m_chain.Height()}; // last height we can prune
@@ -4058,7 +4060,7 @@ CVerifyDB::~CVerifyDB()
uiInterface.ShowProgress("", 100, false);
}
-bool CVerifyDB::VerifyDB(
+VerifyDBResult CVerifyDB::VerifyDB(
Chainstate& chainstate,
const Consensus::Params& consensus_params,
CCoinsView& coinsview,
@@ -4067,7 +4069,7 @@ bool CVerifyDB::VerifyDB(
AssertLockHeld(cs_main);
if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr) {
- return true;
+ return VerifyDBResult::SUCCESS;
}
// Verify blocks in the best chain
@@ -4082,7 +4084,9 @@ bool CVerifyDB::VerifyDB(
int nGoodTransactions = 0;
BlockValidationState state;
int reportDone = 0;
- LogPrintf("[0%%]..."); /* Continued */
+ bool skipped_no_block_data{false};
+ bool skipped_l3_checks{false};
+ LogPrintf("Verification progress: 0%%\n");
const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
@@ -4090,88 +4094,109 @@ bool CVerifyDB::VerifyDB(
const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
if (reportDone < percentageDone / 10) {
// report every 10% step
- LogPrintf("[%d%%]...", percentageDone); /* Continued */
+ LogPrintf("Verification progress: %d%%\n", percentageDone);
reportDone = percentageDone / 10;
}
uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
break;
}
- if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
+ if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
// If pruning or running under an assumeutxo snapshot, only go
// back as far as we have data.
- LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
+ LogPrintf("VerifyDB(): block verification stopping at height %d (no data). This could be due to pruning or use of an assumeutxo snapshot.\n", pindex->nHeight);
+ skipped_no_block_data = true;
break;
}
CBlock block;
// check level 0: read from disk
if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
- return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogPrintf("Verification error: ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) {
- return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
- pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
+ LogPrintf("Verification error: found bad block at %d, hash=%s (%s)\n",
+ pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
+ return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
if (!pindex->GetUndoPos().IsNull()) {
if (!UndoReadFromDisk(undo, pindex)) {
- return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogPrintf("Verification error: found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
}
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();
- if (nCheckLevel >= 3 && curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
- assert(coins.GetBestBlock() == pindex->GetBlockHash());
- DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
- if (res == DISCONNECT_FAILED) {
- return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
- }
- if (res == DISCONNECT_UNCLEAN) {
- nGoodTransactions = 0;
- pindexFailure = pindex;
+ if (nCheckLevel >= 3) {
+ if (curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
+ assert(coins.GetBestBlock() == pindex->GetBlockHash());
+ DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
+ if (res == DISCONNECT_FAILED) {
+ LogPrintf("Verification error: irrecoverable inconsistency in block data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return VerifyDBResult::CORRUPTED_BLOCK_DB;
+ }
+ if (res == DISCONNECT_UNCLEAN) {
+ nGoodTransactions = 0;
+ pindexFailure = pindex;
+ } else {
+ nGoodTransactions += block.vtx.size();
+ }
} else {
- nGoodTransactions += block.vtx.size();
+ skipped_l3_checks = true;
}
}
- if (ShutdownRequested()) return true;
+ if (ShutdownRequested()) return VerifyDBResult::INTERRUPTED;
}
if (pindexFailure) {
- return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
+ LogPrintf("Verification error: coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
+ return VerifyDBResult::CORRUPTED_BLOCK_DB;
+ }
+ if (skipped_l3_checks) {
+ LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n");
}
// store block count as we move pindex at check level >= 4
int block_count = chainstate.m_chain.Height() - pindex->nHeight;
// check level 4: try reconnecting blocks
- if (nCheckLevel >= 4) {
+ if (nCheckLevel >= 4 && !skipped_l3_checks) {
while (pindex != chainstate.m_chain.Tip()) {
const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
if (reportDone < percentageDone / 10) {
// report every 10% step
- LogPrintf("[%d%%]...", percentageDone); /* Continued */
+ LogPrintf("Verification progress: %d%%\n", percentageDone);
reportDone = percentageDone / 10;
}
uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
pindex = chainstate.m_chain.Next(pindex);
CBlock block;
- if (!ReadBlockFromDisk(block, pindex, consensus_params))
- return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
+ LogPrintf("Verification error: ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return VerifyDBResult::CORRUPTED_BLOCK_DB;
+ }
if (!chainstate.ConnectBlock(block, state, pindex, coins)) {
- return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
+ LogPrintf("Verification error: found unconnectable block at %d, hash=%s (%s)\n", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
+ return VerifyDBResult::CORRUPTED_BLOCK_DB;
}
- if (ShutdownRequested()) return true;
+ if (ShutdownRequested()) return VerifyDBResult::INTERRUPTED;
}
}
- LogPrintf("[DONE].\n");
- LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
+ LogPrintf("Verification: No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
- return true;
+ if (skipped_l3_checks) {
+ return VerifyDBResult::SKIPPED_L3_CHECKS;
+ }
+ if (skipped_no_block_data) {
+ return VerifyDBResult::SKIPPED_MISSING_BLOCKS;
+ }
+ return VerifyDBResult::SUCCESS;
}
/** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
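// Illustrative sketch, not part of the patch: with VerifyDB() now returning an
// enum instead of a bare bool, a caller can distinguish "verified as far as
// the data allows" from real corruption or an interruption. The enumerators
// mirror the VerifyDBResult declared in validation.h below; the handler itself
// is hypothetical.
enum class VerifyDBResultSketch { SUCCESS, CORRUPTED_BLOCK_DB, INTERRUPTED, SKIPPED_L3_CHECKS, SKIPPED_MISSING_BLOCKS };

bool ContinueStartupSketch(VerifyDBResultSketch result)
{
    switch (result) {
    case VerifyDBResultSketch::SUCCESS:
    case VerifyDBResultSketch::SKIPPED_MISSING_BLOCKS: // pruned/assumeutxo node: all available blocks checked
        return true;
    case VerifyDBResultSketch::SKIPPED_L3_CHECKS:      // verified, but level-3 checks need a larger -dbcache
        return true;
    case VerifyDBResultSketch::INTERRUPTED:            // shutdown requested mid-verification
        return false;
    case VerifyDBResultSketch::CORRUPTED_BLOCK_DB:     // surface a "corrupted block database" error
        return false;
    }
    return false; // unreachable; silences -Wreturn-type on some compilers
}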
diff --git a/src/validation.h b/src/validation.h
index 7170467b00..36c6becf4f 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -349,12 +349,20 @@ bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consens
/** Return the sum of the work on a given set of headers */
arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader>& headers);
+enum class VerifyDBResult {
+ SUCCESS,
+ CORRUPTED_BLOCK_DB,
+ INTERRUPTED,
+ SKIPPED_L3_CHECKS,
+ SKIPPED_MISSING_BLOCKS,
+};
+
/** RAII wrapper for VerifyDB: Verify consistency of the block and coin databases */
class CVerifyDB {
public:
CVerifyDB();
~CVerifyDB();
- bool VerifyDB(
+ [[nodiscard]] VerifyDBResult VerifyDB(
Chainstate& chainstate,
const Consensus::Params& consensus_params,
CCoinsView& coinsview,
@@ -408,7 +416,7 @@ public:
//! state to disk, which should not be done until the health of the database is verified.
//!
//! All arguments forwarded onto CCoinsViewDB.
- CoinsViews(fs::path ldb_name, size_t cache_size_bytes, bool in_memory, bool should_wipe);
+ CoinsViews(DBParams db_params, CoinsViewOptions options);
//! Initialize the CCoinsViewCache member.
void InitCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index 900cb0474a..d344c8bfbd 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -17,7 +17,7 @@
#include <unordered_map>
#include <utility>
-const std::string RemovalReasonToString(const MemPoolRemovalReason& r) noexcept;
+std::string RemovalReasonToString(const MemPoolRemovalReason& r) noexcept;
/**
* MainSignalsImpl manages a list of shared_ptr<CValidationInterface> callbacks.
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 4ec3ac2189..653115aa81 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -8,6 +8,7 @@
#include <wallet/bdb.h>
#include <wallet/db.h>
+#include <util/check.h>
#include <util/strencodings.h>
#include <util/translation.h>
@@ -220,17 +221,17 @@ BerkeleyEnvironment::BerkeleyEnvironment() : m_use_shared_memory(false)
fMockDb = true;
}
-BerkeleyBatch::SafeDbt::SafeDbt()
+SafeDbt::SafeDbt()
{
m_dbt.set_flags(DB_DBT_MALLOC);
}
-BerkeleyBatch::SafeDbt::SafeDbt(void* data, size_t size)
+SafeDbt::SafeDbt(void* data, size_t size)
: m_dbt(data, size)
{
}
-BerkeleyBatch::SafeDbt::~SafeDbt()
+SafeDbt::~SafeDbt()
{
if (m_dbt.get_data() != nullptr) {
// Clear memory, e.g. in case it was a private key
@@ -244,17 +245,17 @@ BerkeleyBatch::SafeDbt::~SafeDbt()
}
}
-const void* BerkeleyBatch::SafeDbt::get_data() const
+const void* SafeDbt::get_data() const
{
return m_dbt.get_data();
}
-uint32_t BerkeleyBatch::SafeDbt::get_size() const
+uint32_t SafeDbt::get_size() const
{
return m_dbt.get_size();
}
-BerkeleyBatch::SafeDbt::operator Dbt*()
+SafeDbt::operator Dbt*()
{
return &m_dbt;
}
@@ -307,7 +308,7 @@ BerkeleyDatabase::~BerkeleyDatabase()
}
}
-BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database)
+BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, bool fFlushOnCloseIn) : m_database(database)
{
database.AddRef();
database.Open();
@@ -398,7 +399,6 @@ void BerkeleyBatch::Close()
activeTxn->abort();
activeTxn = nullptr;
pdb = nullptr;
- CloseCursor();
if (fFlushOnClose)
Flush();
@@ -476,15 +476,15 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip)
fSuccess = false;
}
- if (db.StartCursor()) {
+ std::unique_ptr<DatabaseCursor> cursor = db.GetNewCursor();
+ if (cursor) {
while (fSuccess) {
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
- bool complete;
- bool ret1 = db.ReadAtCursor(ssKey, ssValue, complete);
- if (complete) {
+ DataStream ssKey{};
+ DataStream ssValue{};
+ DatabaseCursor::Status ret1 = cursor->Next(ssKey, ssValue);
+ if (ret1 == DatabaseCursor::Status::DONE) {
break;
- } else if (!ret1) {
+ } else if (ret1 == DatabaseCursor::Status::FAIL) {
fSuccess = false;
break;
}
@@ -502,7 +502,7 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip)
if (ret2 > 0)
fSuccess = false;
}
- db.CloseCursor();
+ cursor.reset();
}
if (fSuccess) {
db.Close();
@@ -656,48 +656,52 @@ void BerkeleyDatabase::ReloadDbEnv()
env->ReloadDbEnv();
}
-bool BerkeleyBatch::StartCursor()
+BerkeleyCursor::BerkeleyCursor(BerkeleyDatabase& database)
{
- assert(!m_cursor);
- if (!pdb)
- return false;
- int ret = pdb->cursor(nullptr, &m_cursor, 0);
- return ret == 0;
+ if (!database.m_db.get()) {
+ throw std::runtime_error(STR_INTERNAL_BUG("BerkeleyDatabase does not exist"));
+ }
+ int ret = database.m_db->cursor(nullptr, &m_cursor, 0);
+ if (ret != 0) {
+ throw std::runtime_error(STR_INTERNAL_BUG(strprintf("BDB Cursor could not be created. Returned %d", ret)));
+ }
}
-bool BerkeleyBatch::ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete)
+DatabaseCursor::Status BerkeleyCursor::Next(DataStream& ssKey, DataStream& ssValue)
{
- complete = false;
- if (m_cursor == nullptr) return false;
+ if (m_cursor == nullptr) return Status::FAIL;
// Read at cursor
SafeDbt datKey;
SafeDbt datValue;
int ret = m_cursor->get(datKey, datValue, DB_NEXT);
if (ret == DB_NOTFOUND) {
- complete = true;
+ return Status::DONE;
+ }
+ if (ret != 0 || datKey.get_data() == nullptr || datValue.get_data() == nullptr) {
+ return Status::FAIL;
}
- if (ret != 0)
- return false;
- else if (datKey.get_data() == nullptr || datValue.get_data() == nullptr)
- return false;
// Convert to streams
- ssKey.SetType(SER_DISK);
ssKey.clear();
ssKey.write({AsBytePtr(datKey.get_data()), datKey.get_size()});
- ssValue.SetType(SER_DISK);
ssValue.clear();
ssValue.write({AsBytePtr(datValue.get_data()), datValue.get_size()});
- return true;
+ return Status::MORE;
}
-void BerkeleyBatch::CloseCursor()
+BerkeleyCursor::~BerkeleyCursor()
{
if (!m_cursor) return;
m_cursor->close();
m_cursor = nullptr;
}
+std::unique_ptr<DatabaseCursor> BerkeleyBatch::GetNewCursor()
+{
+ if (!pdb) return nullptr;
+ return std::make_unique<BerkeleyCursor>(m_database);
+}
+
bool BerkeleyBatch::TxnBegin()
{
if (!pdb || activeTxn)
@@ -749,7 +753,7 @@ std::string BerkeleyDatabaseVersion()
return DbEnv::version(nullptr, nullptr, nullptr);
}
-bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
+bool BerkeleyBatch::ReadKey(DataStream&& key, DataStream& value)
{
if (!pdb)
return false;
@@ -765,7 +769,7 @@ bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
return false;
}
-bool BerkeleyBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite)
+bool BerkeleyBatch::WriteKey(DataStream&& key, DataStream&& value, bool overwrite)
{
if (!pdb)
return false;
@@ -780,7 +784,7 @@ bool BerkeleyBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwr
return (ret == 0);
}
-bool BerkeleyBatch::EraseKey(CDataStream&& key)
+bool BerkeleyBatch::EraseKey(DataStream&& key)
{
if (!pdb)
return false;
@@ -793,7 +797,7 @@ bool BerkeleyBatch::EraseKey(CDataStream&& key)
return (ret == 0 || ret == DB_NOTFOUND);
}
-bool BerkeleyBatch::HasKey(CDataStream&& key)
+bool BerkeleyBatch::HasKey(DataStream&& key)
{
if (!pdb)
return false;
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index 40a1031c8e..06c98972b0 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -165,40 +165,51 @@ public:
std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override;
};
-/** RAII class that provides access to a Berkeley database */
-class BerkeleyBatch : public DatabaseBatch
+/** RAII class that automatically cleanses its data on destruction */
+class SafeDbt final
{
- /** RAII class that automatically cleanses its data on destruction */
- class SafeDbt final
- {
- Dbt m_dbt;
+ Dbt m_dbt;
- public:
- // construct Dbt with internally-managed data
- SafeDbt();
- // construct Dbt with provided data
- SafeDbt(void* data, size_t size);
- ~SafeDbt();
+public:
+ // construct Dbt with internally-managed data
+ SafeDbt();
+ // construct Dbt with provided data
+ SafeDbt(void* data, size_t size);
+ ~SafeDbt();
+
+ // delegate to Dbt
+ const void* get_data() const;
+ uint32_t get_size() const;
+
+ // conversion operator to access the underlying Dbt
+ operator Dbt*();
+};
- // delegate to Dbt
- const void* get_data() const;
- uint32_t get_size() const;
+class BerkeleyCursor : public DatabaseCursor
+{
+private:
+ Dbc* m_cursor;
- // conversion operator to access the underlying Dbt
- operator Dbt*();
- };
+public:
+ explicit BerkeleyCursor(BerkeleyDatabase& database);
+ ~BerkeleyCursor() override;
+
+ Status Next(DataStream& key, DataStream& value) override;
+};
+/** RAII class that provides access to a Berkeley database */
+class BerkeleyBatch : public DatabaseBatch
+{
private:
- bool ReadKey(CDataStream&& key, CDataStream& value) override;
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite = true) override;
- bool EraseKey(CDataStream&& key) override;
- bool HasKey(CDataStream&& key) override;
+ bool ReadKey(DataStream&& key, DataStream& value) override;
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override;
+ bool EraseKey(DataStream&& key) override;
+ bool HasKey(DataStream&& key) override;
protected:
- Db* pdb;
+ Db* pdb{nullptr};
std::string strFile;
- DbTxn* activeTxn;
- Dbc* m_cursor;
+ DbTxn* activeTxn{nullptr};
bool fReadOnly;
bool fFlushOnClose;
BerkeleyEnvironment *env;
@@ -214,9 +225,7 @@ public:
void Flush() override;
void Close() override;
- bool StartCursor() override;
- bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) override;
- void CloseCursor() override;
+ std::unique_ptr<DatabaseCursor> GetNewCursor() override;
bool TxnBegin() override;
bool TxnCommit() override;
bool TxnAbort() override;
diff --git a/src/wallet/db.h b/src/wallet/db.h
index f09844c37e..d4c590fac7 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -22,14 +22,33 @@ struct bilingual_str;
namespace wallet {
void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename);
+class DatabaseCursor
+{
+public:
+ explicit DatabaseCursor() {}
+ virtual ~DatabaseCursor() {}
+
+ DatabaseCursor(const DatabaseCursor&) = delete;
+ DatabaseCursor& operator=(const DatabaseCursor&) = delete;
+
+ enum class Status
+ {
+ FAIL,
+ MORE,
+ DONE,
+ };
+
+ virtual Status Next(DataStream& key, DataStream& value) { return Status::FAIL; }
+};
+
/** RAII class that provides access to a WalletDatabase */
class DatabaseBatch
{
private:
- virtual bool ReadKey(CDataStream&& key, CDataStream& value) = 0;
- virtual bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) = 0;
- virtual bool EraseKey(CDataStream&& key) = 0;
- virtual bool HasKey(CDataStream&& key) = 0;
+ virtual bool ReadKey(DataStream&& key, DataStream& value) = 0;
+ virtual bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) = 0;
+ virtual bool EraseKey(DataStream&& key) = 0;
+ virtual bool HasKey(DataStream&& key) = 0;
public:
explicit DatabaseBatch() {}
@@ -44,7 +63,7 @@ public:
template <typename K, typename T>
bool Read(const K& key, T& value)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
@@ -61,7 +80,7 @@ public:
template <typename K, typename T>
bool Write(const K& key, const T& value, bool fOverwrite = true)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
@@ -75,7 +94,7 @@ public:
template <typename K>
bool Erase(const K& key)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
@@ -85,16 +104,14 @@ public:
template <typename K>
bool Exists(const K& key)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
ssKey.reserve(1000);
ssKey << key;
return HasKey(std::move(ssKey));
}
- virtual bool StartCursor() = 0;
- virtual bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) = 0;
- virtual void CloseCursor() = 0;
+ virtual std::unique_ptr<DatabaseCursor> GetNewCursor() = 0;
virtual bool TxnBegin() = 0;
virtual bool TxnCommit() = 0;
virtual bool TxnAbort() = 0;
@@ -106,7 +123,7 @@ class WalletDatabase
{
public:
/** Create dummy DB handle */
- WalletDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0) {}
+ WalletDatabase() : nUpdateCounter(0) {}
virtual ~WalletDatabase() {};
/** Open the database if it is not already opened. */
@@ -148,30 +165,33 @@ public:
virtual std::string Format() = 0;
std::atomic<unsigned int> nUpdateCounter;
- unsigned int nLastSeen;
- unsigned int nLastFlushed;
- int64_t nLastWalletUpdate;
+ unsigned int nLastSeen{0};
+ unsigned int nLastFlushed{0};
+ int64_t nLastWalletUpdate{0};
/** Make a DatabaseBatch connected to this database */
virtual std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) = 0;
};
+class DummyCursor : public DatabaseCursor
+{
+ Status Next(DataStream& key, DataStream& value) override { return Status::FAIL; }
+};
+
/** RAII class that provides access to a DummyDatabase. Never fails. */
class DummyBatch : public DatabaseBatch
{
private:
- bool ReadKey(CDataStream&& key, CDataStream& value) override { return true; }
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) override { return true; }
- bool EraseKey(CDataStream&& key) override { return true; }
- bool HasKey(CDataStream&& key) override { return true; }
+ bool ReadKey(DataStream&& key, DataStream& value) override { return true; }
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override { return true; }
+ bool EraseKey(DataStream&& key) override { return true; }
+ bool HasKey(DataStream&& key) override { return true; }
public:
void Flush() override {}
void Close() override {}
- bool StartCursor() override { return true; }
- bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) override { return true; }
- void CloseCursor() override {}
+ std::unique_ptr<DatabaseCursor> GetNewCursor() override { return std::make_unique<DummyCursor>(); }
bool TxnBegin() override { return true; }
bool TxnCommit() override { return true; }
bool TxnAbort() override { return true; }
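// Illustrative, self-contained sketch of the RAII gain from replacing the
// StartCursor()/ReadAtCursor()/CloseCursor() trio with a cursor object, not
// part of the patch: the cursor releases its underlying handle in its
// destructor, so early returns and exceptions can no longer leak an open
// cursor. The types below are stand-ins for DatabaseBatch/DatabaseCursor.
#include <memory>

struct CursorSketch {
    ~CursorSketch() { /* close the underlying handle here */ }
    bool Next() { return false; } // pretend the table is empty
};

struct BatchSketch {
    std::unique_ptr<CursorSketch> GetNewCursor() { return std::make_unique<CursorSketch>(); }
};

bool DumpRecordsSketch(BatchSketch& batch)
{
    std::unique_ptr<CursorSketch> cursor = batch.GetNewCursor();
    if (!cursor) return false;
    while (cursor->Next()) {
        // ... process one record; returning or throwing here still closes the cursor
    }
    return true;
} // cursor (and the handle it owns) is destroyed on every path out of the function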
diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp
index efa548ad91..69208c19dc 100644
--- a/src/wallet/dump.cpp
+++ b/src/wallet/dump.cpp
@@ -5,6 +5,7 @@
#include <wallet/dump.h>
#include <fs.h>
+#include <util/system.h>
#include <util/translation.h>
#include <wallet/wallet.h>
@@ -47,7 +48,8 @@ bool DumpWallet(const ArgsManager& args, CWallet& wallet, bilingual_str& error)
std::unique_ptr<DatabaseBatch> batch = db.MakeBatch();
bool ret = true;
- if (!batch->StartCursor()) {
+ std::unique_ptr<DatabaseCursor> cursor = batch->GetNewCursor();
+ if (!cursor) {
error = _("Error: Couldn't create cursor into database");
ret = false;
}
@@ -66,15 +68,15 @@ bool DumpWallet(const ArgsManager& args, CWallet& wallet, bilingual_str& error)
// Read the records
while (true) {
- CDataStream ss_key(SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(SER_DISK, CLIENT_VERSION);
- bool complete;
- ret = batch->ReadAtCursor(ss_key, ss_value, complete);
- if (complete) {
+ DataStream ss_key{};
+ DataStream ss_value{};
+ DatabaseCursor::Status status = cursor->Next(ss_key, ss_value);
+ if (status == DatabaseCursor::Status::DONE) {
ret = true;
break;
- } else if (!ret) {
+ } else if (status == DatabaseCursor::Status::FAIL) {
error = _("Error reading next record from wallet database");
+ ret = false;
break;
}
std::string key_str = HexStr(ss_key);
@@ -85,7 +87,7 @@ bool DumpWallet(const ArgsManager& args, CWallet& wallet, bilingual_str& error)
}
}
- batch->CloseCursor();
+ cursor.reset();
batch.reset();
// Close the wallet after we're done with it. The caller won't be doing this
@@ -201,7 +203,7 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs::
// dummy chain interface
bool ret = true;
- std::shared_ptr<CWallet> wallet(new CWallet(/*chain=*/nullptr, name, gArgs, std::move(database)), WalletToolReleaseWallet);
+ std::shared_ptr<CWallet> wallet(new CWallet(/*chain=*/nullptr, name, std::move(database)), WalletToolReleaseWallet);
{
LOCK(wallet->cs_wallet);
DBErrors load_wallet_ret = wallet->LoadWallet();
@@ -254,8 +256,8 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs::
std::vector<unsigned char> k = ParseHex(key);
std::vector<unsigned char> v = ParseHex(value);
- CDataStream ss_key(k, SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(v, SER_DISK, CLIENT_VERSION);
+ DataStream ss_key{k};
+ DataStream ss_value{v};
if (!batch->Write(ss_key, ss_value)) {
error = strprintf(_("Error: Unable to write record to new wallet"));
diff --git a/src/wallet/external_signer_scriptpubkeyman.h b/src/wallet/external_signer_scriptpubkeyman.h
index 9918979a81..01dc80b1ca 100644
--- a/src/wallet/external_signer_scriptpubkeyman.h
+++ b/src/wallet/external_signer_scriptpubkeyman.h
@@ -13,11 +13,11 @@ namespace wallet {
class ExternalSignerScriptPubKeyMan : public DescriptorScriptPubKeyMan
{
public:
- ExternalSignerScriptPubKeyMan(WalletStorage& storage, WalletDescriptor& descriptor)
- : DescriptorScriptPubKeyMan(storage, descriptor)
+ ExternalSignerScriptPubKeyMan(WalletStorage& storage, WalletDescriptor& descriptor, int64_t keypool_size)
+ : DescriptorScriptPubKeyMan(storage, descriptor, keypool_size)
{}
- ExternalSignerScriptPubKeyMan(WalletStorage& storage)
- : DescriptorScriptPubKeyMan(storage)
+ ExternalSignerScriptPubKeyMan(WalletStorage& storage, int64_t keypool_size)
+ : DescriptorScriptPubKeyMan(storage, keypool_size)
{}
/** Provide a descriptor at setup time
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index bd158b5985..37a704bfa4 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -155,7 +155,7 @@ bool TransactionCanBeBumped(const CWallet& wallet, const uint256& txid)
}
Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCoinControl& coin_control, std::vector<bilingual_str>& errors,
- CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx, bool require_mine)
+ CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx, bool require_mine, const std::vector<CTxOut>& outputs)
{
// We are going to modify coin control later, copy to re-use
CCoinControl new_coin_control(coin_control);
@@ -222,11 +222,19 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
return result;
}
- // Fill in recipients(and preserve a single change key if there is one)
- // While we're here, calculate the output amount
- std::vector<CRecipient> recipients;
+ // Calculate the old output amount.
CAmount output_value = 0;
- for (const auto& output : wtx.tx->vout) {
+ for (const auto& old_output : wtx.tx->vout) {
+ output_value += old_output.nValue;
+ }
+
+ old_fee = input_value - output_value;
+
+ // Fill in recipients (and preserve a single change key if there
+ // is one). If outputs vector is non-empty, replace original
+ // outputs with its contents, otherwise use original outputs.
+ std::vector<CRecipient> recipients;
+ for (const auto& output : outputs.empty() ? wtx.tx->vout : outputs) {
if (!OutputIsChange(wallet, output)) {
CRecipient recipient = {output.scriptPubKey, output.nValue, false};
recipients.push_back(recipient);
@@ -235,11 +243,8 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
ExtractDestination(output.scriptPubKey, change_dest);
new_coin_control.destChange = change_dest;
}
- output_value += output.nValue;
}
- old_fee = input_value - output_value;
-
if (coin_control.m_feerate) {
// The user provided a feeRate argument.
// We calculate this here to avoid compiler warning on the cs_wallet lock
diff --git a/src/wallet/feebumper.h b/src/wallet/feebumper.h
index a96871b26f..53cf16e0f1 100644
--- a/src/wallet/feebumper.h
+++ b/src/wallet/feebumper.h
@@ -51,7 +51,8 @@ Result CreateRateBumpTransaction(CWallet& wallet,
CAmount& old_fee,
CAmount& new_fee,
CMutableTransaction& mtx,
- bool require_mine);
+ bool require_mine,
+ const std::vector<CTxOut>& outputs);
//! Sign the new transaction,
//! @return false if the tx couldn't be found or if it was
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 773f094274..5403e38950 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -122,9 +122,6 @@ bool WalletInit::ParameterInteraction() const
return InitError(Untranslated("-zapwallettxes has been removed. If you are attempting to remove a stuck transaction from your wallet, please use abandontransaction instead."));
}
- if (gArgs.GetBoolArg("-sysperms", false))
- return InitError(Untranslated("-sysperms is not allowed in combination with enabled wallet functionality"));
-
return true;
}
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index 68dd3da9b5..1a76e46c54 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -291,7 +291,8 @@ public:
CAmount& new_fee,
CMutableTransaction& mtx) override
{
- return feebumper::CreateRateBumpTransaction(*m_wallet.get(), txid, coin_control, errors, old_fee, new_fee, mtx, /* require_mine= */ true) == feebumper::Result::OK;
+ std::vector<CTxOut> outputs; // just an empty list of new recipients for now
+ return feebumper::CreateRateBumpTransaction(*m_wallet.get(), txid, coin_control, errors, old_fee, new_fee, mtx, /* require_mine= */ true, outputs) == feebumper::Result::OK;
}
bool signBumpTransaction(CMutableTransaction& mtx) override { return feebumper::SignTransaction(*m_wallet.get(), mtx); }
bool commitBumpTransaction(const uint256& txid,
diff --git a/src/wallet/rpc/addresses.cpp b/src/wallet/rpc/addresses.cpp
index 95e1ba4dd9..da63d45d11 100644
--- a/src/wallet/rpc/addresses.cpp
+++ b/src/wallet/rpc/addresses.cpp
@@ -226,7 +226,7 @@ RPCHelpMan addmultisigaddress()
{"key", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "bitcoin address or hex-encoded public key"},
},
},
- {"label", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "A label to assign the addresses to."},
+ {"label", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A label to assign the addresses to."},
{"address_type", RPCArg::Type::STR, RPCArg::DefaultHint{"set by -addresstype"}, "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
@@ -696,7 +696,7 @@ RPCHelpMan listlabels()
return RPCHelpMan{"listlabels",
"\nReturns the list of all labels, or labels that are assigned to addresses with a specific purpose.\n",
{
- {"purpose", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Address purpose to list labels for ('send','receive'). An empty string is the same as not providing this argument."},
+ {"purpose", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Address purpose to list labels for ('send','receive'). An empty string is the same as not providing this argument."},
},
RPCResult{
RPCResult::Type::ARR, "", "",
diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp
index ab46706084..744537cfbd 100644
--- a/src/wallet/rpc/backup.cpp
+++ b/src/wallet/rpc/backup.cpp
@@ -6,6 +6,7 @@
#include <clientversion.h>
#include <core_io.h>
#include <fs.h>
+#include <hash.h>
#include <interfaces/chain.h>
#include <key_io.h>
#include <merkleblock.h>
@@ -14,8 +15,8 @@
#include <script/script.h>
#include <script/standard.h>
#include <sync.h>
+#include <uint256.h>
#include <util/bip32.h>
-#include <util/system.h>
#include <util/time.h>
#include <util/translation.h>
#include <wallet/rpc/util.h>
@@ -334,7 +335,7 @@ RPCHelpMan importprunedfunds()
}
uint256 hashTx = tx.GetHash();
- CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION);
+ DataStream ssMB{ParseHexV(request.params[1], "proof")};
CMerkleBlock merkleBlock;
ssMB >> merkleBlock;
@@ -886,9 +887,7 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d
}
case TxoutType::WITNESS_V0_SCRIPTHASH: {
if (script_ctx == ScriptContext::WITNESS_V0) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Trying to nest P2WSH inside another P2WSH");
- uint256 fullid(solverdata[0]);
- CScriptID id;
- CRIPEMD160().Write(fullid.begin(), fullid.size()).Finalize(id.begin());
+ CScriptID id{RIPEMD160(solverdata[0])};
auto subscript = std::move(import_data.witnessscript); // Remove redeemscript from import_data to check for superfluous script later.
if (!subscript) return "missing witnessscript";
if (CScriptID(*subscript) != id) return "witnessScript does not match the scriptPubKey or redeemScript";
@@ -1292,7 +1291,7 @@ RPCHelpMan importmulti()
},
},
RPCArgOptions{.oneline_description="\"requests\""}},
- {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
{"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Scan the chain and mempool for wallet transactions after all imports."},
},
@@ -1478,7 +1477,7 @@ static UniValue ProcessDescriptorImport(CWallet& wallet, const UniValue& data, c
} else {
warnings.push_back("Range not given, using default keypool range");
range_start = 0;
- range_end = gArgs.GetIntArg("-keypool", DEFAULT_KEYPOOL_SIZE);
+ range_end = wallet.m_keypool_size;
}
next_index = range_start;
@@ -1651,10 +1650,14 @@ RPCHelpMan importdescriptors()
}
WalletRescanReserver reserver(*pwallet);
- if (!reserver.reserve()) {
+ if (!reserver.reserve(/*with_passphrase=*/true)) {
throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait.");
}
+ // Ensure that the wallet is not locked for the remainder of this RPC, as
+ // the passphrase is used to top up the keypool.
+ LOCK(pwallet->m_relock_mutex);
+
const UniValue& requests = main_request.params[0];
const int64_t minimum_timestamp = 1;
int64_t now = 0;
@@ -1891,7 +1894,7 @@ RPCHelpMan restorewallet()
{
{"wallet_name", RPCArg::Type::STR, RPCArg::Optional::NO, "The name that will be applied to the restored wallet"},
{"backup_file", RPCArg::Type::STR, RPCArg::Optional::NO, "The backup file that will be used to restore the wallet."},
- {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED_NAMED_ARG, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
+ {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
diff --git a/src/wallet/rpc/coins.cpp b/src/wallet/rpc/coins.cpp
index 82642194c2..4c386789f1 100644
--- a/src/wallet/rpc/coins.cpp
+++ b/src/wallet/rpc/coins.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <core_io.h>
+#include <hash.h>
#include <key_io.h>
#include <rpc/util.h>
#include <util/moneystr.h>
@@ -165,7 +166,7 @@ RPCHelpMan getbalance()
"The available balance is what the wallet considers currently spendable, and is\n"
"thus affected by options which limit spendability such as -spendzeroconfchange.\n",
{
- {"dummy", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Remains for backward compatibility. Must be excluded or set to \"*\"."},
+ {"dummy", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Remains for backward compatibility. Must be excluded or set to \"*\"."},
{"minconf", RPCArg::Type::NUM, RPCArg::Default{0}, "Only include transactions confirmed at least this many times."},
{"include_watchonly", RPCArg::Type::BOOL, RPCArg::DefaultHint{"true for watch-only wallets, otherwise false"}, "Also include balance in watch-only addresses (see 'importaddress')"},
{"avoid_reuse", RPCArg::Type::BOOL, RPCArg::Default{true}, "(only available if avoid_reuse wallet flag is set) Do not include balance in dirty outputs; addresses are considered dirty if they have previously been used in a transaction."},
@@ -509,7 +510,7 @@ RPCHelpMan listunspent()
},
{"include_unsafe", RPCArg::Type::BOOL, RPCArg::Default{true}, "Include outputs that are not safe to spend\n"
"See description of \"safe\" attribute below."},
- {"query_options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "JSON with query options",
+ {"query_options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "JSON with query options",
{
{"minimumAmount", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(0)}, "Minimum value of each UTXO in " + CURRENCY_UNIT + ""},
{"maximumAmount", RPCArg::Type::AMOUNT, RPCArg::DefaultHint{"unlimited"}, "Maximum value of each UTXO in " + CURRENCY_UNIT + ""},
@@ -679,8 +680,7 @@ RPCHelpMan listunspent()
CHECK_NONFATAL(extracted);
// Also return the witness script
const WitnessV0ScriptHash& whash = std::get<WitnessV0ScriptHash>(witness_destination);
- CScriptID id;
- CRIPEMD160().Write(whash.begin(), whash.size()).Finalize(id.begin());
+ CScriptID id{RIPEMD160(whash)};
CScript witnessScript;
if (provider->GetCScript(id, witnessScript)) {
entry.pushKV("witnessScript", HexStr(witnessScript));
@@ -689,8 +689,7 @@ RPCHelpMan listunspent()
}
} else if (scriptPubKey.IsPayToWitnessScriptHash()) {
const WitnessV0ScriptHash& whash = std::get<WitnessV0ScriptHash>(address);
- CScriptID id;
- CRIPEMD160().Write(whash.begin(), whash.size()).Finalize(id.begin());
+ CScriptID id{RIPEMD160(whash)};
CScript witnessScript;
if (provider->GetCScript(id, witnessScript)) {
entry.pushKV("witnessScript", HexStr(witnessScript));
diff --git a/src/wallet/rpc/encrypt.cpp b/src/wallet/rpc/encrypt.cpp
index fcf25e01d6..0226d15698 100644
--- a/src/wallet/rpc/encrypt.cpp
+++ b/src/wallet/rpc/encrypt.cpp
@@ -49,9 +49,7 @@ RPCHelpMan walletpassphrase()
// Note that the walletpassphrase is stored in request.params[0] which is not mlock()ed
SecureString strWalletPass;
strWalletPass.reserve(100);
- // TODO: get rid of this .c_str() by implementing SecureString::operator=(std::string)
- // Alternately, find a way to make request.params[0] mlock()'d to begin with.
- strWalletPass = request.params[0].get_str().c_str();
+ strWalletPass = std::string_view{request.params[0].get_str()};
// Get the timeout
nSleepTime = request.params[1].getInt<int64_t>();
@@ -70,7 +68,17 @@ RPCHelpMan walletpassphrase()
}
if (!pwallet->Unlock(strWalletPass)) {
- throw JSONRPCError(RPC_WALLET_PASSPHRASE_INCORRECT, "Error: The wallet passphrase entered was incorrect.");
+ // Check if the passphrase has a null character (see #27067 for details)
+ if (strWalletPass.find('\0') == std::string::npos) {
+ throw JSONRPCError(RPC_WALLET_PASSPHRASE_INCORRECT, "Error: The wallet passphrase entered was incorrect.");
+ } else {
+ throw JSONRPCError(RPC_WALLET_PASSPHRASE_INCORRECT, "Error: The wallet passphrase entered is incorrect. "
+ "It contains a null character (ie - a zero byte). "
+ "If the passphrase was set with a version of this software prior to 25.0, "
+ "please try again with only the characters up to — but not including — "
+ "the first null character. If this is successful, please set a new "
+ "passphrase to avoid this issue in the future.");
+ }
}
pwallet->TopUpKeyPool();
@@ -90,7 +98,7 @@ RPCHelpMan walletpassphrase()
std::weak_ptr<CWallet> weak_wallet = wallet;
pwallet->chain().rpcRunLater(strprintf("lockwallet(%s)", pwallet->GetName()), [weak_wallet, relock_time] {
if (auto shared_wallet = weak_wallet.lock()) {
- LOCK(shared_wallet->cs_wallet);
+ LOCK2(shared_wallet->m_relock_mutex, shared_wallet->cs_wallet);
// Skip if this is not the most recent rpcRunLater callback.
if (shared_wallet->nRelockTime != relock_time) return;
shared_wallet->Lock();
@@ -122,28 +130,39 @@ RPCHelpMan walletpassphrasechange()
std::shared_ptr<CWallet> const pwallet = GetWalletForJSONRPCRequest(request);
if (!pwallet) return UniValue::VNULL;
- LOCK(pwallet->cs_wallet);
-
if (!pwallet->IsCrypted()) {
throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE, "Error: running with an unencrypted wallet, but walletpassphrasechange was called.");
}
- // TODO: get rid of these .c_str() calls by implementing SecureString::operator=(std::string)
- // Alternately, find a way to make request.params[0] mlock()'d to begin with.
+ if (pwallet->IsScanningWithPassphrase()) {
+ throw JSONRPCError(RPC_WALLET_ERROR, "Error: the wallet is currently being used to rescan the blockchain for related transactions. Please call `abortrescan` before changing the passphrase.");
+ }
+
+ LOCK2(pwallet->m_relock_mutex, pwallet->cs_wallet);
+
SecureString strOldWalletPass;
strOldWalletPass.reserve(100);
- strOldWalletPass = request.params[0].get_str().c_str();
+ strOldWalletPass = std::string_view{request.params[0].get_str()};
SecureString strNewWalletPass;
strNewWalletPass.reserve(100);
- strNewWalletPass = request.params[1].get_str().c_str();
+ strNewWalletPass = std::string_view{request.params[1].get_str()};
if (strOldWalletPass.empty() || strNewWalletPass.empty()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "passphrase cannot be empty");
}
if (!pwallet->ChangeWalletPassphrase(strOldWalletPass, strNewWalletPass)) {
- throw JSONRPCError(RPC_WALLET_PASSPHRASE_INCORRECT, "Error: The wallet passphrase entered was incorrect.");
+ // Check if the old passphrase had a null character (see #27067 for details)
+ if (strOldWalletPass.find('\0') == std::string::npos) {
+ throw JSONRPCError(RPC_WALLET_PASSPHRASE_INCORRECT, "Error: The wallet passphrase entered was incorrect.");
+ } else {
+ throw JSONRPCError(RPC_WALLET_PASSPHRASE_INCORRECT, "Error: The old wallet passphrase entered is incorrect. "
+ "It contains a null character (ie - a zero byte). "
+ "If the old passphrase was set with a version of this software prior to 25.0, "
+ "please try again with only the characters up to — but not including — "
+ "the first null character.");
+ }
}
return UniValue::VNULL;
@@ -175,12 +194,16 @@ RPCHelpMan walletlock()
std::shared_ptr<CWallet> const pwallet = GetWalletForJSONRPCRequest(request);
if (!pwallet) return UniValue::VNULL;
- LOCK(pwallet->cs_wallet);
-
if (!pwallet->IsCrypted()) {
throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE, "Error: running with an unencrypted wallet, but walletlock was called.");
}
+ if (pwallet->IsScanningWithPassphrase()) {
+ throw JSONRPCError(RPC_WALLET_ERROR, "Error: the wallet is currently being used to rescan the blockchain for related transactions. Please call `abortrescan` before locking the wallet.");
+ }
+
+ LOCK2(pwallet->m_relock_mutex, pwallet->cs_wallet);
+
pwallet->Lock();
pwallet->nRelockTime = 0;
@@ -219,8 +242,6 @@ RPCHelpMan encryptwallet()
std::shared_ptr<CWallet> const pwallet = GetWalletForJSONRPCRequest(request);
if (!pwallet) return UniValue::VNULL;
- LOCK(pwallet->cs_wallet);
-
if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
throw JSONRPCError(RPC_WALLET_ENCRYPTION_FAILED, "Error: wallet does not contain private keys, nothing to encrypt.");
}
@@ -229,11 +250,15 @@ RPCHelpMan encryptwallet()
throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE, "Error: running with an encrypted wallet, but encryptwallet was called.");
}
- // TODO: get rid of this .c_str() by implementing SecureString::operator=(std::string)
- // Alternately, find a way to make request.params[0] mlock()'d to begin with.
+ if (pwallet->IsScanningWithPassphrase()) {
+ throw JSONRPCError(RPC_WALLET_ERROR, "Error: the wallet is currently being used to rescan the blockchain for related transactions. Please call `abortrescan` before encrypting the wallet.");
+ }
+
+ LOCK2(pwallet->m_relock_mutex, pwallet->cs_wallet);
+
SecureString strWalletPass;
strWalletPass.reserve(100);
- strWalletPass = request.params[0].get_str().c_str();
+ strWalletPass = std::string_view{request.params[0].get_str()};
if (strWalletPass.empty()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "passphrase cannot be empty");
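// Illustrative, self-contained sketch of why the assignments above switched
// from request.params[0].get_str().c_str() to std::string_view, not part of
// the patch: assigning from c_str() stops at the first embedded null byte and
// silently truncates such a passphrase, while string_view preserves the full
// length (see the #27067 note above). std::string stands in for SecureString.
#include <cassert>
#include <string>
#include <string_view>

int main()
{
    const std::string input{"pass\0phrase", 11}; // passphrase containing an embedded NUL

    std::string via_cstr;
    via_cstr = input.c_str();              // truncated to "pass" (4 bytes)
    std::string via_view;
    via_view = std::string_view{input};    // all 11 bytes preserved

    assert(via_cstr.size() == 4);
    assert(via_view.size() == 11);
    return 0;
}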
diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp
index f7fd6e479f..88ee6e96b0 100644
--- a/src/wallet/rpc/spend.cpp
+++ b/src/wallet/rpc/spend.cpp
@@ -217,9 +217,9 @@ RPCHelpMan sendtoaddress()
{
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to send to."},
{"amount", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "The amount in " + CURRENCY_UNIT + " to send. eg 0.1"},
- {"comment", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "A comment used to store what the transaction is for.\n"
+ {"comment", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A comment used to store what the transaction is for.\n"
"This is not part of the transaction, just kept in your wallet."},
- {"comment_to", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "A comment to store the name of the person or organization\n"
+ {"comment_to", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A comment to store the name of the person or organization\n"
"to which you're sending the transaction. This is not part of the \n"
"transaction, just kept in your wallet."},
{"subtractfeefromamount", RPCArg::Type::BOOL, RPCArg::Default{false}, "The fee will be deducted from the amount being sent.\n"
@@ -326,9 +326,9 @@ RPCHelpMan sendmany()
{"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "The bitcoin address is the key, the numeric amount (can be string) in " + CURRENCY_UNIT + " is the value"},
},
},
- {"minconf", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "Ignored dummy value"},
- {"comment", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "A comment"},
- {"subtractfeefrom", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The addresses.\n"
+ {"minconf", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "Ignored dummy value"},
+ {"comment", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A comment"},
+ {"subtractfeefrom", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "The addresses.\n"
"The fee will be equally deducted from the amount of each selected address.\n"
"Those recipients will receive less bitcoins than you enter in their corresponding amount field.\n"
"If no addresses are specified here, the sender pays the fee.",
@@ -462,7 +462,7 @@ static std::vector<RPCArg> FundTxDoc(bool solving_data = true)
},
};
if (solving_data) {
- args.push_back({"solving_data", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "Keys and scripts needed for producing a final transaction with a dummy signature.\n"
+ args.push_back({"solving_data", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "Keys and scripts needed for producing a final transaction with a dummy signature.\n"
"Used for fee estimation during coin selection.",
{
{
@@ -758,7 +758,7 @@ RPCHelpMan fundrawtransaction()
"Only pay-to-pubkey, multisig, and P2SH versions thereof are currently supported for watch-only\n",
{
{"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex string of the raw transaction"},
- {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "for backward compatibility: passing in a true instead of an object will result in {\"includeWatching\":true}",
+ {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "for backward compatibility: passing in a true instead of an object will result in {\"includeWatching\":true}",
Cat<std::vector<RPCArg>>(
{
{"add_inputs", RPCArg::Type::BOOL, RPCArg::Default{true}, "For a transaction with existing inputs, automatically include more if they are not enough."},
@@ -784,7 +784,7 @@ RPCHelpMan fundrawtransaction()
{"vout_index", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The zero-based output index, before a change output is added."},
},
},
- {"input_weights", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "Inputs and their corresponding weights",
+ {"input_weights", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "Inputs and their corresponding weights",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -870,7 +870,7 @@ RPCHelpMan signrawtransactionwithwallet()
HELP_REQUIRING_PASSPHRASE,
{
{"hexstring", RPCArg::Type::STR, RPCArg::Optional::NO, "The transaction hex string"},
- {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The previous dependent transaction outputs",
+ {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "The previous dependent transaction outputs",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -956,6 +956,26 @@ RPCHelpMan signrawtransactionwithwallet()
};
}
+// Definition of allowed formats of specifying transaction outputs in
+// `bumpfee`, `psbtbumpfee`, `send` and `walletcreatefundedpsbt` RPCs.
+static std::vector<RPCArg> OutputsDoc()
+{
+ return
+ {
+ {"", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED, "",
+ {
+ {"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "A key-value pair. The key (string) is the bitcoin address,\n"
+ "the value (float or string) is the amount in " + CURRENCY_UNIT + ""},
+ },
+ },
+ {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
+ {
+ {"data", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A key-value pair. The key must be \"data\", the value is hex-encoded data"},
+ },
+ },
+ };
+}
+
static RPCHelpMan bumpfee_helper(std::string method_name)
{
const bool want_psbt = method_name == "psbtbumpfee";
@@ -977,7 +997,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
"* WARNING: before version 0.21, fee_rate was in " + CURRENCY_UNIT + "/kvB. As of 0.21, fee_rate is in " + CURRENCY_ATOM + "/vB. *\n",
{
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The txid to be bumped"},
- {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
{"conf_target", RPCArg::Type::NUM, RPCArg::DefaultHint{"wallet -txconfirmtarget"}, "Confirmation target in blocks\n"},
{"fee_rate", RPCArg::Type::AMOUNT, RPCArg::DefaultHint{"not set, fall back to wallet fee estimation"},
@@ -992,7 +1012,12 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
"still be replaceable in practice, for example if it has unconfirmed ancestors which\n"
"are replaceable).\n"},
{"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, "The fee estimate mode, must be one of (case insensitive):\n"
- "\"" + FeeModes("\"\n\"") + "\""},
+ "\"" + FeeModes("\"\n\"") + "\""},
+ {"outputs", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "New outputs (key-value pairs) which will replace\n"
+ "the original ones, if provided. Each address can only appear once and there can\n"
+ "only be one \"data\" object.\n",
+ OutputsDoc(),
+ RPCArgOptions{.skip_type_check = true}},
},
RPCArgOptions{.oneline_description="options"}},
},
@@ -1029,6 +1054,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
coin_control.fAllowWatchOnly = pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
// optional parameters
coin_control.m_signal_bip125_rbf = true;
+ std::vector<CTxOut> outputs;
if (!request.params[1].isNull()) {
UniValue options = request.params[1];
@@ -1039,6 +1065,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
{"fee_rate", UniValueType()}, // will be checked by AmountFromValue() in SetFeeEstimateMode()
{"replaceable", UniValueType(UniValue::VBOOL)},
{"estimate_mode", UniValueType(UniValue::VSTR)},
+ {"outputs", UniValueType()}, // will be checked by AddOutputs()
},
true, true);
@@ -1052,6 +1079,16 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
coin_control.m_signal_bip125_rbf = options["replaceable"].get_bool();
}
SetFeeEstimateMode(*pwallet, coin_control, conf_target, options["estimate_mode"], options["fee_rate"], /*override_min_fee=*/false);
+
+ // Prepare new outputs by creating a temporary tx and calling AddOutputs().
+ if (!options["outputs"].isNull()) {
+ if (options["outputs"].isArray() && options["outputs"].empty()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, output argument cannot be an empty array");
+ }
+ CMutableTransaction tempTx;
+ AddOutputs(tempTx, options["outputs"]);
+ outputs = tempTx.vout;
+ }
}
// Make sure the results are valid at least up to the most recent block
@@ -1069,7 +1106,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
CMutableTransaction mtx;
feebumper::Result res;
// Targeting feerate bump.
- res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx, /*require_mine=*/ !want_psbt);
+ res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx, /*require_mine=*/ !want_psbt, outputs);
if (res != feebumper::Result::OK) {
switch(res) {
case feebumper::Result::INVALID_ADDRESS_OR_KEY:
@@ -1144,24 +1181,13 @@ RPCHelpMan send()
{"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The outputs (key-value pairs), where none of the keys are duplicated.\n"
"That is, each address can only appear once and there can only be one 'data' object.\n"
"For convenience, a dictionary, which holds the key-value pairs directly, is also accepted.",
- {
- {"", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED, "",
- {
- {"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "A key-value pair. The key (string) is the bitcoin address, the value (float or string) is the amount in " + CURRENCY_UNIT + ""},
- },
- },
- {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
- {
- {"data", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A key-value pair. The key must be \"data\", the value is hex-encoded data"},
- },
- },
- },
+ OutputsDoc(),
RPCArgOptions{.skip_type_check = true}},
{"conf_target", RPCArg::Type::NUM, RPCArg::DefaultHint{"wallet -txconfirmtarget"}, "Confirmation target in blocks"},
{"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, "The fee estimate mode, must be one of (case insensitive):\n"
"\"" + FeeModes("\"\n\"") + "\""},
{"fee_rate", RPCArg::Type::AMOUNT, RPCArg::DefaultHint{"not set, fall back to wallet fee estimation"}, "Specify a fee rate in " + CURRENCY_ATOM + "/vB."},
- {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
Cat<std::vector<RPCArg>>(
{
{"add_inputs", RPCArg::Type::BOOL, RPCArg::DefaultHint{"false when \"inputs\" are specified, true otherwise"},"Automatically include coins from the wallet to cover the target amount.\n"},
@@ -1276,7 +1302,7 @@ RPCHelpMan sendall()
"\"" + FeeModes("\"\n\"") + "\""},
{"fee_rate", RPCArg::Type::AMOUNT, RPCArg::DefaultHint{"not set, fall back to wallet fee estimation"}, "Specify a fee rate in " + CURRENCY_ATOM + "/vB."},
{
- "options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ "options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
Cat<std::vector<RPCArg>>(
{
{"add_to_wallet", RPCArg::Type::BOOL, RPCArg::Default{true}, "When false, returns the serialized transaction without broadcasting or adding it to the wallet"},
@@ -1586,7 +1612,7 @@ RPCHelpMan walletcreatefundedpsbt()
"All existing inputs must either have their previous output transaction be in the wallet\n"
"or be in the UTXO set. Solving data must be provided for non-wallet inputs.\n",
{
- {"inputs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "Leave empty to add inputs automatically. See add_inputs option.",
+ {"inputs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "Leave empty to add inputs automatically. See add_inputs option.",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -1606,21 +1632,10 @@ RPCHelpMan walletcreatefundedpsbt()
"That is, each address can only appear once and there can only be one 'data' object.\n"
"For compatibility reasons, a dictionary, which holds the key-value pairs directly, is also\n"
"accepted as second parameter.",
- {
- {"", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED, "",
- {
- {"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "A key-value pair. The key (string) is the bitcoin address, the value (float or string) is the amount in " + CURRENCY_UNIT + ""},
- },
- },
- {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
- {
- {"data", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A key-value pair. The key must be \"data\", the value is hex-encoded data"},
- },
- },
- },
- RPCArgOptions{.skip_type_check = true}},
+ OutputsDoc(),
+ RPCArgOptions{.skip_type_check = true}},
{"locktime", RPCArg::Type::NUM, RPCArg::Default{0}, "Raw locktime. Non-0 value also locktime-activates inputs"},
- {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
Cat<std::vector<RPCArg>>(
{
{"add_inputs", RPCArg::Type::BOOL, RPCArg::DefaultHint{"false when \"inputs\" are specified, true otherwise"}, "Automatically include coins from the wallet to cover the target amount.\n"},
diff --git a/src/wallet/rpc/transactions.cpp b/src/wallet/rpc/transactions.cpp
index f571f8bcb2..3bfe296d90 100644
--- a/src/wallet/rpc/transactions.cpp
+++ b/src/wallet/rpc/transactions.cpp
@@ -206,7 +206,7 @@ RPCHelpMan listreceivedbyaddress()
{"minconf", RPCArg::Type::NUM, RPCArg::Default{1}, "The minimum number of confirmations before payments are included."},
{"include_empty", RPCArg::Type::BOOL, RPCArg::Default{false}, "Whether to include addresses that haven't received any payments."},
{"include_watchonly", RPCArg::Type::BOOL, RPCArg::DefaultHint{"true for watch-only wallets, otherwise false"}, "Whether to include watch-only addresses (see 'importaddress')"},
- {"address_filter", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "If present and non-empty, only return information on this address."},
+ {"address_filter", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "If present and non-empty, only return information on this address."},
{"include_immature_coinbase", RPCArg::Type::BOOL, RPCArg::Default{false}, "Include immature coinbase transactions."},
},
RPCResult{
@@ -397,7 +397,7 @@ static void ListTransactions(const CWallet& wallet, const CWalletTx& wtx, int nM
}
-static const std::vector<RPCResult> TransactionDescriptionString()
+static std::vector<RPCResult> TransactionDescriptionString()
{
return{{RPCResult::Type::NUM, "confirmations", "The number of confirmations for the transaction. Negative confirmations means the\n"
"transaction conflicted that many blocks ago."},
@@ -434,7 +434,7 @@ RPCHelpMan listtransactions()
"\nIf a label name is provided, this will return only incoming transactions paying to addresses with the specified label.\n"
"\nReturns up to 'count' most recent transactions skipping the first 'from' transactions.\n",
{
- {"label|dummy", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "If set, should be a valid label name to return only incoming transactions\n"
+ {"label|dummy", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "If set, should be a valid label name to return only incoming transactions\n"
"with the specified label, or \"*\" to disable filtering and return all transactions."},
{"count", RPCArg::Type::NUM, RPCArg::Default{10}, "The number of transactions to return"},
{"skip", RPCArg::Type::NUM, RPCArg::Default{0}, "The number of transactions to skip"},
@@ -545,13 +545,13 @@ RPCHelpMan listsinceblock()
"If \"blockhash\" is no longer a part of the main chain, transactions from the fork point onward are included.\n"
"Additionally, if include_removed is set, transactions affecting the wallet which were removed are returned in the \"removed\" array.\n",
{
- {"blockhash", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "If set, the block hash to list transactions since, otherwise list all transactions."},
+ {"blockhash", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "If set, the block hash to list transactions since, otherwise list all transactions."},
{"target_confirmations", RPCArg::Type::NUM, RPCArg::Default{1}, "Return the nth block hash from the main chain. e.g. 1 would mean the best block hash. Note: this is not used as a filter, but only affects [lastblock] in the return value"},
{"include_watchonly", RPCArg::Type::BOOL, RPCArg::DefaultHint{"true for watch-only wallets, otherwise false"}, "Include transactions to watch-only addresses (see 'importaddress')"},
{"include_removed", RPCArg::Type::BOOL, RPCArg::Default{true}, "Show transactions that were removed due to a reorg in the \"removed\" array\n"
"(not guaranteed to work on pruned nodes)"},
{"include_change", RPCArg::Type::BOOL, RPCArg::Default{false}, "Also add entries for change outputs.\n"},
- {"label", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Return only incoming transactions paying to addresses with the specified label.\n"},
+ {"label", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Return only incoming transactions paying to addresses with the specified label.\n"},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -848,7 +848,7 @@ RPCHelpMan rescanblockchain()
"and block filters are available (using startup option \"-blockfilterindex=1\").\n",
{
{"start_height", RPCArg::Type::NUM, RPCArg::Default{0}, "block height where the rescan should start"},
- {"stop_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "the last block height that should be scanned. If none is provided it will rescan up to the tip at return time of this call."},
+ {"stop_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "the last block height that should be scanned. If none is provided it will rescan up to the tip at return time of this call."},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -872,15 +872,18 @@ RPCHelpMan rescanblockchain()
wallet.BlockUntilSyncedToCurrentChain();
WalletRescanReserver reserver(*pwallet);
- if (!reserver.reserve()) {
+ if (!reserver.reserve(/*with_passphrase=*/true)) {
throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait.");
}
int start_height = 0;
std::optional<int> stop_height;
uint256 start_block;
+
+ LOCK(pwallet->m_relock_mutex);
{
LOCK(pwallet->cs_wallet);
+ EnsureWalletIsUnlocked(*pwallet);
int tip_height = pwallet->GetLastBlockHeight();
if (!request.params[0].isNull()) {
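The rescanblockchain changes above combine three pieces: reserving the rescan with with_passphrase=true, holding m_relock_mutex for the duration of the call, and failing early if the wallet is still locked. A minimal sketch of that locking pattern with the intent spelled out in comments; the stated purpose of m_relock_mutex is an assumption based on its name and usage here:

    WalletRescanReserver reserver(*pwallet);
    if (!reserver.reserve(/*with_passphrase=*/true)) {
        throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait.");
    }
    // Presumably keeps a timed walletpassphrase relock from firing while the
    // (potentially long) rescan is in flight; taken before cs_wallet.
    LOCK(pwallet->m_relock_mutex);
    {
        LOCK(pwallet->cs_wallet);
        EnsureWalletIsUnlocked(*pwallet); // fail fast if the wallet has no usable unlock
        // ... resolve start_block / stop_height under cs_wallet ...
    }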
diff --git a/src/wallet/rpc/util.cpp b/src/wallet/rpc/util.cpp
index 31435a69ba..4d82e0a41f 100644
--- a/src/wallet/rpc/util.cpp
+++ b/src/wallet/rpc/util.cpp
@@ -6,6 +6,7 @@
#include <common/url.h>
#include <rpc/util.h>
+#include <util/system.h>
#include <util/translation.h>
#include <wallet/context.h>
#include <wallet/wallet.h>
diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp
index 63be95fdd3..16595267b4 100644
--- a/src/wallet/rpc/wallet.cpp
+++ b/src/wallet/rpc/wallet.cpp
@@ -201,7 +201,7 @@ static RPCHelpMan loadwallet()
"\napplied to the new wallet.\n",
{
{"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The wallet directory or .dat file."},
- {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED_NAMED_ARG, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
+ {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -323,12 +323,12 @@ static RPCHelpMan createwallet()
{"wallet_name", RPCArg::Type::STR, RPCArg::Optional::NO, "The name for the new wallet. If this is a path, the wallet will be created at the path location."},
{"disable_private_keys", RPCArg::Type::BOOL, RPCArg::Default{false}, "Disable the possibility of private keys (only watchonlys are possible in this mode)."},
{"blank", RPCArg::Type::BOOL, RPCArg::Default{false}, "Create a blank wallet. A blank wallet has no keys or HD seed. One can be set using sethdseed."},
- {"passphrase", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Encrypt the wallet with this passphrase."},
+ {"passphrase", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Encrypt the wallet with this passphrase."},
{"avoid_reuse", RPCArg::Type::BOOL, RPCArg::Default{false}, "Keep track of coin reuse, and treat dirty and clean coins differently with privacy considerations in mind."},
{"descriptors", RPCArg::Type::BOOL, RPCArg::Default{true}, "Create a native descriptor wallet. The wallet will use descriptors internally to handle address creation."
" Setting to \"false\" will create a legacy wallet; however, the legacy wallet type is being deprecated and"
" support for creating and opening legacy wallets will be removed in the future."},
- {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED_NAMED_ARG, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
+ {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
{"external_signer", RPCArg::Type::BOOL, RPCArg::Default{false}, "Use an external signer such as a hardware wallet. Requires -signer to be configured. Wallet creation will fail if keys cannot be fetched. Requires disable_private_keys and descriptors set to true."},
},
RPCResult{
@@ -359,7 +359,7 @@ static RPCHelpMan createwallet()
passphrase.reserve(100);
std::vector<bilingual_str> warnings;
if (!request.params[3].isNull()) {
- passphrase = request.params[3].get_str().c_str();
+ passphrase = std::string_view{request.params[3].get_str()};
if (passphrase.empty()) {
// Empty string means unencrypted
warnings.emplace_back(Untranslated("Empty string given as passphrase, wallet will not be encrypted."));
@@ -419,7 +419,7 @@ static RPCHelpMan unloadwallet()
"Specifying the wallet name on a wallet endpoint is invalid.",
{
{"wallet_name", RPCArg::Type::STR, RPCArg::DefaultHint{"the wallet name from the RPC endpoint"}, "The name of the wallet to unload. If provided both here and in the RPC endpoint, the two must be identical."},
- {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED_NAMED_ARG, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
+ {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
},
RPCResult{RPCResult::Type::OBJ, "", "", {
{RPCResult::Type::STR, "warning", "Warning message if wallet was not unloaded cleanly."},
@@ -608,12 +608,12 @@ RPCHelpMan simulaterawtransaction()
return RPCHelpMan{"simulaterawtransaction",
"\nCalculate the balance change resulting in the signing and broadcasting of the given transaction(s).\n",
{
- {"rawtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "An array of hex strings of raw transactions.\n",
+ {"rawtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED, "An array of hex strings of raw transactions.\n",
{
{"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""},
},
},
- {"options", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED_NAMED_ARG, "Options",
+ {"options", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED, "Options",
{
{"include_watchonly", RPCArg::Type::BOOL, RPCArg::DefaultHint{"true for watch-only wallets, otherwise false"}, "Whether to include watch-only addresses (see RPC importaddress)"},
},
@@ -720,9 +720,12 @@ static RPCHelpMan migratewallet()
"A new wallet backup will need to be made.\n"
"\nThe migration process will create a backup of the wallet before migrating. This backup\n"
"file will be named <wallet name>-<timestamp>.legacy.bak and can be found in the directory\n"
- "for this wallet. In the event of an incorrect migration, the backup can be restored using restorewallet." +
- HELP_REQUIRING_PASSPHRASE,
- {},
+ "for this wallet. In the event of an incorrect migration, the backup can be restored using restorewallet."
+ "\nEncrypted wallets must have the passphrase provided as an argument to this call.",
+ {
+ {"wallet_name", RPCArg::Type::STR, RPCArg::DefaultHint{"the wallet name from the RPC endpoint"}, "The name of the wallet to migrate. If provided both here and in the RPC endpoint, the two must be identical."},
+ {"passphrase", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "The wallet passphrase"},
+ },
RPCResult{
RPCResult::Type::OBJ, "", "",
{
@@ -738,16 +741,26 @@ static RPCHelpMan migratewallet()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- std::shared_ptr<CWallet> wallet = GetWalletForJSONRPCRequest(request);
- if (!wallet) return NullUniValue;
+ std::string wallet_name;
+ if (GetWalletNameFromJSONRPCRequest(request, wallet_name)) {
+ if (!(request.params[0].isNull() || request.params[0].get_str() == wallet_name)) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "RPC endpoint wallet and wallet_name parameter specify different wallets");
+ }
+ } else {
+ if (request.params[0].isNull()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Either RPC endpoint wallet or wallet_name parameter must be provided");
+ }
+ wallet_name = request.params[0].get_str();
+ }
- if (wallet->IsCrypted()) {
- throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE, "Error: migratewallet on encrypted wallets is currently unsupported.");
+ SecureString wallet_pass;
+ wallet_pass.reserve(100);
+ if (!request.params[1].isNull()) {
+ wallet_pass = std::string_view{request.params[1].get_str()};
}
WalletContext& context = EnsureWalletContext(request.context);
-
- util::Result<MigrationResult> res = MigrateLegacyToDescriptor(std::move(wallet), context);
+ util::Result<MigrationResult> res = MigrateLegacyToDescriptor(wallet_name, wallet_pass, context);
if (!res) {
throw JSONRPCError(RPC_WALLET_ERROR, util::ErrorString(res).original);
}
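migratewallet now resolves the target wallet from either the /wallet/<name> endpoint or the new wallet_name parameter, and accepts an optional passphrase for encrypted wallets. A sketch of that resolution contract factored into a hypothetical helper; ResolveWalletName() is not a function in the patch, and GetWalletNameFromJSONRPCRequest() is assumed to return true only for wallet-endpoint requests:

    // Hypothetical helper, shown only to spell out the parameter contract.
    std::string ResolveWalletName(const JSONRPCRequest& request)
    {
        std::string endpoint_name;
        if (GetWalletNameFromJSONRPCRequest(request, endpoint_name)) {
            if (!request.params[0].isNull() && request.params[0].get_str() != endpoint_name) {
                throw JSONRPCError(RPC_INVALID_PARAMETER, "RPC endpoint wallet and wallet_name parameter specify different wallets");
            }
            return endpoint_name;
        }
        if (request.params[0].isNull()) {
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Either RPC endpoint wallet or wallet_name parameter must be provided");
        }
        return request.params[0].get_str();
    }

The resolved name and the (possibly empty) SecureString passphrase are then handed to MigrateLegacyToDescriptor(), replacing the old path that required an already-loaded, unencrypted wallet object.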
diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp
index 9ba3c7fd2c..e2b4dbf4c2 100644
--- a/src/wallet/salvage.cpp
+++ b/src/wallet/salvage.cpp
@@ -135,11 +135,11 @@ bool RecoverDatabaseFile(const ArgsManager& args, const fs::path& file_path, bil
}
DbTxn* ptxn = env->TxnBegin();
- CWallet dummyWallet(nullptr, "", gArgs, CreateDummyWalletDatabase());
+ CWallet dummyWallet(nullptr, "", CreateDummyWalletDatabase());
for (KeyValPair& row : salvagedData)
{
/* Filter for only private key type KV pairs to be added to the salvaged wallet */
- CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{row.first};
CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION);
std::string strType, strErr;
bool fReadOK;
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index d8f34dd2b0..c109533d7a 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <hash.h>
#include <key_io.h>
#include <logging.h>
#include <outputtype.h>
@@ -10,7 +11,6 @@
#include <util/bip32.h>
#include <util/strencodings.h>
#include <util/string.h>
-#include <util/system.h>
#include <util/time.h>
#include <util/translation.h>
#include <wallet/scriptpubkeyman.h>
@@ -166,9 +166,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s
if (sigversion == IsMineSigVersion::TOP && !keystore.HaveCScript(CScriptID(CScript() << OP_0 << vSolutions[0]))) {
break;
}
- uint160 hash;
- CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(hash.begin());
- CScriptID scriptID = CScriptID(hash);
+ CScriptID scriptID{RIPEMD160(vSolutions[0])};
CScript subscript;
if (keystore.GetCScript(scriptID, subscript)) {
ret = std::max(ret, recurse_scripthash ? IsMineInner(keystore, subscript, IsMineSigVersion::WITNESS_V0) : IsMineResult::SPENDABLE);
@@ -1295,7 +1293,7 @@ bool LegacyScriptPubKeyMan::TopUpChain(CHDChain& chain, unsigned int kpSize)
if (kpSize > 0) {
nTargetSize = kpSize;
} else {
- nTargetSize = std::max(gArgs.GetIntArg("-keypool", DEFAULT_KEYPOOL_SIZE), int64_t{0});
+ nTargetSize = m_keypool_size;
}
int64_t target = std::max((int64_t) nTargetSize, int64_t{1});
@@ -1665,7 +1663,7 @@ std::set<CKeyID> LegacyScriptPubKeyMan::GetKeys() const
return set_address;
}
-const std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPubKeys() const
+std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPubKeys() const
{
LOCK(cs_KeyStore);
std::unordered_set<CScript, SaltedSipHasher> spks;
@@ -1785,7 +1783,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
WalletDescriptor w_desc(std::move(desc), creation_time, 0, 0, 0);
// Make the DescriptorScriptPubKeyMan and get the scriptPubKeys
- auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc));
+ auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size));
desc_spk_man->AddDescriptorKey(key, key.GetPubKey());
desc_spk_man->TopUp();
auto desc_spks = desc_spk_man->GetScriptPubKeys();
@@ -1830,7 +1828,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
WalletDescriptor w_desc(std::move(desc), 0, 0, chain_counter, 0);
// Make the DescriptorScriptPubKeyMan and get the scriptPubKeys
- auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc));
+ auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size));
desc_spk_man->AddDescriptorKey(master_key.key, master_key.key.GetPubKey());
desc_spk_man->TopUp();
auto desc_spks = desc_spk_man->GetScriptPubKeys();
@@ -1892,7 +1890,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
} else {
// Make the DescriptorScriptPubKeyMan and get the scriptPubKeys
WalletDescriptor w_desc(std::move(desc), creation_time, 0, 0, 0);
- auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc));
+ auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size));
for (const auto& keyid : privkeyids) {
CKey key;
if (!GetKey(keyid, key)) {
@@ -2123,7 +2121,7 @@ bool DescriptorScriptPubKeyMan::TopUp(unsigned int size)
if (size > 0) {
target_size = size;
} else {
- target_size = std::max(gArgs.GetIntArg("-keypool", DEFAULT_KEYPOOL_SIZE), int64_t{1});
+ target_size = m_keypool_size;
}
// Calculate the new range_end
@@ -2651,17 +2649,17 @@ void DescriptorScriptPubKeyMan::WriteDescriptor()
}
}
-const WalletDescriptor DescriptorScriptPubKeyMan::GetWalletDescriptor() const
+WalletDescriptor DescriptorScriptPubKeyMan::GetWalletDescriptor() const
{
return m_wallet_descriptor;
}
-const std::unordered_set<CScript, SaltedSipHasher> DescriptorScriptPubKeyMan::GetScriptPubKeys() const
+std::unordered_set<CScript, SaltedSipHasher> DescriptorScriptPubKeyMan::GetScriptPubKeys() const
{
return GetScriptPubKeys(0);
}
-const std::unordered_set<CScript, SaltedSipHasher> DescriptorScriptPubKeyMan::GetScriptPubKeys(int32_t minimum_index) const
+std::unordered_set<CScript, SaltedSipHasher> DescriptorScriptPubKeyMan::GetScriptPubKeys(int32_t minimum_index) const
{
LOCK(cs_desc_man);
std::unordered_set<CScript, SaltedSipHasher> script_pub_keys;
diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h
index d74388b3e8..4d14325241 100644
--- a/src/wallet/scriptpubkeyman.h
+++ b/src/wallet/scriptpubkeyman.h
@@ -36,7 +36,7 @@ class WalletStorage
{
public:
virtual ~WalletStorage() = default;
- virtual const std::string GetDisplayName() const = 0;
+ virtual std::string GetDisplayName() const = 0;
virtual WalletDatabase& GetDatabase() const = 0;
virtual bool IsWalletFlagSet(uint64_t) const = 0;
virtual void UnsetBlankWalletFlag(WalletBatch&) = 0;
@@ -243,7 +243,7 @@ public:
virtual uint256 GetID() const { return uint256(); }
/** Returns a set of all the scriptPubKeys that this ScriptPubKeyMan watches */
- virtual const std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const { return {}; };
+ virtual std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const { return {}; };
/** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */
template<typename... Params>
@@ -286,6 +286,9 @@ private:
int64_t nTimeFirstKey GUARDED_BY(cs_KeyStore) = 0;
+ //! Number of pre-generated keys/scripts (part of the look-ahead process, used to detect payments)
+ int64_t m_keypool_size GUARDED_BY(cs_KeyStore){DEFAULT_KEYPOOL_SIZE};
+
bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey);
bool AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
@@ -363,7 +366,7 @@ private:
bool TopUpChain(CHDChain& chain, unsigned int size);
public:
- using ScriptPubKeyMan::ScriptPubKeyMan;
+ LegacyScriptPubKeyMan(WalletStorage& storage, int64_t keypool_size) : ScriptPubKeyMan(storage), m_keypool_size(keypool_size) {}
util::Result<CTxDestination> GetNewDestination(const OutputType type) override;
isminetype IsMine(const CScript& script) const override;
@@ -512,7 +515,7 @@ public:
const std::map<CKeyID, int64_t>& GetAllReserveKeys() const { return m_pool_key_to_index; }
std::set<CKeyID> GetKeys() const override;
- const std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override;
+ std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override;
/** Get the DescriptorScriptPubKeyMans (with private keys) that have the same scriptPubKeys as this LegacyScriptPubKeyMan.
* Does not modify this ScriptPubKeyMan. */
@@ -555,6 +558,9 @@ private:
//! keeps track of whether Unlock has run a thorough check before
bool m_decryption_thoroughly_checked = false;
+ //! Number of pre-generated keys/scripts (part of the look-ahead process, used to detect payments)
+ int64_t m_keypool_size GUARDED_BY(cs_desc_man){DEFAULT_KEYPOOL_SIZE};
+
bool AddDescriptorKeyWithDB(WalletBatch& batch, const CKey& key, const CPubKey &pubkey) EXCLUSIVE_LOCKS_REQUIRED(cs_desc_man);
KeyMap GetKeys() const EXCLUSIVE_LOCKS_REQUIRED(cs_desc_man);
@@ -572,12 +578,14 @@ protected:
WalletDescriptor m_wallet_descriptor GUARDED_BY(cs_desc_man);
public:
- DescriptorScriptPubKeyMan(WalletStorage& storage, WalletDescriptor& descriptor)
+ DescriptorScriptPubKeyMan(WalletStorage& storage, WalletDescriptor& descriptor, int64_t keypool_size)
: ScriptPubKeyMan(storage),
+ m_keypool_size(keypool_size),
m_wallet_descriptor(descriptor)
{}
- DescriptorScriptPubKeyMan(WalletStorage& storage)
- : ScriptPubKeyMan(storage)
+ DescriptorScriptPubKeyMan(WalletStorage& storage, int64_t keypool_size)
+ : ScriptPubKeyMan(storage),
+ m_keypool_size(keypool_size)
{}
mutable RecursiveMutex cs_desc_man;
@@ -641,9 +649,9 @@ public:
void AddDescriptorKey(const CKey& key, const CPubKey &pubkey);
void WriteDescriptor();
- const WalletDescriptor GetWalletDescriptor() const EXCLUSIVE_LOCKS_REQUIRED(cs_desc_man);
- const std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override;
- const std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys(int32_t minimum_index) const;
+ WalletDescriptor GetWalletDescriptor() const EXCLUSIVE_LOCKS_REQUIRED(cs_desc_man);
+ std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override;
+ std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys(int32_t minimum_index) const;
int32_t GetEndRange() const;
bool GetDescriptorString(std::string& out, const bool priv) const;
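Both ScriptPubKeyMan flavours now carry their own m_keypool_size instead of re-reading -keypool from gArgs inside TopUp()/TopUpChain(). A minimal construction sketch, assuming the wallet resolves -keypool once and threads the value through; storage and w_desc are placeholders for an existing WalletStorage and WalletDescriptor:

    // Hypothetical call site; the names storage and w_desc are placeholders.
    const int64_t keypool_size{gArgs.GetIntArg("-keypool", DEFAULT_KEYPOOL_SIZE)};
    auto legacy_spkm = std::make_unique<LegacyScriptPubKeyMan>(storage, keypool_size);
    auto desc_spkm = std::make_unique<DescriptorScriptPubKeyMan>(storage, w_desc, keypool_size);
    desc_spkm->TopUp(); // now targets keypool_size rather than consulting gArgs again

The MigrateToDescriptor() hunks above follow the same pattern, passing m_keypool_size along when constructing the new DescriptorScriptPubKeyMan instances.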
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index e66ff8c97c..1a79b59d12 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -306,9 +306,7 @@ CoinsResult AvailableCoins(const CWallet& wallet,
std::unique_ptr<SigningProvider> provider = wallet.GetSolvingProvider(output.scriptPubKey);
int input_bytes = CalculateMaximumSignedInputSize(output, COutPoint(), provider.get(), coinControl);
- // Because CalculateMaximumSignedInputSize just uses ProduceSignature and makes a dummy signature,
- // it is safe to assume that this input is solvable if input_bytes is greater -1.
- bool solvable = input_bytes > -1;
+ bool solvable = provider ? InferDescriptor(output.scriptPubKey, *provider)->IsSolvable() : false;
bool spendable = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (((mine & ISMINE_WATCH_ONLY) != ISMINE_NO) && (coinControl && coinControl->fAllowWatchOnly && solvable));
// Filter by spendable outputs only
@@ -1092,6 +1090,13 @@ util::Result<CreatedTransactionResult> CreateTransaction(
TRACE1(coin_selection, attempting_aps_create_tx, wallet.GetName().c_str());
CCoinControl tmp_cc = coin_control;
tmp_cc.m_avoid_partial_spends = true;
+
+ // Re-use the change destination from the first creation attempt to avoid skipping BIP44 indexes
+ const int ungrouped_change_pos = txr_ungrouped.change_pos;
+ if (ungrouped_change_pos != -1) {
+ ExtractDestination(txr_ungrouped.tx->vout[ungrouped_change_pos].scriptPubKey, tmp_cc.destChange);
+ }
+
auto txr_grouped = CreateTransactionInternal(wallet, vecSend, change_pos, tmp_cc, sign);
// if fee of this alternative one is within the range of the max fee, we use this one
const bool use_aps{txr_grouped.has_value() ? (txr_grouped->fee <= txr_ungrouped.fee + wallet.m_max_aps_fee) : false};
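The avoid-partial-spends retry above now points its CCoinControl at the change script already produced by the first attempt, so the two CreateTransactionInternal() calls share one change destination instead of each drawing a fresh key. A short annotated sketch of that reuse, using the names from the hunk:

    // tmp_cc is the CCoinControl for the grouped (avoid-partial-spends) retry.
    const int ungrouped_change_pos = txr_ungrouped.change_pos;
    if (ungrouped_change_pos != -1) {
        // Reuse the change script from the first attempt so the retry does not
        // advance the keypool / descriptor index a second time.
        ExtractDestination(txr_ungrouped.tx->vout[ungrouped_change_pos].scriptPubKey, tmp_cc.destChange);
    }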
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index f2b9909851..4af49db609 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -125,7 +125,6 @@ void SQLiteBatch::SetupSQLStatements()
{&m_insert_stmt, "INSERT INTO main VALUES(?, ?)"},
{&m_overwrite_stmt, "INSERT or REPLACE into main values(?, ?)"},
{&m_delete_stmt, "DELETE FROM main WHERE key = ?"},
- {&m_cursor_stmt, "SELECT key, value FROM main"},
};
for (const auto& [stmt_prepared, stmt_text] : statements) {
@@ -374,7 +373,6 @@ void SQLiteBatch::Close()
{&m_insert_stmt, "insert"},
{&m_overwrite_stmt, "overwrite"},
{&m_delete_stmt, "delete"},
- {&m_cursor_stmt, "cursor"},
};
for (const auto& [stmt_prepared, stmt_description] : statements) {
@@ -387,7 +385,7 @@ void SQLiteBatch::Close()
}
}
-bool SQLiteBatch::ReadKey(CDataStream&& key, CDataStream& value)
+bool SQLiteBatch::ReadKey(DataStream&& key, DataStream& value)
{
if (!m_database.m_db) return false;
assert(m_read_stmt);
@@ -414,7 +412,7 @@ bool SQLiteBatch::ReadKey(CDataStream&& key, CDataStream& value)
return true;
}
-bool SQLiteBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite)
+bool SQLiteBatch::WriteKey(DataStream&& key, DataStream&& value, bool overwrite)
{
if (!m_database.m_db) return false;
assert(m_insert_stmt && m_overwrite_stmt);
@@ -441,7 +439,7 @@ bool SQLiteBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrit
return res == SQLITE_DONE;
}
-bool SQLiteBatch::EraseKey(CDataStream&& key)
+bool SQLiteBatch::EraseKey(DataStream&& key)
{
if (!m_database.m_db) return false;
assert(m_delete_stmt);
@@ -459,7 +457,7 @@ bool SQLiteBatch::EraseKey(CDataStream&& key)
return res == SQLITE_DONE;
}
-bool SQLiteBatch::HasKey(CDataStream&& key)
+bool SQLiteBatch::HasKey(DataStream&& key)
{
if (!m_database.m_db) return false;
assert(m_read_stmt);
@@ -472,28 +470,15 @@ bool SQLiteBatch::HasKey(CDataStream&& key)
return res == SQLITE_ROW;
}
-bool SQLiteBatch::StartCursor()
+DatabaseCursor::Status SQLiteCursor::Next(DataStream& key, DataStream& value)
{
- assert(!m_cursor_init);
- if (!m_database.m_db) return false;
- m_cursor_init = true;
- return true;
-}
-
-bool SQLiteBatch::ReadAtCursor(CDataStream& key, CDataStream& value, bool& complete)
-{
- complete = false;
-
- if (!m_cursor_init) return false;
-
int res = sqlite3_step(m_cursor_stmt);
if (res == SQLITE_DONE) {
- complete = true;
- return true;
+ return Status::DONE;
}
if (res != SQLITE_ROW) {
- LogPrintf("SQLiteBatch::ReadAtCursor: Unable to execute cursor step: %s\n", sqlite3_errstr(res));
- return false;
+ LogPrintf("%s: Unable to execute cursor step: %s\n", __func__, sqlite3_errstr(res));
+ return Status::FAIL;
}
// Leftmost column in result is index 0
@@ -503,13 +488,32 @@ bool SQLiteBatch::ReadAtCursor(CDataStream& key, CDataStream& value, bool& compl
const std::byte* value_data{AsBytePtr(sqlite3_column_blob(m_cursor_stmt, 1))};
size_t value_data_size(sqlite3_column_bytes(m_cursor_stmt, 1));
value.write({value_data, value_data_size});
- return true;
+ return Status::MORE;
}
-void SQLiteBatch::CloseCursor()
+SQLiteCursor::~SQLiteCursor()
{
sqlite3_reset(m_cursor_stmt);
- m_cursor_init = false;
+ int res = sqlite3_finalize(m_cursor_stmt);
+ if (res != SQLITE_OK) {
+ LogPrintf("%s: cursor closed but could not finalize cursor statement: %s\n",
+ __func__, sqlite3_errstr(res));
+ }
+}
+
+std::unique_ptr<DatabaseCursor> SQLiteBatch::GetNewCursor()
+{
+ if (!m_database.m_db) return nullptr;
+ auto cursor = std::make_unique<SQLiteCursor>();
+
+ const char* stmt_text = "SELECT key, value FROM main";
+ int res = sqlite3_prepare_v2(m_database.m_db, stmt_text, -1, &cursor->m_cursor_stmt, nullptr);
+ if (res != SQLITE_OK) {
+ throw std::runtime_error(strprintf(
+ "%s: Failed to setup cursor SQL statement: %s\n", __func__, sqlite3_errstr(res)));
+ }
+
+ return cursor;
}
bool SQLiteBatch::TxnBegin()
diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h
index 7680bdd07b..c6745d7a7e 100644
--- a/src/wallet/sqlite.h
+++ b/src/wallet/sqlite.h
@@ -14,26 +14,34 @@ struct bilingual_str;
namespace wallet {
class SQLiteDatabase;
+class SQLiteCursor : public DatabaseCursor
+{
+public:
+ sqlite3_stmt* m_cursor_stmt{nullptr};
+
+ explicit SQLiteCursor() {}
+ ~SQLiteCursor() override;
+
+ Status Next(DataStream& key, DataStream& value) override;
+};
+
/** RAII class that provides access to a WalletDatabase */
class SQLiteBatch : public DatabaseBatch
{
private:
SQLiteDatabase& m_database;
- bool m_cursor_init = false;
-
sqlite3_stmt* m_read_stmt{nullptr};
sqlite3_stmt* m_insert_stmt{nullptr};
sqlite3_stmt* m_overwrite_stmt{nullptr};
sqlite3_stmt* m_delete_stmt{nullptr};
- sqlite3_stmt* m_cursor_stmt{nullptr};
void SetupSQLStatements();
- bool ReadKey(CDataStream&& key, CDataStream& value) override;
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite = true) override;
- bool EraseKey(CDataStream&& key) override;
- bool HasKey(CDataStream&& key) override;
+ bool ReadKey(DataStream&& key, DataStream& value) override;
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override;
+ bool EraseKey(DataStream&& key) override;
+ bool HasKey(DataStream&& key) override;
public:
explicit SQLiteBatch(SQLiteDatabase& database);
@@ -44,9 +52,7 @@ public:
void Close() override;
- bool StartCursor() override;
- bool ReadAtCursor(CDataStream& key, CDataStream& value, bool& complete) override;
- void CloseCursor() override;
+ std::unique_ptr<DatabaseCursor> GetNewCursor() override;
bool TxnBegin() override;
bool TxnCommit() override;
bool TxnAbort() override;
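The StartCursor()/ReadAtCursor()/CloseCursor() triple is replaced by a cursor object that owns the prepared statement: GetNewCursor() returns a DatabaseCursor and Next() reports MORE, DONE, or FAIL. A minimal iteration sketch mirroring the loop in src/wallet/test/util.cpp above; batch stands in for any DatabaseBatch:

    // Illustrative sketch; `batch` is assumed to be a std::unique_ptr<DatabaseBatch>.
    if (std::unique_ptr<DatabaseCursor> cursor = batch->GetNewCursor()) {
        while (true) {
            DataStream key{};
            DataStream value{};
            DatabaseCursor::Status status = cursor->Next(key, value);
            if (status != DatabaseCursor::Status::MORE) break; // DONE, or FAIL on a read error
            // ... consume key/value ...
        }
    } // the SQLiteCursor destructor finalizes the underlying statement

GetNewCursor() returns nullptr when there is no open database handle, so callers check the pointer before looping.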
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index 2e12b5b1d4..1c731b95e5 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -232,17 +232,6 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
BOOST_CHECK_EQUAL(result5->GetSelectedValue(), 10 * CENT);
expected_result.Clear();
- // Negative effective value
- // Select 10 Cent but have 1 Cent not be possible because too small
- add_coin(5 * CENT, 5, expected_result);
- add_coin(3 * CENT, 3, expected_result);
- add_coin(2 * CENT, 2, expected_result);
- const auto result6 = SelectCoinsBnB(GroupCoins(utxo_pool), 10 * CENT, 5000);
- BOOST_CHECK(result6);
- BOOST_CHECK_EQUAL(result6->GetSelectedValue(), 10 * CENT);
- // FIXME: this test is redundant with the above, because 1 Cent is selected, not "too small"
- // BOOST_CHECK(EquivalentResult(expected_result, *result));
-
// Select 0.25 Cent, not possible
BOOST_CHECK(!SelectCoinsBnB(GroupCoins(utxo_pool), 0.25 * CENT, 0.5 * CENT));
expected_result.Clear();
@@ -305,7 +294,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
coin_selection_params_bnb.m_subtract_fee_outputs = true;
{
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -327,7 +316,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
}
{
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -350,7 +339,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
BOOST_CHECK(result10);
}
{
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -415,7 +404,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
FastRandomContext rand{};
const auto temp1{[&rand](std::vector<OutputGroup>& g, const CAmount& v, CAmount c) { return KnapsackSolver(g, v, c, rand); }};
const auto KnapsackSolver{temp1};
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -725,7 +714,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
BOOST_AUTO_TEST_CASE(ApproximateBestSubset)
{
FastRandomContext rand{};
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -747,7 +736,7 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset)
// Tests that with the ideal conditions, the coin selector will always be able to find a solution that can pay the target value
BOOST_AUTO_TEST_CASE(SelectCoins_test)
{
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -932,9 +921,9 @@ BOOST_AUTO_TEST_CASE(effective_value_test)
BOOST_CHECK_EQUAL(output5.GetEffectiveValue(), nValue); // The effective value should be equal to the absolute value if input_bytes is -1
}
-static util::Result<SelectionResult> select_coins(const CAmount& target, const CoinSelectionParams& cs_params, const CCoinControl& cc, std::function<CoinsResult(CWallet&)> coin_setup, interfaces::Chain* chain, const ArgsManager& args)
+static util::Result<SelectionResult> select_coins(const CAmount& target, const CoinSelectionParams& cs_params, const CCoinControl& cc, std::function<CoinsResult(CWallet&)> coin_setup, interfaces::Chain* chain)
{
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(chain, "", args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(chain, "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -995,7 +984,7 @@ BOOST_AUTO_TEST_CASE(check_max_weight)
add_coin(available_coins, wallet, CAmount(50 * COIN), CFeeRate(0), 144, false, 0, true);
return available_coins;
},
- chain, m_args);
+ chain);
BOOST_CHECK(result);
BOOST_CHECK(has_coin(result->GetInputSet(), CAmount(50 * COIN)));
@@ -1020,7 +1009,7 @@ BOOST_AUTO_TEST_CASE(check_max_weight)
}
return available_coins;
},
- chain, m_args);
+ chain);
BOOST_CHECK(has_coin(result->GetInputSet(), CAmount(0.0625 * COIN)));
BOOST_CHECK(has_coin(result->GetInputSet(), CAmount(0.025 * COIN)));
@@ -1041,7 +1030,7 @@ BOOST_AUTO_TEST_CASE(check_max_weight)
}
return available_coins;
},
- chain, m_args);
+ chain);
// No results
// 1515 inputs * 68 bytes = 103,020 bytes
@@ -1056,7 +1045,7 @@ BOOST_AUTO_TEST_CASE(SelectCoins_effective_value_test)
// This test creates a coin whose value is higher than the target but whose effective value is lower than the target.
// The coin is selected using coin control, with m_allow_other_inputs = false. SelectCoins should fail due to insufficient funds.
- std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
wallet->LoadWallet();
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -1064,7 +1053,7 @@ BOOST_AUTO_TEST_CASE(SelectCoins_effective_value_test)
CoinsResult available_coins;
{
- std::unique_ptr<CWallet> dummyWallet = std::make_unique<CWallet>(m_node.chain.get(), "dummy", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> dummyWallet = std::make_unique<CWallet>(m_node.chain.get(), "dummy", CreateMockWalletDatabase());
dummyWallet->LoadWallet();
LOCK(dummyWallet->cs_wallet);
dummyWallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
@@ -1105,7 +1094,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_coinsresult_test, BasicTestingSetup)
// Test case to verify CoinsResult object sanity.
CoinsResult available_coins;
{
- std::unique_ptr<CWallet> dummyWallet = std::make_unique<CWallet>(m_node.chain.get(), "dummy", m_args, CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> dummyWallet = std::make_unique<CWallet>(m_node.chain.get(), "dummy", CreateMockWalletDatabase());
BOOST_CHECK_EQUAL(dummyWallet->LoadWallet(), DBErrors::LOAD_OK);
LOCK(dummyWallet->cs_wallet);
dummyWallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
diff --git a/src/wallet/test/ismine_tests.cpp b/src/wallet/test/ismine_tests.cpp
index 151b09d2a6..90f369b22a 100644
--- a/src/wallet/test/ismine_tests.cpp
+++ b/src/wallet/test/ismine_tests.cpp
@@ -55,7 +55,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PK compressed - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForRawPubKey(pubkeys[0]);
@@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PK compressed - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "pk(" + EncodeSecret(keys[0]) + ")";
auto spk_manager = CreateDescriptor(keystore, desc_str, true);
@@ -86,7 +86,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PK uncompressed - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForRawPubKey(uncompressedPubkey);
@@ -105,7 +105,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PK uncompressed - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "pk(" + EncodeSecret(uncompressedKey) + ")";
auto spk_manager = CreateDescriptor(keystore, desc_str, true);
@@ -117,7 +117,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PKH compressed - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForDestination(PKHash(pubkeys[0]));
@@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PKH compressed - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "pkh(" + EncodeSecret(keys[0]) + ")";
auto spk_manager = CreateDescriptor(keystore, desc_str, true);
@@ -148,7 +148,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PKH uncompressed - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForDestination(PKHash(uncompressedPubkey));
@@ -167,7 +167,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PKH uncompressed - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "pkh(" + EncodeSecret(uncompressedKey) + ")";
auto spk_manager = CreateDescriptor(keystore, desc_str, true);
@@ -179,7 +179,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2SH - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
@@ -206,7 +206,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2SH - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "sh(pkh(" + EncodeSecret(keys[0]) + "))";
auto spk_manager = CreateDescriptor(keystore, desc_str, true);
@@ -219,7 +219,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2SH inside P2SH (invalid) - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
@@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2SH inside P2SH (invalid) - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "sh(sh(" + EncodeSecret(keys[0]) + "))";
auto spk_manager = CreateDescriptor(keystore, desc_str, false);
@@ -247,7 +247,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2SH inside P2WSH (invalid) - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
@@ -266,7 +266,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2SH inside P2WSH (invalid) - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "wsh(sh(" + EncodeSecret(keys[0]) + "))";
auto spk_manager = CreateDescriptor(keystore, desc_str, false);
@@ -275,7 +275,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH inside P2WSH (invalid) - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
@@ -292,7 +292,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH inside P2WSH (invalid) - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "wsh(wpkh(" + EncodeSecret(keys[0]) + "))";
auto spk_manager = CreateDescriptor(keystore, desc_str, false);
@@ -301,7 +301,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2WSH inside P2WSH (invalid) - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
@@ -320,7 +320,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2WSH inside P2WSH (invalid) - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "wsh(wsh(" + EncodeSecret(keys[0]) + "))";
auto spk_manager = CreateDescriptor(keystore, desc_str, false);
@@ -329,7 +329,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH compressed - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
@@ -345,7 +345,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH compressed - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "wpkh(" + EncodeSecret(keys[0]) + ")";
auto spk_manager = CreateDescriptor(keystore, desc_str, true);
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH uncompressed - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey));
@@ -378,7 +378,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH uncompressed (invalid) - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "wpkh(" + EncodeSecret(uncompressedKey) + ")";
auto spk_manager = CreateDescriptor(keystore, desc_str, false);
@@ -387,7 +387,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// scriptPubKey multisig - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
@@ -422,7 +422,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// scriptPubKey multisig - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "multi(2, " + EncodeSecret(uncompressedKey) + ", " + EncodeSecret(keys[1]) + ")";
auto spk_manager = CreateDescriptor(keystore, desc_str, true);
@@ -434,7 +434,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2SH multisig - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey));
@@ -457,7 +457,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2SH multisig - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "sh(multi(2, " + EncodeSecret(uncompressedKey) + ", " + EncodeSecret(keys[1]) + "))";
@@ -471,7 +471,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig with compressed keys - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
@@ -500,7 +500,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig with compressed keys - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "wsh(multi(2, " + EncodeSecret(keys[0]) + ", " + EncodeSecret(keys[1]) + "))";
@@ -514,7 +514,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig with uncompressed key - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey));
@@ -543,7 +543,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig with uncompressed key (invalid) - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "wsh(multi(2, " + EncodeSecret(uncompressedKey) + ", " + EncodeSecret(keys[1]) + "))";
@@ -553,7 +553,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig wrapped in P2SH - Legacy
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
@@ -583,7 +583,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig wrapped in P2SH - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "sh(wsh(multi(2, " + EncodeSecret(keys[0]) + ", " + EncodeSecret(keys[1]) + ")))";
@@ -598,7 +598,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// Combo - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "combo(" + EncodeSecret(keys[0]) + ")";
@@ -642,7 +642,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// Taproot - Descriptor
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
std::string desc_str = "tr(" + EncodeSecret(keys[0]) + ")";
@@ -660,7 +660,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// OP_RETURN
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
@@ -675,7 +675,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// witness unspendable
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
@@ -690,7 +690,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// witness unknown
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
@@ -705,7 +705,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// Nonstandard
{
- CWallet keystore(chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet keystore(chain.get(), "", CreateDummyWalletDatabase());
keystore.SetupLegacyScriptPubKeyMan();
LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
diff --git a/src/wallet/test/scriptpubkeyman_tests.cpp b/src/wallet/test/scriptpubkeyman_tests.cpp
index a524b85ccb..90042f5252 100644
--- a/src/wallet/test/scriptpubkeyman_tests.cpp
+++ b/src/wallet/test/scriptpubkeyman_tests.cpp
@@ -18,7 +18,7 @@ BOOST_FIXTURE_TEST_SUITE(scriptpubkeyman_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(CanProvide)
{
// Set up wallet and keyman variables.
- CWallet wallet(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
LegacyScriptPubKeyMan& keyman = *wallet.GetOrCreateLegacyScriptPubKeyMan();
// Make a 1 of 2 multisig script
diff --git a/src/wallet/test/spend_tests.cpp b/src/wallet/test/spend_tests.cpp
index 364cc5c20b..6e87d1cb49 100644
--- a/src/wallet/test/spend_tests.cpp
+++ b/src/wallet/test/spend_tests.cpp
@@ -18,7 +18,7 @@ BOOST_FIXTURE_TEST_SUITE(spend_tests, WalletTestingSetup)
BOOST_FIXTURE_TEST_CASE(SubtractFee, TestChain100Setup)
{
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
- auto wallet = CreateSyncedWallet(*m_node.chain, WITH_LOCK(Assert(m_node.chainman)->GetMutex(), return m_node.chainman->ActiveChain()), m_args, coinbaseKey);
+ auto wallet = CreateSyncedWallet(*m_node.chain, WITH_LOCK(Assert(m_node.chainman)->GetMutex(), return m_node.chainman->ActiveChain()), coinbaseKey);
// Check that a subtract-from-recipient transaction slightly less than the
// coinbase input amount does not create a change output (because it would
@@ -118,7 +118,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_duplicated_preset_inputs_test, TestChain100Setup)
// Add 4 spendable UTXO, 50 BTC each, to the wallet (total balance 200 BTC)
for (int i = 0; i < 4; i++) CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
- auto wallet = CreateSyncedWallet(*m_node.chain, WITH_LOCK(Assert(m_node.chainman)->GetMutex(), return m_node.chainman->ActiveChain()), m_args, coinbaseKey);
+ auto wallet = CreateSyncedWallet(*m_node.chain, WITH_LOCK(Assert(m_node.chainman)->GetMutex(), return m_node.chainman->ActiveChain()), coinbaseKey);
LOCK(wallet->cs_wallet);
auto available_coins = AvailableCoins(*wallet);
diff --git a/src/wallet/test/util.cpp b/src/wallet/test/util.cpp
index 88597bd320..b7bf312edf 100644
--- a/src/wallet/test/util.cpp
+++ b/src/wallet/test/util.cpp
@@ -14,9 +14,9 @@
#include <memory>
namespace wallet {
-std::unique_ptr<CWallet> CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, ArgsManager& args, const CKey& key)
+std::unique_ptr<CWallet> CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, const CKey& key)
{
- auto wallet = std::make_unique<CWallet>(&chain, "", args, CreateMockWalletDatabase());
+ auto wallet = std::make_unique<CWallet>(&chain, "", CreateMockWalletDatabase());
{
LOCK2(wallet->cs_wallet, ::cs_main);
wallet->SetLastBlockProcessed(cchain.Height(), cchain.Tip()->GetBlockHash());
@@ -50,18 +50,18 @@ std::unique_ptr<WalletDatabase> DuplicateMockDatabase(WalletDatabase& database,
// Get a cursor to the original database
auto batch = database.MakeBatch();
- batch->StartCursor();
+ std::unique_ptr<wallet::DatabaseCursor> cursor = batch->GetNewCursor();
// Get a batch for the new database
auto new_batch = new_database->MakeBatch();
// Read all records from the original database and write them to the new one
while (true) {
- CDataStream key(SER_DISK, CLIENT_VERSION);
- CDataStream value(SER_DISK, CLIENT_VERSION);
- bool complete;
- batch->ReadAtCursor(key, value, complete);
- if (complete) break;
+ DataStream key{};
+ DataStream value{};
+ DatabaseCursor::Status status = cursor->Next(key, value);
+ assert(status != DatabaseCursor::Status::FAIL);
+ if (status == DatabaseCursor::Status::DONE) break;
new_batch->Write(key, value);
}
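The hunk above converts DuplicateMockDatabase to the new cursor interface. For readers tracking that API change, the same iteration idiom is shown below in isolation as a minimal standalone sketch rather than part of the patch; the header paths and the DataStream-based Write call are assumptions taken from the surrounding hunks.

```cpp
// Minimal sketch (not part of this diff): copy every record from one wallet
// database to another using the DatabaseCursor interface this patch adopts.
#include <streams.h>   // DataStream (assumed location)
#include <wallet/db.h> // WalletDatabase, DatabaseBatch, DatabaseCursor (assumed location)

#include <memory>

namespace wallet {
bool CopyAllRecords(WalletDatabase& src, WalletDatabase& dst)
{
    std::unique_ptr<DatabaseBatch> batch = src.MakeBatch();
    std::unique_ptr<DatabaseCursor> cursor = batch->GetNewCursor();
    if (!cursor) return false; // could not begin reading records

    std::unique_ptr<DatabaseBatch> out = dst.MakeBatch();
    while (true) {
        DataStream key{};
        DataStream value{};
        DatabaseCursor::Status status = cursor->Next(key, value);
        if (status == DatabaseCursor::Status::FAIL) return false; // read error
        if (status == DatabaseCursor::Status::DONE) break;        // end of records
        if (!out->Write(key, value)) return false;                // write error
    }
    return true; // cursor and batches are released with their unique_ptrs
}
} // namespace wallet
```

Compared with the removed StartCursor/ReadAtCursor/CloseCursor triple, the cursor's lifetime is tied to the unique_ptr, so there is no separate close step to forget.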
diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h
index 635a5152ec..d726517e21 100644
--- a/src/wallet/test/util.h
+++ b/src/wallet/test/util.h
@@ -21,7 +21,7 @@ class CWallet;
struct DatabaseOptions;
class WalletDatabase;
-std::unique_ptr<CWallet> CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, ArgsManager& args, const CKey& key);
+std::unique_ptr<CWallet> CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, const CKey& key);
// Creates a copy of the provided database
std::unique_ptr<WalletDatabase> DuplicateMockDatabase(WalletDatabase& database, DatabaseOptions& options);
diff --git a/src/wallet/test/wallet_crypto_tests.cpp b/src/wallet/test/wallet_crypto_tests.cpp
index 6b8542f378..d5e75bb892 100644
--- a/src/wallet/test/wallet_crypto_tests.cpp
+++ b/src/wallet/test/wallet_crypto_tests.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <util/strencodings.h>
#include <wallet/crypter.h>
diff --git a/src/wallet/test/wallet_test_fixture.cpp b/src/wallet/test/wallet_test_fixture.cpp
index c47e56c093..2dd8f9ad33 100644
--- a/src/wallet/test/wallet_test_fixture.cpp
+++ b/src/wallet/test/wallet_test_fixture.cpp
@@ -10,7 +10,7 @@ namespace wallet {
WalletTestingSetup::WalletTestingSetup(const std::string& chainName)
: TestingSetup(chainName),
m_wallet_loader{interfaces::MakeWalletLoader(*m_node.chain, *Assert(m_node.args))},
- m_wallet(m_node.chain.get(), "", m_args, CreateMockWalletDatabase())
+ m_wallet(m_node.chain.get(), "", CreateMockWalletDatabase())
{
m_wallet.LoadWallet();
m_chain_notifications_handler = m_node.chain->handleNotifications({ &m_wallet, [](CWallet*) {} });
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index b6e50e961a..2e95a14807 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -43,7 +43,7 @@ static_assert(WALLET_INCREMENTAL_RELAY_FEE >= DEFAULT_INCREMENTAL_RELAY_FEE, "wa
BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup)
-static const std::shared_ptr<CWallet> TestLoadWallet(WalletContext& context)
+static std::shared_ptr<CWallet> TestLoadWallet(WalletContext& context)
{
DatabaseOptions options;
options.create_flags = WALLET_FLAG_DESCRIPTORS;
@@ -98,7 +98,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Verify ScanForWalletTransactions fails to read an unknown start block.
{
- CWallet wallet(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
LOCK(wallet.cs_wallet);
LOCK(Assert(m_node.chainman)->GetMutex());
@@ -119,7 +119,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Verify ScanForWalletTransactions picks up transactions in both the old
// and new block files.
{
- CWallet wallet(m_node.chain.get(), "", m_args, CreateMockWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateMockWalletDatabase());
{
LOCK(wallet.cs_wallet);
LOCK(Assert(m_node.chainman)->GetMutex());
@@ -164,7 +164,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Verify ScanForWalletTransactions only picks transactions in the new block
// file.
{
- CWallet wallet(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
LOCK(wallet.cs_wallet);
LOCK(Assert(m_node.chainman)->GetMutex());
@@ -192,7 +192,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Verify ScanForWalletTransactions scans no blocks.
{
- CWallet wallet(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
LOCK(wallet.cs_wallet);
LOCK(Assert(m_node.chainman)->GetMutex());
@@ -232,7 +232,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
// before the missing block, and success for a key whose creation time is
// after.
{
- const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
WITH_LOCK(wallet->cs_wallet, wallet->SetLastBlockProcessed(newTip->nHeight, newTip->GetBlockHash()));
WalletContext context;
@@ -298,7 +298,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
{
WalletContext context;
context.args = &m_args;
- const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan();
LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore);
@@ -321,7 +321,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// Call importwallet RPC and verify all blocks with timestamps >= BLOCK_TIME
// were scanned, and no prior blocks were scanned.
{
- const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
LOCK(wallet->cs_wallet);
wallet->SetupLegacyScriptPubKeyMan();
@@ -355,7 +355,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// debit functions.
BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
{
- CWallet wallet(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
LOCK(wallet.cs_wallet);
LOCK(Assert(m_node.chainman)->GetMutex());
@@ -527,7 +527,7 @@ public:
ListCoinsTestingSetup()
{
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
- wallet = CreateSyncedWallet(*m_node.chain, WITH_LOCK(Assert(m_node.chainman)->GetMutex(), return m_node.chainman->ActiveChain()), m_args, coinbaseKey);
+ wallet = CreateSyncedWallet(*m_node.chain, WITH_LOCK(Assert(m_node.chainman)->GetMutex(), return m_node.chainman->ActiveChain()), coinbaseKey);
}
~ListCoinsTestingSetup()
@@ -665,7 +665,7 @@ BOOST_FIXTURE_TEST_CASE(BasicOutputTypesTest, ListCoinsTest)
BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
{
{
- const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
wallet->SetMinVersion(FEATURE_LATEST);
wallet->SetWalletFlag(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
@@ -673,7 +673,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
BOOST_CHECK(!wallet->GetNewDestination(OutputType::BECH32, ""));
}
{
- const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", m_args, CreateDummyWalletDatabase());
+ const std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
LOCK(wallet->cs_wallet);
wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
wallet->SetMinVersion(FEATURE_LATEST);
@@ -907,24 +907,28 @@ BOOST_FIXTURE_TEST_CASE(ZapSelectTx, TestChain100Setup)
TestUnloadWallet(std::move(wallet));
}
+class FailCursor : public DatabaseCursor
+{
+public:
+ Status Next(DataStream& key, DataStream& value) override { return Status::FAIL; }
+};
+
/** RAII class that provides access to a FailDatabase, which can be made to fail on demand. */
class FailBatch : public DatabaseBatch
{
private:
bool m_pass{true};
- bool ReadKey(CDataStream&& key, CDataStream& value) override { return m_pass; }
- bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) override { return m_pass; }
- bool EraseKey(CDataStream&& key) override { return m_pass; }
- bool HasKey(CDataStream&& key) override { return m_pass; }
+ bool ReadKey(DataStream&& key, DataStream& value) override { return m_pass; }
+ bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite = true) override { return m_pass; }
+ bool EraseKey(DataStream&& key) override { return m_pass; }
+ bool HasKey(DataStream&& key) override { return m_pass; }
public:
explicit FailBatch(bool pass) : m_pass(pass) {}
void Flush() override {}
void Close() override {}
- bool StartCursor() override { return true; }
- bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) override { return false; }
- void CloseCursor() override {}
+ std::unique_ptr<DatabaseCursor> GetNewCursor() override { return std::make_unique<FailCursor>(); }
bool TxnBegin() override { return false; }
bool TxnCommit() override { return false; }
bool TxnAbort() override { return false; }
@@ -957,7 +961,7 @@ public:
*/
BOOST_FIXTURE_TEST_CASE(wallet_sync_tx_invalid_state_test, TestingSetup)
{
- CWallet wallet(m_node.chain.get(), "", m_args, std::make_unique<FailDatabase>());
+ CWallet wallet(m_node.chain.get(), "", std::make_unique<FailDatabase>());
{
LOCK(wallet.cs_wallet);
wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
diff --git a/src/wallet/test/walletload_tests.cpp b/src/wallet/test/walletload_tests.cpp
index 24d21c2f22..9f5a4b14d3 100644
--- a/src/wallet/test/walletload_tests.cpp
+++ b/src/wallet/test/walletload_tests.cpp
@@ -46,7 +46,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_unknown_descriptor, TestingSetup)
{
// Now try to load the wallet and verify the error.
- const std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", m_args, std::move(database)));
+ const std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", std::move(database)));
BOOST_CHECK_EQUAL(wallet->LoadWallet(), DBErrors::UNKNOWN_DESCRIPTOR);
}
}
@@ -54,13 +54,15 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_unknown_descriptor, TestingSetup)
bool HasAnyRecordOfType(WalletDatabase& db, const std::string& key)
{
std::unique_ptr<DatabaseBatch> batch = db.MakeBatch(false);
- BOOST_CHECK(batch->StartCursor());
+ BOOST_CHECK(batch);
+ std::unique_ptr<DatabaseCursor> cursor = batch->GetNewCursor();
+ BOOST_CHECK(cursor);
while (true) {
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
- bool complete;
- BOOST_CHECK(batch->ReadAtCursor(ssKey, ssValue, complete));
- if (complete) break;
+ DataStream ssKey{};
+ DataStream ssValue{};
+ DatabaseCursor::Status status = cursor->Next(ssKey, ssValue);
+ assert(status != DatabaseCursor::Status::FAIL);
+ if (status == DatabaseCursor::Status::DONE) break;
std::string type;
ssKey >> type;
if (type == key) return true;
@@ -82,7 +84,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_verif_crypted_key_checksum, TestingSetup)
{ // Context setup.
// Create and encrypt legacy wallet
- std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", m_args, CreateMockWalletDatabase()));
+ std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", CreateMockWalletDatabase()));
LOCK(wallet->cs_wallet);
auto legacy_spkm = wallet->GetOrCreateLegacyScriptPubKeyMan();
BOOST_CHECK(legacy_spkm->SetupGeneration(true));
@@ -110,7 +112,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_verif_crypted_key_checksum, TestingSetup)
// the records every time that 'CWallet::Unlock' gets called, which is not good.
// Load the wallet and check that is encrypted
- std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", m_args, get_db(dbs)));
+ std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", get_db(dbs)));
BOOST_CHECK_EQUAL(wallet->LoadWallet(), DBErrors::LOAD_OK);
BOOST_CHECK(wallet->IsCrypted());
BOOST_CHECK(HasAnyRecordOfType(wallet->GetDatabase(), DBKeys::CRYPTED_KEY));
@@ -136,7 +138,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_verif_crypted_key_checksum, TestingSetup)
}
// Load the wallet and check that is encrypted
- std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", m_args, std::move(db)));
+ std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", std::move(db)));
BOOST_CHECK_EQUAL(wallet->LoadWallet(), DBErrors::LOAD_OK);
BOOST_CHECK(wallet->IsCrypted());
BOOST_CHECK(HasAnyRecordOfType(wallet->GetDatabase(), DBKeys::CRYPTED_KEY));
@@ -164,7 +166,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_verif_crypted_key_checksum, TestingSetup)
BOOST_CHECK(batch->Write(key, value, /*fOverwrite=*/true));
}
- std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", m_args, std::move(db)));
+ std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", std::move(db)));
BOOST_CHECK_EQUAL(wallet->LoadWallet(), DBErrors::CORRUPT);
}
@@ -180,7 +182,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_verif_crypted_key_checksum, TestingSetup)
BOOST_CHECK(db->MakeBatch(false)->Write(key, value, /*fOverwrite=*/true));
}
- std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", m_args, std::move(db)));
+ std::shared_ptr<CWallet> wallet(new CWallet(m_node.chain.get(), "", std::move(db)));
BOOST_CHECK_EQUAL(wallet->LoadWallet(), DBErrors::CORRUPT);
}
}
diff --git a/src/wallet/transaction.h b/src/wallet/transaction.h
index 6ad222864a..290ef4eaa9 100644
--- a/src/wallet/transaction.h
+++ b/src/wallet/transaction.h
@@ -293,6 +293,7 @@ public:
bool isAbandoned() const { return state<TxStateInactive>() && state<TxStateInactive>()->abandoned; }
bool isConflicted() const { return state<TxStateConflicted>(); }
+ bool isInactive() const { return state<TxStateInactive>(); }
bool isUnconfirmed() const { return !isAbandoned() && !isConflicted() && !isConfirmed(); }
bool isConfirmed() const { return state<TxStateConfirmed>(); }
const uint256& GetHash() const { return tx->GetHash(); }
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 6158ff033c..daddd6446d 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -26,6 +26,7 @@
#include <script/descriptor.h>
#include <script/script.h>
#include <script/signingprovider.h>
+#include <support/cleanse.h>
#include <txmempool.h>
#include <util/bip32.h>
#include <util/check.h>
@@ -34,6 +35,7 @@
#include <util/moneystr.h>
#include <util/rbf.h>
#include <util/string.h>
+#include <util/system.h>
#include <util/translation.h>
#include <wallet/coincontrol.h>
#include <wallet/context.h>
@@ -550,7 +552,7 @@ bool CWallet::ChangeWalletPassphrase(const SecureString& strOldWalletPassphrase,
bool fWasLocked = IsLocked();
{
- LOCK(cs_wallet);
+ LOCK2(m_relock_mutex, cs_wallet);
Lock();
CCrypter crypter;
@@ -785,7 +787,7 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
return false;
{
- LOCK(cs_wallet);
+ LOCK2(m_relock_mutex, cs_wallet);
mapMasterKeys[++nMasterKeyMaxID] = kMasterKey;
WalletBatch* encrypted_batch = new WalletBatch(GetDatabase());
if (!encrypted_batch->TxnBegin()) {
@@ -1065,6 +1067,33 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const TxState& state, const
}
}
+ // Mark inactive coinbase transactions and their descendants as abandoned
+ if (wtx.IsCoinBase() && wtx.isInactive()) {
+ std::vector<CWalletTx*> txs{&wtx};
+
+ TxStateInactive inactive_state = TxStateInactive{/*abandoned=*/true};
+
+ while (!txs.empty()) {
+ CWalletTx* desc_tx = txs.back();
+ txs.pop_back();
+ desc_tx->m_state = inactive_state;
+ // Break caches since we have changed the state
+ desc_tx->MarkDirty();
+ batch.WriteTx(*desc_tx);
+ MarkInputsDirty(desc_tx->tx);
+ for (unsigned int i = 0; i < desc_tx->tx->vout.size(); ++i) {
+ COutPoint outpoint(desc_tx->GetHash(), i);
+ std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(outpoint);
+ for (TxSpends::const_iterator it = range.first; it != range.second; ++it) {
+ const auto wit = mapWallet.find(it->second);
+ if (wit != mapWallet.end()) {
+ txs.push_back(&wit->second);
+ }
+ }
+ }
+ }
+ }
+
//// debug print
WalletLogPrintf("AddToWallet %s %s%s\n", hash.ToString(), (fInsertedNew ? "new" : ""), (fUpdated ? "update" : ""));
@@ -1081,7 +1110,7 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const TxState& state, const
#if HAVE_SYSTEM
// notify an external script when a wallet transaction comes in or is updated
- std::string strCmd = m_args.GetArg("-walletnotify", "");
+ std::string strCmd = m_notify_tx_changed_script;
if (!strCmd.empty())
{
@@ -1274,7 +1303,11 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
wtx.MarkDirty();
batch.WriteTx(wtx);
NotifyTransactionChanged(wtx.GetHash(), CT_UPDATED);
- // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too
+ // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too.
+ // States are not permanent, so these transactions can become unabandoned if they are re-added to the
+ // mempool, or confirmed in a block, or conflicted.
+ // Note: If the reorged coinbase is re-added to the main chain, the descendants that have not had their
+ // states change will remain abandoned and will require manual broadcast if the user wants them.
for (unsigned int i = 0; i < wtx.tx->vout.size(); ++i) {
std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(COutPoint(now, i));
for (TxSpends::const_iterator iter = range.first; iter != range.second; ++iter) {
@@ -2882,7 +2915,11 @@ std::shared_ptr<CWallet> CWallet::Create(WalletContext& context, const std::stri
const auto start{SteadyClock::now()};
// TODO: Can't use std::make_shared because we need a custom deleter but
// should be possible to use std::allocate_shared.
- std::shared_ptr<CWallet> walletInstance(new CWallet(chain, name, args, std::move(database)), ReleaseWallet);
+ std::shared_ptr<CWallet> walletInstance(new CWallet(chain, name, std::move(database)), ReleaseWallet);
+ walletInstance->m_keypool_size = std::max(args.GetIntArg("-keypool", DEFAULT_KEYPOOL_SIZE), int64_t{1});
+ walletInstance->m_notify_tx_changed_script = args.GetArg("-walletnotify", "");
+
+ // Load wallet
bool rescan_required = false;
DBErrors nLoadWalletRet = walletInstance->LoadWallet();
if (nLoadWalletRet != DBErrors::LOAD_OK) {
@@ -2995,7 +3032,7 @@ std::shared_ptr<CWallet> CWallet::Create(WalletContext& context, const std::stri
if (args.IsArgSet("-mintxfee")) {
std::optional<CAmount> min_tx_fee = ParseMoney(args.GetArg("-mintxfee", ""));
- if (!min_tx_fee || min_tx_fee.value() == 0) {
+ if (!min_tx_fee) {
error = AmountErrMsg("mintxfee", args.GetArg("-mintxfee", ""));
return nullptr;
} else if (min_tx_fee.value() > HIGH_TX_FEE_PER_KB) {
@@ -3375,8 +3412,11 @@ bool CWallet::Lock()
return false;
{
- LOCK(cs_wallet);
- vMasterKey.clear();
+ LOCK2(m_relock_mutex, cs_wallet);
+ if (!vMasterKey.empty()) {
+ memory_cleanse(vMasterKey.data(), vMasterKey.size() * sizeof(decltype(vMasterKey)::value_type));
+ vMasterKey.clear();
+ }
}
NotifyStatusChanged(this);
@@ -3503,7 +3543,7 @@ void CWallet::SetupLegacyScriptPubKeyMan()
return;
}
- auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this));
+ auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this, m_keypool_size));
for (const auto& type : LEGACY_OUTPUT_TYPES) {
m_internal_spk_managers[type] = spk_manager.get();
m_external_spk_managers[type] = spk_manager.get();
@@ -3532,10 +3572,10 @@ void CWallet::ConnectScriptPubKeyManNotifiers()
void CWallet::LoadDescriptorScriptPubKeyMan(uint256 id, WalletDescriptor& desc)
{
if (IsWalletFlagSet(WALLET_FLAG_EXTERNAL_SIGNER)) {
- auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new ExternalSignerScriptPubKeyMan(*this, desc));
+ auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new ExternalSignerScriptPubKeyMan(*this, desc, m_keypool_size));
m_spk_managers[id] = std::move(spk_manager);
} else {
- auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, desc));
+ auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, desc, m_keypool_size));
m_spk_managers[id] = std::move(spk_manager);
}
}
@@ -3546,7 +3586,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans(const CExtKey& master_key)
for (bool internal : {false, true}) {
for (OutputType t : OUTPUT_TYPES) {
- auto spk_manager = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this));
+ auto spk_manager = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, m_keypool_size));
if (IsCrypted()) {
if (IsLocked()) {
throw std::runtime_error(std::string(__func__) + ": Wallet is locked, cannot setup new descriptors");
@@ -3602,7 +3642,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans()
continue;
}
OutputType t = *desc->GetOutputType();
- auto spk_manager = std::unique_ptr<ExternalSignerScriptPubKeyMan>(new ExternalSignerScriptPubKeyMan(*this));
+ auto spk_manager = std::unique_ptr<ExternalSignerScriptPubKeyMan>(new ExternalSignerScriptPubKeyMan(*this, m_keypool_size));
spk_manager->SetupDescriptor(std::move(desc));
uint256 id = spk_manager->GetID();
m_spk_managers[id] = std::move(spk_manager);
@@ -3718,7 +3758,7 @@ ScriptPubKeyMan* CWallet::AddWalletDescriptor(WalletDescriptor& desc, const Flat
WalletLogPrintf("Update existing descriptor: %s\n", desc.descriptor->ToString());
spk_man->UpdateWalletDescriptor(desc);
} else {
- auto new_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, desc));
+ auto new_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, desc, m_keypool_size));
spk_man = new_spk_man.get();
// Save the descriptor to memory
@@ -3775,26 +3815,27 @@ bool CWallet::MigrateToSQLite(bilingual_str& error)
// Get all of the records for DB type migration
std::unique_ptr<DatabaseBatch> batch = m_database->MakeBatch();
+ std::unique_ptr<DatabaseCursor> cursor = batch->GetNewCursor();
std::vector<std::pair<SerializeData, SerializeData>> records;
- if (!batch->StartCursor()) {
+ if (!cursor) {
error = _("Error: Unable to begin reading all records in the database");
return false;
}
- bool complete = false;
+ DatabaseCursor::Status status = DatabaseCursor::Status::FAIL;
while (true) {
- CDataStream ss_key(SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(SER_DISK, CLIENT_VERSION);
- bool ret = batch->ReadAtCursor(ss_key, ss_value, complete);
- if (!ret) {
+ DataStream ss_key{};
+ DataStream ss_value{};
+ status = cursor->Next(ss_key, ss_value);
+ if (status != DatabaseCursor::Status::MORE) {
break;
}
SerializeData key(ss_key.begin(), ss_key.end());
SerializeData value(ss_value.begin(), ss_value.end());
records.emplace_back(key, value);
}
- batch->CloseCursor();
+ cursor.reset();
batch.reset();
- if (!complete) {
+ if (status != DatabaseCursor::Status::DONE) {
error = _("Error: Unable to read all records in the database");
return false;
}
@@ -3820,8 +3861,8 @@ bool CWallet::MigrateToSQLite(bilingual_str& error)
bool began = batch->TxnBegin();
assert(began); // This is a critical error, the new db could not be written to. The original db exists as a backup, but we should not continue execution.
for (const auto& [key, value] : records) {
- CDataStream ss_key(key, SER_DISK, CLIENT_VERSION);
- CDataStream ss_value(value, SER_DISK, CLIENT_VERSION);
+ DataStream ss_key{key};
+ DataStream ss_value{value};
if (!batch->Write(ss_key, ss_value)) {
batch->TxnAbort();
m_database->Close();
@@ -3839,14 +3880,11 @@ std::optional<MigrationData> CWallet::GetDescriptorsForLegacy(bilingual_str& err
AssertLockHeld(cs_wallet);
LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan();
- if (!legacy_spkm) {
- error = _("Error: This wallet is already a descriptor wallet");
- return std::nullopt;
- }
+ assert(legacy_spkm);
std::optional<MigrationData> res = legacy_spkm->MigrateToDescriptor();
if (res == std::nullopt) {
- error = _("Error: Unable to produce descriptors for this legacy wallet. Make sure the wallet is unlocked first");
+ error = _("Error: Unable to produce descriptors for this legacy wallet. Make sure to provide the wallet's passphrase if it is encrypted.");
return std::nullopt;
}
return res;
@@ -4136,27 +4174,19 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error,
return true;
}
-util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>&& wallet, WalletContext& context)
+util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& wallet_name, const SecureString& passphrase, WalletContext& context)
{
MigrationResult res;
bilingual_str error;
std::vector<bilingual_str> warnings;
- // Make a backup of the DB
- std::string wallet_name = wallet->GetName();
- fs::path this_wallet_dir = fs::absolute(fs::PathFromString(wallet->GetDatabase().Filename())).parent_path();
- fs::path backup_filename = fs::PathFromString(strprintf("%s-%d.legacy.bak", wallet_name, GetTime()));
- fs::path backup_path = this_wallet_dir / backup_filename;
- if (!wallet->BackupWallet(fs::PathToString(backup_path))) {
- return util::Error{_("Error: Unable to make a backup of your wallet")};
- }
- res.backup_path = backup_path;
-
- // Unload the wallet so that nothing else tries to use it while we're changing it
- if (!RemoveWallet(context, wallet, /*load_on_start=*/std::nullopt, warnings)) {
- return util::Error{_("Unable to unload the wallet before migrating")};
+ // If the wallet is still loaded, unload it so that nothing else tries to use it while we're changing it
+ if (auto wallet = GetWallet(context, wallet_name)) {
+ if (!RemoveWallet(context, wallet, /*load_on_start=*/std::nullopt, warnings)) {
+ return util::Error{_("Unable to unload the wallet before migrating")};
+ }
+ UnloadWallet(std::move(wallet));
}
- UnloadWallet(std::move(wallet));
// Load the wallet but only in the context of this function.
// No signals should be connected nor should anything else be aware of this wallet
@@ -4170,15 +4200,43 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
return util::Error{Untranslated("Wallet file verification failed.") + Untranslated(" ") + error};
}
+ // Make the local wallet
std::shared_ptr<CWallet> local_wallet = CWallet::Create(empty_context, wallet_name, std::move(database), options.create_flags, error, warnings);
if (!local_wallet) {
return util::Error{Untranslated("Wallet loading failed.") + Untranslated(" ") + error};
}
+ // Before anything else, check if there is something to migrate.
+ if (!local_wallet->GetLegacyScriptPubKeyMan()) {
+ return util::Error{_("Error: This wallet is already a descriptor wallet")};
+ }
+
+ // Make a backup of the DB
+ fs::path this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path();
+ fs::path backup_filename = fs::PathFromString(strprintf("%s-%d.legacy.bak", wallet_name, GetTime()));
+ fs::path backup_path = this_wallet_dir / backup_filename;
+ if (!local_wallet->BackupWallet(fs::PathToString(backup_path))) {
+ return util::Error{_("Error: Unable to make a backup of your wallet")};
+ }
+ res.backup_path = backup_path;
+
bool success = false;
{
LOCK(local_wallet->cs_wallet);
+ // Unlock the wallet if needed
+ if (local_wallet->IsLocked() && !local_wallet->Unlock(passphrase)) {
+ if (passphrase.find('\0') == std::string::npos) {
+ return util::Error{Untranslated("Error: Wallet decryption failed, the wallet passphrase was not provided or was incorrect.")};
+ } else {
+ return util::Error{Untranslated("Error: Wallet decryption failed, the wallet passphrase entered was incorrect. "
+ "The passphrase contains a null character (ie - a zero byte). "
+ "If this passphrase was set with a version of this software prior to 25.0, "
+ "please try again with only the characters up to — but not including — "
+ "the first null character.")};
+ }
+ }
+
// First change to using SQLite
if (!local_wallet->MigrateToSQLite(error)) return util::Error{error};
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 8b7e6dd526..32cb3e3f59 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -10,6 +10,7 @@
#include <fs.h>
#include <interfaces/chain.h>
#include <interfaces/handler.h>
+#include <logging.h>
#include <outputtype.h>
#include <policy/feerate.h>
#include <psbt.h>
@@ -19,7 +20,6 @@
#include <util/result.h>
#include <util/strencodings.h>
#include <util/string.h>
-#include <util/system.h>
#include <util/time.h>
#include <util/ui_change_type.h>
#include <validationinterface.h>
@@ -243,6 +243,7 @@ private:
std::atomic<bool> fAbortRescan{false};
std::atomic<bool> fScanningWallet{false}; // controlled by WalletRescanReserver
std::atomic<bool> m_attaching_chain{false};
+ std::atomic<bool> m_scanning_with_passphrase{false};
std::atomic<int64_t> m_scanning_start{0};
std::atomic<double> m_scanning_progress{0};
friend class WalletRescanReserver;
@@ -307,9 +308,6 @@ private:
//! Unset the blank wallet flag and saves it to disk
void UnsetBlankWalletFlag(WalletBatch& batch) override;
- /** Provider of aplication-wide arguments. */
- const ArgsManager& m_args;
-
/** Interface for accessing chain state. */
interfaces::Chain* m_chain;
@@ -373,9 +371,8 @@ public:
unsigned int nMasterKeyMaxID = 0;
/** Construct wallet with specified name and database implementation. */
- CWallet(interfaces::Chain* chain, const std::string& name, const ArgsManager& args, std::unique_ptr<WalletDatabase> database)
- : m_args(args),
- m_chain(chain),
+ CWallet(interfaces::Chain* chain, const std::string& name, std::unique_ptr<WalletDatabase> database)
+ : m_chain(chain),
m_name(name),
m_database(std::move(database))
{
@@ -467,6 +464,7 @@ public:
void AbortRescan() { fAbortRescan = true; }
bool IsAbortingRescan() const { return fAbortRescan; }
bool IsScanning() const { return fScanningWallet; }
+ bool IsScanningWithPassphrase() const { return m_scanning_with_passphrase; }
int64_t ScanningDuration() const { return fScanningWallet ? GetTimeMillis() - m_scanning_start : 0; }
double ScanningProgress() const { return fScanningWallet ? (double) m_scanning_progress : 0; }
@@ -486,6 +484,9 @@ public:
// Used to prevent concurrent calls to walletpassphrase RPC.
Mutex m_unlock_mutex;
+ // Used to prevent deleting the passphrase from memory when it is still in use.
+ RecursiveMutex m_relock_mutex;
+
bool Unlock(const SecureString& strWalletPassphrase, bool accept_no_keys = false);
bool ChangeWalletPassphrase(const SecureString& strOldWalletPassphrase, const SecureString& strNewWalletPassphrase);
bool EncryptWallet(const SecureString& strWalletPassphrase);
@@ -642,6 +643,12 @@ public:
/** Absolute maximum transaction fee (in satoshis) used by default for the wallet */
CAmount m_default_max_tx_fee{DEFAULT_TRANSACTION_MAXFEE};
+ /** Number of pre-generated keys/scripts by each spkm (part of the look-ahead process, used to detect payments) */
+ int64_t m_keypool_size{DEFAULT_KEYPOOL_SIZE};
+
+ /** Notify external script when a wallet transaction comes in or is updated (handled by -walletnotify) */
+ std::string m_notify_tx_changed_script;
+
size_t KeypoolCountExternalKeys() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
bool TopUpKeyPool(unsigned int kpSize = 0);
@@ -821,7 +828,8 @@ public:
bool IsLegacy() const;
/** Returns a bracketed wallet name for displaying in logs, will return [default wallet] if the wallet has no name */
- const std::string GetDisplayName() const override {
+ std::string GetDisplayName() const override
+ {
std::string wallet_name = GetName().length() == 0 ? "default wallet" : GetName();
return strprintf("[%s]", wallet_name);
};
@@ -954,17 +962,18 @@ private:
using Clock = std::chrono::steady_clock;
using NowFn = std::function<Clock::time_point()>;
CWallet& m_wallet;
- bool m_could_reserve;
+ bool m_could_reserve{false};
NowFn m_now;
public:
- explicit WalletRescanReserver(CWallet& w) : m_wallet(w), m_could_reserve(false) {}
+ explicit WalletRescanReserver(CWallet& w) : m_wallet(w) {}
- bool reserve()
+ bool reserve(bool with_passphrase = false)
{
assert(!m_could_reserve);
if (m_wallet.fScanningWallet.exchange(true)) {
return false;
}
+ m_wallet.m_scanning_with_passphrase.exchange(with_passphrase);
m_wallet.m_scanning_start = GetTimeMillis();
m_wallet.m_scanning_progress = 0;
m_could_reserve = true;
@@ -984,6 +993,7 @@ public:
{
if (m_could_reserve) {
m_wallet.fScanningWallet = false;
+ m_wallet.m_scanning_with_passphrase = false;
}
}
};
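The reserver now records whether the scan was started while the wallet passphrase is held. Below is a minimal usage sketch, assuming the caller has already unlocked the wallet and omitting the RPC plumbing; the helper name is hypothetical and not part of this patch.

```cpp
// Sketch only: reserve the rescan slot and flag that the wallet passphrase is
// in use for the duration of the scan, so relocking logic can consult
// IsScanningWithPassphrase() before wiping the master key.
#include <wallet/wallet.h>

namespace wallet {
bool StartRescanHoldingPassphrase(CWallet& wallet)
{
    WalletRescanReserver reserver(wallet);
    if (!reserver.reserve(/*with_passphrase=*/true)) {
        return false; // another rescan is already running
    }
    // ... run wallet.ScanForWalletTransactions(...) here while the
    // reservation is held ...

    // When the reserver is destroyed, both fScanningWallet and
    // m_scanning_with_passphrase are cleared again.
    return true;
}
} // namespace wallet
```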
@@ -1006,7 +1016,7 @@ struct MigrationResult {
};
//! Do all steps to migrate a legacy wallet to a descriptor wallet
-util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>&& wallet, WalletContext& context);
+util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& wallet_name, const SecureString& passphrase, WalletContext& context);
} // namespace wallet
#endif // BITCOIN_WALLET_WALLET_H
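Callers of the migration path now pass a wallet name and passphrase instead of a loaded wallet handle. A hedged sketch of a call site under the new signature follows; the helper name, logging, and header paths are assumptions, and error/result access follows the util::Result conventions used elsewhere in the codebase.

```cpp
// Sketch only (not part of this diff): drive the by-name migration entry
// point. The wallet may be loaded or not; MigrateLegacyToDescriptor unloads
// it itself if necessary.
#include <fs.h>
#include <logging.h>
#include <support/allocators/secure.h> // SecureString (assumed location)
#include <util/result.h>
#include <wallet/context.h>
#include <wallet/wallet.h>

#include <string>

namespace wallet {
bool TryMigrate(WalletContext& context, const std::string& wallet_name, const SecureString& passphrase)
{
    util::Result<MigrationResult> result = MigrateLegacyToDescriptor(wallet_name, passphrase, context);
    if (!result) {
        // Covers "already a descriptor wallet", backup failures, and a missing
        // or wrong passphrase for encrypted wallets.
        LogPrintf("Migration failed: %s\n", util::ErrorString(result).original);
        return false;
    }
    LogPrintf("Migration backup written to %s\n", fs::PathToString(result->backup_path));
    return true;
}
} // namespace wallet
```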
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index b393c35112..2cd35ae40e 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -321,7 +321,7 @@ public:
};
static bool
-ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
+ReadKeyValue(CWallet* pwallet, DataStream& ssKey, CDataStream& ssValue,
CWalletScanState &wss, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn = nullptr) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
try {
@@ -759,7 +759,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
return true;
}
-bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn)
+bool ReadKeyValue(CWallet* pwallet, DataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn)
{
CWalletScanState dummy_wss;
LOCK(pwallet->cs_wallet);
@@ -812,7 +812,8 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
#endif
// Get cursor
- if (!m_batch->StartCursor())
+ std::unique_ptr<DatabaseCursor> cursor = m_batch->GetNewCursor();
+ if (!cursor)
{
pwallet->WalletLogPrintf("Error getting wallet database cursor\n");
return DBErrors::CORRUPT;
@@ -821,16 +822,13 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
while (true)
{
// Read next record
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
+ DataStream ssKey{};
CDataStream ssValue(SER_DISK, CLIENT_VERSION);
- bool complete;
- bool ret = m_batch->ReadAtCursor(ssKey, ssValue, complete);
- if (complete) {
+ DatabaseCursor::Status status = cursor->Next(ssKey, ssValue);
+ if (status == DatabaseCursor::Status::DONE) {
break;
- }
- else if (!ret)
- {
- m_batch->CloseCursor();
+ } else if (status == DatabaseCursor::Status::FAIL) {
+ cursor.reset();
pwallet->WalletLogPrintf("Error reading next record from wallet database\n");
return DBErrors::CORRUPT;
}
@@ -878,7 +876,6 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
} catch (...) {
result = DBErrors::CORRUPT;
}
- m_batch->CloseCursor();
// Set the active ScriptPubKeyMans
for (auto spk_man_pair : wss.m_active_external_spks) {
@@ -986,7 +983,8 @@ DBErrors WalletBatch::FindWalletTxHashes(std::vector<uint256>& tx_hashes)
}
// Get cursor
- if (!m_batch->StartCursor())
+ std::unique_ptr<DatabaseCursor> cursor = m_batch->GetNewCursor();
+ if (!cursor)
{
LogPrintf("Error getting wallet database cursor\n");
return DBErrors::CORRUPT;
@@ -995,14 +993,12 @@ DBErrors WalletBatch::FindWalletTxHashes(std::vector<uint256>& tx_hashes)
while (true)
{
// Read next record
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
- bool complete;
- bool ret = m_batch->ReadAtCursor(ssKey, ssValue, complete);
- if (complete) {
+ DataStream ssKey{};
+ DataStream ssValue{};
+ DatabaseCursor::Status status = cursor->Next(ssKey, ssValue);
+ if (status == DatabaseCursor::Status::DONE) {
break;
- } else if (!ret) {
- m_batch->CloseCursor();
+ } else if (status == DatabaseCursor::Status::FAIL) {
LogPrintf("Error reading next record from wallet database\n");
return DBErrors::CORRUPT;
}
@@ -1018,7 +1014,6 @@ DBErrors WalletBatch::FindWalletTxHashes(std::vector<uint256>& tx_hashes)
} catch (...) {
result = DBErrors::CORRUPT;
}
- m_batch->CloseCursor();
return result;
}
@@ -1111,7 +1106,8 @@ bool WalletBatch::WriteWalletFlags(const uint64_t flags)
bool WalletBatch::EraseRecords(const std::unordered_set<std::string>& types)
{
// Get cursor
- if (!m_batch->StartCursor())
+ std::unique_ptr<DatabaseCursor> cursor = m_batch->GetNewCursor();
+ if (!cursor)
{
return false;
}
@@ -1120,16 +1116,12 @@ bool WalletBatch::EraseRecords(const std::unordered_set<std::string>& types)
while (true)
{
// Read next record
- CDataStream key(SER_DISK, CLIENT_VERSION);
- CDataStream value(SER_DISK, CLIENT_VERSION);
- bool complete;
- bool ret = m_batch->ReadAtCursor(key, value, complete);
- if (complete) {
+ DataStream key{};
+ DataStream value{};
+ DatabaseCursor::Status status = cursor->Next(key, value);
+ if (status == DatabaseCursor::Status::DONE) {
break;
- }
- else if (!ret)
- {
- m_batch->CloseCursor();
+ } else if (status == DatabaseCursor::Status::FAIL) {
return false;
}
@@ -1143,7 +1135,6 @@ bool WalletBatch::EraseRecords(const std::unordered_set<std::string>& types)
m_batch->Erase(key_data);
}
}
- m_batch->CloseCursor();
return true;
}
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 97e8fad278..c97356a71f 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -303,7 +303,7 @@ void MaybeCompactWalletDB(WalletContext& context);
using KeyFilterFn = std::function<bool(const std::string&)>;
//! Unserialize a given Key-Value pair and load it into the wallet
-bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn = nullptr);
+bool ReadKeyValue(CWallet* pwallet, DataStream& ssKey, CDataStream& ssValue, std::string& strType, std::string& strErr, const KeyFilterFn& filter_fn = nullptr);
/** Return object for accessing dummy database with no read/write capabilities. */
std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase();
diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp
index f93b666bd5..f389676204 100644
--- a/src/wallet/wallettool.cpp
+++ b/src/wallet/wallettool.cpp
@@ -47,7 +47,7 @@ static void WalletCreate(CWallet* wallet_instance, uint64_t wallet_creation_flag
wallet_instance->TopUpKeyPool();
}
-static const std::shared_ptr<CWallet> MakeWallet(const std::string& name, const fs::path& path, const ArgsManager& args, DatabaseOptions options)
+static std::shared_ptr<CWallet> MakeWallet(const std::string& name, const fs::path& path, DatabaseOptions options)
{
DatabaseStatus status;
bilingual_str error;
@@ -58,7 +58,7 @@ static const std::shared_ptr<CWallet> MakeWallet(const std::string& name, const
}
// dummy chain interface
- std::shared_ptr<CWallet> wallet_instance{new CWallet(/*chain=*/nullptr, name, args, std::move(database)), WalletToolReleaseWallet};
+ std::shared_ptr<CWallet> wallet_instance{new CWallet(/*chain=*/nullptr, name, std::move(database)), WalletToolReleaseWallet};
DBErrors load_wallet_ret;
try {
load_wallet_ret = wallet_instance->LoadWallet();
@@ -159,7 +159,7 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command)
options.require_format = DatabaseFormat::SQLITE;
}
- const std::shared_ptr<CWallet> wallet_instance = MakeWallet(name, path, args, options);
+ const std::shared_ptr<CWallet> wallet_instance = MakeWallet(name, path, options);
if (wallet_instance) {
WalletShowInfo(wallet_instance.get());
wallet_instance->Close();
@@ -168,7 +168,7 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command)
DatabaseOptions options;
ReadDatabaseArgs(args, options);
options.require_existing = true;
- const std::shared_ptr<CWallet> wallet_instance = MakeWallet(name, path, args, options);
+ const std::shared_ptr<CWallet> wallet_instance = MakeWallet(name, path, options);
if (!wallet_instance) return false;
WalletShowInfo(wallet_instance.get());
wallet_instance->Close();
@@ -194,7 +194,7 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command)
DatabaseOptions options;
ReadDatabaseArgs(args, options);
options.require_existing = true;
- const std::shared_ptr<CWallet> wallet_instance = MakeWallet(name, path, args, options);
+ const std::shared_ptr<CWallet> wallet_instance = MakeWallet(name, path, options);
if (!wallet_instance) return false;
bilingual_str error;
bool ret = DumpWallet(args, *wallet_instance, error);
diff --git a/src/zmq/zmqabstractnotifier.h b/src/zmq/zmqabstractnotifier.h
index 6899ee8fa6..cf0ee48f47 100644
--- a/src/zmq/zmqabstractnotifier.h
+++ b/src/zmq/zmqabstractnotifier.h
@@ -20,7 +20,7 @@ class CZMQAbstractNotifier
public:
static const int DEFAULT_ZMQ_SNDHWM {1000};
- CZMQAbstractNotifier() : psocket(nullptr), outbound_message_high_water_mark(DEFAULT_ZMQ_SNDHWM) { }
+ CZMQAbstractNotifier() : outbound_message_high_water_mark(DEFAULT_ZMQ_SNDHWM) {}
virtual ~CZMQAbstractNotifier();
template <typename T>
@@ -57,7 +57,7 @@ public:
virtual bool NotifyTransaction(const CTransaction &transaction);
protected:
- void *psocket;
+ void* psocket{nullptr};
std::string type;
std::string address;
int outbound_message_high_water_mark; // aka SNDHWM
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index 6dc4737d0a..df129c0830 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -21,7 +21,7 @@
#include <utility>
#include <vector>
-CZMQNotificationInterface::CZMQNotificationInterface() : pcontext(nullptr)
+CZMQNotificationInterface::CZMQNotificationInterface()
{
}
diff --git a/src/zmq/zmqnotificationinterface.h b/src/zmq/zmqnotificationinterface.h
index b24d4664da..a43f9bfef3 100644
--- a/src/zmq/zmqnotificationinterface.h
+++ b/src/zmq/zmqnotificationinterface.h
@@ -39,7 +39,7 @@ protected:
private:
CZMQNotificationInterface();
- void *pcontext;
+ void* pcontext{nullptr};
std::list<std::unique_ptr<CZMQAbstractNotifier>> notifiers;
};
diff --git a/test/README.md b/test/README.md
index fdbb91832a..0eddb72e1f 100644
--- a/test/README.md
+++ b/test/README.md
@@ -109,34 +109,57 @@ how many jobs to run, append `--jobs=n`
The individual tests and the test_runner harness have many command-line
options. Run `test/functional/test_runner.py -h` to see them all.
-#### Speed up test runs with a ramdisk
+#### Speed up test runs with a RAM disk
-If you have available RAM on your system you can create a ramdisk to use as the `cache` and `tmp` directories for the functional tests in order to speed them up.
-Speed-up amount varies on each system (and according to your ram speed and other variables), but a 2-3x speed-up is not uncommon.
+If you have available RAM on your system you can create a RAM disk to use as the `cache` and `tmp` directories for the functional tests in order to speed them up.
+Speed-up amount varies on each system (and according to your RAM speed and other variables), but a 2-3x speed-up is not uncommon.
-To create a 4GB ramdisk on Linux at `/mnt/tmp/`:
+**Linux**
+
+To create a 4 GiB RAM disk at `/mnt/tmp/`:
```bash
sudo mkdir -p /mnt/tmp
sudo mount -t tmpfs -o size=4g tmpfs /mnt/tmp/
```
-Configure the size of the ramdisk using the `size=` option.
-The size of the ramdisk needed is relative to the number of concurrent jobs the test suite runs.
-For example running the test suite with `--jobs=100` might need a 4GB ramdisk, but running with `--jobs=32` will only need a 2.5GB ramdisk.
+Configure the size of the RAM disk using the `size=` option.
+The size of the RAM disk needed is relative to the number of concurrent jobs the test suite runs.
+For example running the test suite with `--jobs=100` might need a 4 GiB RAM disk, but running with `--jobs=32` will only need a 2.5 GiB RAM disk.
-To use, run the test suite specifying the ramdisk as the `cachedir` and `tmpdir`:
+To use, run the test suite specifying the RAM disk as the `cachedir` and `tmpdir`:
```bash
test/functional/test_runner.py --cachedir=/mnt/tmp/cache --tmpdir=/mnt/tmp
```
-Once finished with the tests and the disk, and to free the ram, simply unmount the disk:
+Once finished with the tests and the disk, and to free the RAM, simply unmount the disk:
```bash
sudo umount /mnt/tmp
```
+**macOS**
+
+To create a 4 GiB RAM disk named "ramdisk" at `/Volumes/ramdisk/`:
+
+```bash
+diskutil erasevolume HFS+ ramdisk $(hdiutil attach -nomount ram://8388608)
+```
+
+Configure the RAM disk size, expressed as the number of blocks, at the end of the command
+(`4096 MiB * 2048 blocks/MiB = 8388608 blocks` for 4 GiB). To run the tests using the RAM disk:
+
+```bash
+test/functional/test_runner.py --cachedir=/Volumes/ramdisk/cache --tmpdir=/Volumes/ramdisk/tmp
+```
+
+To unmount:
+
+```bash
+umount /Volumes/ramdisk
+```
+
#### Troubleshooting and debugging test failures
##### Resource contention
diff --git a/test/functional/data/rpc_decodescript.json b/test/functional/data/rpc_decodescript.json
index 4a15ae8792..5f3e725d4c 100644
--- a/test/functional/data/rpc_decodescript.json
+++ b/test/functional/data/rpc_decodescript.json
@@ -69,7 +69,7 @@
"p2sh": "2N34iiGoUUkVSPiaaTFpJjB1FR9TXQu3PGM",
"segwit": {
"asm": "0 96c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
- "desc": "addr(bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5)#5akkdska",
+ "desc": "wsh(raw(02eeee))#gtay4y0z",
"hex": "002096c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
"address": "bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5",
"type": "witness_v0_scripthash",
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 22b1918b85..1080e77c40 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -161,7 +161,7 @@ class FullBlockTest(BitcoinTestFramework):
self.log.info(f"Reject block with invalid tx: {TxTemplate.__name__}")
blockname = f"for_invalid.{TxTemplate.__name__}"
- badblock = self.next_block(blockname)
+ self.next_block(blockname)
badtx = template.get_tx()
if TxTemplate != invalid_txs.InputMissing:
self.sign_tx(badtx, attempt_spend_tx)
@@ -473,7 +473,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
- b39 = self.next_block(39)
+ self.next_block(39)
b39_outputs = 0
b39_sigops_per_output = 6
@@ -672,7 +672,7 @@ class FullBlockTest(BitcoinTestFramework):
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
- b51 = self.next_block(51)
+ self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.send_blocks([b51], success=False, reject_reason='bad-cb-multiple', reconnect=True)
@@ -752,7 +752,7 @@ class FullBlockTest(BitcoinTestFramework):
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
- b57 = self.next_block(57)
+ self.next_block(57)
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
@@ -769,7 +769,7 @@ class FullBlockTest(BitcoinTestFramework):
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
- b57p2 = self.next_block("57p2")
+ self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
@@ -803,7 +803,7 @@ class FullBlockTest(BitcoinTestFramework):
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
- b58 = self.next_block(58, spend=out[17])
+ self.next_block(58, spend=out[17])
tx = CTransaction()
assert len(out[17].vout) < 42
tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), SEQUENCE_FINAL))
@@ -815,7 +815,7 @@ class FullBlockTest(BitcoinTestFramework):
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip(57)
- b59 = self.next_block(59)
+ self.next_block(59)
tx = self.create_and_sign_transaction(out[17], 51 * COIN)
b59 = self.update_block(59, [tx])
self.send_blocks([b59], success=False, reject_reason='bad-txns-in-belowout', reconnect=True)
@@ -851,7 +851,7 @@ class FullBlockTest(BitcoinTestFramework):
# \-> b_spend_dup_cb (b_dup_cb) -> b_dup_2 ()
#
self.move_tip(57)
- b_spend_dup_cb = self.next_block('spend_dup_cb')
+ self.next_block('spend_dup_cb')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(duplicate_tx.sha256, 0)))
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
@@ -876,7 +876,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip('dup_2')
- b62 = self.next_block(62)
+ self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) # don't set nSequence
@@ -957,7 +957,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
- b65 = self.next_block(65)
+ self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0)
b65 = self.update_block(65, [tx1, tx2])
@@ -970,7 +970,7 @@ class FullBlockTest(BitcoinTestFramework):
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
- b66 = self.next_block(66)
+ self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
b66 = self.update_block(66, [tx2, tx1])
@@ -984,7 +984,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block with a transaction double spending a transaction created in the same block")
self.move_tip(65)
- b67 = self.next_block(67)
+ self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
tx3 = self.create_and_sign_transaction(tx1, 2)
@@ -1005,7 +1005,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
- b68 = self.next_block(68, additional_coinbase_value=10)
+ self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9)
b68 = self.update_block(68, [tx])
self.send_blocks([b68], success=False, reject_reason='bad-cb-amount', reconnect=True)
@@ -1025,7 +1025,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
- b70 = self.next_block(70, spend=out[21])
+ self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
@@ -1043,7 +1043,7 @@ class FullBlockTest(BitcoinTestFramework):
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
self.move_tip(69)
- b72 = self.next_block(72)
+ self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21], 2)
tx2 = self.create_and_sign_transaction(tx1, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
@@ -1081,7 +1081,7 @@ class FullBlockTest(BitcoinTestFramework):
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
- b73 = self.next_block(73)
+ self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
@@ -1109,7 +1109,7 @@ class FullBlockTest(BitcoinTestFramework):
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
- b74 = self.next_block(74)
+ self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
@@ -1122,7 +1122,7 @@ class FullBlockTest(BitcoinTestFramework):
self.send_blocks([b74], success=False, reject_reason='bad-blk-sigops', reconnect=True)
self.move_tip(72)
- b75 = self.next_block(75)
+ self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
@@ -1137,7 +1137,7 @@ class FullBlockTest(BitcoinTestFramework):
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
- b76 = self.next_block(76)
+ self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
@@ -1165,18 +1165,18 @@ class FullBlockTest(BitcoinTestFramework):
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
- b77 = self.next_block(77)
+ self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
b77 = self.update_block(77, [tx77])
self.send_blocks([b77], True)
self.save_spendable_output()
- b78 = self.next_block(78)
+ self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.send_blocks([b78], True)
- b79 = self.next_block(79)
+ self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.send_blocks([b79], True)
@@ -1208,7 +1208,7 @@ class FullBlockTest(BitcoinTestFramework):
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
- b83 = self.next_block(83)
+ self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)
@@ -1227,7 +1227,7 @@ class FullBlockTest(BitcoinTestFramework):
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
- b84 = self.next_block(84)
+ self.next_block(84)
tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
@@ -1265,7 +1265,7 @@ class FullBlockTest(BitcoinTestFramework):
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
- b89a = self.next_block("89a", spend=out[32])
+ self.next_block("89a", spend=out[32])
tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.send_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
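
The sigops hunks above (b73-b76) all revolve around how legacy sigop counting treats push data: bytes inside a successfully parsed push are data and are never counted, while a push whose declared length overruns the end of the script stops the count at that point. Below is a rough Python sketch of that counting rule, assuming the standard opcode values (OP_CHECKSIG 0xac, OP_PUSHDATA4 0x4e, and so on); it is an illustration, not the test framework's or bitcoind's code.

```python
# Illustrative legacy sigop counter (assumed opcode constants, not framework code).
OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4 = 0x4c, 0x4d, 0x4e
OP_CHECKSIG, OP_CHECKSIGVERIFY = 0xac, 0xad
OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY = 0xae, 0xaf

def legacy_sigop_count(script: bytes) -> int:
    count, i = 0, 0
    while i < len(script):
        op = script[i]
        i += 1
        datalen = 0
        if 0 < op <= 0x4b:                       # direct push of `op` bytes
            datalen = op
        elif op == OP_PUSHDATA1 and i + 1 <= len(script):
            datalen = script[i]; i += 1
        elif op == OP_PUSHDATA2 and i + 2 <= len(script):
            datalen = int.from_bytes(script[i:i + 2], 'little'); i += 2
        elif op == OP_PUSHDATA4 and i + 4 <= len(script):
            datalen = int.from_bytes(script[i:i + 4], 'little'); i += 4
        elif op in (OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4):
            break                                # truncated length field: stop counting
        elif op in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
            count += 1
        elif op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
            count += 20                          # counted at the maximum of 20 keys
        if i + datalen > len(script):
            break                                # push overruns the script: stop counting
        i += datalen                             # pushed bytes are data, never sigops
    return count

# 20 sigops, then a parseable 3-byte push hiding three more OP_CHECKSIG bytes
script = bytes([OP_CHECKSIG] * 20) + bytes([3, OP_CHECKSIG, OP_CHECKSIG, OP_CHECKSIG])
assert legacy_sigop_count(script) == 20
```

Under this rule, a parseable PUSHDATA4 placed before a trailing OP_CHECKSIG (b73) still lets that final sigop be counted, while a length field that runs past the end of the script (b74/b75) hides everything after it.
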
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index d5e5ed47d6..f9730b48c5 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -126,7 +126,6 @@ class ConfArgsTest(BitcoinTestFramework):
expected_msgs=[
'Command-line arg: addnode="some.node"',
'Command-line arg: rpcauth=****',
- 'Command-line arg: rpcbind=****',
'Command-line arg: rpcpassword=****',
'Command-line arg: rpcuser=****',
'Command-line arg: torpassword=****',
@@ -135,14 +134,17 @@ class ConfArgsTest(BitcoinTestFramework):
],
unexpected_msgs=[
'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
- '127.1.1.1',
'secret-rpcuser',
'secret-torpassword',
+ 'Command-line arg: rpcbind=****',
+ 'Command-line arg: rpcallowip=****',
]):
self.start_node(0, extra_args=[
'-addnode=some.node',
'-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
'-rpcbind=127.1.1.1',
+ '-rpcbind=127.0.0.1',
+ "-rpcallowip=127.0.0.1",
'-rpcpassword=',
'-rpcuser=secret-rpcuser',
'-torpassword=secret-torpassword',
@@ -249,11 +251,43 @@ class ConfArgsTest(BitcoinTestFramework):
]):
self.nodes[0].setmocktime(start + 65)
+ def test_connect_with_seednode(self):
+ self.log.info('Test -connect with -seednode')
+ seednode_ignored = ['-seednode is ignored when -connect is used\n']
+ dnsseed_ignored = ['-dnsseed is ignored when -connect is used and -proxy is specified\n']
+ addcon_thread_started = ['addcon thread start\n']
+ self.stop_node(0)
+
+ # When -connect is supplied, expanding addrman via getaddr calls to ADDR_FETCH(-seednode)
+ # nodes is irrelevant and -seednode is ignored.
+ with self.nodes[0].assert_debug_log(expected_msgs=seednode_ignored):
+ self.start_node(0, extra_args=['-connect=fakeaddress1', '-seednode=fakeaddress2'])
+
+ # With -proxy, an ADDR_FETCH connection is made to a peer that the dns seed resolves to.
+ # ADDR_FETCH connections are not used when -connect is used.
+ with self.nodes[0].assert_debug_log(expected_msgs=dnsseed_ignored):
+ self.restart_node(0, extra_args=['-connect=fakeaddress1', '-dnsseed=1', '-proxy=1.2.3.4'])
+
+ # If the user did not disable -dnsseed, but it was soft-disabled because they provided -connect,
+ # they shouldn't see a warning about -dnsseed being ignored.
+ with self.nodes[0].assert_debug_log(expected_msgs=addcon_thread_started,
+ unexpected_msgs=dnsseed_ignored):
+ self.restart_node(0, extra_args=['-connect=fakeaddress1', '-proxy=1.2.3.4'])
+
+ # We have to supply expected_msgs as it's a required argument
+ # The expected_msg must be something we are confident will be logged after the unexpected_msg
+ # These cases test for -connect being supplied but only to disable it
+ for connect_arg in ['-connect=0', '-noconnect']:
+ with self.nodes[0].assert_debug_log(expected_msgs=addcon_thread_started,
+ unexpected_msgs=seednode_ignored):
+ self.restart_node(0, extra_args=[connect_arg, '-seednode=fakeaddress2'])
+
def run_test(self):
self.test_log_buffer()
self.test_args_log()
self.test_seed_peers()
self.test_networkactive()
+ self.test_connect_with_seednode()
self.test_config_file_parser()
self.test_invalid_command_line_options()
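
A compact way to read the new test_connect_with_seednode cases above is as a decision table over the three start-up options involved. The helper below is purely hypothetical (it is not bitcoind logic); it only restates which warnings the test expects to see, or not see, in the debug log.

```python
# Hypothetical decision table for the cases exercised above; not bitcoind code.
def expected_warnings(connect: bool, seednode: bool, dnsseed_explicit: bool, proxy: bool) -> list[str]:
    warnings = []
    if connect and seednode:
        warnings.append("-seednode is ignored when -connect is used")
    if connect and proxy and dnsseed_explicit:
        # only warn when the user explicitly asked for -dnsseed=1;
        # a soft-disabled default produces no message
        warnings.append("-dnsseed is ignored when -connect is used and -proxy is specified")
    return warnings

assert expected_warnings(connect=True, seednode=True, dnsseed_explicit=False, proxy=False) == \
    ["-seednode is ignored when -connect is used"]
assert expected_warnings(connect=True, seednode=False, dnsseed_explicit=True, proxy=True) == \
    ["-dnsseed is ignored when -connect is used and -proxy is specified"]
assert expected_warnings(connect=True, seednode=False, dnsseed_explicit=False, proxy=True) == []
assert expected_warnings(connect=False, seednode=True, dnsseed_explicit=False, proxy=False) == []
```
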
diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
index e2bc566f53..1f2e0936ed 100755
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -85,7 +85,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
- except:
+ except Exception:
# An exception here should mean the node is about to crash.
# If bitcoind exits, then try again. wait_for_node_exit()
# should raise an exception if bitcoind doesn't exit.
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 28a8959e93..c551c0b449 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -164,6 +164,9 @@ class MaxUploadTest(BitcoinTestFramework):
assert_equal(len(peer_info), 1) # node is still connected
assert_equal(peer_info[0]['permissions'], ['download'])
+ self.log.info("Test passing an unparsable value to -maxuploadtarget throws an error")
+ self.stop_node(0)
+ self.nodes[0].assert_start_raises_init_error(extra_args=["-maxuploadtarget=abc"], expected_msg="Error: Unable to parse -maxuploadtarget: 'abc'")
if __name__ == '__main__':
MaxUploadTest().main()
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index 32fea18f37..8cb633d454 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -31,7 +31,7 @@ class NotificationsTest(BitcoinTestFramework):
self.num_nodes = 2
self.setup_clean_chain = True
# The experimental syscall sandbox feature (-sandbox) is not compatible with -alertnotify,
- # -blocknotify or -walletnotify (which all invoke execve).
+ # -blocknotify, -walletnotify or -shutdownnotify (which all invoke execve).
self.disable_syscall_sandbox = True
def setup_network(self):
@@ -39,14 +39,18 @@ class NotificationsTest(BitcoinTestFramework):
self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify")
self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify")
self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify")
+ self.shutdownnotify_dir = os.path.join(self.options.tmpdir, "shutdownnotify")
+ self.shutdownnotify_file = os.path.join(self.shutdownnotify_dir, "shutdownnotify.txt")
os.mkdir(self.alertnotify_dir)
os.mkdir(self.blocknotify_dir)
os.mkdir(self.walletnotify_dir)
+ os.mkdir(self.shutdownnotify_dir)
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [[
f"-alertnotify=echo > {os.path.join(self.alertnotify_dir, '%s')}",
f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}",
+ f"-shutdownnotify=echo > {self.shutdownnotify_file}",
], [
f"-walletnotify=echo %h_%b > {os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))}",
]]
@@ -162,6 +166,10 @@ class NotificationsTest(BitcoinTestFramework):
# TODO: add test for `-alertnotify` large fork notifications
+ self.log.info("test -shutdownnotify")
+ self.stop_nodes()
+ self.wait_until(lambda: os.path.isfile(self.shutdownnotify_file), timeout=10)
+
def expect_wallet_notify(self, tx_details):
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_details), timeout=10)
# Should have no more and no less files than expected
diff --git a/test/functional/feature_posix_fs_permissions.py b/test/functional/feature_posix_fs_permissions.py
new file mode 100755
index 0000000000..c5a543e97a
--- /dev/null
+++ b/test/functional/feature_posix_fs_permissions.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test file system permissions for POSIX platforms.
+"""
+
+import os
+import stat
+
+from test_framework.test_framework import BitcoinTestFramework
+
+
+class PosixFsPermissionsTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_platform_not_posix()
+
+ def check_directory_permissions(self, dir):
+ mode = os.lstat(dir).st_mode
+ self.log.info(f"{stat.filemode(mode)} {dir}")
+ assert mode == (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+
+ def check_file_permissions(self, file):
+ mode = os.lstat(file).st_mode
+ self.log.info(f"{stat.filemode(mode)} {file}")
+ assert mode == (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
+
+ def run_test(self):
+ self.stop_node(0)
+ datadir = os.path.join(self.nodes[0].datadir, self.chain)
+ self.check_directory_permissions(datadir)
+ walletsdir = os.path.join(datadir, "wallets")
+ self.check_directory_permissions(walletsdir)
+ debuglog = os.path.join(datadir, "debug.log")
+ self.check_file_permissions(debuglog)
+
+
+if __name__ == '__main__':
+ PosixFsPermissionsTest().main()
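
For reference, the exact st_mode values asserted in the new test correspond to the conventional octal modes 0700 for the data and wallets directories and 0600 for debug.log. A quick standard-library check, independent of the test framework:

```python
import stat

# 0o40700: directory, read/write/execute for the owner only
assert stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR == 0o40700
assert stat.filemode(0o40700) == "drwx------"

# 0o100600: regular file, read/write for the owner only
assert stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR == 0o100600
assert stat.filemode(0o100600) == "-rw-------"
```
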
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index 664ed779db..519877ac5b 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -223,8 +223,8 @@ class PruneTest(BitcoinTestFramework):
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
- with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
- self.nodes[2].verifychain(checklevel=4, nblocks=0)
+ with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(no data)']):
+ assert not self.nodes[2].verifychain(checklevel=4, nblocks=0)
self.log.info(f"Will need to redownload block {self.forkheight}")
# Verify that we have enough history to reorg back to the fork point
diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py
index 0a84a66a8f..947d2e8273 100755
--- a/test/functional/feature_rbf.py
+++ b/test/functional/feature_rbf.py
@@ -392,11 +392,11 @@ class ReplaceByFeeTest(BitcoinTestFramework):
enough transactions off of each root UTXO to exceed the MAX_REPLACEMENT_LIMIT.
Then create a conflicting RBF replacement transaction.
"""
- normal_node = self.nodes[1]
- wallet = MiniWallet(normal_node)
# Clear mempools to avoid cross-node sync failure.
for node in self.nodes:
self.generate(node, 1)
+ normal_node = self.nodes[1]
+ wallet = MiniWallet(normal_node)
# This has to be chosen so that the total number of transactions can exceed
# MAX_REPLACEMENT_LIMIT without having any one tx graph run into the descendant
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py
index 144e01c367..8ac06f570d 100755
--- a/test/functional/feature_taproot.py
+++ b/test/functional/feature_taproot.py
@@ -750,7 +750,7 @@ def spenders_taproot_active():
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
- add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
+ add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != LEAF_VERSION_TAPSCRIPT]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
@@ -1555,12 +1555,16 @@ class TaprootTest(BitcoinTestFramework):
script_lists = [
None,
- [("0", CScript([pubs[50], OP_CHECKSIG]), 0xc0)],
- [("0", CScript([pubs[51], OP_CHECKSIG]), 0xc0)],
- [("0", CScript([pubs[52], OP_CHECKSIG]), 0xc0), ("1", CScript([b"BIP341"]), VALID_LEAF_VERS[pubs[99][0] % 41])],
- [("0", CScript([pubs[53], OP_CHECKSIG]), 0xc0), ("1", CScript([b"Taproot"]), VALID_LEAF_VERS[pubs[99][1] % 41])],
- [("0", CScript([pubs[54], OP_CHECKSIG]), 0xc0), [("1", CScript([pubs[55], OP_CHECKSIG]), 0xc0), ("2", CScript([pubs[56], OP_CHECKSIG]), 0xc0)]],
- [("0", CScript([pubs[57], OP_CHECKSIG]), 0xc0), [("1", CScript([pubs[58], OP_CHECKSIG]), 0xc0), ("2", CScript([pubs[59], OP_CHECKSIG]), 0xc0)]],
+ [("0", CScript([pubs[50], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)],
+ [("0", CScript([pubs[51], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)],
+ [("0", CScript([pubs[52], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("1", CScript([b"BIP341"]), VALID_LEAF_VERS[pubs[99][0] % 41])],
+ [("0", CScript([pubs[53], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("1", CScript([b"Taproot"]), VALID_LEAF_VERS[pubs[99][1] % 41])],
+ [("0", CScript([pubs[54], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT),
+ [("1", CScript([pubs[55], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("2", CScript([pubs[56], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)]
+ ],
+ [("0", CScript([pubs[57], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT),
+ [("1", CScript([pubs[58], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("2", CScript([pubs[59], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)]
+ ],
]
taps = [taproot_construct(inner_keys[i], script_lists[i]) for i in range(len(inner_keys))]
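
The substitution above only replaces the magic constant 0xc0 with the framework's LEAF_VERSION_TAPSCRIPT; the "sighash/leafver" failure case still computes its signature over a sighash that uses some other even leaf version, which must make validation fail. A tiny sketch of the value space involved (the constant is BIP341/BIP342's tapscript leaf version; the variable names are ad hoc):

```python
LEAF_VERSION_TAPSCRIPT = 0xc0  # BIP342 tapscript leaf version
# the failure case draws from the even leaf versions other than 0xc0
other_leaf_versions = sorted({x & 0xFE for x in range(0x100) if x & 0xFE != LEAF_VERSION_TAPSCRIPT})
assert LEAF_VERSION_TAPSCRIPT not in other_leaf_versions
assert all(v % 2 == 0 for v in other_leaf_versions)
assert len(other_leaf_versions) == 127  # 128 even bytes minus the tapscript version
```
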
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
index 68cbb5dbed..8350e9c91e 100755
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -7,14 +7,12 @@
Test mempool update of transaction descendants/ancestors information (count, size)
when transactions have been re-added from a disconnected block to the mempool.
"""
+from math import ceil
import time
-from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
-from test_framework.address import key_to_p2pkh
-from test_framework.wallet_util import bytes_to_wif
-from test_framework.key import ECKey
+from test_framework.wallet import MiniWallet
class MempoolUpdateFromBlockTest(BitcoinTestFramework):
@@ -22,15 +20,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
self.num_nodes = 1
self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000', '-limitancestorcount=100']]
- def get_new_address(self):
- key = ECKey()
- key.generate()
- pubkey = key.get_pubkey().get_bytes()
- address = key_to_p2pkh(pubkey)
- self.priv_keys.append(bytes_to_wif(key.get_bytes()))
- return address
-
- def transaction_graph_test(self, size, n_tx_to_mine=None, start_input_txid='', end_address='', fee=Decimal(0.00100000)):
+ def transaction_graph_test(self, size, n_tx_to_mine=None, fee=100_000):
"""Create an acyclic tournament (a type of directed graph) of transactions and use it for testing.
Keyword arguments:
@@ -45,14 +35,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory)
"""
-
- self.priv_keys = [self.nodes[0].get_deterministic_priv_key().key]
- if not start_input_txid:
- start_input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0]
-
- if not end_address:
- end_address = self.get_new_address()
-
+ wallet = MiniWallet(self.nodes[0])
first_block_hash = ''
tx_id = []
tx_size = []
@@ -61,41 +44,31 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
self.log.debug('Preparing transaction #{}...'.format(i))
# Prepare inputs.
if i == 0:
- inputs = [{'txid': start_input_txid, 'vout': 0}]
- inputs_value = self.nodes[0].gettxout(start_input_txid, 0)['value']
+ inputs = [wallet.get_utxo()] # let MiniWallet provide a start UTXO
else:
inputs = []
- inputs_value = 0
for j, tx in enumerate(tx_id[0:i]):
# Transaction tx[K] is a child of each of previous transactions tx[0]..tx[K-1] at their output K-1.
vout = i - j - 1
- inputs.append({'txid': tx_id[j], 'vout': vout})
- inputs_value += self.nodes[0].gettxout(tx, vout)['value']
-
- self.log.debug('inputs={}'.format(inputs))
- self.log.debug('inputs_value={}'.format(inputs_value))
+ inputs.append(wallet.get_utxo(txid=tx_id[j], vout=vout))
# Prepare outputs.
tx_count = i + 1
if tx_count < size:
# Transaction tx[K] is an ancestor of each of subsequent transactions tx[K+1]..tx[N-1].
n_outputs = size - tx_count
- output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001'))
- outputs = {}
- for _ in range(n_outputs):
- outputs[self.get_new_address()] = output_value
else:
- output_value = (inputs_value - fee).quantize(Decimal('0.00000001'))
- outputs = {end_address: output_value}
-
- self.log.debug('output_value={}'.format(output_value))
- self.log.debug('outputs={}'.format(outputs))
+ n_outputs = 1
# Create a new transaction.
- unsigned_raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
- signed_raw_tx = self.nodes[0].signrawtransactionwithkey(unsigned_raw_tx, self.priv_keys)
- tx_id.append(self.nodes[0].sendrawtransaction(signed_raw_tx['hex']))
- tx_size.append(self.nodes[0].getmempoolentry(tx_id[-1])['vsize'])
+ new_tx = wallet.send_self_transfer_multi(
+ from_node=self.nodes[0],
+ utxos_to_spend=inputs,
+ num_outputs=n_outputs,
+ fee_per_output=ceil(fee / n_outputs)
+ )
+ tx_id.append(new_tx['txid'])
+ tx_size.append(new_tx['tx'].get_vsize())
if tx_count in n_tx_to_mine:
# The created transactions are mined into blocks by batches.
diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py
index b2f0659eda..394009f30f 100755
--- a/test/functional/p2p_disconnect_ban.py
+++ b/test/functional/p2p_disconnect_ban.py
@@ -116,7 +116,7 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
- self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
+ self.wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
@@ -127,7 +127,7 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
- self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
+ self.wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
diff --git a/test/functional/p2p_eviction.py b/test/functional/p2p_eviction.py
index 1f4797a89d..8b31dfa549 100755
--- a/test/functional/p2p_eviction.py
+++ b/test/functional/p2p_eviction.py
@@ -12,22 +12,23 @@ address/netgroup since in the current framework, all peers are connecting from
the same local address. See Issue #14210 for more info.
Therefore, this test is limited to the remaining protection criteria.
"""
-
import time
from test_framework.blocktools import (
- COINBASE_MATURITY,
create_block,
create_coinbase,
)
from test_framework.messages import (
msg_pong,
msg_tx,
- tx_from_hex,
)
-from test_framework.p2p import P2PDataStore, P2PInterface
+from test_framework.p2p import (
+ P2PDataStore,
+ P2PInterface,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
+from test_framework.wallet import MiniWallet
class SlowP2PDataStore(P2PDataStore):
@@ -35,14 +36,15 @@ class SlowP2PDataStore(P2PDataStore):
time.sleep(0.1)
self.send_message(msg_pong(message.nonce))
+
class SlowP2PInterface(P2PInterface):
def on_ping(self, message):
time.sleep(0.1)
self.send_message(msg_pong(message.nonce))
+
class P2PEvict(BitcoinTestFramework):
def set_test_params(self):
- self.setup_clean_chain = True
self.num_nodes = 1
# The choice of maxconnections=32 results in a maximum of 21 inbound connections
# (32 - 10 outbound - 1 feeler). 20 inbound peers are protected from eviction:
@@ -53,7 +55,7 @@ class P2PEvict(BitcoinTestFramework):
protected_peers = set() # peers that we expect to be protected from eviction
current_peer = -1
node = self.nodes[0]
- self.generatetoaddress(node, COINBASE_MATURITY + 1, node.get_deterministic_priv_key().address)
+ self.wallet = MiniWallet(node)
self.log.info("Create 4 peers and protect them from eviction by sending us a block")
for _ in range(4):
@@ -79,21 +81,8 @@ class P2PEvict(BitcoinTestFramework):
current_peer += 1
txpeer.sync_with_ping()
- prevtx = node.getblock(node.getblockhash(i + 1), 2)['tx'][0]
- rawtx = node.createrawtransaction(
- inputs=[{'txid': prevtx['txid'], 'vout': 0}],
- outputs=[{node.get_deterministic_priv_key().address: 50 - 0.00125}],
- )
- sigtx = node.signrawtransactionwithkey(
- hexstring=rawtx,
- privkeys=[node.get_deterministic_priv_key().key],
- prevtxs=[{
- 'txid': prevtx['txid'],
- 'vout': 0,
- 'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
- }],
- )['hex']
- txpeer.send_message(msg_tx(tx_from_hex(sigtx)))
+ tx = self.wallet.create_self_transfer()['tx']
+ txpeer.send_message(msg_tx(tx))
protected_peers.add(current_peer)
self.log.info("Create 8 peers and protect them from eviction by having faster pings")
@@ -133,5 +122,6 @@ class P2PEvict(BitcoinTestFramework):
self.log.debug("{} protected peers: {}".format(len(protected_peers), protected_peers))
assert evicted_peers[0] not in protected_peers
+
if __name__ == '__main__':
P2PEvict().main()
diff --git a/test/functional/p2p_headers_sync_with_minchainwork.py b/test/functional/p2p_headers_sync_with_minchainwork.py
index b07077c668..832fd7e0e9 100755
--- a/test/functional/p2p_headers_sync_with_minchainwork.py
+++ b/test/functional/p2p_headers_sync_with_minchainwork.py
@@ -27,6 +27,7 @@ NODE2_BLOCKS_REQUIRED = 2047
class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
def set_test_params(self):
+ self.rpc_timeout *= 4 # To avoid timeout when generating BLOCKS_TO_MINE
self.setup_clean_chain = True
self.num_nodes = 4
# Node0 has no required chainwork; node1 requires 15 blocks on top of the genesis block; node2 requires 2047
diff --git a/test/functional/p2p_ibd_stalling.py b/test/functional/p2p_ibd_stalling.py
new file mode 100755
index 0000000000..aca98ceb3f
--- /dev/null
+++ b/test/functional/p2p_ibd_stalling.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022- The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test stalling logic during IBD
+"""
+
+import time
+
+from test_framework.blocktools import (
+ create_block,
+ create_coinbase
+)
+from test_framework.messages import (
+ MSG_BLOCK,
+ MSG_TYPE_MASK,
+)
+from test_framework.p2p import (
+ CBlockHeader,
+ msg_block,
+ msg_headers,
+ P2PDataStore,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+
+
+class P2PStaller(P2PDataStore):
+ def __init__(self, stall_block):
+ self.stall_block = stall_block
+ super().__init__()
+
+ def on_getdata(self, message):
+ for inv in message.inv:
+ self.getdata_requests.append(inv.hash)
+ if (inv.type & MSG_TYPE_MASK) == MSG_BLOCK:
+ if (inv.hash != self.stall_block):
+ self.send_message(msg_block(self.block_store[inv.hash]))
+
+ def on_getheaders(self, message):
+ pass
+
+
+class P2PIBDStallingTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def run_test(self):
+ NUM_BLOCKS = 1025
+ NUM_PEERS = 4
+ node = self.nodes[0]
+ tip = int(node.getbestblockhash(), 16)
+ blocks = []
+ height = 1
+ block_time = node.getblock(node.getbestblockhash())['time'] + 1
+ self.log.info("Prepare blocks without sending them to the node")
+ block_dict = {}
+ for _ in range(NUM_BLOCKS):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+ block_dict[blocks[-1].sha256] = blocks[-1]
+ stall_block = blocks[0].sha256
+
+ headers_message = msg_headers()
+ headers_message.headers = [CBlockHeader(b) for b in blocks[:NUM_BLOCKS-1]]
+ peers = []
+
+ self.log.info("Check that a staller does not get disconnected if the 1024 block lookahead buffer is filled")
+ for id in range(NUM_PEERS):
+ peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_block), p2p_idx=id, connection_type="outbound-full-relay"))
+ peers[-1].block_store = block_dict
+ peers[-1].send_message(headers_message)
+
+ # Need to wait until 1023 blocks are received - the magic total-bytes number is a workaround for the lack of an RPC
+ # that would return the number of downloaded (but not yet connected) blocks.
+ self.wait_until(lambda: self.total_bytes_recv_for_blocks() == 172761)
+
+ self.all_sync_send_with_ping(peers)
+ # If there was a peer marked for stalling, it would get disconnected
+ self.mocktime = int(time.time()) + 3
+ node.setmocktime(self.mocktime)
+ self.all_sync_send_with_ping(peers)
+ assert_equal(node.num_test_p2p_connections(), NUM_PEERS)
+
+ self.log.info("Check that increasing the window beyond 1024 blocks triggers stalling logic")
+ headers_message.headers = [CBlockHeader(b) for b in blocks]
+ with node.assert_debug_log(expected_msgs=['Stall started']):
+ for p in peers:
+ p.send_message(headers_message)
+ self.all_sync_send_with_ping(peers)
+
+ self.log.info("Check that the stalling peer is disconnected after 2 seconds")
+ self.mocktime += 3
+ node.setmocktime(self.mocktime)
+ peers[0].wait_for_disconnect()
+ assert_equal(node.num_test_p2p_connections(), NUM_PEERS - 1)
+ self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+ # Make sure that SendMessages() is invoked, which assigns the missing block
+ # to another peer and starts the stalling logic for them
+ self.all_sync_send_with_ping(peers)
+
+ self.log.info("Check that the stalling timeout gets doubled to 4 seconds for the next staller")
+ # No disconnect after just 3 seconds
+ self.mocktime += 3
+ node.setmocktime(self.mocktime)
+ self.all_sync_send_with_ping(peers)
+ assert_equal(node.num_test_p2p_connections(), NUM_PEERS - 1)
+
+ self.mocktime += 2
+ node.setmocktime(self.mocktime)
+ self.wait_until(lambda: sum(x.is_connected for x in node.p2ps) == NUM_PEERS - 2)
+ self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+ self.all_sync_send_with_ping(peers)
+
+ self.log.info("Check that the stalling timeout gets doubled to 8 seconds for the next staller")
+ # No disconnect after just 7 seconds
+ self.mocktime += 7
+ node.setmocktime(self.mocktime)
+ self.all_sync_send_with_ping(peers)
+ assert_equal(node.num_test_p2p_connections(), NUM_PEERS - 2)
+
+ self.mocktime += 2
+ node.setmocktime(self.mocktime)
+ self.wait_until(lambda: sum(x.is_connected for x in node.p2ps) == NUM_PEERS - 3)
+ self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+ self.all_sync_send_with_ping(peers)
+
+ self.log.info("Provide the withheld block and check that stalling timeout gets reduced back to 2 seconds")
+ with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds']):
+ for p in peers:
+ if p.is_connected and (stall_block in p.getdata_requests):
+ p.send_message(msg_block(block_dict[stall_block]))
+
+ self.log.info("Check that all outstanding blocks get connected")
+ self.wait_until(lambda: node.getblockcount() == NUM_BLOCKS)
+
+ def total_bytes_recv_for_blocks(self):
+ total = 0
+ for info in self.nodes[0].getpeerinfo():
+ if ("block" in info["bytesrecv_per_msg"].keys()):
+ total += info["bytesrecv_per_msg"]["block"]
+ return total
+
+ def all_sync_send_with_ping(self, peers):
+ for p in peers:
+ if p.is_connected:
+ p.sync_send_with_ping()
+
+ def is_block_requested(self, peers, hash):
+ for p in peers:
+ if p.is_connected and (hash in p.getdata_requests):
+ return True
+ return False
+
+
+if __name__ == '__main__':
+ P2PIBDStallingTest().main()
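
The timeline in the new test encodes the adaptive stalling timeout it exercises: 2 seconds for the first staller, doubled to 4 and then 8 seconds for the next ones, and reduced back to 2 seconds once the withheld block is delivered. A toy model of that behaviour, with made-up names (this is not net_processing code):

```python
# Toy model of the adaptive stalling timeout checked above; names are invented.
class StallingTimeout:
    DEFAULT = 2  # seconds, per the "disconnected after 2 seconds" check

    def __init__(self):
        self.timeout = self.DEFAULT

    def on_staller_disconnected(self):
        self.timeout *= 2            # the next staller gets twice as long

    def on_stalled_block_delivered(self):
        self.timeout = self.DEFAULT  # "Decreased stalling timeout to 2 seconds"

t = StallingTimeout()
t.on_staller_disconnected()
assert t.timeout == 4
t.on_staller_disconnected()
assert t.timeout == 8
t.on_stalled_block_delivered()
assert t.timeout == 2
```
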
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 3109ad2b56..ea4999a965 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -13,11 +13,12 @@ from test_framework.messages import (
MAX_HEADERS_RESULTS,
MAX_INV_SIZE,
MAX_PROTOCOL_MESSAGE_LENGTH,
+ MSG_TX,
+ from_hex,
msg_getdata,
msg_headers,
msg_inv,
msg_ping,
- MSG_TX,
msg_version,
ser_string,
)
@@ -73,6 +74,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
self.test_oversized_inv_msg()
self.test_oversized_getdata_msg()
self.test_oversized_headers_msg()
+ self.test_invalid_pow_headers_msg()
self.test_resource_exhaustion()
def test_buffer(self):
@@ -248,6 +250,36 @@ class InvalidMessagesTest(BitcoinTestFramework):
size = MAX_HEADERS_RESULTS + 1
self.test_oversized_msg(msg_headers([CBlockHeader()] * size), size)
+ def test_invalid_pow_headers_msg(self):
+ self.log.info("Test headers message with invalid proof-of-work is logged as misbehaving and disconnects peer")
+ blockheader_tip_hash = self.nodes[0].getbestblockhash()
+ blockheader_tip = from_hex(CBlockHeader(), self.nodes[0].getblockheader(blockheader_tip_hash, False))
+
+ # send valid headers message first
+ assert_equal(self.nodes[0].getblockchaininfo()['headers'], 0)
+ blockheader = CBlockHeader()
+ blockheader.hashPrevBlock = int(blockheader_tip_hash, 16)
+ blockheader.nTime = int(time.time())
+ blockheader.nBits = blockheader_tip.nBits
+ blockheader.rehash()
+ while not blockheader.hash.startswith('0'):
+ blockheader.nNonce += 1
+ blockheader.rehash()
+ peer = self.nodes[0].add_p2p_connection(P2PInterface())
+ peer.send_and_ping(msg_headers([blockheader]))
+ assert_equal(self.nodes[0].getblockchaininfo()['headers'], 1)
+ chaintips = self.nodes[0].getchaintips()
+ assert_equal(chaintips[0]['status'], 'headers-only')
+ assert_equal(chaintips[0]['hash'], blockheader.hash)
+
+ # invalidate PoW
+ while not blockheader.hash.startswith('f'):
+ blockheader.nNonce += 1
+ blockheader.rehash()
+ with self.nodes[0].assert_debug_log(['Misbehaving', 'header with invalid proof of work']):
+ peer.send_message(msg_headers([blockheader]))
+ peer.wait_for_disconnect()
+
def test_resource_exhaustion(self):
self.log.info("Test node stays up despite many large junk messages")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
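
In test_invalid_pow_headers_msg above, grinding nNonce until the hash starts with hex '0' is enough to satisfy regtest's very low difficulty, and re-grinding until it starts with 'f' is enough to break it. The underlying check compares the header hash, parsed as an integer from its usual hex form, against the target decoded from the compact nBits field. A simplified sketch (regtest's nBits is 0x207fffff; edge cases of the compact encoding are ignored):

```python
# Sketch of the proof-of-work check a headers message must pass; this is an
# illustration of the compact-target comparison, not the framework's helper.
def target_from_nbits(nbits: int) -> int:
    exponent = nbits >> 24
    mantissa = nbits & 0x007fffff
    return mantissa << (8 * (exponent - 3))

def check_pow(header_hash_hex: str, nbits: int) -> bool:
    # the block hash in its usual display/RPC hex form, parsed as an integer,
    # must not exceed the target
    return int(header_hash_hex, 16) <= target_from_nbits(nbits)

REGTEST_NBITS = 0x207fffff
# a hash starting with hex 'f' exceeds regtest's target; one starting with '0' does not
assert not check_pow("f" + "0" * 63, REGTEST_NBITS)
assert check_pow("0" + "f" * 63, REGTEST_NBITS)
```
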
diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py
index 5a0003d3ef..a56afbcf7b 100755
--- a/test/functional/p2p_node_network_limited.py
+++ b/test/functional/p2p_node_network_limited.py
@@ -85,7 +85,7 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
self.connect_nodes(0, 2)
try:
self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
- except:
+ except Exception:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
index 41324682fc..f84bbf67e6 100755
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -56,12 +56,12 @@ class P2PPermissionsTests(BitcoinTestFramework):
# For this, we need to use whitebind instead of bind
# by modifying the configuration file.
ip_port = "127.0.0.1:{}".format(p2p_port(1))
- self.replaceinconfig(1, "bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)
+ self.nodes[1].replace_in_config([("bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)])
self.checkpermission(
["-whitelist=noban@127.0.0.1"],
# Check parameter interaction forcerelay should activate relay
["noban", "bloomfilter", "forcerelay", "relay", "download"])
- self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")
+ self.nodes[1].replace_in_config([("whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")])
self.checkpermission(
# legacy whitelistrelay should be ignored
@@ -138,12 +138,6 @@ class P2PPermissionsTests(BitcoinTestFramework):
if p not in peerinfo['permissions']:
raise AssertionError("Expected permissions %r is not granted." % p)
- def replaceinconfig(self, nodeid, old, new):
- with open(self.nodes[nodeid].bitcoinconf, encoding="utf8") as f:
- newText = f.read().replace(old, new)
- with open(self.nodes[nodeid].bitcoinconf, 'w', encoding="utf8") as f:
- f.write(newText)
-
if __name__ == '__main__':
P2PPermissionsTests().main()
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 7356b8bbb3..0e463c5072 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -5,6 +5,7 @@
"""
Test transaction download behavior
"""
+import time
from test_framework.messages import (
CInv,
@@ -13,7 +14,6 @@ from test_framework.messages import (
MSG_WTX,
msg_inv,
msg_notfound,
- tx_from_hex,
)
from test_framework.p2p import (
P2PInterface,
@@ -23,9 +23,7 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
-from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
-
-import time
+from test_framework.wallet import MiniWallet
class TestP2PConn(P2PInterface):
@@ -88,19 +86,8 @@ class TxDownloadTest(BitcoinTestFramework):
def test_inv_block(self):
self.log.info("Generate a transaction on node 0")
- tx = self.nodes[0].createrawtransaction(
- inputs=[{ # coinbase
- "txid": self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0],
- "vout": 0
- }],
- outputs={ADDRESS_BCRT1_UNSPENDABLE: 50 - 0.00025},
- )
- tx = self.nodes[0].signrawtransactionwithkey(
- hexstring=tx,
- privkeys=[self.nodes[0].get_deterministic_priv_key().key],
- )['hex']
- ctx = tx_from_hex(tx)
- txid = int(ctx.rehash(), 16)
+ tx = self.wallet.create_self_transfer()
+ txid = int(tx['txid'], 16)
self.log.info(
"Announce the transaction to all nodes from all {} incoming peers, but never send it".format(NUM_INBOUND))
@@ -109,7 +96,7 @@ class TxDownloadTest(BitcoinTestFramework):
p.send_and_ping(msg)
self.log.info("Put the tx in node 0's mempool")
- self.nodes[0].sendrawtransaction(tx)
+ self.nodes[0].sendrawtransaction(tx['hex'])
# Since node 1 is connected outbound to an honest peer (node 0), it
# should get the tx within a timeout. (Assuming that node 0
@@ -255,6 +242,8 @@ class TxDownloadTest(BitcoinTestFramework):
self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
def run_test(self):
+ self.wallet = MiniWallet(self.nodes[0])
+
# Run tests without mocktime that only need one peer-connection first, to avoid restarting the nodes
self.test_expiry_fallback()
self.test_disconnect_fallback()
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index 19c73eebf0..7a0cedb1f5 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -25,6 +25,7 @@ from decimal import Decimal
import http.client
import os
import subprocess
+import textwrap
from test_framework.blocktools import (
MAX_FUTURE_BLOCK_TIME,
@@ -429,6 +430,17 @@ class BlockchainTest(BitcoinTestFramework):
def _test_getnetworkhashps(self):
self.log.info("Test getnetworkhashps")
hashes_per_second = self.nodes[0].getnetworkhashps()
+ assert_raises_rpc_error(
+ -3,
+ textwrap.dedent("""
+ Wrong type passed:
+ {
+ "Position 1 (nblocks)": "JSON value of type string is not of expected type number",
+ "Position 2 (height)": "JSON value of type array is not of expected type number"
+ }
+ """).strip(),
+ lambda: self.nodes[0].getnetworkhashps("a", []),
+ )
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py
index a61710b739..673836bd04 100755
--- a/test/functional/rpc_decodescript.py
+++ b/test/functional/rpc_decodescript.py
@@ -263,6 +263,19 @@ class DecodeScriptTest(BitcoinTestFramework):
rpc_result = self.nodes[0].decodescript(script)
assert_equal(result, rpc_result)
+ def decodescript_miniscript(self):
+ """Check that a Miniscript is decoded when possible under P2WSH context."""
+ # Sourced from https://github.com/bitcoin/bitcoin/pull/27037#issuecomment-1416151907.
+ # Miniscript-compatible offered HTLC
+ res = self.nodes[0].decodescript("82012088a914ffffffffffffffffffffffffffffffffffffffff88210250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0ad51b2")
+ assert res["segwit"]["desc"] == "wsh(and_v(and_v(v:hash160(ffffffffffffffffffffffffffffffffffffffff),v:pk(0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0)),older(1)))#gm8xz4fl"
+ # Miniscript-incompatible offered HTLC
+ res = self.nodes[0].decodescript("82012088a914ffffffffffffffffffffffffffffffffffffffff882102ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffacb2")
+ assert res["segwit"]["desc"] == "wsh(raw(82012088a914ffffffffffffffffffffffffffffffffffffffff882102ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffacb2))#ra6w2xa7"
+ # Miniscript-compatible multisig bigger than 520 byte P2SH limit.
+ res = self.nodes[0].decodescript("5b21020e0338c96a8870479f2396c373cc7696ba124e8635d41b0ea581112b678172612102675333a4e4b8fb51d9d4e22fa5a8eaced3fdac8a8cbf9be8c030f75712e6af992102896807d54bc55c24981f24a453c60ad3e8993d693732288068a23df3d9f50d4821029e51a5ef5db3137051de8323b001749932f2ff0d34c82e96a2c2461de96ae56c2102a4e1a9638d46923272c266631d94d36bdb03a64ee0e14c7518e49d2f29bc401021031c41fdbcebe17bec8d49816e00ca1b5ac34766b91c9f2ac37d39c63e5e008afb2103079e252e85abffd3c401a69b087e590a9b86f33f574f08129ccbd3521ecf516b2103111cf405b627e22135b3b3733a4a34aa5723fb0f58379a16d32861bf576b0ec2210318f331b3e5d38156da6633b31929c5b220349859cc9ca3d33fb4e68aa08401742103230dae6b4ac93480aeab26d000841298e3b8f6157028e47b0897c1e025165de121035abff4281ff00660f99ab27bb53e6b33689c2cd8dcd364bc3c90ca5aea0d71a62103bd45cddfacf2083b14310ae4a84e25de61e451637346325222747b157446614c2103cc297026b06c71cbfa52089149157b5ff23de027ac5ab781800a578192d175462103d3bde5d63bdb3a6379b461be64dad45eabff42f758543a9645afd42f6d4248282103ed1e8d5109c9ed66f7941bc53cc71137baa76d50d274bda8d5e8ffbd6e61fe9a5fae736402c00fb269522103aab896d53a8e7d6433137bbba940f9c521e085dd07e60994579b64a6d992cf79210291b7d0b1b692f8f524516ed950872e5da10fb1b808b5a526dedc6fed1cf29807210386aa9372fbab374593466bc5451dc59954e90787f08060964d95c87ef34ca5bb53ae68")
+ assert_equal(res["segwit"]["desc"], "wsh(or_d(multi(11,020e0338c96a8870479f2396c373cc7696ba124e8635d41b0ea581112b67817261,02675333a4e4b8fb51d9d4e22fa5a8eaced3fdac8a8cbf9be8c030f75712e6af99,02896807d54bc55c24981f24a453c60ad3e8993d693732288068a23df3d9f50d48,029e51a5ef5db3137051de8323b001749932f2ff0d34c82e96a2c2461de96ae56c,02a4e1a9638d46923272c266631d94d36bdb03a64ee0e14c7518e49d2f29bc4010,031c41fdbcebe17bec8d49816e00ca1b5ac34766b91c9f2ac37d39c63e5e008afb,03079e252e85abffd3c401a69b087e590a9b86f33f574f08129ccbd3521ecf516b,03111cf405b627e22135b3b3733a4a34aa5723fb0f58379a16d32861bf576b0ec2,0318f331b3e5d38156da6633b31929c5b220349859cc9ca3d33fb4e68aa0840174,03230dae6b4ac93480aeab26d000841298e3b8f6157028e47b0897c1e025165de1,035abff4281ff00660f99ab27bb53e6b33689c2cd8dcd364bc3c90ca5aea0d71a6,03bd45cddfacf2083b14310ae4a84e25de61e451637346325222747b157446614c,03cc297026b06c71cbfa52089149157b5ff23de027ac5ab781800a578192d17546,03d3bde5d63bdb3a6379b461be64dad45eabff42f758543a9645afd42f6d424828,03ed1e8d5109c9ed66f7941bc53cc71137baa76d50d274bda8d5e8ffbd6e61fe9a),and_v(v:older(4032),multi(2,03aab896d53a8e7d6433137bbba940f9c521e085dd07e60994579b64a6d992cf79,0291b7d0b1b692f8f524516ed950872e5da10fb1b808b5a526dedc6fed1cf29807,0386aa9372fbab374593466bc5451dc59954e90787f08060964d95c87ef34ca5bb))))#7jwwklk4")
+
def run_test(self):
self.log.info("Test decoding of standard input scripts [scriptSig]")
self.decodescript_script_sig()
@@ -272,6 +285,8 @@ class DecodeScriptTest(BitcoinTestFramework):
self.decoderawtransaction_asm_sighashtype()
self.log.info("Data-driven tests")
self.decodescript_datadriven_tests()
+ self.log.info("Miniscript descriptor decoding")
+ self.decodescript_miniscript()
if __name__ == '__main__':
DecodeScriptTest().main()
diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py
index 91298937fd..3062a86565 100755
--- a/test/functional/rpc_preciousblock.py
+++ b/test/functional/rpc_preciousblock.py
@@ -16,7 +16,7 @@ def unidirectional_node_sync_via_rpc(node_src, node_dest):
try:
assert len(node_dest.getblock(blockhash, False)) > 0
break
- except:
+ except Exception:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py
index dd20b28550..61f92aeac3 100644
--- a/test/functional/test_framework/authproxy.py
+++ b/test/functional/test_framework/authproxy.py
@@ -78,7 +78,10 @@ class AuthServiceProxy():
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
- self.timeout = timeout
+ # clamp the socket timeout, since larger values can cause an
+ # "Invalid argument" exception in Python's HTTP(S) client
+ # library on some operating systems (e.g. OpenBSD, FreeBSD)
+ self.timeout = min(timeout, 2147483)
self._set_conn(connection)
def __getattr__(self, name):
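
The clamp value in the authproxy change above looks arbitrary, but 2147483 is presumably chosen so that the timeout still fits in a signed 32-bit count of milliseconds, which is how some platforms' socket/poll layers store it. That interpretation is an assumption, not something stated in the patch; the arithmetic itself checks out:

```python
# 2147483 s is the largest whole-second timeout whose millisecond count
# still fits in a signed 32-bit integer.
INT32_MAX = 2**31 - 1            # 2,147,483,647
assert 2_147_483 * 1000 <= INT32_MAX
assert 2_147_484 * 1000 > INT32_MAX
```
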
diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index 59157f4755..c5768177bd 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -385,7 +385,7 @@ class P2PInterface(P2PConnection):
self.message_count[msgtype] += 1
self.last_message[msgtype] = message
getattr(self, 'on_' + msgtype)(message)
- except:
+ except Exception:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 823958397d..9620951a16 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -279,10 +279,10 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
if seed is None:
seed = random.randrange(sys.maxsize)
else:
- self.log.debug("User supplied random seed {}".format(seed))
+ self.log.info("User supplied random seed {}".format(seed))
random.seed(seed)
- self.log.debug("PRNG seed is: {}".format(seed))
+ self.log.info("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
@@ -533,11 +533,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.nodes.append(test_node_i)
if not test_node_i.version_is_at_least(170000):
# adjust conf for pre 17
- conf_file = test_node_i.bitcoinconf
- with open(conf_file, 'r', encoding='utf8') as conf:
- conf_data = conf.read()
- with open(conf_file, 'w', encoding='utf8') as conf:
- conf.write(conf_data.replace('[regtest]', ''))
+ test_node_i.replace_in_config([('[regtest]', '')])
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
@@ -561,7 +557,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
- except:
+ except Exception:
# If one node failed to start, stop the others
self.stop_nodes()
raise
@@ -884,6 +880,11 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
if platform.system() != "Linux":
raise SkipTest("not on a Linux system")
+ def skip_if_platform_not_posix(self):
+ """Skip the running test if we are not on a POSIX platform"""
+ if os.name != 'posix':
+ raise SkipTest("not on a POSIX system")
+
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index f3d81ed7da..882f82e0f2 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -387,6 +387,21 @@ class TestNode():
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
+ def replace_in_config(self, replacements):
+ """
+ Perform replacements in the configuration file.
+ The substitutions are passed as a list of search-replace-tuples, e.g.
+ [("old", "new"), ("foo", "bar"), ...]
+ """
+ with open(self.bitcoinconf, 'r', encoding='utf8') as conf:
+ conf_data = conf.read()
+ for replacement in replacements:
+ assert_equal(len(replacement), 2)
+ old, new = replacement[0], replacement[1]
+ conf_data = conf_data.replace(old, new)
+ with open(self.bitcoinconf, 'w', encoding='utf8') as conf:
+ conf.write(conf_data)
+
@property
def chain_path(self) -> Path:
return Path(self.datadir) / self.chain
@@ -814,7 +829,7 @@ class RPCOverloadWrapper():
int(address ,16)
is_hex = True
desc = descsum_create('raw(' + address + ')')
- except:
+ except Exception:
desc = descsum_create('addr(' + address + ')')
reqs = [{
'desc': desc,
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 569af0ee9b..26ebce039b 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -211,10 +211,13 @@ BASE_SCRIPTS = [
'p2p_addrv2_relay.py',
'p2p_compactblocks_hb.py',
'p2p_disconnect_ban.py',
+ 'feature_posix_fs_permissions.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
+ 'wallet_change_address.py --legacy-wallet',
+ 'wallet_change_address.py --descriptors',
'p2p_addr_relay.py',
'p2p_getaddr_caching.py',
'p2p_getdata.py',
@@ -254,6 +257,7 @@ BASE_SCRIPTS = [
'wallet_importprunedfunds.py --descriptors',
'p2p_leak_tx.py',
'p2p_eviction.py',
+ 'p2p_ibd_stalling.py',
'wallet_signmessagewithaddress.py',
'rpc_signmessagewithprivkey.py',
'rpc_generate.py',
diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py
index f55a3758ce..76aac3e486 100755
--- a/test/functional/wallet_backwards_compatibility.py
+++ b/test/functional/wallet_backwards_compatibility.py
@@ -33,11 +33,12 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 10
+ self.num_nodes = 11
# Add new version after each release:
self.extra_args = [
["-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # Pre-release: use to mine blocks. noban for immediate tx relay
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # Pre-release: use to receive coins, swap wallets, etc
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v24.0.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v23.0
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v22.0
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v0.21.0
@@ -57,6 +58,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
None,
None,
+ 240001,
230000,
220000,
210000,
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index a2ae997ecb..ad79e0288c 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -81,7 +81,7 @@ class BumpFeeTest(BitcoinTestFramework):
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
- for mode in ["default", "fee_rate"]:
+ for mode in ["default", "fee_rate", "new_outputs"]:
test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address)
self.test_invalid_parameters(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
@@ -157,6 +157,14 @@ class BumpFeeTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
rbf_node.bumpfee, rbfid, {"estimate_mode": mode})
+ self.log.info("Test invalid outputs values")
+ assert_raises_rpc_error(-8, "Invalid parameter, output argument cannot be an empty array",
+ rbf_node.bumpfee, rbfid, {"outputs": []})
+ assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: " + dest_address,
+ rbf_node.bumpfee, rbfid, {"outputs": [{dest_address: 0.1}, {dest_address: 0.2}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data",
+ rbf_node.bumpfee, rbfid, {"outputs": [{"data": "deadbeef"}, {"data": "deadbeef"}]})
+
self.clear_mempool()
@@ -169,6 +177,10 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
if mode == "fee_rate":
bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": str(NORMAL)})
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
+ elif mode == "new_outputs":
+ new_address = peer_node.getnewaddress()
+ bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"outputs": {new_address: 0.0003}})
+ bumped_tx = rbf_node.bumpfee(rbfid, {"outputs": {new_address: 0.0003}})
else:
bumped_psbt = rbf_node.psbtbumpfee(rbfid)
bumped_tx = rbf_node.bumpfee(rbfid)
@@ -192,6 +204,10 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
+ # if this is a new_outputs test, check that outputs were indeed replaced
+ if mode == "new_outputs":
+ assert len(bumpedwtx["details"]) == 1
+ assert bumpedwtx["details"][0]["address"] == new_address
self.clear_mempool()
@@ -628,12 +644,14 @@ def test_change_script_match(self, rbf_node, dest_address):
self.clear_mempool()
-def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
+def spend_one_input(node, dest_address, change_size=Decimal("0.00049000"), data=None):
tx_input = dict(
sequence=MAX_BIP125_RBF_SEQUENCE, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
destinations = {dest_address: Decimal("0.00050000")}
if change_size > 0:
destinations[node.getrawchangeaddress()] = change_size
+ if data:
+ destinations['data'] = data
rawtx = node.createrawtransaction([tx_input], destinations)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
diff --git a/test/functional/wallet_change_address.py b/test/functional/wallet_change_address.py
new file mode 100755
index 0000000000..f8bfe9eebf
--- /dev/null
+++ b/test/functional/wallet_change_address.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+# Copyright (c) 2023 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test wallet change address selection"""
+
+import re
+
+from test_framework.blocktools import COINBASE_MATURITY
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+
+
+class WalletChangeAddressTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ self.add_wallet_options(parser)
+
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+ # discardfee is used to make change outputs less likely in the change_pos test
+ self.extra_args = [
+ [],
+ ["-discardfee=1"],
+ ["-avoidpartialspends", "-discardfee=1"]
+ ]
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def assert_change_index(self, node, tx, index):
+ change_index = None
+ for vout in tx["vout"]:
+ info = node.getaddressinfo(vout["scriptPubKey"]["address"])
+ if (info["ismine"] and info["ischange"]):
+ change_index = int(re.findall(r'\d+', info["hdkeypath"])[-1])
+ break
+ assert_equal(change_index, index)
+
+ def assert_change_pos(self, wallet, tx, pos):
+ change_pos = None
+ for index, output in enumerate(tx["vout"]):
+ info = wallet.getaddressinfo(output["scriptPubKey"]["address"])
+ if (info["ismine"] and info["ischange"]):
+ change_pos = index
+ break
+ assert_equal(change_pos, pos)
+
+ def run_test(self):
+ self.log.info("Setting up")
+ # Mine some coins
+ self.generate(self.nodes[0], COINBASE_MATURITY + 1)
+
+ # Get some addresses from the two nodes
+ addr1 = [self.nodes[1].getnewaddress() for _ in range(3)]
+ addr2 = [self.nodes[2].getnewaddress() for _ in range(3)]
+ addrs = addr1 + addr2
+
+ # Send 1 + 0.5 coin to each address
+ [self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
+ [self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
+ self.generate(self.nodes[0], 1)
+
+ for i in range(20):
+ for n in [1, 2]:
+ self.log.debug(f"Send transaction from node {n}: expected change index {i}")
+ txid = self.nodes[n].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
+ tx = self.nodes[n].getrawtransaction(txid, True)
+ # find the change output and ensure that expected change index was used
+ self.assert_change_index(self.nodes[n], tx, i)
+
+ # Start next test with fresh wallets and new coins
+ self.nodes[1].createwallet("w1")
+ self.nodes[2].createwallet("w2")
+ w1 = self.nodes[1].get_wallet_rpc("w1")
+ w2 = self.nodes[2].get_wallet_rpc("w2")
+ addr1 = w1.getnewaddress()
+ addr2 = w2.getnewaddress()
+ self.nodes[0].sendtoaddress(addr1, 3.0)
+ self.nodes[0].sendtoaddress(addr1, 0.1)
+ self.nodes[0].sendtoaddress(addr2, 3.0)
+ self.nodes[0].sendtoaddress(addr2, 0.1)
+ self.generate(self.nodes[0], 1)
+
+ sendTo1 = self.nodes[0].getnewaddress()
+ sendTo2 = self.nodes[0].getnewaddress()
+ sendTo3 = self.nodes[0].getnewaddress()
+
+ # The avoid partial spends wallet will always create a change output
+ node = self.nodes[2]
+ res = w2.send({sendTo1: "1.0", sendTo2: "1.0", sendTo3: "0.9999"}, options={"change_position": 0})
+ tx = node.getrawtransaction(res["txid"], True)
+ self.assert_change_pos(w2, tx, 0)
+
+ # The default wallet will internally create a tx without change first,
+ # then create a second candidate using APS that requires a change output.
+ # Ensure that the user-configured change position is kept
+ node = self.nodes[1]
+ res = w1.send({sendTo1: "1.0", sendTo2: "1.0", sendTo3: "0.9999"}, options={"change_position": 0})
+ tx = node.getrawtransaction(res["txid"], True)
+ # If the wallet ignores the user's change_position there is still a 25% chance
+ # (one in four output positions) that the random change position passes the test
+ self.assert_change_pos(w1, tx, 0)
+
+if __name__ == '__main__':
+ WalletChangeAddressTest().main()
diff --git a/test/functional/wallet_crosschain.py b/test/functional/wallet_crosschain.py
index 6f93ad4e3b..7a1297e65f 100755
--- a/test/functional/wallet_crosschain.py
+++ b/test/functional/wallet_crosschain.py
@@ -25,11 +25,7 @@ class WalletCrossChain(BitcoinTestFramework):
# Switch node 1 to testnet before starting it.
self.nodes[1].chain = 'testnet3'
self.nodes[1].extra_args = ['-maxconnections=0', '-prune=550'] # disable testnet sync
- with open(self.nodes[1].bitcoinconf, 'r', encoding='utf8') as conf:
- conf_data = conf.read()
- with open (self.nodes[1].bitcoinconf, 'w', encoding='utf8') as conf:
- conf.write(conf_data.replace('regtest=', 'testnet=').replace('[regtest]', '[test]'))
-
+ self.nodes[1].replace_in_config([('regtest=', 'testnet='), ('[regtest]', '[test]')])
self.start_nodes()
def run_test(self):
diff --git a/test/functional/wallet_encryption.py b/test/functional/wallet_encryption.py
index 885c52cf2e..88b9ebbddd 100755
--- a/test/functional/wallet_encryption.py
+++ b/test/functional/wallet_encryption.py
@@ -90,6 +90,17 @@ class WalletEncryptionTest(BitcoinTestFramework):
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_equal(actual_time, expected_time)
+ self.nodes[0].walletlock()
+
+ # Test passphrase with null characters
+ passphrase_with_nulls = "Phrase\0With\0Nulls"
+ self.nodes[0].walletpassphrasechange(passphrase2, passphrase_with_nulls)
+ # walletpassphrasechange should not stop at null characters
+ assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase_with_nulls.partition("\0")[0], 10)
+ self.nodes[0].walletpassphrase(passphrase_with_nulls, 10)
+ sig = self.nodes[0].signmessage(address, msg)
+ assert self.nodes[0].verifymessage(address, sig, msg)
+ self.nodes[0].walletlock()
if __name__ == '__main__':
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index 83c1826a41..bdb9081261 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -41,6 +41,11 @@ class WalletGroupTest(BitcoinTestFramework):
def run_test(self):
self.log.info("Setting up")
+ # To make full use of immediate tx relay, all nodes need to be reachable
+ # via inbound peers, i.e. connect first to last to close the circle
+ # (the default test network topology looks like this:
+ # node0 <-- node1 <-- node2 <-- node3 <-- node4 <-- node5)
+ self.connect_nodes(0, self.num_nodes - 1)
# Mine some coins
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py
index ca0209b61d..e66eb2c289 100755
--- a/test/functional/wallet_importdescriptors.py
+++ b/test/functional/wallet_importdescriptors.py
@@ -448,14 +448,14 @@ class ImportDescriptorsTest(BitcoinTestFramework):
wallet=wmulti_priv)
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1001) # Range end (1000) is inclusive, so 1001 addresses generated
- addr = wmulti_priv.getnewaddress('', 'bech32')
+ addr = wmulti_priv.getnewaddress('', 'bech32') # uses receive 0
assert_equal(addr, 'bcrt1qdt0qy5p7dzhxzmegnn4ulzhard33s2809arjqgjndx87rv5vd0fq2czhy8') # Derived at m/84'/0'/0'/0
- change_addr = wmulti_priv.getrawchangeaddress('bech32')
- assert_equal(change_addr, 'bcrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsy44n8e')
+ change_addr = wmulti_priv.getrawchangeaddress('bech32') # uses change 0
+ assert_equal(change_addr, 'bcrt1qt9uhe3a9hnq7vajl7a094z4s3crm9ttf8zw3f5v9gr2nyd7e3lnsy44n8e') # Derived at m/84'/1'/0'/0
assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1000)
txid = w0.sendtoaddress(addr, 10)
self.generate(self.nodes[0], 6)
- send_txid = wmulti_priv.sendtoaddress(w0.getnewaddress(), 8)
+ send_txid = wmulti_priv.sendtoaddress(w0.getnewaddress(), 8) # uses change 1
decoded = wmulti_priv.gettransaction(txid=send_txid, verbose=True)['decoded']
assert_equal(len(decoded['vin'][0]['txinwitness']), 4)
self.sync_all()
@@ -481,10 +481,10 @@ class ImportDescriptorsTest(BitcoinTestFramework):
wallet=wmulti_pub)
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 1000) # The first one was already consumed by previous import and is detected as used
- addr = wmulti_pub.getnewaddress('', 'bech32')
+ addr = wmulti_pub.getnewaddress('', 'bech32') # uses receive 1
assert_equal(addr, 'bcrt1qp8s25ckjl7gr6x2q3dx3tn2pytwp05upkjztk6ey857tt50r5aeqn6mvr9') # Derived at m/84'/0'/0'/1
- change_addr = wmulti_pub.getrawchangeaddress('bech32')
- assert_equal(change_addr, 'bcrt1qzxl0qz2t88kljdnkzg4n4gapr6kte26390gttrg79x66nt4p04fssj53nl')
+ change_addr = wmulti_pub.getrawchangeaddress('bech32') # uses change 2
+ assert_equal(change_addr, 'bcrt1qp6j3jw8yetefte7kw6v5pc89rkgakzy98p6gf7ayslaveaxqyjusnw580c') # Derived at m/84'/1'/0'/2
assert send_txid in self.nodes[0].getrawmempool(True)
assert send_txid in (x['txid'] for x in wmulti_pub.listunspent(0))
assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 999)
@@ -667,5 +667,33 @@ class ImportDescriptorsTest(BitcoinTestFramework):
success=True,
warnings=["Unknown output type, cannot set descriptor to active."])
+ self.log.info("Test importing a descriptor to an encrypted wallet")
+
+ descriptor = {"desc": descsum_create("pkh(" + xpriv + "/1h/*h)"),
+ "timestamp": "now",
+ "active": True,
+ "range": [0,4000],
+ "next_index": 4000}
+
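+ # The temp wallet imports this descriptor starting at index 4000, so the coins it
+ # receives land deep in the range; the encrypted wallet below imports the same
+ # descriptor from index 0 and must expand its keypool far enough to detect them.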
+ self.nodes[0].createwallet("temp_wallet", blank=True, descriptors=True)
+ temp_wallet = self.nodes[0].get_wallet_rpc("temp_wallet")
+ temp_wallet.importdescriptors([descriptor])
+ self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, temp_wallet.getnewaddress())
+ self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, temp_wallet.getnewaddress())
+
+ self.nodes[0].createwallet("encrypted_wallet", blank=True, descriptors=True, passphrase="passphrase")
+ encrypted_wallet = self.nodes[0].get_wallet_rpc("encrypted_wallet")
+
+ descriptor["timestamp"] = 0
+ descriptor["next_index"] = 0
+
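+ # Unlock and import in a single batched request so the import of the large
+ # descriptor range runs while the wallet is still unlocked.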
+ batch = []
+ batch.append(encrypted_wallet.walletpassphrase.get_request("passphrase", 3))
+ batch.append(encrypted_wallet.importdescriptors.get_request([descriptor]))
+
+ encrypted_wallet.batch(batch)
+
+ assert_equal(temp_wallet.getbalance(), encrypted_wallet.getbalance())
+
if __name__ == '__main__':
ImportDescriptorsTest().main()
diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py
index 688ac98617..7c2959bb89 100755
--- a/test/functional/wallet_migration.py
+++ b/test/functional/wallet_migration.py
@@ -163,6 +163,10 @@ class WalletMigrationTest(BitcoinTestFramework):
assert_equal(basic2.getbalance(), basic2_balance)
self.assert_list_txs_equal(basic2.listtransactions(), basic2_txs)
+ # Now test migration on a descriptor wallet
+ self.log.info("Test \"nothing to migrate\" when the user tries to migrate a wallet with no legacy data")
+ assert_raises_rpc_error(-4, "Error: This wallet is already a descriptor wallet", basic2.migratewallet)
+
def test_multisig(self):
default = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
@@ -396,11 +400,75 @@ class WalletMigrationTest(BitcoinTestFramework):
def test_encrypted(self):
self.log.info("Test migration of an encrypted wallet")
wallet = self.create_legacy_wallet("encrypted")
+ default = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
wallet.encryptwallet("pass")
+ addr = wallet.getnewaddress()
+ txid = default.sendtoaddress(addr, 1)
+ self.generate(self.nodes[0], 1)
+ bals = wallet.getbalances()
+
+ assert_raises_rpc_error(-4, "Error: Wallet decryption failed, the wallet passphrase was not provided or was incorrect", wallet.migratewallet)
+ assert_raises_rpc_error(-4, "Error: Wallet decryption failed, the wallet passphrase was not provided or was incorrect", wallet.migratewallet, None, "badpass")
+ assert_raises_rpc_error(-4, "The passphrase contains a null character", wallet.migratewallet, None, "pass\0with\0null")
+
+ wallet.migratewallet(passphrase="pass")
+
+ info = wallet.getwalletinfo()
+ assert_equal(info["descriptors"], True)
+ assert_equal(info["format"], "sqlite")
+ assert_equal(info["unlocked_until"], 0)
+ wallet.gettransaction(txid)
+
+ assert_equal(bals, wallet.getbalances())
+
+ def test_unloaded(self):
+ self.log.info("Test migration of a wallet that isn't loaded")
+ wallet = self.create_legacy_wallet("notloaded")
+ default = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+
+ addr = wallet.getnewaddress()
+ txid = default.sendtoaddress(addr, 1)
+ self.generate(self.nodes[0], 1)
+ bals = wallet.getbalances()
+
+ wallet.unloadwallet()
- assert_raises_rpc_error(-15, "Error: migratewallet on encrypted wallets is currently unsupported.", wallet.migratewallet)
- # TODO: Fix migratewallet so that we can actually migrate encrypted wallets
+ assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", wallet.migratewallet, "someotherwallet")
+ assert_raises_rpc_error(-8, "Either RPC endpoint wallet or wallet_name parameter must be provided", self.nodes[0].migratewallet)
+ self.nodes[0].migratewallet("notloaded")
+
+ info = wallet.getwalletinfo()
+ assert_equal(info["descriptors"], True)
+ assert_equal(info["format"], "sqlite")
+ wallet.gettransaction(txid)
+
+ assert_equal(bals, wallet.getbalances())
+
+ def test_unloaded_by_path(self):
+ self.log.info("Test migration of a wallet that isn't loaded, specified by path")
+ wallet = self.create_legacy_wallet("notloaded2")
+ default = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+
+ addr = wallet.getnewaddress()
+ txid = default.sendtoaddress(addr, 1)
+ self.generate(self.nodes[0], 1)
+ bals = wallet.getbalances()
+
+ wallet.unloadwallet()
+
+ wallet_file_path = os.path.join(self.nodes[0].datadir, "regtest", "wallets", "notloaded2")
+ self.nodes[0].migratewallet(wallet_file_path)
+
+ # Because we gave the wallet name as a full path, the loaded wallet's name is that path too.
+ wallet = self.nodes[0].get_wallet_rpc(wallet_file_path)
+
+ info = wallet.getwalletinfo()
+ assert_equal(info["descriptors"], True)
+ assert_equal(info["format"], "sqlite")
+ wallet.gettransaction(txid)
+
+ assert_equal(bals, wallet.getbalances())
def run_test(self):
self.generate(self.nodes[0], 101)
@@ -412,6 +480,8 @@ class WalletMigrationTest(BitcoinTestFramework):
self.test_no_privkeys()
self.test_pk_coinbases()
self.test_encrypted()
+ self.test_unloaded()
+ self.test_unloaded_by_path()
if __name__ == '__main__':
WalletMigrationTest().main()
diff --git a/test/functional/wallet_miniscript.py b/test/functional/wallet_miniscript.py
index cefcaf4dc7..7bc3424bf4 100755
--- a/test/functional/wallet_miniscript.py
+++ b/test/functional/wallet_miniscript.py
@@ -5,19 +5,137 @@
"""Test Miniscript descriptors integration in the wallet."""
from test_framework.descriptors import descsum_create
+from test_framework.psbt import PSBT, PSBT_IN_SHA256
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
+TPRVS = [
+ "tprv8ZgxMBicQKsPerQj6m35no46amfKQdjY7AhLnmatHYXs8S4MTgeZYkWAn4edSGwwL3vkSiiGqSZQrmy5D3P5gBoqgvYP2fCUpBwbKTMTAkL",
+ "tprv8ZgxMBicQKsPd3cbrKjE5GKKJLDEidhtzSSmPVtSPyoHQGL2LZw49yt9foZsN9BeiC5VqRaESUSDV2PS9w7zAVBSK6EQH3CZW9sMKxSKDwD",
+ "tprv8iF7W37EHnVEtDr9EFeyFjQJFL6SfGby2AnZ2vQARxTQHQXy9tdzZvBBVp8a19e5vXhskczLkJ1AZjqgScqWL4FpmXVp8LLjiorcrFK63Sr",
+]
+TPUBS = [
+ "tpubD6NzVbkrYhZ4YPAbyf6urxqqnmJF79PzQtyERAmvkSVS9fweCTjxjDh22Z5St9fGb1a5DUCv8G27nYupKP1Ctr1pkamJossoetzws1moNRn",
+ "tpubD6NzVbkrYhZ4YMQC15JS7QcrsAyfGrGiykweqMmPxTkEVScu7vCZLNpPXW1XphHwzsgmqdHWDQAfucbM72EEB1ZEyfgZxYvkZjYVXx1xS9p",
+ "tpubD6NzVbkrYhZ4YU9vM1s53UhD75UyJatx8EMzMZ3VUjR2FciNfLLkAw6a4pWACChzobTseNqdWk4G7ZdBqRDLtLSACKykTScmqibb1ZrCvJu",
+ "tpubD6NzVbkrYhZ4XRMcMFMMFvzVt6jaDAtjZhD7JLwdPdMm9xa76DnxYYP7w9TZGJDVFkek3ArwVsuacheqqPog8TH5iBCX1wuig8PLXim4n9a",
+ "tpubD6NzVbkrYhZ4WsqRzDmkL82SWcu42JzUvKWzrJHQ8EC2vEHRHkXj1De93sD3biLrKd8XGnamXURGjMbYavbszVDXpjXV2cGUERucLJkE6cy",
+ "tpubDEFLeBkKTm8aiYkySz8hXAXPVnPSfxMi7Fxhg9sejUrkwJuRWvPdLEiXjTDbhGbjLKCZUDUUibLxTnK5UP1q7qYrSnPqnNe7M8mvAW1STcc",
+]
+PUBKEYS = [
+ "02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068",
+ "030f64b922aee2fd597f104bc6cb3b670f1ca2c6c49b1071a1a6c010575d94fe5a",
+ "02abe475b199ec3d62fa576faee16a334fdb86ffb26dce75becebaaedf328ac3fe",
+ "0314f3dc33595b0d016bb522f6fe3a67680723d842c1b9b8ae6b59fdd8ab5cccb4",
+ "025eba3305bd3c829e4e1551aac7358e4178832c739e4fc4729effe428de0398ab",
+ "029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0",
+ "0211c7b2e18b6fd330f322de087da62da92ae2ae3d0b7cec7e616479cce175f183",
+]
+
MINISCRIPTS = [
# One of two keys
- "or_b(pk(tpubD6NzVbkrYhZ4XRMcMFMMFvzVt6jaDAtjZhD7JLwdPdMm9xa76DnxYYP7w9TZGJDVFkek3ArwVsuacheqqPog8TH5iBCX1wuig8PLXim4n9a/*),s:pk(tpubD6NzVbkrYhZ4WsqRzDmkL82SWcu42JzUvKWzrJHQ8EC2vEHRHkXj1De93sD3biLrKd8XGnamXURGjMbYavbszVDXpjXV2cGUERucLJkE6cy/*))",
+ f"or_b(pk({TPUBS[0]}/*),s:pk({TPUBS[1]}/*))",
# A script similar (same spending policy) to BOLT3's offered HTLC (with anchor outputs)
- "or_d(pk(tpubD6NzVbkrYhZ4XRMcMFMMFvzVt6jaDAtjZhD7JLwdPdMm9xa76DnxYYP7w9TZGJDVFkek3ArwVsuacheqqPog8TH5iBCX1wuig8PLXim4n9a/*),and_v(and_v(v:pk(tpubD6NzVbkrYhZ4WsqRzDmkL82SWcu42JzUvKWzrJHQ8EC2vEHRHkXj1De93sD3biLrKd8XGnamXURGjMbYavbszVDXpjXV2cGUERucLJkE6cy/*),or_c(pk(tpubD6NzVbkrYhZ4YNwtTWrKRJQzQX3PjPKeUQg1gYh1hiLMkk1cw8SRLgB1yb7JzE8bHKNt6EcZXkJ6AqpCZL1aaRSjnG36mLgbQvJZBNsjWnG/*),v:hash160(7f999c905d5e35cefd0a37673f746eb13fba3640))),older(1)))",
+ f"or_d(pk({TPUBS[0]}/*),and_v(and_v(v:pk({TPUBS[1]}/*),or_c(pk({TPUBS[2]}/*),v:hash160(7f999c905d5e35cefd0a37673f746eb13fba3640))),older(1)))",
# A Revault Unvault policy with the older() replaced by an after()
- "andor(multi(2,tpubD6NzVbkrYhZ4YMQC15JS7QcrsAyfGrGiykweqMmPxTkEVScu7vCZLNpPXW1XphHwzsgmqdHWDQAfucbM72EEB1ZEyfgZxYvkZjYVXx1xS9p/*,tpubD6NzVbkrYhZ4WkCyc7E3z6g6NkypHMiecnwc4DpWHTPqFdteRGkEKukdrSSyJGNnGrHNMfy4BCw2UXo5soYRCtCDDfy4q8pc8oyB7RgTFv8/*),and_v(v:multi(4,030f64b922aee2fd597f104bc6cb3b670f1ca2c6c49b1071a1a6c010575d94fe5a,02abe475b199ec3d62fa576faee16a334fdb86ffb26dce75becebaaedf328ac3fe,0314f3dc33595b0d016bb522f6fe3a67680723d842c1b9b8ae6b59fdd8ab5cccb4,025eba3305bd3c829e4e1551aac7358e4178832c739e4fc4729effe428de0398ab),after(424242)),thresh(4,pkh(tpubD6NzVbkrYhZ4YVrNggiT2ptVHwnFbLBqDkCtV5HkxR4WtcRLAQReKTkqZGNcV6GE7cQsmpBzzSzhk16DUwB1gn1L7ZPnJF2dnNePP1uMBCY/*),a:pkh(tpubD6NzVbkrYhZ4YU9vM1s53UhD75UyJatx8EMzMZ3VUjR2FciNfLLkAw6a4pWACChzobTseNqdWk4G7ZdBqRDLtLSACKykTScmqibb1ZrCvJu/*),a:pkh(tpubD6NzVbkrYhZ4YUHcFfuH9iEBLiH8CBRJTpS7X3qjHmh82m1KCNbzs6w9gyK8oWHSZmKHWcakAXCGfbKg6xoCvKzQCWAHyxaC7QcWfmzyBf4/*),a:pkh(tpubD6NzVbkrYhZ4XXEmQtS3sgxpJbMyMg4McqRR1Af6ULzyrTRnhwjyr1etPD7svap9oFtJf4MM72brUb5o7uvF2Jyszc5c1t836fJW7SX2e8D/*)))",
+ f"andor(multi(2,{TPUBS[0]}/*,{TPUBS[1]}/*),and_v(v:multi(4,{PUBKEYS[0]},{PUBKEYS[1]},{PUBKEYS[2]},{PUBKEYS[3]}),after(424242)),thresh(4,pkh({TPUBS[2]}/*),a:pkh({TPUBS[3]}/*),a:pkh({TPUBS[4]}/*),a:pkh({TPUBS[5]}/*)))",
# Liquid-like federated pegin with emergency recovery keys
- "or_i(and_b(pk(029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0),a:and_b(pk(025f05815e3a1a8a83bfbb03ce016c9a2ee31066b98f567f6227df1d76ec4bd143),a:and_b(pk(025625f41e4a065efc06d5019cbbd56fe8c07595af1231e7cbc03fafb87ebb71ec),a:and_b(pk(02a27c8b850a00f67da3499b60562673dcf5fdfb82b7e17652a7ac54416812aefd),s:pk(03e618ec5f384d6e19ca9ebdb8e2119e5bef978285076828ce054e55c4daf473e2))))),and_v(v:thresh(2,pkh(tpubD6NzVbkrYhZ4YK67cd5fDe4fBVmGB2waTDrAt1q4ey9HPq9veHjWkw3VpbaCHCcWozjkhgAkWpFrxuPMUrmXVrLHMfEJ9auoZA6AS1g3grC/*),a:pkh(033841045a531e1adf9910a6ec279589a90b3b8a904ee64ffd692bd08a8996c1aa),a:pkh(02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068)),older(4209713)))",
+ f"or_i(and_b(pk({PUBKEYS[0]}),a:and_b(pk({PUBKEYS[1]}),a:and_b(pk({PUBKEYS[2]}),a:and_b(pk({PUBKEYS[3]}),s:pk({PUBKEYS[4]}))))),and_v(v:thresh(2,pkh({TPUBS[0]}/*),a:pkh({PUBKEYS[5]}),a:pkh({PUBKEYS[6]})),older(4209713)))",
+]
+
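+# Miniscripts the wallet can (at least partially) sign for, with the data needed to
+# test signing: "sequence"/"locktime" are the values required to spend (None if no
+# timelock is involved), "sigs_count" is the number of signatures the wallet is
+# expected to produce, and "stack_size" is the expected witness stack size (None if
+# the PSBT cannot be finalized).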
+MINISCRIPTS_PRIV = [
+ # One of two keys, of which one private key is known
+ {
+ "ms": f"or_i(pk({TPRVS[0]}/*),pk({TPUBS[0]}/*))",
+ "sequence": None,
+ "locktime": None,
+ "sigs_count": 1,
+ "stack_size": 3,
+ },
+ # A more complex policy that can't be satisfied through the first branch (a preimage is needed)
+ {
+ "ms": f"andor(ndv:older(2),and_v(v:pk({TPRVS[0]}),sha256(2a8ce30189b2ec3200b47aeb4feaac8fcad7c0ba170389729f4898b0b7933bcb)),and_v(v:pkh({TPRVS[1]}),pk({TPRVS[2]}/*)))",
+ "sequence": 2,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 5,
+ },
+ # The same policy but we provide the preimage. This path will be chosen as it's a smaller witness.
+ {
+ "ms": f"andor(ndv:older(2),and_v(v:pk({TPRVS[0]}),sha256(61e33e9dbfefc45f6a194187684d278f789fd4d5e207a357e79971b6519a8b12)),and_v(v:pkh({TPRVS[1]}),pk({TPRVS[2]}/*)))",
+ "sequence": 2,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 4,
+ "sha256_preimages": {
+ "61e33e9dbfefc45f6a194187684d278f789fd4d5e207a357e79971b6519a8b12": "e8774f330f5f330c23e8bbefc5595cb87009ddb7ac3b8deaaa8e9e41702d919c"
+ },
+ },
+ # Signature with a relative timelock
+ {
+ "ms": f"and_v(v:older(2),pk({TPRVS[0]}/*))",
+ "sequence": 2,
+ "locktime": None,
+ "sigs_count": 1,
+ "stack_size": 2,
+ },
+ # Signature with an absolute timelock
+ {
+ "ms": f"and_v(v:after(20),pk({TPRVS[0]}/*))",
+ "sequence": None,
+ "locktime": 20,
+ "sigs_count": 1,
+ "stack_size": 2,
+ },
+ # Signature with both
+ {
+ "ms": f"and_v(v:older(4),and_v(v:after(30),pk({TPRVS[0]}/*)))",
+ "sequence": 4,
+ "locktime": 30,
+ "sigs_count": 1,
+ "stack_size": 2,
+ },
+ # We have one key on each branch; Core signs both (can't finalize)
+ {
+ "ms": f"c:andor(pk({TPRVS[0]}/*),pk_k({TPUBS[0]}),and_v(v:pk({TPRVS[1]}),pk_k({TPUBS[1]})))",
+ "sequence": None,
+ "locktime": None,
+ "sigs_count": 2,
+ "stack_size": None,
+ },
+ # We have all the keys; the wallet selects the timeout path to sign since it's smaller and the sequence is set
+ {
+ "ms": f"andor(pk({TPRVS[0]}/*),pk({TPRVS[2]}),and_v(v:pk({TPRVS[1]}),older(10)))",
+ "sequence": 10,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 3,
+ },
+ # We have all the keys; the wallet selects the primary path to sign unconditionally since nsequence wasn't set to be valid for the timeout path
+ {
+ "ms": f"andor(pk({TPRVS[0]}/*),pk({TPRVS[2]}),and_v(v:pkh({TPRVS[1]}),older(10)))",
+ "sequence": None,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 3,
+ },
+ # Finalizes to the smallest valid witness, regardless of sequence
+ {
+ "ms": f"or_d(pk({TPRVS[0]}/*),and_v(v:pk({TPRVS[1]}),and_v(v:pk({TPRVS[2]}),older(10))))",
+ "sequence": 12,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 2,
+ },
+ # Liquid-like federated pegin with emergency recovery privkeys
+ {
+ "ms": f"or_i(and_b(pk({TPUBS[0]}/*),a:and_b(pk({TPUBS[1]}),a:and_b(pk({TPUBS[2]}),a:and_b(pk({TPUBS[3]}),s:pk({PUBKEYS[0]}))))),and_v(v:thresh(2,pkh({TPRVS[0]}),a:pkh({TPRVS[1]}),a:pkh({TPUBS[4]})),older(42)))",
+ "sequence": 42,
+ "locktime": None,
+ "sigs_count": 2,
+ "stack_size": 8,
+ },
]
@@ -62,7 +180,77 @@ class WalletMiniscriptTest(BitcoinTestFramework):
lambda: len(self.ms_wo_wallet.listunspent(minconf=0, addresses=[addr])) == 1
)
utxo = self.ms_wo_wallet.listunspent(minconf=0, addresses=[addr])[0]
- assert utxo["txid"] == txid and not utxo["solvable"] # No satisfaction logic (yet)
+ assert utxo["txid"] == txid and utxo["solvable"]
+
+ def signing_test(
+ self, ms, sequence, locktime, sigs_count, stack_size, sha256_preimages
+ ):
+ self.log.info(f"Importing private Miniscript '{ms}'")
+ desc = descsum_create(f"wsh({ms})")
+ res = self.ms_sig_wallet.importdescriptors(
+ [
+ {
+ "desc": desc,
+ "active": True,
+ "range": 0,
+ "next_index": 0,
+ "timestamp": "now",
+ }
+ ]
+ )
+ assert res[0]["success"], res
+
+ self.log.info("Generating an address for it and testing it detects funds")
+ addr = self.ms_sig_wallet.getnewaddress()
+ txid = self.funder.sendtoaddress(addr, 0.01)
+ self.wait_until(lambda: txid in self.funder.getrawmempool())
+ self.funder.generatetoaddress(1, self.funder.getnewaddress())
+ utxo = self.ms_sig_wallet.listunspent(addresses=[addr])[0]
+ assert txid == utxo["txid"] and utxo["solvable"]
+
+ self.log.info("Creating a transaction spending these funds")
+ dest_addr = self.funder.getnewaddress()
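+ # When no timelock is under test, default to values that keep timelocks inactive:
+ # nSequence 0xFFFFFFFF - 2 (BIP68 relative locktime disabled, still BIP125
+ # replaceable) and nLockTime 0.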
+ seq = sequence if sequence is not None else 0xFFFFFFFF - 2
+ lt = locktime if locktime is not None else 0
+ psbt = self.ms_sig_wallet.createpsbt(
+ [
+ {
+ "txid": txid,
+ "vout": utxo["vout"],
+ "sequence": seq,
+ }
+ ],
+ [{dest_addr: 0.009}],
+ lt,
+ )
+
+ self.log.info("Signing it and checking the satisfaction.")
+ if sha256_preimages is not None:
+ psbt = PSBT.from_base64(psbt)
+ for (h, preimage) in sha256_preimages.items():
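+ # Per BIP 174, the per-input key is the 1-byte PSBT_IN_SHA256 type followed by
+ # the 32-byte hash, and the value is the matching preimage.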
+ k = PSBT_IN_SHA256.to_bytes(1, "big") + bytes.fromhex(h)
+ psbt.i[0].map[k] = bytes.fromhex(preimage)
+ psbt = psbt.to_base64()
+ res = self.ms_sig_wallet.walletprocesspsbt(psbt=psbt, finalize=False)
+ psbtin = self.nodes[0].rpc.decodepsbt(res["psbt"])["inputs"][0]
+ assert len(psbtin["partial_signatures"]) == sigs_count
+ res = self.ms_sig_wallet.finalizepsbt(res["psbt"])
+ assert res["complete"] == (stack_size is not None)
+
+ if stack_size is not None:
+ txin = self.nodes[0].rpc.decoderawtransaction(res["hex"])["vin"][0]
+ assert len(txin["txinwitness"]) == stack_size, txin["txinwitness"]
+ self.log.info("Broadcasting the transaction.")
+ # If necessary, satisfy a relative timelock
+ if sequence is not None:
+ self.funder.generatetoaddress(sequence, self.funder.getnewaddress())
+ # If necessary, satisfy an absolute timelock
+ height = self.funder.getblockcount()
+ if locktime is not None and height < locktime:
+ self.funder.generatetoaddress(
+ locktime - height, self.funder.getnewaddress()
+ )
+ self.ms_sig_wallet.sendrawtransaction(res["hex"])
def run_test(self):
self.log.info("Making a descriptor wallet")
@@ -71,6 +259,8 @@ class WalletMiniscriptTest(BitcoinTestFramework):
wallet_name="ms_wo", descriptors=True, disable_private_keys=True
)
self.ms_wo_wallet = self.nodes[0].get_wallet_rpc("ms_wo")
+ self.nodes[0].createwallet(wallet_name="ms_sig", descriptors=True)
+ self.ms_sig_wallet = self.nodes[0].get_wallet_rpc("ms_sig")
# Sanity check we wouldn't let an insane Miniscript descriptor in
res = self.ms_wo_wallet.importdescriptors(
@@ -91,6 +281,17 @@ class WalletMiniscriptTest(BitcoinTestFramework):
for ms in MINISCRIPTS:
self.watchonly_test(ms)
+ # Test we can sign for any Miniscript.
+ for ms in MINISCRIPTS_PRIV:
+ self.signing_test(
+ ms["ms"],
+ ms["sequence"],
+ ms["locktime"],
+ ms["sigs_count"],
+ ms["stack_size"],
+ ms.get("sha256_preimages"),
+ )
+
if __name__ == "__main__":
WalletMiniscriptTest().main()
diff --git a/test/functional/wallet_orphanedreward.py b/test/functional/wallet_orphanedreward.py
index d9f7c14ded..d8931fa620 100755
--- a/test/functional/wallet_orphanedreward.py
+++ b/test/functional/wallet_orphanedreward.py
@@ -34,29 +34,40 @@ class OrphanedBlockRewardTest(BitcoinTestFramework):
# the existing balance and the block reward.
self.generate(self.nodes[0], 150)
assert_equal(self.nodes[1].getbalance(), 10 + 25)
+ pre_reorg_conf_bals = self.nodes[1].getbalances()
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 30)
+ orig_chain_tip = self.nodes[0].getbestblockhash()
+ self.sync_mempools()
# Orphan the block reward and make sure that the original coins
# from the wallet can still be spent.
self.nodes[0].invalidateblock(blk)
- self.generate(self.nodes[0], 152)
- # Without the following abandontransaction call, the coins are
- # not considered available yet.
- assert_equal(self.nodes[1].getbalances()["mine"], {
- "trusted": 0,
- "untrusted_pending": 0,
- "immature": 0,
- })
- # The following abandontransaction is necessary to make the later
- # lines succeed, and probably should not be needed; see
- # https://github.com/bitcoin/bitcoin/issues/14148.
- self.nodes[1].abandontransaction(txid)
+ blocks = self.generate(self.nodes[0], 152)
+ conflict_block = blocks[0]
+ # We expect the descendants of orphaned rewards to no longer be considered in the balance
assert_equal(self.nodes[1].getbalances()["mine"], {
"trusted": 10,
"untrusted_pending": 0,
"immature": 0,
})
- self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 9)
+ # And the unconfirmed tx to be abandoned
+ assert_equal(self.nodes[1].gettransaction(txid)["details"][0]["abandoned"], True)
+
+ # The abandoning should persist through reloading
+ self.nodes[1].unloadwallet(self.default_wallet_name)
+ self.nodes[1].loadwallet(self.default_wallet_name)
+ assert_equal(self.nodes[1].gettransaction(txid)["details"][0]["abandoned"], True)
+
+ # If the orphaned reward is reorged back into the main chain, any unconfirmed
+ # descendant txs at the time of the original reorg remain abandoned.
+ self.nodes[0].invalidateblock(conflict_block)
+ self.nodes[0].reconsiderblock(blk)
+ assert_equal(self.nodes[0].getbestblockhash(), orig_chain_tip)
+ self.generate(self.nodes[0], 3)
+
+ assert_equal(self.nodes[1].getbalances(), pre_reorg_conf_bals)
+ assert_equal(self.nodes[1].gettransaction(txid)["details"][0]["abandoned"], True)
+
if __name__ == '__main__':
OrphanedBlockRewardTest().main()
diff --git a/test/functional/wallet_pruning.py b/test/functional/wallet_pruning.py
index 6d8475ce8d..1ceceaee93 100755
--- a/test/functional/wallet_pruning.py
+++ b/test/functional/wallet_pruning.py
@@ -39,11 +39,15 @@ class WalletPruningTest(BitcoinTestFramework):
def mine_large_blocks(self, node, n):
# Get the block parameters for the first block
- best_block = node.getblock(node.getbestblockhash())
+ best_block = node.getblockheader(node.getbestblockhash())
height = int(best_block["height"]) + 1
self.nTime = max(self.nTime, int(best_block["time"])) + 1
previousblockhash = int(best_block["hash"], 16)
big_script = CScript([OP_RETURN] + [OP_TRUE] * 950000)
+ # Set mocktime to accept all future blocks
+ for i in self.nodes:
+ if i.running:
+ i.setmocktime(self.nTime + 600 * n)
for _ in range(n):
block = create_block(hashprev=previousblockhash, ntime=self.nTime, coinbase=create_coinbase(height, script_pubkey=big_script))
block.solve()
@@ -57,9 +61,6 @@ class WalletPruningTest(BitcoinTestFramework):
# Simulate 10 minutes of work time per block
# Important for matching a timestamp with a block +- some window
self.nTime += 600
- for n in self.nodes:
- if n.running:
- n.setmocktime(self.nTime) # Update node's time to accept future blocks
self.sync_all()
def test_wallet_import_pruned(self, wallet_name):
@@ -122,7 +123,7 @@ class WalletPruningTest(BitcoinTestFramework):
# A blk*.dat file is 128MB
# Generate 250 light blocks
- self.generate(self.nodes[0], 250, sync_fun=self.no_op)
+ self.generate(self.nodes[0], 250)
# Generate 50MB worth of large blocks in the blk00000.dat file
self.mine_large_blocks(self.nodes[0], 50)
diff --git a/test/functional/wallet_transactiontime_rescan.py b/test/functional/wallet_transactiontime_rescan.py
index de9616b4a1..904013cdef 100755
--- a/test/functional/wallet_transactiontime_rescan.py
+++ b/test/functional/wallet_transactiontime_rescan.py
@@ -14,6 +14,9 @@ from test_framework.util import (
assert_raises_rpc_error,
set_node_times,
)
+from test_framework.wallet_util import (
+ get_generate_key,
+)
class TransactionTimeRescanTest(BitcoinTestFramework):
@@ -23,6 +26,10 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
+ self.extra_args = [["-keypool=400"],
+ ["-keypool=400"],
+ []
+ ]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -167,6 +174,38 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, "Invalid stop_height", restorewo_wallet.rescanblockchain, 1, -1)
assert_raises_rpc_error(-8, "stop_height must be greater than start_height", restorewo_wallet.rescanblockchain, 20, 10)
+ self.log.info("Test `rescanblockchain` fails when wallet is encrypted and locked")
+ usernode.createwallet(wallet_name="enc_wallet", passphrase="passphrase")
+ enc_wallet = usernode.get_wallet_rpc("enc_wallet")
+ assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", enc_wallet.rescanblockchain)
+
+ if not self.options.descriptors:
+ self.log.info("Test rescanning an encrypted wallet")
+ hd_seed = get_generate_key().privkey
+
+ usernode.createwallet(wallet_name="temp_wallet", blank=True, descriptors=False)
+ temp_wallet = usernode.get_wallet_rpc("temp_wallet")
+ temp_wallet.sethdseed(seed=hd_seed)
+
+ for i in range(399):
+ temp_wallet.getnewaddress()
+
+ self.generatetoaddress(usernode, COINBASE_MATURITY + 1, temp_wallet.getnewaddress())
+ self.generatetoaddress(usernode, COINBASE_MATURITY + 1, temp_wallet.getnewaddress())
+
+ minernode.createwallet("encrypted_wallet", blank=True, passphrase="passphrase", descriptors=False)
+ encrypted_wallet = minernode.get_wallet_rpc("encrypted_wallet")
+
+ encrypted_wallet.walletpassphrase("passphrase", 1)
+ encrypted_wallet.sethdseed(seed=hd_seed)
+
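+ # Unlock and rescan in a single batched request so the rescan runs while the
+ # wallet is still unlocked.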
+ batch = []
+ batch.append(encrypted_wallet.walletpassphrase.get_request("passphrase", 3))
+ batch.append(encrypted_wallet.rescanblockchain.get_request())
+
+ encrypted_wallet.batch(batch)
+
+ assert_equal(encrypted_wallet.getbalance(), temp_wallet.getbalance())
if __name__ == '__main__':
TransactionTimeRescanTest().main()
diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py
index 7f5f15655c..60c868ca04 100755
--- a/test/get_previous_releases.py
+++ b/test/get_previous_releases.py
@@ -80,6 +80,15 @@ SHA256_SUMS = {
"078f96b1e92895009c798ab827fb3fde5f6719eee886bd0c0e93acab18ea4865": {"tag": "v23.0", "tarball": "bitcoin-23.0-riscv64-linux-gnu.tar.gz"},
"c816780583009a9dad426dc0c183c89be9da98906e1e2c7ebae91041c1aaaaf3": {"tag": "v23.0", "tarball": "bitcoin-23.0-x86_64-apple-darwin.tar.gz"},
"2cca490c1f2842884a3c5b0606f179f9f937177da4eadd628e3f7fd7e25d26d0": {"tag": "v23.0", "tarball": "bitcoin-23.0-x86_64-linux-gnu.tar.gz"},
+
+ "0b48b9e69b30037b41a1e6b78fb7cbcc48c7ad627908c99686e81f3802454609": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-aarch64-linux-gnu.tar.gz"},
+ "37d7660f0277301744e96426bbb001d2206b8d4505385dfdeedf50c09aaaef60": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-arm-linux-gnueabihf.tar.gz"},
+ "90ed59e86bfda1256f4b4cad8cc1dd77ee0efec2492bcb5af61402709288b62c": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-arm64-apple-darwin.tar.gz"},
+ "7590645e8676f8b5fda62dc20174474c4ac8fd0defc83a19ed908ebf2e94dc11": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-powerpc64-linux-gnu.tar.gz"},
+ "79e89a101f23ff87816675b98769cd1ee91059f95c5277f38f48f21a9f7f8509": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-powerpc64le-linux-gnu.tar.gz"},
+ "6b163cef7de4beb07b8cb3347095e0d76a584019b1891135cd1268a1f05b9d88": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-riscv64-linux-gnu.tar.gz"},
+ "e2f751512f3c0f00eb68ba946d9c829e6cf99422a61e8f5e0a7c109c318674d0": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-x86_64-apple-darwin.tar.gz"},
+ "49df6e444515d457ea0b885d66f521f2a26ca92ccf73d5296082e633544253bf": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-x86_64-linux-gnu.tar.gz"},
}
diff --git a/test/lint/README.md b/test/lint/README.md
index 8d592c3282..704922d7ab 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -1,5 +1,23 @@
This folder contains lint scripts.
+Running locally
+===============
+
+To run linters locally with the same versions as the CI environment, use the included
+Dockerfile:
+
+```sh
+cd ./ci/lint
+docker build -t bitcoin-linter .
+
+cd /root/of/bitcoin/repo
+docker run --rm -v $(pwd):/bitcoin -it bitcoin-linter
+```
+
+After building the container once, you can simply run the last command any time you
+want to lint.
+
+
check-doc.py
============
Check for missing documentation of command line options.
diff --git a/test/lint/lint-locale-dependence.py b/test/lint/lint-locale-dependence.py
index c5cb34b20a..faea643882 100755
--- a/test/lint/lint-locale-dependence.py
+++ b/test/lint/lint-locale-dependence.py
@@ -34,8 +34,6 @@
#
# See https://doc.qt.io/qt-5/qcoreapplication.html#locale-settings and
# https://stackoverflow.com/a/34878283 for more details.
-#
-# TODO: Reduce KNOWN_VIOLATIONS by replacing uses of locale dependent snprintf with strprintf.
import re
import sys
@@ -45,7 +43,6 @@ from subprocess import check_output, CalledProcessError
KNOWN_VIOLATIONS = [
"src/dbwrapper.cpp:.*vsnprintf",
- "src/test/dbwrapper_tests.cpp:.*snprintf",
"src/test/fuzz/locale.cpp:.*setlocale",
"src/test/fuzz/string.cpp:.*strtol",
"src/test/fuzz/string.cpp:.*strtoul",
diff --git a/test/lint/lint-python.py b/test/lint/lint-python.py
index 4d16facfea..4ec7608708 100755
--- a/test/lint/lint-python.py
+++ b/test/lint/lint-python.py
@@ -47,6 +47,7 @@ ENABLED = (
'E711,' # comparison to None should be 'if cond is None:'
'E714,' # test for object identity should be "is not"
'E721,' # do not compare types, use "isinstance()"
+ 'E722,' # do not use bare 'except'
'E742,' # do not define classes named "l", "O", or "I"
'E743,' # do not define functions named "l", "O", or "I"
'E901,' # SyntaxError: invalid syntax
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index 67ef512895..2fa4e383e2 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -53,6 +53,7 @@ unsigned-integer-overflow:policy/fees.cpp
unsigned-integer-overflow:prevector.h
unsigned-integer-overflow:script/interpreter.cpp
unsigned-integer-overflow:txmempool.cpp
+unsigned-integer-overflow:xoroshiro128plusplus.h
implicit-integer-sign-change:compat/stdin.cpp
implicit-integer-sign-change:compressor.h
implicit-integer-sign-change:crypto/
@@ -69,3 +70,4 @@ shift-base:crypto/
shift-base:hash.cpp
shift-base:streams.h
shift-base:util/bip32.cpp
+shift-base:xoroshiro128plusplus.h
diff --git a/test/util/rpcauth-test.py b/test/util/rpcauth-test.py
index 53058dc394..8a7ff26dcb 100755
--- a/test/util/rpcauth-test.py
+++ b/test/util/rpcauth-test.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test share/rpcauth/rpcauth.py
"""
-import base64
+import re
import configparser
import hmac
import importlib
@@ -28,18 +28,17 @@ class TestRPCAuth(unittest.TestCase):
self.assertEqual(len(self.rpcauth.generate_salt(i)), i * 2)
def test_generate_password(self):
+ """Test that generated passwords only consist of urlsafe characters."""
+ r = re.compile(r"[0-9a-zA-Z_-]*")
password = self.rpcauth.generate_password()
- expected_password = base64.urlsafe_b64encode(
- base64.urlsafe_b64decode(password)).decode('utf-8')
- self.assertEqual(expected_password, password)
+ self.assertTrue(r.fullmatch(password))
def test_check_password_hmac(self):
salt = self.rpcauth.generate_salt(16)
password = self.rpcauth.generate_password()
password_hmac = self.rpcauth.password_to_hmac(salt, password)
- m = hmac.new(bytearray(salt, 'utf-8'),
- bytearray(password, 'utf-8'), 'SHA256')
+ m = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256')
expected_password_hmac = m.hexdigest()
self.assertEqual(expected_password_hmac, password_hmac)
diff --git a/test/util/test_runner.py b/test/util/test_runner.py
index ea3626fa65..e5cdd0bc3a 100755
--- a/test/util/test_runner.py
+++ b/test/util/test_runner.py
@@ -54,7 +54,7 @@ def bctester(testDir, input_basename, buildenv):
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
- except:
+ except Exception:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
@@ -96,7 +96,7 @@ def bctest(testDir, testObj, buildenv):
try:
with open(os.path.join(testDir, outputFn), encoding="utf8") as f:
outputData = f.read()
- except:
+ except Exception:
logging.error("Output file " + outputFn + " cannot be opened")
raise
if not outputData: