-rw-r--r--.cirrus.yml18
-rw-r--r--.editorconfig12
-rw-r--r--.github/workflows/ci.yml16
-rw-r--r--.python-version2
-rw-r--r--CMakeLists.txt60
-rw-r--r--CMakePresets.json62
-rw-r--r--README.md3
-rwxr-xr-xci/lint/04_install.sh1
-rwxr-xr-xci/test/00_setup_env.sh6
-rwxr-xr-xci/test/00_setup_env_i686_centos.sh2
-rwxr-xr-xci/test/00_setup_env_mac_native.sh5
-rwxr-xr-xci/test/00_setup_env_native_asan.sh3
-rwxr-xr-xci/test/00_setup_env_native_fuzz.sh1
-rwxr-xr-xci/test/00_setup_env_native_fuzz_with_msan.sh1
-rwxr-xr-xci/test/00_setup_env_native_fuzz_with_valgrind.sh2
-rwxr-xr-xci/test/00_setup_env_native_msan.sh1
-rwxr-xr-xci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh2
-rwxr-xr-xci/test/00_setup_env_native_previous_releases.sh4
-rwxr-xr-xci/test/00_setup_env_native_tidy.sh6
-rwxr-xr-xci/test/00_setup_env_native_valgrind.sh4
-rwxr-xr-xci/test/01_base_install.sh2
-rwxr-xr-xci/test/02_run_container.sh8
-rwxr-xr-xci/test/03_test_script.sh24
-rw-r--r--ci/test/GetCMakeLogFiles.cmake11
-rw-r--r--cmake/bitcoin-build-config.h.in (renamed from cmake/bitcoin-config.h.in)0
-rw-r--r--cmake/introspection.cmake2
-rw-r--r--cmake/module/AddBoostIfNeeded.cmake6
-rw-r--r--cmake/module/FindNATPMP.cmake32
-rw-r--r--cmake/module/FindQt.cmake (renamed from cmake/module/FindQt5.cmake)24
-rw-r--r--cmake/module/FlagsSummary.cmake3
-rw-r--r--cmake/module/GenerateHeaders.cmake4
-rw-r--r--cmake/module/GenerateSetupNsi.cmake2
-rw-r--r--cmake/module/Maintenance.cmake4
-rw-r--r--cmake/module/ProcessConfigurations.cmake4
-rw-r--r--cmake/module/TryAppendCXXFlags.cmake15
-rw-r--r--cmake/module/TryAppendLinkerFlag.cmake9
-rw-r--r--cmake/script/Coverage.cmake12
-rw-r--r--cmake/script/CoverageFuzz.cmake17
-rw-r--r--cmake/script/CoverageInclude.cmake.in3
-rw-r--r--cmake/script/GenerateHeaderFromJson.cmake30
-rw-r--r--cmake/script/GenerateHeaderFromRaw.cmake29
-rw-r--r--contrib/devtools/bitcoin-tidy/CMakeLists.txt4
-rw-r--r--contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp2
-rw-r--r--contrib/devtools/bitcoin-tidy/example_logprintf.cpp108
-rw-r--r--contrib/devtools/bitcoin-tidy/logprintf.cpp60
-rw-r--r--contrib/devtools/bitcoin-tidy/logprintf.h29
-rwxr-xr-xcontrib/devtools/check-deps.sh46
-rwxr-xr-xcontrib/devtools/clang-format-diff.py2
-rwxr-xr-xcontrib/devtools/gen-bitcoin-conf.sh5
-rwxr-xr-xcontrib/devtools/security-check.py30
-rwxr-xr-xcontrib/devtools/symbol-check.py2
-rwxr-xr-xcontrib/devtools/test-security-check.py11
-rwxr-xr-xcontrib/devtools/test-symbol-check.py2
-rwxr-xr-xcontrib/devtools/test_deterministic_coverage.sh6
-rwxr-xr-xcontrib/devtools/test_utxo_snapshots.sh209
-rwxr-xr-xcontrib/devtools/utxo_snapshot.sh104
-rwxr-xr-xcontrib/guix/libexec/build.sh2
-rw-r--r--contrib/guix/manifest.scm1
-rw-r--r--contrib/seeds/README.md4
-rw-r--r--contrib/signet/README.md7
-rwxr-xr-xcontrib/signet/miner346
-rw-r--r--depends/Makefile6
-rw-r--r--depends/README.md7
-rw-r--r--depends/hosts/darwin.mk2
-rw-r--r--depends/hosts/mingw32.mk3
-rw-r--r--depends/packages/capnp.mk1
-rw-r--r--depends/packages/libevent.mk1
-rw-r--r--depends/packages/libmultiprocess.mk1
-rw-r--r--depends/packages/libnatpmp.mk20
-rw-r--r--depends/packages/native_libmultiprocess.mk4
-rw-r--r--depends/packages/packages.mk1
-rw-r--r--depends/toolchain.cmake.in10
-rw-r--r--doc/CMakeLists.txt2
-rw-r--r--doc/README.md2
-rw-r--r--doc/assumeutxo.md85
-rw-r--r--doc/bitcoin-conf.md2
-rw-r--r--doc/build-freebsd.md2
-rw-r--r--doc/build-openbsd.md2
-rw-r--r--doc/build-osx.md31
-rw-r--r--doc/build-unix.md10
-rw-r--r--doc/build-windows-msvc.md16
-rw-r--r--doc/build-windows.md25
-rw-r--r--doc/dependencies.md3
-rw-r--r--doc/design/assumeutxo.md47
-rw-r--r--doc/design/libraries.md4
-rw-r--r--doc/design/multiprocess.md8
-rw-r--r--doc/developer-notes.md36
-rw-r--r--doc/fuzzing.md178
-rw-r--r--doc/multiprocess.md16
-rw-r--r--doc/release-notes-28358.md6
-rw-r--r--doc/release-notes-empty-template.md9
-rw-r--r--doc/release-notes/release-notes-28.0.md371
-rw-r--r--doc/translation_process.md4
-rw-r--r--doc/translation_strings_policy.md14
-rw-r--r--doc/zmq.md9
-rw-r--r--libbitcoinkernel.pc.in11
-rw-r--r--share/qt/Info.plist.in2
-rwxr-xr-xshare/qt/extract_strings_qt.py2
-rw-r--r--src/.clang-tidy1
-rw-r--r--src/CMakeLists.txt48
-rw-r--r--src/addrdb.cpp4
-rw-r--r--src/addrman.cpp82
-rw-r--r--src/addrman.h5
-rw-r--r--src/addrman_impl.h37
-rw-r--r--src/bench/CMakeLists.txt3
-rw-r--r--src/bench/addrman.cpp2
-rw-r--r--src/bench/checkblock.cpp2
-rw-r--r--src/bench/cluster_linearize.cpp213
-rw-r--r--src/bench/data.cpp16
-rw-r--r--src/bench/data.h19
-rw-r--r--src/bench/load_external.cpp2
-rw-r--r--src/bench/logging.cpp44
-rw-r--r--src/bench/readblock.cpp2
-rw-r--r--src/bench/rpc_blockchain.cpp2
-rw-r--r--src/bench/streams_findbyte.cpp2
-rw-r--r--src/bench/strencodings.cpp2
-rw-r--r--src/bench/wallet_create.cpp2
-rw-r--r--src/bench/wallet_ismine.cpp2
-rw-r--r--src/bench/wallet_loading.cpp2
-rw-r--r--src/bitcoin-chainstate.cpp2
-rw-r--r--src/bitcoin-cli.cpp14
-rw-r--r--src/bitcoin-tx.cpp2
-rw-r--r--src/bitcoin-util.cpp2
-rw-r--r--src/bitcoin-wallet.cpp2
-rw-r--r--src/bitcoind.cpp11
-rw-r--r--src/chain.h2
-rw-r--r--src/clientversion.cpp8
-rw-r--r--src/clientversion.h4
-rw-r--r--src/cluster_linearize.h505
-rw-r--r--src/common/args.cpp24
-rw-r--r--src/common/args.h9
-rw-r--r--src/common/netif.cpp303
-rw-r--r--src/common/netif.h19
-rw-r--r--src/common/pcp.cpp524
-rw-r--r--src/common/pcp.h68
-rw-r--r--src/common/run_command.cpp2
-rw-r--r--src/common/settings.cpp2
-rw-r--r--src/common/system.cpp2
-rw-r--r--src/common/system.h2
-rw-r--r--src/consensus/merkle.cpp103
-rw-r--r--src/consensus/merkle.h10
-rw-r--r--src/crypto/common.h6
-rw-r--r--src/crypto/sha256.cpp2
-rw-r--r--src/httprpc.cpp5
-rw-r--r--src/httpserver.cpp18
-rw-r--r--src/index/base.cpp9
-rw-r--r--src/index/base.h10
-rw-r--r--src/index/blockfilterindex.cpp10
-rw-r--r--src/index/blockfilterindex.h4
-rw-r--r--src/index/coinstatsindex.cpp6
-rw-r--r--src/index/coinstatsindex.h4
-rw-r--r--src/index/txindex.cpp5
-rw-r--r--src/init.cpp543
-rw-r--r--src/init.h2
-rw-r--r--src/init/bitcoin-gui.cpp5
-rw-r--r--src/init/bitcoin-node.cpp1
-rw-r--r--src/init/common.cpp2
-rw-r--r--src/interfaces/chain.h17
-rw-r--r--src/interfaces/init.h1
-rw-r--r--src/interfaces/ipc.h16
-rw-r--r--src/interfaces/mining.h68
-rw-r--r--src/interfaces/node.h5
-rw-r--r--src/interfaces/types.h20
-rw-r--r--src/ipc/CMakeLists.txt7
-rw-r--r--src/ipc/capnp/common-types.h139
-rw-r--r--src/ipc/capnp/common.capnp16
-rw-r--r--src/ipc/capnp/init-types.h1
-rw-r--r--src/ipc/capnp/init.capnp3
-rw-r--r--src/ipc/capnp/mining-types.h26
-rw-r--r--src/ipc/capnp/mining.capnp52
-rw-r--r--src/ipc/capnp/mining.cpp47
-rw-r--r--src/ipc/capnp/protocol.cpp13
-rw-r--r--src/ipc/interfaces.cpp30
-rw-r--r--src/ipc/process.cpp96
-rw-r--r--src/ipc/process.h10
-rw-r--r--src/ipc/protocol.h28
-rw-r--r--src/kernel/CMakeLists.txt47
-rw-r--r--src/kernel/chainparams.cpp4
-rw-r--r--src/logging.cpp24
-rw-r--r--src/logging.h93
-rw-r--r--src/mapport.cpp210
-rw-r--r--src/mapport.h4
-rw-r--r--src/net.cpp107
-rw-r--r--src/net.h18
-rw-r--r--src/net_processing.cpp70
-rw-r--r--src/net_processing.h9
-rw-r--r--src/netbase.cpp7
-rw-r--r--src/netbase.h7
-rw-r--r--src/node/abort.cpp4
-rw-r--r--src/node/abort.h7
-rw-r--r--src/node/blockstorage.cpp16
-rw-r--r--src/node/blockstorage.h3
-rw-r--r--src/node/caches.cpp1
-rw-r--r--src/node/context.h6
-rw-r--r--src/node/interfaces.cpp140
-rw-r--r--src/node/kernel_notifications.cpp14
-rw-r--r--src/node/kernel_notifications.h25
-rw-r--r--src/node/mempool_persist.cpp4
-rw-r--r--src/node/miner.cpp4
-rw-r--r--src/node/utxo_snapshot.cpp6
-rw-r--r--src/node/warnings.cpp2
-rw-r--r--src/pow.cpp11
-rw-r--r--src/pow.h1
-rw-r--r--src/prevector.h9
-rw-r--r--src/qt/CMakeLists.txt6
-rw-r--r--src/qt/README.md2
-rw-r--r--src/qt/bitcoin.cpp19
-rw-r--r--src/qt/bitcoin.h2
-rw-r--r--src/qt/bitcoingui.cpp2
-rw-r--r--src/qt/bitcoingui.h2
-rw-r--r--src/qt/clientmodel.cpp2
-rw-r--r--src/qt/createwalletdialog.cpp2
-rw-r--r--src/qt/forms/optionsdialog.ui6
-rw-r--r--src/qt/intro.cpp2
-rw-r--r--src/qt/locale/bitcoin_am.ts182
-rw-r--r--src/qt/locale/bitcoin_bn.ts4
-rw-r--r--src/qt/locale/bitcoin_de.ts77
-rw-r--r--src/qt/locale/bitcoin_de_CH.ts16
-rw-r--r--src/qt/locale/bitcoin_gl_ES.ts12
-rw-r--r--src/qt/locale/bitcoin_ru.ts39
-rw-r--r--src/qt/locale/bitcoin_sw.ts260
-rw-r--r--src/qt/locale/bitcoin_th.ts7
-rw-r--r--src/qt/modaloverlay.cpp2
-rw-r--r--src/qt/notificator.cpp2
-rw-r--r--src/qt/notificator.h2
-rw-r--r--src/qt/optionsdialog.cpp19
-rw-r--r--src/qt/optionsmodel.cpp19
-rw-r--r--src/qt/qrimagewidget.cpp2
-rw-r--r--src/qt/receiverequestdialog.cpp2
-rw-r--r--src/qt/rpcconsole.cpp4
-rw-r--r--src/qt/rpcconsole.h2
-rw-r--r--src/qt/sendcoinsdialog.cpp2
-rw-r--r--src/qt/signverifymessagedialog.cpp2
-rw-r--r--src/qt/splashscreen.cpp2
-rw-r--r--src/qt/test/addressbooktests.cpp4
-rw-r--r--src/qt/test/apptests.cpp4
-rw-r--r--src/qt/test/optiontests.cpp2
-rw-r--r--src/qt/test/rpcnestedtests.cpp5
-rw-r--r--src/qt/test/test_main.cpp20
-rw-r--r--src/qt/test/wallettests.cpp4
-rw-r--r--src/qt/utilitydialog.cpp2
-rw-r--r--src/qt/walletcontroller.cpp18
-rw-r--r--src/qt/walletcontroller.h3
-rw-r--r--src/qt/winshutdownmonitor.cpp4
-rw-r--r--src/qt/winshutdownmonitor.h4
-rw-r--r--src/random.cpp2
-rw-r--r--src/randomenv.cpp2
-rw-r--r--src/rest.cpp9
-rw-r--r--src/rpc/blockchain.cpp417
-rw-r--r--src/rpc/blockchain.h6
-rw-r--r--src/rpc/client.cpp4
-rw-r--r--src/rpc/external_signer.cpp2
-rw-r--r--src/rpc/mempool.cpp102
-rw-r--r--src/rpc/mining.cpp100
-rw-r--r--src/rpc/node.cpp8
-rw-r--r--src/rpc/rawtransaction.cpp19
-rw-r--r--src/rpc/register.h2
-rw-r--r--src/rpc/server.cpp29
-rw-r--r--src/rpc/server.h6
-rw-r--r--src/rpc/txoutproof.cpp5
-rw-r--r--src/rpc/util.cpp25
-rw-r--r--src/rpc/util.h9
-rw-r--r--src/script/interpreter.cpp4
-rw-r--r--src/script/script.h61
-rw-r--r--src/script/sign.cpp21
-rw-r--r--src/script/sign.h19
-rw-r--r--src/secp256k1/.github/workflows/ci.yml4
-rw-r--r--src/secp256k1/CHANGELOG.md3
-rw-r--r--src/secp256k1/CMakeLists.txt15
-rw-r--r--src/secp256k1/ci/linux-debian.Dockerfile2
-rw-r--r--src/secp256k1/configure.ac6
-rw-r--r--src/secp256k1/examples/schnorr.c4
-rw-r--r--src/secp256k1/include/secp256k1_ellswift.h2
-rw-r--r--src/secp256k1/src/modules/ellswift/tests_impl.h6
-rw-r--r--src/secp256k1/src/modules/schnorrsig/main_impl.h2
-rw-r--r--src/secp256k1/src/modules/schnorrsig/tests_impl.h10
-rw-r--r--src/secp256k1/src/testrand_impl.h2
-rw-r--r--src/streams.cpp64
-rw-r--r--src/streams.h18
-rw-r--r--src/test/CMakeLists.txt17
-rw-r--r--src/test/README.md56
-rw-r--r--src/test/addrman_tests.cpp55
-rw-r--r--src/test/arith_uint256_tests.cpp89
-rw-r--r--src/test/blockfilter_index_tests.cpp4
-rw-r--r--src/test/blockmanager_tests.cpp8
-rw-r--r--src/test/cluster_linearize_tests.cpp49
-rw-r--r--src/test/coinstatsindex_tests.cpp4
-rw-r--r--src/test/feefrac_tests.cpp2
-rw-r--r--src/test/fuzz/CMakeLists.txt1
-rw-r--r--src/test/fuzz/addrman.cpp42
-rw-r--r--src/test/fuzz/autofile.cpp1
-rw-r--r--src/test/fuzz/banman.cpp8
-rw-r--r--src/test/fuzz/buffered_file.cpp4
-rw-r--r--src/test/fuzz/cluster_linearize.cpp337
-rw-r--r--src/test/fuzz/connman.cpp16
-rw-r--r--src/test/fuzz/crypto.cpp8
-rw-r--r--src/test/fuzz/crypto_chacha20.cpp9
-rw-r--r--src/test/fuzz/crypto_chacha20poly1305.cpp2
-rw-r--r--src/test/fuzz/crypto_common.cpp4
-rw-r--r--src/test/fuzz/cuckoocache.cpp4
-rw-r--r--src/test/fuzz/fuzz.cpp2
-rw-r--r--src/test/fuzz/hex.cpp9
-rw-r--r--src/test/fuzz/integer.cpp4
-rw-r--r--src/test/fuzz/message.cpp4
-rw-r--r--src/test/fuzz/p2p_headers_presync.cpp216
-rw-r--r--src/test/fuzz/policy_estimator.cpp13
-rw-r--r--src/test/fuzz/pow.cpp2
-rw-r--r--src/test/fuzz/prevector.cpp33
-rw-r--r--src/test/fuzz/rpc.cpp1
-rw-r--r--src/test/fuzz/script_format.cpp4
-rw-r--r--src/test/fuzz/script_interpreter.cpp10
-rw-r--r--src/test/fuzz/script_sign.cpp10
-rw-r--r--src/test/fuzz/socks5.cpp8
-rw-r--r--src/test/fuzz/system.cpp18
-rw-r--r--src/test/fuzz/util/net.cpp12
-rw-r--r--src/test/fuzz/utxo_snapshot.cpp4
-rw-r--r--src/test/ipc_test.capnp7
-rw-r--r--src/test/ipc_test.cpp132
-rw-r--r--src/test/ipc_test.h9
-rw-r--r--src/test/ipc_test_types.h12
-rw-r--r--src/test/ipc_tests.cpp33
-rw-r--r--src/test/logging_tests.cpp26
-rw-r--r--src/test/merkle_tests.cpp104
-rw-r--r--src/test/miner_tests.cpp2
-rw-r--r--src/test/multisig_tests.cpp1
-rw-r--r--src/test/orphanage_tests.cpp20
-rw-r--r--src/test/raii_event_tests.cpp8
-rw-r--r--src/test/script_p2sh_tests.cpp1
-rw-r--r--src/test/script_tests.cpp10
-rw-r--r--src/test/streams_tests.cpp6
-rw-r--r--src/test/system_tests.cpp13
-rw-r--r--src/test/transaction_tests.cpp18
-rw-r--r--src/test/txindex_tests.cpp2
-rw-r--r--src/test/uint256_tests.cpp140
-rw-r--r--src/test/util/cluster_linearize.h248
-rw-r--r--src/test/util/json.cpp8
-rw-r--r--src/test/util/json.h8
-rw-r--r--src/test/util/random.cpp2
-rw-r--r--src/test/util/random.h12
-rw-r--r--src/test/util/setup_common.cpp51
-rw-r--r--src/test/util/setup_common.h58
-rw-r--r--src/test/util/transaction_utils.cpp42
-rw-r--r--src/test/util/transaction_utils.h24
-rw-r--r--src/test/util_string_tests.cpp85
-rw-r--r--src/test/validation_block_tests.cpp2
-rw-r--r--src/test/validation_chainstate_tests.cpp19
-rw-r--r--src/test/validation_chainstatemanager_tests.cpp38
-rw-r--r--src/txdb.h2
-rw-r--r--src/txorphanage.cpp12
-rw-r--r--src/txorphanage.h10
-rw-r--r--src/uint256.h10
-rw-r--r--src/util/CMakeLists.txt1
-rw-r--r--src/util/asmap.cpp6
-rw-r--r--src/util/check.cpp2
-rw-r--r--src/util/check.h2
-rw-r--r--src/util/feefrac.h8
-rw-r--r--src/util/fs_helpers.cpp4
-rw-r--r--src/util/string.h72
-rw-r--r--src/util/syserror.cpp2
-rw-r--r--src/util/threadnames.cpp2
-rw-r--r--src/util/tokenpipe.cpp2
-rw-r--r--src/util/trace.h2
-rw-r--r--src/validation.cpp27
-rw-r--r--src/validation.h18
-rw-r--r--src/validationinterface.cpp3
-rw-r--r--src/wallet/init.cpp2
-rw-r--r--src/wallet/load.cpp12
-rw-r--r--src/wallet/rpc/addresses.cpp2
-rw-r--r--src/wallet/rpc/backup.cpp2
-rw-r--r--src/wallet/rpc/util.cpp2
-rw-r--r--src/wallet/rpc/wallet.cpp2
-rw-r--r--src/wallet/scriptpubkeyman.h4
-rw-r--r--src/wallet/spend.cpp2
-rw-r--r--src/wallet/sqlite.cpp2
-rw-r--r--src/wallet/test/db_tests.cpp4
-rw-r--r--src/wallet/test/fuzz/coinselection.cpp2
-rw-r--r--src/wallet/test/fuzz/scriptpubkeyman.cpp5
-rw-r--r--src/wallet/test/fuzz/wallet_bdb_parser.cpp17
-rw-r--r--src/wallet/test/ismine_tests.cpp4
-rw-r--r--src/wallet/test/util.h2
-rw-r--r--src/wallet/test/wallet_tests.cpp15
-rw-r--r--src/wallet/wallet.cpp15
-rw-r--r--src/wallet/wallet.h4
-rw-r--r--src/wallet/walletdb.cpp2
-rw-r--r--src/wallet/wallettool.cpp2
-rw-r--r--test/CMakeLists.txt2
-rw-r--r--test/functional/README.md3
-rwxr-xr-xtest/functional/feature_assumeutxo.py166
-rwxr-xr-xtest/functional/feature_blocksxor.py2
-rwxr-xr-xtest/functional/feature_config_args.py7
-rwxr-xr-xtest/functional/feature_fee_estimation.py5
-rwxr-xr-xtest/functional/feature_framework_miniwallet.py16
-rwxr-xr-xtest/functional/feature_settings.py23
-rwxr-xr-xtest/functional/interface_bitcoin_cli.py14
-rwxr-xr-xtest/functional/interface_usdt_coinselection.py10
-rwxr-xr-xtest/functional/mempool_limit.py31
-rwxr-xr-xtest/functional/mempool_package_limits.py17
-rwxr-xr-xtest/functional/mempool_package_rbf.py24
-rwxr-xr-xtest/functional/mempool_sigoplimit.py2
-rwxr-xr-xtest/functional/mempool_truc.py49
-rwxr-xr-xtest/functional/p2p_1p1c_network.py5
-rwxr-xr-xtest/functional/p2p_node_network_limited.py4
-rwxr-xr-xtest/functional/p2p_permissions.py5
-rwxr-xr-xtest/functional/p2p_seednode.py55
-rwxr-xr-xtest/functional/p2p_tx_download.py6
-rwxr-xr-xtest/functional/p2p_unrequested_blocks.py8
-rwxr-xr-xtest/functional/rpc_bind.py17
-rwxr-xr-xtest/functional/rpc_blockchain.py94
-rwxr-xr-xtest/functional/rpc_createmultisig.py4
-rwxr-xr-xtest/functional/rpc_dumptxoutset.py31
-rwxr-xr-xtest/functional/rpc_getblockfrompeer.py2
-rwxr-xr-xtest/functional/rpc_getblockstats.py13
-rwxr-xr-xtest/functional/rpc_getorphantxs.py130
-rwxr-xr-xtest/functional/rpc_txoutproof.py4
-rwxr-xr-xtest/functional/rpc_users.py21
-rw-r--r--test/functional/test_framework/blocktools.py4
-rw-r--r--test/functional/test_framework/mempool_util.py32
-rw-r--r--test/functional/test_framework/util.py11
-rw-r--r--test/functional/test_framework/wallet.py32
-rwxr-xr-xtest/functional/test_runner.py45
-rwxr-xr-xtest/functional/tool_signet_miner.py1
-rwxr-xr-xtest/functional/wallet_assumeutxo.py42
-rwxr-xr-xtest/functional/wallet_backup.py21
-rwxr-xr-xtest/functional/wallet_backwards_compatibility.py27
-rwxr-xr-xtest/functional/wallet_multiwallet.py4
-rwxr-xr-xtest/functional/wallet_upgradewallet.py11
-rw-r--r--test/lint/README.md2
-rwxr-xr-xtest/lint/lint-format-strings.py21
-rwxr-xr-xtest/lint/lint-python.py96
-rwxr-xr-xtest/lint/lint-spelling.py2
-rwxr-xr-xtest/lint/run-lint-format-strings.py9
-rw-r--r--test/lint/test_runner/src/main.rs119
-rwxr-xr-xtest/util/test_runner.py27
433 files changed, 8102 insertions, 4011 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index f5874744b5..7f7a882cee 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -1,12 +1,9 @@
env: # Global defaults
CIRRUS_CLONE_DEPTH: 1
- PACKAGE_MANAGER_INSTALL: "apt-get update && apt-get install -y"
+ CIRRUS_LOG_TIMESTAMP: true
MAKEJOBS: "-j10"
TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache
CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error
- CCACHE_MAXSIZE: "200M"
- CCACHE_DIR: "/tmp/ccache_dir"
- CCACHE_NOHASHDIR: "1" # Debug info might contain a stale path if the build dir changes, but this is fine
# A self-hosted machine(s) can be used via Cirrus CI. It can be configured with
# multiple users to run tasks in parallel. No sudo permission is required.
@@ -16,9 +13,9 @@ env: # Global defaults
# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+.
#
# The following specific types should exist, with the following requirements:
-# - small: For an x86_64 machine, recommended to have 2 CPUs and 8 GB of memory.
-# - medium: For an x86_64 machine, recommended to have 4 CPUs and 16 GB of memory.
-# - arm64: For an aarch64 machine, recommended to have 2 CPUs and 8 GB of memory.
+# - small: For an x86_64 machine, with at least 2 vCPUs and 8 GB of memory.
+# - medium: For an x86_64 machine, with at least 4 vCPUs and 16 GB of memory.
+# - arm64: For an aarch64 machine, with at least 2 vCPUs and 8 GB of memory.
#
# CI jobs for the latter configuration can be run on x86_64 hardware
# by installing qemu-user-static, which works out of the box with
@@ -39,14 +36,13 @@ env: # Global defaults
# This requires installing Podman instead of Docker.
#
# Futhermore:
-# - apt-get is required due to PACKAGE_MANAGER_INSTALL
# - podman-docker-4.1+ is required due to the bugfix in 4.1
# (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200)
# - The ./ci/ dependencies (with cirrus-cli) should be installed. One-liner example
# for a single user setup with sudo permission:
#
# ```
-# apt update && apt install git screen python3 bash podman-docker curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus
+# apt update && apt install git screen python3 bash podman-docker uidmap slirp4netns curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus
# ```
#
# - There are no strict requirements on the hardware. Having fewer CPU threads
@@ -75,8 +71,8 @@ filter_template: &FILTER_TEMPLATE
base_template: &BASE_TEMPLATE
<< : *FILTER_TEMPLATE
merge_base_script:
- # Unconditionally install git (used in fingerprint_script).
- - git --version || bash -c "$PACKAGE_MANAGER_INSTALL git"
+ # Require git (used in fingerprint_script).
+ - git --version || ( apt-get update && apt-get install -y git )
- if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
- git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge"
- git checkout FETCH_HEAD # Use merged changes to detect silent merge conflicts
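A minimal local sketch of the merge_base_script technique above, assuming the upstream clone URL and a hypothetical PR number 30000: it checks out the would-be merge result rather than the PR tip, which is how silent merge conflicts get caught.

    git fetch --depth=1 https://github.com/bitcoin/bitcoin "pull/30000/merge"
    git checkout FETCH_HEAD    # build/test the merged tree, not the PR head commit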
diff --git a/.editorconfig b/.editorconfig
index ae7e92d1c8..c5f3028c50 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -10,17 +10,17 @@ insert_final_newline = true
trim_trailing_whitespace = true
# Source code files
-[*.{h,cpp,py,sh}]
+[*.{h,cpp,rs,py,sh}]
indent_size = 4
-# .cirrus.yml, .fuzzbuzz.yml, etc.
+# .cirrus.yml, etc.
[*.yml]
indent_size = 2
-# Makefiles
-[{*.am,Makefile.*.include}]
+# Makefiles (only relevant for depends build)
+[Makefile]
indent_style = tab
-# Autoconf scripts
-[configure.ac]
+# CMake files
+[{CMakeLists.txt,*.cmake,*.cmake.in}]
indent_size = 2
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 482525f949..439d02cc8b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -67,18 +67,18 @@ jobs:
echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD $EXCLUDE_MERGE_BASE_ANCESTORS | head -1)" >> "$GITHUB_ENV"
- run: |
sudo apt-get update
- sudo apt-get install clang ccache build-essential cmake pkg-config python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev qtbase5-dev qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y
+ sudo apt-get install clang ccache build-essential cmake pkg-config python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libzmq3-dev qtbase5-dev qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y
- name: Compile and run tests
run: |
# Run tests on commits after the last merge commit and before the PR head commit
# Use clang++, because it is a bit faster and uses less memory than g++
- git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && CC=clang CXX=clang++ cmake -B build -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_BDB=ON -DWITH_NATPMP=ON -DWITH_MINIUPNPC=ON -DWITH_USDT=ON && cmake --build build -j $(nproc) && ctest --test-dir build -j $(nproc) && ./build/test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }}
+ git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && CC=clang CXX=clang++ cmake -B build -DWERROR=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_BDB=ON -DWITH_MINIUPNPC=ON -DWITH_USDT=ON && cmake --build build -j $(nproc) && ctest --test-dir build -j $(nproc) && ./build/test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }}
- macos-native-x86_64:
- name: 'macOS 13 native, x86_64, no depends, sqlite only, gui'
+ macos-native-arm64:
+ name: 'macOS 14 native, arm64, no depends, sqlite only, gui'
# Use latest image, but hardcode version to avoid silent upgrades (and breaks).
# See: https://github.com/actions/runner-images#available-images.
- runs-on: macos-13
+ runs-on: macos-14
# No need to run on the read-only mirror, unless it is a PR.
if: github.repository != 'bitcoin-core/gui' || github.event_name == 'pull_request'
@@ -105,7 +105,7 @@ jobs:
run: |
# A workaround for "The `brew link` step did not complete successfully" error.
brew install --quiet python@3 || brew link --overwrite python@3
- brew install --quiet automake libtool pkg-config gnu-getopt ccache boost libevent miniupnpc libnatpmp zeromq qt@5 qrencode
+ brew install --quiet coreutils ninja pkg-config gnu-getopt ccache boost libevent miniupnpc zeromq qt@5 qrencode
- name: Set Ccache directory
run: echo "CCACHE_DIR=${RUNNER_TEMP}/ccache_dir" >> "$GITHUB_ENV"
@@ -182,7 +182,7 @@ jobs:
- name: Generate build system
run: |
- cmake -B build --preset vs2022-static -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT\scripts\buildsystems\vcpkg.cmake" -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWERROR=ON
+ cmake -B build --preset vs2022-static -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT\scripts\buildsystems\vcpkg.cmake" -DBUILD_GUI=ON -DWITH_BDB=ON -DWITH_MINIUPNPC=ON -DWITH_ZMQ=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWERROR=ON
- name: Save vcpkg binary cache
uses: actions/cache/save@v4
@@ -224,7 +224,7 @@ jobs:
env:
BITCOINFUZZ: '${{ github.workspace }}\build\src\test\fuzz\Release\fuzz.exe'
shell: cmd
- run: py -3 test\fuzz\test_runner.py --par %NUMBER_OF_PROCESSORS% --loglevel DEBUG %RUNNER_TEMP%\qa-assets\fuzz_seed_corpus
+ run: py -3 test\fuzz\test_runner.py --par %NUMBER_OF_PROCESSORS% --loglevel DEBUG %RUNNER_TEMP%\qa-assets\fuzz_corpora
asan-lsan-ubsan-integer-no-depends-usdt:
name: 'ASan + LSan + UBSan + integer, no depends, USDT'
diff --git a/.python-version b/.python-version
index 43077b2460..1445aee866 100644
--- a/.python-version
+++ b/.python-version
@@ -1 +1 @@
-3.9.18
+3.10.14
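The interpreter bump above is picked up by tools that read .python-version; a sketch assuming pyenv is the consumer:

    pyenv install 3.10.14
    pyenv version    # should now resolve to 3.10.14 via .python-version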
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5ef80ffc6f..edc4710637 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -8,12 +8,6 @@
# Centos Stream 9, https://www.centos.org/cl-vs-cs/#end-of-life, EOL in May 2027:
# - CMake 3.26.5, https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/Packages/
cmake_minimum_required(VERSION 3.22)
-if(POLICY CMP0141)
- # MSVC debug information format flags are selected by an abstraction.
- # We want to use the CMAKE_MSVC_DEBUG_INFORMATION_FORMAT variable
- # to select the MSVC debug information format.
- cmake_policy(SET CMP0141 NEW)
-endif()
if (${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
message(FATAL_ERROR "In-source builds are not allowed.")
@@ -103,14 +97,12 @@ if(WITH_SQLITE)
find_package(SQLite3 3.7.17 REQUIRED)
endif()
set(USE_SQLITE ON)
- set(ENABLE_WALLET ON)
endif()
option(WITH_BDB "Enable Berkeley DB (BDB) wallet support." OFF)
cmake_dependent_option(WARN_INCOMPATIBLE_BDB "Warn when using a Berkeley DB (BDB) version other than 4.8." ON "WITH_BDB" OFF)
if(WITH_BDB)
find_package(BerkeleyDB 4.8 MODULE REQUIRED)
set(USE_BDB ON)
- set(ENABLE_WALLET ON)
if(NOT BerkeleyDB_VERSION VERSION_EQUAL 4.8)
message(WARNING "Found Berkeley DB (BDB) other than 4.8.\n"
"BDB (legacy) wallets opened by this build will not be portable!"
@@ -129,11 +121,6 @@ option(REDUCE_EXPORTS "Attempt to reduce exported symbols in the resulting execu
option(WERROR "Treat compiler warnings as errors." OFF)
option(WITH_CCACHE "Attempt to use ccache for compiling." ON)
-option(WITH_NATPMP "Enable NAT-PMP." OFF)
-if(WITH_NATPMP)
- find_package(NATPMP MODULE REQUIRED)
-endif()
-
option(WITH_MINIUPNPC "Enable UPnP." OFF)
if(WITH_MINIUPNPC)
find_package(MiniUPnPc MODULE REQUIRED)
@@ -145,7 +132,9 @@ if(WITH_ZMQ)
find_package(ZeroMQ CONFIG REQUIRED)
else()
# The ZeroMQ project has provided config files since v4.2.2.
- # TODO: Switch to find_package(ZeroMQ) at some point in the future.
+ # However, mainstream distributions do not yet provide CMake
+ # config files for ZeroMQ packages. If they do in the future,
+ # find_package(ZeroMQ) may be used instead.
find_package(PkgConfig REQUIRED)
pkg_check_modules(libzmq REQUIRED IMPORTED_TARGET libzmq>=4)
endif()
@@ -188,7 +177,7 @@ if(BUILD_GUI)
if(BUILD_GUI_TESTS)
list(APPEND qt_components Test)
endif()
- find_package(Qt5 5.11.3 MODULE REQUIRED
+ find_package(Qt 5.11.3 MODULE REQUIRED
COMPONENTS ${qt_components}
)
unset(qt_components)
@@ -196,7 +185,7 @@ endif()
option(BUILD_BENCH "Build bench_bitcoin executable." OFF)
option(BUILD_FUZZ_BINARY "Build fuzz binary." OFF)
-cmake_dependent_option(BUILD_FOR_FUZZING "Build for fuzzing. Enabling this will disable all other targets and override BUILD_FUZZ_BINARY." OFF "NOT MSVC" OFF)
+option(BUILD_FOR_FUZZING "Build for fuzzing. Enabling this will disable all other targets and override BUILD_FUZZ_BINARY." OFF)
option(INSTALL_MAN "Install man pages." ON)
@@ -245,7 +234,6 @@ if(BUILD_FOR_FUZZING)
set(BUILD_WALLET_TOOL OFF)
set(BUILD_GUI OFF)
set(ENABLE_EXTERNAL_SIGNER OFF)
- set(WITH_NATPMP OFF)
set(WITH_MINIUPNPC OFF)
set(WITH_ZMQ OFF)
set(BUILD_TESTS OFF)
@@ -255,6 +243,7 @@ if(BUILD_FOR_FUZZING)
target_compile_definitions(core_interface INTERFACE
ABORT_ON_FAILED_ASSUME
+ FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
)
endif()
@@ -458,10 +447,10 @@ else()
)
endif()
-configure_file(cmake/script/Coverage.cmake Coverage.cmake COPYONLY)
-configure_file(cmake/script/CoverageFuzz.cmake CoverageFuzz.cmake COPYONLY)
-configure_file(cmake/script/CoverageInclude.cmake.in CoverageInclude.cmake @ONLY)
-configure_file(contrib/filter-lcov.py filter-lcov.py COPYONLY)
+configure_file(cmake/script/Coverage.cmake Coverage.cmake USE_SOURCE_PERMISSIONS COPYONLY)
+configure_file(cmake/script/CoverageFuzz.cmake CoverageFuzz.cmake USE_SOURCE_PERMISSIONS COPYONLY)
+configure_file(cmake/script/CoverageInclude.cmake.in CoverageInclude.cmake USE_SOURCE_PERMISSIONS @ONLY)
+configure_file(contrib/filter-lcov.py filter-lcov.py USE_SOURCE_PERMISSIONS COPYONLY)
# Don't allow extended (non-ASCII) symbols in identifiers. This is easier for code review.
try_append_cxx_flags("-fno-extended-identifiers" TARGET core_interface SKIP_LINK)
@@ -480,18 +469,21 @@ if(ENABLE_HARDENING)
try_append_linker_flag("/HIGHENTROPYVA" TARGET hardening_interface)
try_append_linker_flag("/NXCOMPAT" TARGET hardening_interface)
else()
+
+ # _FORTIFY_SOURCE requires that there is some level of optimization,
+ # otherwise it does nothing and just creates a compiler warning.
try_append_cxx_flags("-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3"
RESULT_VAR cxx_supports_fortify_source
+ SOURCE "int main() {
+ # if !defined __OPTIMIZE__ || __OPTIMIZE__ <= 0
+ #error
+ #endif
+ }"
)
if(cxx_supports_fortify_source)
- # When the build configuration is Debug, all optimizations are disabled.
- # However, _FORTIFY_SOURCE requires that there is some level of optimization,
- # otherwise it does nothing and just creates a compiler warning.
- # Since _FORTIFY_SOURCE is a no-op without optimizations, do not enable it
- # when the build configuration is Debug.
target_compile_options(hardening_interface INTERFACE
- $<$<NOT:$<CONFIG:Debug>>:-U_FORTIFY_SOURCE>
- $<$<NOT:$<CONFIG:Debug>>:-D_FORTIFY_SOURCE=3>
+ -U_FORTIFY_SOURCE
+ -D_FORTIFY_SOURCE=3
)
endif()
unset(cxx_supports_fortify_source)
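A quick shell sketch of why the __OPTIMIZE__ probe above is needed (recent glibc and compiler assumed): without optimization, fortification is a no-op and glibc emits a #warning instead.

    printf '#include <cstdio>\nint main(){}\n' > t.cpp
    g++ -O0 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3 -c t.cpp   # warns that _FORTIFY_SOURCE needs optimization
    g++ -O2 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3 -c t.cpp   # clean; fortified checks are active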
@@ -509,7 +501,11 @@ if(ENABLE_HARDENING)
endif()
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- try_append_cxx_flags("-mbranch-protection=bti" TARGET hardening_interface SKIP_LINK)
+ if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
+ try_append_cxx_flags("-mbranch-protection=bti" TARGET hardening_interface SKIP_LINK)
+ else()
+ try_append_cxx_flags("-mbranch-protection=standard" TARGET hardening_interface SKIP_LINK)
+ endif()
endif()
try_append_linker_flag("-Wl,--enable-reloc-section" TARGET hardening_interface)
@@ -544,7 +540,7 @@ if(WERROR)
unset(werror_flag)
endif()
-find_package(Python3 3.9 COMPONENTS Interpreter)
+find_package(Python3 3.10 COMPONENTS Interpreter)
if(Python3_EXECUTABLE)
set(PYTHON_COMMAND ${Python3_EXECUTABLE})
else()
@@ -619,9 +615,7 @@ if(ENABLE_WALLET)
message(" - legacy wallets (Berkeley DB) ..... ${WITH_BDB}")
endif()
message(" external signer ..................... ${ENABLE_EXTERNAL_SIGNER}")
-message(" port mapping:")
-message(" - using NAT-PMP .................... ${WITH_NATPMP}")
-message(" - using UPnP ....................... ${WITH_MINIUPNPC}")
+message(" port mapping using UPnP ............. ${WITH_MINIUPNPC}")
message(" ZeroMQ .............................. ${WITH_ZMQ}")
message(" USDT tracing ........................ ${WITH_USDT}")
message(" QR code (GUI) ....................... ${WITH_QRENCODE}")
diff --git a/CMakePresets.json b/CMakePresets.json
index a5f2ce7919..3bbb61afce 100644
--- a/CMakePresets.json
+++ b/CMakePresets.json
@@ -16,8 +16,7 @@
"cacheVariables": {
"VCPKG_TARGET_TRIPLET": "x64-windows",
"BUILD_GUI": "ON",
- "WITH_QRENCODE": "OFF",
- "WITH_NATPMP": "OFF"
+ "WITH_QRENCODE": "OFF"
}
},
{
@@ -34,8 +33,63 @@
"cacheVariables": {
"VCPKG_TARGET_TRIPLET": "x64-windows-static",
"BUILD_GUI": "ON",
- "WITH_QRENCODE": "OFF",
- "WITH_NATPMP": "OFF"
+ "WITH_QRENCODE": "OFF"
+ }
+ },
+ {
+ "name": "libfuzzer",
+ "displayName": "Build for fuzzing with libfuzzer, and sanitizers enabled",
+ "binaryDir": "${sourceDir}/build_fuzz",
+ "cacheVariables": {
+ "BUILD_FOR_FUZZING": "ON",
+ "CMAKE_C_COMPILER": "clang",
+ "CMAKE_C_FLAGS": "-ftrivial-auto-var-init=pattern",
+ "CMAKE_CXX_COMPILER": "clang++",
+ "CMAKE_CXX_FLAGS": "-ftrivial-auto-var-init=pattern",
+ "SANITIZERS": "undefined,address,fuzzer"
+ }
+ },
+ {
+ "name": "libfuzzer-nosan",
+ "displayName": "Build for fuzzing with libfuzzer, and sanitizers disabled",
+ "binaryDir": "${sourceDir}/build_fuzz_nosan",
+ "cacheVariables": {
+ "BUILD_FOR_FUZZING": "ON",
+ "CMAKE_C_COMPILER": "clang",
+ "CMAKE_CXX_COMPILER": "clang++",
+ "SANITIZERS": "fuzzer"
+ }
+ },
+ {
+ "name": "dev-mode",
+ "displayName": "Developer mode, with all features/dependencies enabled",
+ "binaryDir": "${sourceDir}/build_dev_mode",
+ "cacheVariables": {
+ "BUILD_BENCH": "ON",
+ "BUILD_CLI": "ON",
+ "BUILD_DAEMON": "ON",
+ "BUILD_FUZZ_BINARY": "ON",
+ "BUILD_GUI": "ON",
+ "BUILD_GUI_TESTS": "ON",
+ "BUILD_KERNEL_LIB": "ON",
+ "BUILD_SHARED_LIBS": "ON",
+ "BUILD_TESTING": "ON",
+ "BUILD_TESTS": "ON",
+ "BUILD_TX": "ON",
+ "BUILD_UTIL": "ON",
+ "BUILD_UTIL_CHAINSTATE": "ON",
+ "BUILD_WALLET_TOOL": "ON",
+ "ENABLE_EXTERNAL_SIGNER": "ON",
+ "ENABLE_HARDENING": "ON",
+ "ENABLE_WALLET": "ON",
+ "WARN_INCOMPATIBLE_BDB": "OFF",
+ "WITH_BDB": "ON",
+ "WITH_MINIUPNPC": "ON",
+ "WITH_MULTIPROCESS": "ON",
+ "WITH_QRENCODE": "ON",
+ "WITH_SQLITE": "ON",
+ "WITH_USDT": "ON",
+ "WITH_ZMQ": "ON"
}
}
]
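The new presets can be exercised directly with CMake's preset support; a sketch using the binaryDir values declared above:

    cmake --preset libfuzzer && cmake --build build_fuzz -j"$(nproc)"
    cmake --preset dev-mode  && cmake --build build_dev_mode -j"$(nproc)"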
diff --git a/README.md b/README.md
index cf106ad687..c5b6ce4588 100644
--- a/README.md
+++ b/README.md
@@ -53,7 +53,8 @@ and extending unit tests can be found in [/src/test/README.md](/src/test/README.
There are also [regression and integration tests](/test), written
in Python.
-These tests can be run (if the [test dependencies](/test) are installed) with: `test/functional/test_runner.py`
+These tests can be run (if the [test dependencies](/test) are installed) with: `build/test/functional/test_runner.py`
+(assuming `build` is your build directory).
The CI (Continuous Integration) systems make sure that every pull request is built for Windows, Linux, and macOS,
and that unit/sanity tests are run automatically.
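A sketch of the workflow the README change points to, assuming an out-of-source directory named build:

    cmake -B build
    cmake --build build -j"$(nproc)"
    build/test/functional/test_runner.py -j"$(nproc)"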
diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh
index 655db50361..d899c0c67a 100755
--- a/ci/lint/04_install.sh
+++ b/ci/lint/04_install.sh
@@ -48,7 +48,6 @@ fi
${CI_RETRY_EXE} pip3 install \
codespell==2.2.6 \
- flake8==6.1.0 \
lief==0.13.2 \
mypy==1.4.1 \
pyzmq==25.1.0 \
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 944655d8ad..021d5e1597 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -53,18 +53,18 @@ export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1}
# See man 7 debconf
export DEBIAN_FRONTEND=noninteractive
-export CCACHE_MAXSIZE=${CCACHE_MAXSIZE:-100M}
+export CCACHE_MAXSIZE=${CCACHE_MAXSIZE:-500M}
export CCACHE_TEMPDIR=${CCACHE_TEMPDIR:-/tmp/.ccache-temp}
export CCACHE_COMPRESS=${CCACHE_COMPRESS:-1}
# The cache dir.
# This folder exists only on the ci guest, and on the ci host as a volume.
-export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache}
+export CCACHE_DIR="${CCACHE_DIR:-$BASE_SCRATCH_DIR/ccache}"
# Folder where the build result is put (bin and lib).
export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out}
# The folder for previous release binaries.
# This folder exists only on the ci guest, and on the ci host as a volume.
export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/prev_releases}
-export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential libtool autotools-dev automake pkg-config curl ca-certificates ccache python3 rsync git procps bison e2fsprogs cmake}
+export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential pkg-config curl ca-certificates ccache python3 rsync git procps bison e2fsprogs cmake}
export GOAL=${GOAL:-install}
export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets}
export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry --"}
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index 881f006732..5604004d3a 100755
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -9,7 +9,7 @@ export LC_ALL=C.UTF-8
export HOST=i686-pc-linux-gnu
export CONTAINER_NAME=ci_i686_centos
export CI_IMAGE_NAME_TAG="quay.io/centos/amd64:stream9"
-export CI_BASE_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python3-pip which patch lbzip2 xz procps-ng dash rsync coreutils bison util-linux e2fsprogs cmake"
+export CI_BASE_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache make git python3 python3-pip which patch lbzip2 xz procps-ng dash rsync coreutils bison util-linux e2fsprogs cmake"
export PIP_PACKAGES="pyzmq"
export GOAL="install"
export NO_WERROR=1 # Suppress error: #warning _FORTIFY_SOURCE > 2 is treated like 2 on this platform [-Werror=cpp]
diff --git a/ci/test/00_setup_env_mac_native.sh b/ci/test/00_setup_env_mac_native.sh
index 76668d97f2..45d644d9ca 100755
--- a/ci/test/00_setup_env_mac_native.sh
+++ b/ci/test/00_setup_env_mac_native.sh
@@ -6,14 +6,13 @@
export LC_ALL=C.UTF-8
-export HOST=x86_64-apple-darwin
# Homebrew's python@3.12 is marked as externally managed (PEP 668).
# Therefore, `--break-system-packages` is needed.
export PIP_PACKAGES="--break-system-packages zmq"
export GOAL="install"
-export BITCOIN_CONFIG="-DBUILD_GUI=ON -DWITH_ZMQ=ON -DWITH_MINIUPNPC=ON -DWITH_NATPMP=ON -DREDUCE_EXPORTS=ON"
+export CMAKE_GENERATOR="Ninja"
+export BITCOIN_CONFIG="-DBUILD_GUI=ON -DWITH_ZMQ=ON -DWITH_MINIUPNPC=ON -DREDUCE_EXPORTS=ON"
export CI_OS_NAME="macos"
export NO_DEPENDS=1
export OSX_SDK=""
-export CCACHE_MAXSIZE=400M
export RUN_FUZZ_TESTS=true
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index 0ec30f23af..dc84ef49a4 100755
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -19,7 +19,7 @@ else
fi
export CONTAINER_NAME=ci_native_asan
-export PACKAGES="systemtap-sdt-dev clang-18 llvm-18 libclang-rt-18-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}"
+export PACKAGES="systemtap-sdt-dev clang-18 llvm-18 libclang-rt-18-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}"
export NO_DEPENDS=1
export GOAL="install"
export BITCOIN_CONFIG="\
@@ -32,4 +32,3 @@ export BITCOIN_CONFIG="\
-DAPPEND_CXXFLAGS='-std=c++23' \
-DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' \
"
-export CCACHE_MAXSIZE=300M
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index e1a353056d..1aa2487045 100755
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -23,5 +23,4 @@ export BITCOIN_CONFIG="\
-DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \
-DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern' \
"
-export CCACHE_MAXSIZE=200M
export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-18"
diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh
index 7cea4d73af..cfdbc8c014 100755
--- a/ci/test/00_setup_env_native_fuzz_with_msan.sh
+++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh
@@ -31,4 +31,3 @@ export USE_MEMORY_SANITIZER="true"
export RUN_UNIT_TESTS="false"
export RUN_FUNCTIONAL_TESTS="false"
export RUN_FUZZ_TESTS=true
-export CCACHE_MAXSIZE=250M
diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
index 02903b5199..c65c05bff9 100755
--- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
+++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
@@ -21,4 +21,4 @@ export BITCOIN_CONFIG="\
-DCMAKE_C_COMPILER=clang-16 \
-DCMAKE_CXX_COMPILER=clang++-16 \
"
-export CCACHE_MAXSIZE=200M
+export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-16"
diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh
index 2c85ba31d1..c6b3d68be6 100755
--- a/ci/test/00_setup_env_native_msan.sh
+++ b/ci/test/00_setup_env_native_msan.sh
@@ -28,4 +28,3 @@ export BITCOIN_CONFIG="\
"
export USE_MEMORY_SANITIZER="true"
export RUN_FUNCTIONAL_TESTS="false"
-export CCACHE_MAXSIZE=250M
diff --git a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
index 479628d3e8..3d5d1b7745 100755
--- a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
+++ b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_nowallet_libbitcoinkernel
export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm"
-# Use minimum supported python3.9 (or best-effort 3.11) and clang-16, see doc/dependencies.md
+# Use minimum supported python3.10 (or best-effort 3.11) and clang-16, see doc/dependencies.md
export PACKAGES="python3-zmq clang-16 llvm-16 libc++abi-16-dev libc++-16-dev"
export DEP_OPTS="NO_WALLET=1 CC=clang-16 CXX='clang++-16 -stdlib=libc++'"
export GOAL="install"
diff --git a/ci/test/00_setup_env_native_previous_releases.sh b/ci/test/00_setup_env_native_previous_releases.sh
index 2482e545e1..717eb67a28 100755
--- a/ci/test/00_setup_env_native_previous_releases.sh
+++ b/ci/test/00_setup_env_native_previous_releases.sh
@@ -8,9 +8,9 @@ export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_previous_releases
export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04"
-# Use minimum supported python3.9 (or best effort 3.10) and gcc-11, see doc/dependencies.md
+# Use minimum supported python3.10 and gcc-11, see doc/dependencies.md
export PACKAGES="gcc-11 g++-11 python3-zmq"
-export DEP_OPTS="NO_UPNP=1 NO_NATPMP=1 DEBUG=1 CC=gcc-11 CXX=g++-11"
+export DEP_OPTS="NO_UPNP=1 DEBUG=1 CC=gcc-11 CXX=g++-11"
export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
diff --git a/ci/test/00_setup_env_native_tidy.sh b/ci/test/00_setup_env_native_tidy.sh
index 581de16bed..cc1dea09cb 100755
--- a/ci/test/00_setup_env_native_tidy.sh
+++ b/ci/test/00_setup_env_native_tidy.sh
@@ -9,19 +9,19 @@ export LC_ALL=C.UTF-8
export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04"
export CONTAINER_NAME=ci_native_tidy
export TIDY_LLVM_V="18"
-export PACKAGES="clang-${TIDY_LLVM_V} libclang-${TIDY_LLVM_V}-dev llvm-${TIDY_LLVM_V}-dev libomp-${TIDY_LLVM_V}-dev clang-tidy-${TIDY_LLVM_V} jq libevent-dev libboost-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev systemtap-sdt-dev qtbase5-dev qttools5-dev qttools5-dev-tools libqrencode-dev libsqlite3-dev libdb++-dev"
+export PACKAGES="clang-${TIDY_LLVM_V} libclang-${TIDY_LLVM_V}-dev llvm-${TIDY_LLVM_V}-dev libomp-${TIDY_LLVM_V}-dev clang-tidy-${TIDY_LLVM_V} jq libevent-dev libboost-dev libminiupnpc-dev libzmq3-dev systemtap-sdt-dev qtbase5-dev qttools5-dev qttools5-dev-tools libqrencode-dev libsqlite3-dev libdb++-dev"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export RUN_FUZZ_TESTS=false
+export RUN_CHECK_DEPS=true
export RUN_TIDY=true
export GOAL="install"
export BITCOIN_CONFIG="\
- -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DWITH_NATPMP=ON -DWITH_MINIUPNPC=ON -DWITH_USDT=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF \
+ -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DWITH_MINIUPNPC=ON -DWITH_USDT=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF \
-DENABLE_HARDENING=OFF \
-DCMAKE_C_COMPILER=clang-${TIDY_LLVM_V} \
-DCMAKE_CXX_COMPILER=clang++-${TIDY_LLVM_V} \
-DCMAKE_C_FLAGS_RELWITHDEBINFO='-O0 -g0' \
-DCMAKE_CXX_FLAGS_RELWITHDEBINFO='-O0 -g0' \
"
-export CCACHE_MAXSIZE=200M
diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh
index 60bbe83119..3c5622cd02 100755
--- a/ci/test/00_setup_env_native_valgrind.sh
+++ b/ci/test/00_setup_env_native_valgrind.sh
@@ -8,14 +8,14 @@ export LC_ALL=C.UTF-8
export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04"
export CONTAINER_NAME=ci_native_valgrind
-export PACKAGES="valgrind clang-16 llvm-16 libclang-rt-16-dev python3-zmq libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev"
+export PACKAGES="valgrind clang-16 llvm-16 libclang-rt-16-dev python3-zmq libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libsqlite3-dev"
export USE_VALGRIND=1
export NO_DEPENDS=1
export TEST_RUNNER_EXTRA="--exclude feature_init,rpc_bind,feature_bind_extra" # feature_init excluded for now, see https://github.com/bitcoin/bitcoin/issues/30011 ; bind tests excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
export GOAL="install"
# TODO enable GUI
export BITCOIN_CONFIG="\
- -DWITH_ZMQ=ON -DWITH_BDB=ON -DWITH_NATPMP=ON -DWITH_MINIUPNPC=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=OFF \
+ -DWITH_ZMQ=ON -DWITH_BDB=ON -DWITH_MINIUPNPC=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=OFF \
-DCMAKE_C_COMPILER=clang-16 \
-DCMAKE_CXX_COMPILER=clang++-16 \
"
diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh
index bb99fc30e9..538a58cbd5 100755
--- a/ci/test/01_base_install.sh
+++ b/ci/test/01_base_install.sh
@@ -36,7 +36,7 @@ if [ -n "$PIP_PACKAGES" ]; then
fi
if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
- ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-18.1.3" /msan/llvm-project
+ ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-19.1.0" /msan/llvm-project
cmake -G Ninja -B /msan/clang_build/ \
-DLLVM_ENABLE_PROJECTS="clang" \
diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh
index afd447c347..1727f9296b 100755
--- a/ci/test/02_run_container.sh
+++ b/ci/test/02_run_container.sh
@@ -48,6 +48,14 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
CI_PREVIOUS_RELEASES_MOUNT="type=bind,src=${PREVIOUS_RELEASES_DIR},dst=$PREVIOUS_RELEASES_DIR"
fi
+ if [ "$DANGER_CI_ON_HOST_CCACHE_FOLDER" ]; then
+ if [ ! -d "${CCACHE_DIR}" ]; then
+ echo "Error: Directory '${CCACHE_DIR}' must be created in advance."
+ exit 1
+ fi
+ CI_CCACHE_MOUNT="type=bind,src=${CCACHE_DIR},dst=${CCACHE_DIR}"
+ fi
+
docker network create --ipv6 --subnet 1111:1111::/112 ci-ip6net || true
if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then
diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh
index baeeca4814..6eab4f7467 100755
--- a/ci/test/03_test_script.sh
+++ b/ci/test/03_test_script.sh
@@ -13,12 +13,11 @@ export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/l
export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1"
export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"
+echo "Number of available processing units: $(nproc)"
if [ "$CI_OS_NAME" == "macos" ]; then
top -l 1 -s 0 | awk ' /PhysMem/ {print}'
- echo "Number of CPUs: $(sysctl -n hw.logicalcpu)"
else
free -m -h
- echo "Number of CPUs (nproc): $(nproc)"
echo "System info: $(uname --kernel-name --kernel-release)"
lscpu
fi
@@ -30,6 +29,10 @@ df -h
# Tests that run natively guess the host
export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")}
+echo "=== BEGIN env ==="
+env
+echo "=== END env ==="
+
(
# compact->outputs[i].file_size is uninitialized memory, so reading it is UB.
# The statistic bytes_written is only used for logging, which is disabled in
@@ -54,7 +57,7 @@ EOF
)
if [ "$RUN_FUZZ_TESTS" = "true" ]; then
- export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/
+ export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_corpora/
if [ ! -d "$DIR_FUZZ_IN" ]; then
${CI_RETRY_EXE} git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${DIR_QA_ASSETS}"
fi
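A local sketch mirroring the renamed corpora step above (the runner path under the build directory is an assumption; the Windows CI hunk earlier shows the equivalent invocation):

    git clone --depth=1 https://github.com/bitcoin-core/qa-assets
    build/test/fuzz/test_runner.py qa-assets/fuzz_corpora/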
@@ -121,9 +124,9 @@ if [[ "${RUN_TIDY}" == "true" ]]; then
BITCOIN_CONFIG_ALL="$BITCOIN_CONFIG_ALL -DCMAKE_EXPORT_COMPILE_COMMANDS=ON"
fi
-bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat CMakeFiles/CMakeOutput.log CMakeFiles/CMakeError.log) && false)"
+bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat $(cmake -P "${BASE_ROOT_DIR}/ci/test/GetCMakeLogFiles.cmake")) && false)"
-bash -c "make $MAKEJOBS all $GOAL" || ( echo "Build failure. Verbose build follows." && make all "$GOAL" V=1 ; false )
+bash -c "cmake --build . $MAKEJOBS --target all $GOAL" || ( echo "Build failure. Verbose build follows." && cmake --build . --target all "$GOAL" --verbose ; false )
bash -c "${PRINT_CCACHE_STATISTICS}"
du -sh "${DEPENDS_DIR}"/*/
@@ -137,8 +140,12 @@ if [ -n "$USE_VALGRIND" ]; then
"${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh"
fi
+if [ "$RUN_CHECK_DEPS" = "true" ]; then
+ "${BASE_ROOT_DIR}/contrib/devtools/check-deps.sh" .
+fi
+
if [ "$RUN_UNIT_TESTS" = "true" ]; then
- DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" CTEST_OUTPUT_ON_FAILURE=ON ctest "${MAKEJOBS}"
+ DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" CTEST_OUTPUT_ON_FAILURE=ON ctest "${MAKEJOBS}" --timeout $(( TEST_RUNNER_TIMEOUT_FACTOR * 60 ))
fi
if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
@@ -146,8 +153,9 @@ if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
fi
if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
- # shellcheck disable=SC2086
- LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/functional/test_runner.py --ci "${MAKEJOBS}" --tmpdirprefix "${BASE_SCRATCH_DIR}"/test_runner/ --ansi --combinedlogslen=99999999 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" ${TEST_RUNNER_EXTRA} --quiet --failfast
+ # parses TEST_RUNNER_EXTRA as an array which allows for multiple arguments such as TEST_RUNNER_EXTRA='--exclude "rpc_bind.py --ipv6"'
+ eval "TEST_RUNNER_EXTRA=($TEST_RUNNER_EXTRA)"
+ LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/functional/test_runner.py --ci "${MAKEJOBS}" --tmpdirprefix "${BASE_SCRATCH_DIR}"/test_runner/ --ansi --combinedlogslen=99999999 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" "${TEST_RUNNER_EXTRA[@]}" --quiet --failfast
fi
if [ "${RUN_TIDY}" = "true" ]; then
diff --git a/ci/test/GetCMakeLogFiles.cmake b/ci/test/GetCMakeLogFiles.cmake
new file mode 100644
index 0000000000..80f71dcf63
--- /dev/null
+++ b/ci/test/GetCMakeLogFiles.cmake
@@ -0,0 +1,11 @@
+# Copyright (c) 2024-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://opensource.org/license/mit/.
+
+if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.26)
+ set(log_files "CMakeFiles/CMakeConfigureLog.yaml")
+else()
+ set(log_files "CMakeFiles/CMakeOutput.log CMakeFiles/CMakeError.log")
+endif()
+
+execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${log_files})
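The helper above is consumed from the configure step in 03_test_script.sh; a sketch of the same call run by hand from a build directory:

    cat $(cmake -P ci/test/GetCMakeLogFiles.cmake)    # prints whichever CMake log files this CMake version writes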
diff --git a/cmake/bitcoin-config.h.in b/cmake/bitcoin-build-config.h.in
index 094eb8040a..094eb8040a 100644
--- a/cmake/bitcoin-config.h.in
+++ b/cmake/bitcoin-build-config.h.in
diff --git a/cmake/introspection.cmake b/cmake/introspection.cmake
index 5435a109d4..29c93869a7 100644
--- a/cmake/introspection.cmake
+++ b/cmake/introspection.cmake
@@ -6,7 +6,7 @@ include(CheckCXXSourceCompiles)
include(CheckCXXSymbolExists)
include(CheckIncludeFileCXX)
-# The following HAVE_{HEADER}_H variables go to the bitcoin-config.h header.
+# The following HAVE_{HEADER}_H variables go to the bitcoin-build-config.h header.
check_include_file_cxx(sys/prctl.h HAVE_SYS_PRCTL_H)
check_include_file_cxx(sys/resources.h HAVE_SYS_RESOURCES_H)
check_include_file_cxx(sys/vmmeter.h HAVE_SYS_VMMETER_H)
diff --git a/cmake/module/AddBoostIfNeeded.cmake b/cmake/module/AddBoostIfNeeded.cmake
index 89603ecd61..ecd0d6f2ab 100644
--- a/cmake/module/AddBoostIfNeeded.cmake
+++ b/cmake/module/AddBoostIfNeeded.cmake
@@ -64,9 +64,9 @@ function(add_boost_if_needed)
set(CMAKE_REQUIRED_DEFINITIONS)
endif()
- if(BUILD_TESTS)
- # Some package managers, such as vcpkg, vendor Boost.Test separately
- # from the rest of the headers, so we have to check for it individually.
+ # Some package managers, such as vcpkg, vendor Boost.Test separately
+ # from the rest of the headers, so we have to check for it individually.
+ if(BUILD_TESTS AND DEFINED VCPKG_TARGET_TRIPLET)
list(APPEND CMAKE_REQUIRED_DEFINITIONS -DBOOST_TEST_NO_MAIN)
include(CheckIncludeFileCXX)
check_include_file_cxx(boost/test/included/unit_test.hpp HAVE_BOOST_INCLUDED_UNIT_TEST_H)
diff --git a/cmake/module/FindNATPMP.cmake b/cmake/module/FindNATPMP.cmake
deleted file mode 100644
index 930555232b..0000000000
--- a/cmake/module/FindNATPMP.cmake
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2023-present The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or https://opensource.org/license/mit/.
-
-find_path(NATPMP_INCLUDE_DIR
- NAMES natpmp.h
-)
-
-find_library(NATPMP_LIBRARY
- NAMES natpmp
-)
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(NATPMP
- REQUIRED_VARS NATPMP_LIBRARY NATPMP_INCLUDE_DIR
-)
-
-if(NATPMP_FOUND AND NOT TARGET NATPMP::NATPMP)
- add_library(NATPMP::NATPMP UNKNOWN IMPORTED)
- set_target_properties(NATPMP::NATPMP PROPERTIES
- IMPORTED_LOCATION "${NATPMP_LIBRARY}"
- INTERFACE_INCLUDE_DIRECTORIES "${NATPMP_INCLUDE_DIR}"
- )
- set_property(TARGET NATPMP::NATPMP PROPERTY
- INTERFACE_COMPILE_DEFINITIONS USE_NATPMP=1 $<$<PLATFORM_ID:Windows>:NATPMP_STATICLIB>
- )
-endif()
-
-mark_as_advanced(
- NATPMP_INCLUDE_DIR
- NATPMP_LIBRARY
-)
diff --git a/cmake/module/FindQt5.cmake b/cmake/module/FindQt.cmake
index f39ee53d5b..2e43294a99 100644
--- a/cmake/module/FindQt5.cmake
+++ b/cmake/module/FindQt.cmake
@@ -3,10 +3,10 @@
# file COPYING or https://opensource.org/license/mit/.
#[=======================================================================[
-FindQt5
--------
+FindQt
+------
-Finds the Qt 5 headers and libraries.
+Finds the Qt headers and libraries.
This is a wrapper around find_package() command that:
- facilitates searching in various build environments
@@ -19,7 +19,7 @@ if(CMAKE_HOST_APPLE)
find_program(HOMEBREW_EXECUTABLE brew)
if(HOMEBREW_EXECUTABLE)
execute_process(
- COMMAND ${HOMEBREW_EXECUTABLE} --prefix qt@5
+ COMMAND ${HOMEBREW_EXECUTABLE} --prefix qt@${Qt_FIND_VERSION_MAJOR}
OUTPUT_VARIABLE _qt_homebrew_prefix
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE
@@ -40,10 +40,10 @@ endif()
# /usr/x86_64-w64-mingw32/lib/libm.a or /usr/arm-linux-gnueabihf/lib/libm.a.
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH)
-find_package(Qt5 ${Qt5_FIND_VERSION}
- COMPONENTS ${Qt5_FIND_COMPONENTS}
+find_package(Qt${Qt_FIND_VERSION_MAJOR} ${Qt_FIND_VERSION}
+ COMPONENTS ${Qt_FIND_COMPONENTS}
HINTS ${_qt_homebrew_prefix}
- PATH_SUFFIXES Qt5 # Required on OpenBSD systems.
+ PATH_SUFFIXES Qt${Qt_FIND_VERSION_MAJOR} # Required on OpenBSD systems.
)
unset(_qt_homebrew_prefix)
@@ -56,11 +56,11 @@ else()
endif()
include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(Qt5
- REQUIRED_VARS Qt5_DIR
- VERSION_VAR Qt5_VERSION
+find_package_handle_standard_args(Qt
+ REQUIRED_VARS Qt${Qt_FIND_VERSION_MAJOR}_DIR
+ VERSION_VAR Qt${Qt_FIND_VERSION_MAJOR}_VERSION
)
-foreach(component IN LISTS Qt5_FIND_COMPONENTS ITEMS "")
- mark_as_advanced(Qt5${component}_DIR)
+foreach(component IN LISTS Qt_FIND_COMPONENTS ITEMS "")
+ mark_as_advanced(Qt${Qt_FIND_VERSION_MAJOR}${component}_DIR)
endforeach()
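On macOS the module above queries Homebrew for the Qt prefix and feeds it to find_package() as a HINT; a sketch of the resulting workflow (brew-installed qt@5 assumed):

    brew --prefix qt@5             # the path passed through _qt_homebrew_prefix
    cmake -B build -DBUILD_GUI=ON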
diff --git a/cmake/module/FlagsSummary.cmake b/cmake/module/FlagsSummary.cmake
index 9a408f715d..91d1df90d9 100644
--- a/cmake/module/FlagsSummary.cmake
+++ b/cmake/module/FlagsSummary.cmake
@@ -22,7 +22,8 @@ function(print_flags_per_config config indent_num)
get_target_interface(definitions "${config}" core_interface COMPILE_DEFINITIONS)
indent_message("Preprocessor defined macros ..........." "${definitions}" ${indent_num})
- string(STRIP "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${config_uppercase}}" combined_cxx_flags)
+ string(STRIP "${CMAKE_CXX_COMPILER_ARG1} ${CMAKE_CXX_FLAGS}" combined_cxx_flags)
+ string(STRIP "${combined_cxx_flags} ${CMAKE_CXX_FLAGS_${config_uppercase}}" combined_cxx_flags)
string(STRIP "${combined_cxx_flags} ${CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION}" combined_cxx_flags)
if(CMAKE_POSITION_INDEPENDENT_CODE)
string(JOIN " " combined_cxx_flags ${combined_cxx_flags} ${CMAKE_CXX_COMPILE_OPTIONS_PIC})
diff --git a/cmake/module/GenerateHeaders.cmake b/cmake/module/GenerateHeaders.cmake
index 35dc54eebb..c69007acb6 100644
--- a/cmake/module/GenerateHeaders.cmake
+++ b/cmake/module/GenerateHeaders.cmake
@@ -11,10 +11,10 @@ function(generate_header_from_json json_source_relpath)
)
endfunction()
-function(generate_header_from_raw raw_source_relpath)
+function(generate_header_from_raw raw_source_relpath raw_namespace)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${raw_source_relpath}.h
- COMMAND ${CMAKE_COMMAND} -DRAW_SOURCE_PATH=${CMAKE_CURRENT_SOURCE_DIR}/${raw_source_relpath} -DHEADER_PATH=${CMAKE_CURRENT_BINARY_DIR}/${raw_source_relpath}.h -P ${PROJECT_SOURCE_DIR}/cmake/script/GenerateHeaderFromRaw.cmake
+ COMMAND ${CMAKE_COMMAND} -DRAW_SOURCE_PATH=${CMAKE_CURRENT_SOURCE_DIR}/${raw_source_relpath} -DHEADER_PATH=${CMAKE_CURRENT_BINARY_DIR}/${raw_source_relpath}.h -DRAW_NAMESPACE=${raw_namespace} -P ${PROJECT_SOURCE_DIR}/cmake/script/GenerateHeaderFromRaw.cmake
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${raw_source_relpath} ${PROJECT_SOURCE_DIR}/cmake/script/GenerateHeaderFromRaw.cmake
VERBATIM
)
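An illustrative call of the updated helper with its new second argument; the file path and namespace below are placeholders, not values used by this change.

    # Produces ${CMAKE_CURRENT_BINARY_DIR}/data/example.raw.h, exposing an
    # inline constexpr std::span named "example" inside namespace my_data.
    generate_header_from_raw(data/example.raw my_data)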
diff --git a/cmake/module/GenerateSetupNsi.cmake b/cmake/module/GenerateSetupNsi.cmake
index b7ea423611..3c358c5495 100644
--- a/cmake/module/GenerateSetupNsi.cmake
+++ b/cmake/module/GenerateSetupNsi.cmake
@@ -14,5 +14,5 @@ function(generate_setup_nsi)
set(BITCOIN_WALLET_TOOL_NAME "bitcoin-wallet")
set(BITCOIN_TEST_NAME "test_bitcoin")
set(EXEEXT ${CMAKE_EXECUTABLE_SUFFIX})
- configure_file(${PROJECT_SOURCE_DIR}/share/setup.nsi.in ${PROJECT_BINARY_DIR}/bitcoin-win64-setup.nsi @ONLY)
+ configure_file(${PROJECT_SOURCE_DIR}/share/setup.nsi.in ${PROJECT_BINARY_DIR}/bitcoin-win64-setup.nsi USE_SOURCE_PERMISSIONS @ONLY)
endfunction()
diff --git a/cmake/module/Maintenance.cmake b/cmake/module/Maintenance.cmake
index 2c6cfc1863..456419b722 100644
--- a/cmake/module/Maintenance.cmake
+++ b/cmake/module/Maintenance.cmake
@@ -92,11 +92,11 @@ function(add_macos_deploy_target)
if(CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND TARGET bitcoin-qt)
set(macos_app "Bitcoin-Qt.app")
# Populate Contents subdirectory.
- configure_file(${PROJECT_SOURCE_DIR}/share/qt/Info.plist.in ${macos_app}/Contents/Info.plist)
+ configure_file(${PROJECT_SOURCE_DIR}/share/qt/Info.plist.in ${macos_app}/Contents/Info.plist NO_SOURCE_PERMISSIONS)
file(CONFIGURE OUTPUT ${macos_app}/Contents/PkgInfo CONTENT "APPL????")
# Populate Contents/Resources subdirectory.
file(CONFIGURE OUTPUT ${macos_app}/Contents/Resources/empty.lproj CONTENT "")
- configure_file(${PROJECT_SOURCE_DIR}/src/qt/res/icons/bitcoin.icns ${macos_app}/Contents/Resources/bitcoin.icns COPYONLY)
+ configure_file(${PROJECT_SOURCE_DIR}/src/qt/res/icons/bitcoin.icns ${macos_app}/Contents/Resources/bitcoin.icns NO_SOURCE_PERMISSIONS COPYONLY)
file(CONFIGURE OUTPUT ${macos_app}/Contents/Resources/Base.lproj/InfoPlist.strings
CONTENT "{ CFBundleDisplayName = \"@PACKAGE_NAME@\"; CFBundleName = \"@PACKAGE_NAME@\"; }"
)
diff --git a/cmake/module/ProcessConfigurations.cmake b/cmake/module/ProcessConfigurations.cmake
index 5286d10267..7e2fc0080e 100644
--- a/cmake/module/ProcessConfigurations.cmake
+++ b/cmake/module/ProcessConfigurations.cmake
@@ -163,8 +163,8 @@ else()
unset(cxx_flags_debug_overridden)
endif()
-set(CMAKE_CXX_FLAGS_COVERAGE "-Og --coverage")
-set(CMAKE_OBJCXX_FLAGS_COVERAGE "-Og --coverage")
+set(CMAKE_CXX_FLAGS_COVERAGE "-g -Og --coverage")
+set(CMAKE_OBJCXX_FLAGS_COVERAGE "-g -Og --coverage")
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "--coverage")
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE "--coverage")
get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
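These flags only take effect when the Coverage configuration is selected, for example with a single-config generator:

    cmake -B build -DCMAKE_BUILD_TYPE=Coverage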
diff --git a/cmake/module/TryAppendCXXFlags.cmake b/cmake/module/TryAppendCXXFlags.cmake
index 0f6e014d43..c07455e89e 100644
--- a/cmake/module/TryAppendCXXFlags.cmake
+++ b/cmake/module/TryAppendCXXFlags.cmake
@@ -32,12 +32,6 @@ Usage examples:
)
- try_append_cxx_flags("-Werror=return-type" TARGET core_interface
- IF_CHECK_FAILED "-Wno-error=return-type"
- SOURCE "#include <cassert>\nint f(){ assert(false); }"
- )
-
-
In configuration output, this function prints a string by the following pattern:
-- Performing Test CXX_SUPPORTS_[flags]
@@ -49,7 +43,7 @@ function(try_append_cxx_flags flags)
TACXXF # prefix
"SKIP_LINK" # options
"TARGET;VAR;SOURCE;RESULT_VAR" # one_value_keywords
- "IF_CHECK_PASSED;IF_CHECK_FAILED" # multi_value_keywords
+ "IF_CHECK_PASSED" # multi_value_keywords
)
set(flags_as_string "${flags}")
@@ -88,13 +82,6 @@ function(try_append_cxx_flags flags)
string(STRIP "${${TACXXF_VAR}} ${flags_as_string}" ${TACXXF_VAR})
endif()
endif()
- elseif(DEFINED TACXXF_IF_CHECK_FAILED)
- if(DEFINED TACXXF_TARGET)
- target_compile_options(${TACXXF_TARGET} INTERFACE ${TACXXF_IF_CHECK_FAILED})
- endif()
- if(DEFINED TACXXF_VAR)
- string(STRIP "${${TACXXF_VAR}} ${TACXXF_IF_CHECK_FAILED}" ${TACXXF_VAR})
- endif()
endif()
if(DEFINED TACXXF_VAR)
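With the IF_CHECK_FAILED path removed, callers can only append flags on success; a sketch of the remaining usage, with an illustrative flag:

    # Adds the flags to core_interface only if the compiler accepts them;
    # nothing is appended when the check fails.
    try_append_cxx_flags("-Wformat -Wformat-security" TARGET core_interface)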
diff --git a/cmake/module/TryAppendLinkerFlag.cmake b/cmake/module/TryAppendLinkerFlag.cmake
index 749120d445..8cbd83678d 100644
--- a/cmake/module/TryAppendLinkerFlag.cmake
+++ b/cmake/module/TryAppendLinkerFlag.cmake
@@ -22,7 +22,7 @@ function(try_append_linker_flag flag)
TALF # prefix
"" # options
"TARGET;VAR;SOURCE;RESULT_VAR" # one_value_keywords
- "IF_CHECK_PASSED;IF_CHECK_FAILED" # multi_value_keywords
+ "IF_CHECK_PASSED" # multi_value_keywords
)
string(MAKE_C_IDENTIFIER "${flag}" result)
@@ -58,13 +58,6 @@ function(try_append_linker_flag flag)
string(STRIP "${${TALF_VAR}} ${flag}" ${TALF_VAR})
endif()
endif()
- elseif(DEFINED TALF_IF_CHECK_FAILED)
- if(DEFINED TALF_TARGET)
- target_link_options(${TALF_TARGET} INTERFACE ${TACXXF_IF_CHECK_FAILED})
- endif()
- if(DEFINED TALF_VAR)
- string(STRIP "${${TALF_VAR}} ${TACXXF_IF_CHECK_FAILED}" ${TALF_VAR})
- endif()
endif()
if(DEFINED TALF_VAR)
diff --git a/cmake/script/Coverage.cmake b/cmake/script/Coverage.cmake
index 0df2e0b734..72587a5eb6 100644
--- a/cmake/script/Coverage.cmake
+++ b/cmake/script/Coverage.cmake
@@ -21,26 +21,32 @@ execute_process(
execute_process(
COMMAND ${LCOV_COMMAND} --capture --directory src --test-name test_bitcoin --output-file test_bitcoin.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --zerocounters --directory src
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_FILTER_COMMAND} test_bitcoin.info test_bitcoin_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --add-tracefile test_bitcoin_filtered.info --output-file test_bitcoin_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --add-tracefile test_bitcoin_filtered.info --output-file test_bitcoin_coverage.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${GENHTML_COMMAND} test_bitcoin_coverage.info --output-directory test_bitcoin.coverage
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
@@ -51,18 +57,22 @@ execute_process(
execute_process(
COMMAND ${LCOV_COMMAND} --capture --directory src --test-name functional-tests --output-file functional_test.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --zerocounters --directory src
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_FILTER_COMMAND} functional_test.info functional_test_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --add-tracefile functional_test_filtered.info --output-file functional_test_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --add-tracefile test_bitcoin_filtered.info --add-tracefile functional_test_filtered.info --output-file total_coverage.info
@@ -70,8 +80,10 @@ execute_process(
COMMAND ${AWK_EXECUTABLE} "{ print substr($3,2,50) \"/\" $5 }"
OUTPUT_FILE coverage_percent.txt
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${GENHTML_COMMAND} total_coverage.info --output-directory total.coverage
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
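The addition repeated throughout this script is the same idiom each time: COMMAND_ERROR_IS_FATAL ANY makes execute_process() abort the script on a non-zero exit code instead of continuing with a missing tracefile. A standalone sketch, with illustrative lcov arguments:

    execute_process(
      COMMAND ${LCOV_COMMAND} --capture --directory src --output-file example.info
      WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
      COMMAND_ERROR_IS_FATAL ANY   # stop here if lcov fails
    )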
diff --git a/cmake/script/CoverageFuzz.cmake b/cmake/script/CoverageFuzz.cmake
index 2626ea0cb5..0558805394 100644
--- a/cmake/script/CoverageFuzz.cmake
+++ b/cmake/script/CoverageFuzz.cmake
@@ -4,30 +4,39 @@
include(${CMAKE_CURRENT_LIST_DIR}/CoverageInclude.cmake)
-if(NOT DEFINED FUZZ_SEED_CORPUS_DIR)
- set(FUZZ_SEED_CORPUS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/qa-assets/fuzz_seed_corpus)
+if(NOT DEFINED FUZZ_CORPORA_DIR)
+ set(FUZZ_CORPORA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/qa-assets/fuzz_corpora)
+endif()
+
+set(fuzz_test_runner test/fuzz/test_runner.py ${FUZZ_CORPORA_DIR})
+if(DEFINED JOBS)
+ list(APPEND fuzz_test_runner -j ${JOBS})
endif()
execute_process(
- COMMAND test/fuzz/test_runner.py ${FUZZ_SEED_CORPUS_DIR} --loglevel DEBUG
+ COMMAND ${fuzz_test_runner} --loglevel DEBUG
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --capture --directory src --test-name fuzz-tests --output-file fuzz.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --zerocounters --directory src
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_FILTER_COMMAND} fuzz.info fuzz_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --add-tracefile fuzz_filtered.info --output-file fuzz_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --add-tracefile fuzz_filtered.info --output-file fuzz_coverage.info
@@ -35,8 +44,10 @@ execute_process(
COMMAND ${AWK_EXECUTABLE} "{ print substr($3,2,50) \"/\" $5 }"
OUTPUT_FILE coverage_percent.txt
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${GENHTML_COMMAND} fuzz_coverage.info --output-directory fuzz.coverage
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
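Presumably the script is driven in CMake script mode, with the new variables passed on the command line; the script location and corpora path below are assumptions, not part of this change.

    cmake -DJOBS=$(nproc) -DFUZZ_CORPORA_DIR=/path/to/qa-assets/fuzz_corpora \
          -P CoverageFuzz.cmake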
diff --git a/cmake/script/CoverageInclude.cmake.in b/cmake/script/CoverageInclude.cmake.in
index 7a8bf2f0af..59bf5e3af2 100644
--- a/cmake/script/CoverageInclude.cmake.in
+++ b/cmake/script/CoverageInclude.cmake.in
@@ -45,12 +45,15 @@ list(APPEND LCOV_FILTER_COMMAND -p "depends")
execute_process(
COMMAND ${LCOV_COMMAND} --capture --initial --directory src --output-file baseline.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_FILTER_COMMAND} baseline.info baseline_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
execute_process(
COMMAND ${LCOV_COMMAND} --add-tracefile baseline_filtered.info --output-file baseline_filtered.info
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+ COMMAND_ERROR_IS_FATAL ANY
)
diff --git a/cmake/script/GenerateHeaderFromJson.cmake b/cmake/script/GenerateHeaderFromJson.cmake
index 279ceedf04..4a3bddb323 100644
--- a/cmake/script/GenerateHeaderFromJson.cmake
+++ b/cmake/script/GenerateHeaderFromJson.cmake
@@ -2,23 +2,21 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://opensource.org/license/mit/.
+cmake_path(GET JSON_SOURCE_PATH STEM json_source_basename)
+
file(READ ${JSON_SOURCE_PATH} hex_content HEX)
-string(REGEX MATCHALL "([A-Za-z0-9][A-Za-z0-9])" bytes "${hex_content}")
+string(REGEX REPLACE "................" "\\0\n" formatted_bytes "${hex_content}")
+string(REGEX REPLACE "[^\n][^\n]" "0x\\0, " formatted_bytes "${formatted_bytes}")
-file(WRITE ${HEADER_PATH} "#include <string>\n")
-file(APPEND ${HEADER_PATH} "namespace json_tests{\n")
-get_filename_component(json_source_basename ${JSON_SOURCE_PATH} NAME_WE)
-file(APPEND ${HEADER_PATH} "static const std::string ${json_source_basename}{\n")
+set(header_content
+"#include <string_view>
-set(i 0)
-foreach(byte ${bytes})
- math(EXPR i "${i} + 1")
- math(EXPR remainder "${i} % 8")
- if(remainder EQUAL 0)
- file(APPEND ${HEADER_PATH} "0x${byte},\n")
- else()
- file(APPEND ${HEADER_PATH} "0x${byte}, ")
- endif()
-endforeach()
+namespace json_tests {
+inline constexpr char detail_${json_source_basename}_bytes[] {
+${formatted_bytes}
+};
-file(APPEND ${HEADER_PATH} "\n};};")
+inline constexpr std::string_view ${json_source_basename}{std::begin(detail_${json_source_basename}_bytes), std::end(detail_${json_source_basename}_bytes)};
+}
+")
+file(WRITE ${HEADER_PATH} "${header_content}")
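The two REGEX REPLACE calls do all of the formatting that the removed foreach loop did; a tiny self-contained sketch on a made-up hex string:

    # The first replace breaks the hex stream into lines of 16 digits (8 bytes);
    # the second turns every remaining digit pair into a "0x.., " literal.
    set(hex_content "48656c6c6f")   # "Hello"
    string(REGEX REPLACE "................" "\\0\n" formatted_bytes "${hex_content}")
    string(REGEX REPLACE "[^\n][^\n]" "0x\\0, " formatted_bytes "${formatted_bytes}")
    message(STATUS "${formatted_bytes}")   # 0x48, 0x65, 0x6c, 0x6c, 0x6f,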
diff --git a/cmake/script/GenerateHeaderFromRaw.cmake b/cmake/script/GenerateHeaderFromRaw.cmake
index 18c5b4bef2..638876ecea 100644
--- a/cmake/script/GenerateHeaderFromRaw.cmake
+++ b/cmake/script/GenerateHeaderFromRaw.cmake
@@ -2,21 +2,22 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://opensource.org/license/mit/.
+cmake_path(GET RAW_SOURCE_PATH STEM raw_source_basename)
+
file(READ ${RAW_SOURCE_PATH} hex_content HEX)
-string(REGEX MATCHALL "([A-Za-z0-9][A-Za-z0-9])" bytes "${hex_content}")
+string(REGEX REPLACE "................" "\\0\n" formatted_bytes "${hex_content}")
+string(REGEX REPLACE "[^\n][^\n]" "std::byte{0x\\0}, " formatted_bytes "${formatted_bytes}")
-get_filename_component(raw_source_basename ${RAW_SOURCE_PATH} NAME_WE)
-file(WRITE ${HEADER_PATH} "static unsigned const char ${raw_source_basename}_raw[] = {\n")
+set(header_content
+"#include <cstddef>
+#include <span>
-set(i 0)
-foreach(byte ${bytes})
- math(EXPR i "${i} + 1")
- math(EXPR remainder "${i} % 8")
- if(remainder EQUAL 0)
- file(APPEND ${HEADER_PATH} "0x${byte},\n")
- else()
- file(APPEND ${HEADER_PATH} "0x${byte}, ")
- endif()
-endforeach()
+namespace ${RAW_NAMESPACE} {
+inline constexpr std::byte detail_${raw_source_basename}_raw[] {
+${formatted_bytes}
+};
-file(APPEND ${HEADER_PATH} "\n};")
+inline constexpr std::span ${raw_source_basename}{detail_${raw_source_basename}_raw};
+}
+")
+file(WRITE ${HEADER_PATH} "${header_content}")
diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt
index 95345b4782..c6f683f7ab 100644
--- a/contrib/devtools/bitcoin-tidy/CMakeLists.txt
+++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt
@@ -25,7 +25,7 @@ find_program(CLANG_TIDY_EXE NAMES "clang-tidy-${LLVM_VERSION_MAJOR}" "clang-tidy
message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}")
message(STATUS "Found clang-tidy: ${CLANG_TIDY_EXE}")
-add_library(bitcoin-tidy MODULE bitcoin-tidy.cpp logprintf.cpp nontrivial-threadlocal.cpp)
+add_library(bitcoin-tidy MODULE bitcoin-tidy.cpp nontrivial-threadlocal.cpp)
target_include_directories(bitcoin-tidy SYSTEM PRIVATE ${LLVM_INCLUDE_DIRS})
# Disable RTTI and exceptions as necessary
@@ -58,7 +58,7 @@ else()
endif()
# Create a dummy library that runs clang-tidy tests as a side-effect of building
-add_library(bitcoin-tidy-tests OBJECT EXCLUDE_FROM_ALL example_logprintf.cpp example_nontrivial-threadlocal.cpp)
+add_library(bitcoin-tidy-tests OBJECT EXCLUDE_FROM_ALL example_nontrivial-threadlocal.cpp)
add_dependencies(bitcoin-tidy-tests bitcoin-tidy)
set_target_properties(bitcoin-tidy-tests PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}")
diff --git a/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp
index 1ef4494973..f2658b5a58 100644
--- a/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp
+++ b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp
@@ -2,7 +2,6 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "logprintf.h"
#include "nontrivial-threadlocal.h"
#include <clang-tidy/ClangTidyModule.h>
@@ -13,7 +12,6 @@ class BitcoinModule final : public clang::tidy::ClangTidyModule
public:
void addCheckFactories(clang::tidy::ClangTidyCheckFactories& CheckFactories) override
{
- CheckFactories.registerCheck<bitcoin::LogPrintfCheck>("bitcoin-unterminated-logprintf");
CheckFactories.registerCheck<bitcoin::NonTrivialThreadLocal>("bitcoin-nontrivial-threadlocal");
}
};
diff --git a/contrib/devtools/bitcoin-tidy/example_logprintf.cpp b/contrib/devtools/bitcoin-tidy/example_logprintf.cpp
deleted file mode 100644
index dc77f668e3..0000000000
--- a/contrib/devtools/bitcoin-tidy/example_logprintf.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2023 Bitcoin Developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <string>
-
-// Test for bitcoin-unterminated-logprintf
-
-enum LogFlags {
- NONE
-};
-
-enum Level {
- None
-};
-
-template <typename... Args>
-static inline void LogPrintf_(const std::string& logging_function, const std::string& source_file, const int source_line, const LogFlags flag, const Level level, const char* fmt, const Args&... args)
-{
-}
-
-#define LogPrintLevel_(category, level, ...) LogPrintf_(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__)
-#define LogPrintf(...) LogPrintLevel_(LogFlags::NONE, Level::None, __VA_ARGS__)
-
-#define LogDebug(category, ...) \
- do { \
- LogPrintf(__VA_ARGS__); \
- } while (0)
-
-
-class CWallet
-{
- std::string GetDisplayName() const
- {
- return "default wallet";
- }
-
-public:
- template <typename... Params>
- void WalletLogPrintf(const char* fmt, Params... parameters) const
- {
- LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...);
- };
-};
-
-struct ScriptPubKeyMan
-{
- std::string GetDisplayName() const
- {
- return "default wallet";
- }
-
- template <typename... Params>
- void WalletLogPrintf(const char* fmt, Params... parameters) const
- {
- LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...);
- };
-};
-
-void good_func()
-{
- LogPrintf("hello world!\n");
-}
-void good_func2()
-{
- CWallet wallet;
- wallet.WalletLogPrintf("hi\n");
- ScriptPubKeyMan spkm;
- spkm.WalletLogPrintf("hi\n");
-
- const CWallet& walletref = wallet;
- walletref.WalletLogPrintf("hi\n");
-
- auto* walletptr = new CWallet();
- walletptr->WalletLogPrintf("hi\n");
- delete walletptr;
-}
-void bad_func()
-{
- LogPrintf("hello world!");
-}
-void bad_func2()
-{
- LogPrintf("");
-}
-void bad_func3()
-{
- // Ending in "..." has no special meaning.
- LogPrintf("hello world!...");
-}
-void bad_func4_ignored()
-{
- LogPrintf("hello world!"); // NOLINT(bitcoin-unterminated-logprintf)
-}
-void bad_func5()
-{
- CWallet wallet;
- wallet.WalletLogPrintf("hi");
- ScriptPubKeyMan spkm;
- spkm.WalletLogPrintf("hi");
-
- const CWallet& walletref = wallet;
- walletref.WalletLogPrintf("hi");
-
- auto* walletptr = new CWallet();
- walletptr->WalletLogPrintf("hi");
- delete walletptr;
-}
diff --git a/contrib/devtools/bitcoin-tidy/logprintf.cpp b/contrib/devtools/bitcoin-tidy/logprintf.cpp
deleted file mode 100644
index 36beac28c8..0000000000
--- a/contrib/devtools/bitcoin-tidy/logprintf.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2023 Bitcoin Developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include "logprintf.h"
-
-#include <clang/AST/ASTContext.h>
-#include <clang/ASTMatchers/ASTMatchFinder.h>
-
-
-namespace {
-AST_MATCHER(clang::StringLiteral, unterminated)
-{
- size_t len = Node.getLength();
- if (len > 0 && Node.getCodeUnit(len - 1) == '\n') {
- return false;
- }
- return true;
-}
-} // namespace
-
-namespace bitcoin {
-
-void LogPrintfCheck::registerMatchers(clang::ast_matchers::MatchFinder* finder)
-{
- using namespace clang::ast_matchers;
-
- /*
- Logprintf(..., ..., ..., ..., ..., "foo", ...)
- */
-
- finder->addMatcher(
- callExpr(
- callee(functionDecl(hasName("LogPrintf_"))),
- hasArgument(5, stringLiteral(unterminated()).bind("logstring"))),
- this);
-
- /*
- auto walletptr = &wallet;
- wallet.WalletLogPrintf("foo");
- wallet->WalletLogPrintf("foo");
- */
- finder->addMatcher(
- cxxMemberCallExpr(
- callee(cxxMethodDecl(hasName("WalletLogPrintf"))),
- hasArgument(0, stringLiteral(unterminated()).bind("logstring"))),
- this);
-}
-
-void LogPrintfCheck::check(const clang::ast_matchers::MatchFinder::MatchResult& Result)
-{
- if (const clang::StringLiteral* lit = Result.Nodes.getNodeAs<clang::StringLiteral>("logstring")) {
- const clang::ASTContext& ctx = *Result.Context;
- const auto user_diag = diag(lit->getEndLoc(), "Unterminated format string used with LogPrintf");
- const auto& loc = lit->getLocationOfByte(lit->getByteLength(), *Result.SourceManager, ctx.getLangOpts(), ctx.getTargetInfo());
- user_diag << clang::FixItHint::CreateInsertion(loc, "\\n");
- }
-}
-
-} // namespace bitcoin
diff --git a/contrib/devtools/bitcoin-tidy/logprintf.h b/contrib/devtools/bitcoin-tidy/logprintf.h
deleted file mode 100644
index db95dfe143..0000000000
--- a/contrib/devtools/bitcoin-tidy/logprintf.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2023 Bitcoin Developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef LOGPRINTF_CHECK_H
-#define LOGPRINTF_CHECK_H
-
-#include <clang-tidy/ClangTidyCheck.h>
-
-namespace bitcoin {
-
-// Warn about any use of LogPrintf that does not end with a newline.
-class LogPrintfCheck final : public clang::tidy::ClangTidyCheck
-{
-public:
- LogPrintfCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context)
- : clang::tidy::ClangTidyCheck(Name, Context) {}
-
- bool isLanguageVersionSupported(const clang::LangOptions& LangOpts) const override
- {
- return LangOpts.CPlusPlus;
- }
- void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override;
- void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override;
-};
-
-} // namespace bitcoin
-
-#endif // LOGPRINTF_CHECK_H
diff --git a/contrib/devtools/check-deps.sh b/contrib/devtools/check-deps.sh
index 9d2eebe14d..cdfc4e7533 100755
--- a/contrib/devtools/check-deps.sh
+++ b/contrib/devtools/check-deps.sh
@@ -8,11 +8,10 @@ declare -A LIBS
LIBS[cli]="libbitcoin_cli.a"
LIBS[common]="libbitcoin_common.a"
LIBS[consensus]="libbitcoin_consensus.a"
-LIBS[crypto]="crypto/.libs/libbitcoin_crypto_base.a crypto/.libs/libbitcoin_crypto_x86_shani.a crypto/.libs/libbitcoin_crypto_sse41.a crypto/.libs/libbitcoin_crypto_avx2.a"
+LIBS[crypto]="crypto/libbitcoin_crypto.a crypto/libbitcoin_crypto_x86_shani.a crypto/libbitcoin_crypto_sse41.a crypto/libbitcoin_crypto_avx2.a"
LIBS[node]="libbitcoin_node.a"
-LIBS[util]="libbitcoin_util.a"
-LIBS[wallet]="libbitcoin_wallet.a"
-LIBS[wallet_tool]="libbitcoin_wallet_tool.a"
+LIBS[util]="util/libbitcoin_util.a"
+LIBS[wallet]="wallet/libbitcoin_wallet.a"
# Declare allowed dependencies "X Y" where X is allowed to depend on Y. This
# list is taken from doc/design/libraries.md.
@@ -32,43 +31,41 @@ ALLOWED_DEPENDENCIES=(
"wallet common"
"wallet crypto"
"wallet util"
- "wallet_tool util"
- "wallet_tool wallet"
)
# Add minor dependencies omitted from doc/design/libraries.md to keep the
# dependency diagram simple.
ALLOWED_DEPENDENCIES+=(
"wallet consensus"
- "wallet_tool common"
- "wallet_tool crypto"
)
# Declare list of known errors that should be suppressed.
declare -A SUPPRESS
# init.cpp file currently calls Berkeley DB sanity check function on startup, so
# there is an undocumented dependency of the node library on the wallet library.
-SUPPRESS["libbitcoin_node_a-init.o libbitcoin_wallet_a-bdb.o _ZN6wallet27BerkeleyDatabaseSanityCheckEv"]=1
+SUPPRESS["init.cpp.o bdb.cpp.o _ZN6wallet27BerkeleyDatabaseSanityCheckEv"]=1
# init/common.cpp file calls InitError and InitWarning from interface_ui which
# is currently part of the node library. interface_ui should just be part of the
# common library instead, and is moved in
# https://github.com/bitcoin/bitcoin/issues/10102
-SUPPRESS["libbitcoin_common_a-common.o libbitcoin_node_a-interface_ui.o _Z11InitWarningRK13bilingual_str"]=1
-SUPPRESS["libbitcoin_common_a-common.o libbitcoin_node_a-interface_ui.o _Z9InitErrorRK13bilingual_str"]=1
+SUPPRESS["common.cpp.o interface_ui.cpp.o _Z11InitWarningRK13bilingual_str"]=1
+SUPPRESS["common.cpp.o interface_ui.cpp.o _Z9InitErrorRK13bilingual_str"]=1
# rpc/external_signer.cpp defines node RPC methods but is built as part of the
# common library. It should be moved to the node library instead.
-SUPPRESS["libbitcoin_common_a-external_signer.o libbitcoin_node_a-server.o _ZN9CRPCTable13appendCommandERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPK11CRPCCommand"]=1
+SUPPRESS["external_signer.cpp.o server.cpp.o _ZN9CRPCTable13appendCommandERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPK11CRPCCommand"]=1
usage() {
echo "Usage: $(basename "${BASH_SOURCE[0]}") [BUILD_DIR]"
}
-# Output makefile targets, converting library .a paths to libtool .la targets
+# Output makefile targets, converting library .a paths to CMake targets
lib_targets() {
for lib in "${!LIBS[@]}"; do
for lib_path in ${LIBS[$lib]}; do
- # shellcheck disable=SC2001
- sed 's:/.libs/\(.*\)\.a$:/\1.la:g' <<<"$lib_path"
+ local name="${lib_path##*/}"
+ name="${name#lib}"
+ name="${name%.a}"
+ echo "$name"
done
done
}
@@ -78,8 +75,8 @@ extract_symbols() {
local temp_dir="$1"
for lib in "${!LIBS[@]}"; do
for lib_path in ${LIBS[$lib]}; do
- nm -o "$lib_path" | grep ' T ' | awk '{print $3, $1}' >> "${temp_dir}/${lib}_exports.txt"
- nm -o "$lib_path" | grep ' U ' | awk '{print $3, $1}' >> "${temp_dir}/${lib}_imports.txt"
+ nm -o "$lib_path" | { grep ' T \| W ' || true; } | awk '{print $3, $1}' >> "${temp_dir}/${lib}_exports.txt"
+ nm -o "$lib_path" | { grep ' U ' || true; } | awk '{print $3, $1}' >> "${temp_dir}/${lib}_imports.txt"
awk '{print $1}' "${temp_dir}/${lib}_exports.txt" | sort -u > "${temp_dir}/${lib}_exported_symbols.txt"
awk '{print $1}' "${temp_dir}/${lib}_imports.txt" | sort -u > "${temp_dir}/${lib}_imported_symbols.txt"
done
@@ -136,7 +133,7 @@ check_disallowed() {
dst_obj=$(obj_names "$symbol" "${temp_dir}/${dst}_exports.txt")
while read src_obj; do
if ! check_suppress "$src_obj" "$dst_obj" "$symbol"; then
- echo "Error: $src_obj depends on $dst_obj symbol '$(c++filt "$symbol")', can suppess with:"
+ echo "Error: $src_obj depends on $dst_obj symbol '$(c++filt "$symbol")', can suppress with:"
echo " SUPPRESS[\"$src_obj $dst_obj $symbol\"]=1"
result=1
fi
@@ -148,7 +145,7 @@ check_disallowed() {
# Declare array to track errors which were suppressed.
declare -A SUPPRESSED
-# Return whether error should be suppressed and record suppresssion in
+# Return whether error should be suppressed and record suppression in
# SUPPRESSED array.
check_suppress() {
local src_obj="$1"
@@ -164,7 +161,7 @@ check_suppress() {
return 1
}
-# Warn about error which were supposed to be suppress, but were not encountered.
+# Warn about error which were supposed to be suppressed, but were not encountered.
check_not_suppressed() {
for suppress in "${!SUPPRESS[@]}"; do
if [[ ! -v SUPPRESSED[$suppress] ]]; then
@@ -175,7 +172,7 @@ check_not_suppressed() {
# Check arguments.
if [ "$#" = 0 ]; then
- BUILD_DIR="$(dirname "${BASH_SOURCE[0]}")/../../src"
+ BUILD_DIR="$(dirname "${BASH_SOURCE[0]}")/../../build"
elif [ "$#" = 1 ]; then
BUILD_DIR="$1"
else
@@ -190,14 +187,17 @@ if [ ! -f "$BUILD_DIR/Makefile" ]; then
fi
# Build libraries and run checks.
-cd "$BUILD_DIR"
# shellcheck disable=SC2046
-make -j"$(nproc)" $(lib_targets)
+cmake --build "$BUILD_DIR" -j"$(nproc)" -t $(lib_targets)
TEMP_DIR="$(mktemp -d)"
+cd "$BUILD_DIR/src"
extract_symbols "$TEMP_DIR"
if check_libraries "$TEMP_DIR"; then
echo "Success! No unexpected dependencies were detected."
+ RET=0
else
echo >&2 "Error: Unexpected dependencies were detected. Check previous output."
+ RET=1
fi
rm -r "$TEMP_DIR"
+exit $RET
diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py
index e2b661d65d..30e804dbe2 100755
--- a/contrib/devtools/clang-format-diff.py
+++ b/contrib/devtools/clang-format-diff.py
@@ -164,7 +164,7 @@ def main():
'Failed to run "%s" - %s"' % (" ".join(command), e.strerror)
)
- stdout, stderr = p.communicate()
+ stdout, _stderr = p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
diff --git a/contrib/devtools/gen-bitcoin-conf.sh b/contrib/devtools/gen-bitcoin-conf.sh
index 2ebbd42022..d830852c9e 100755
--- a/contrib/devtools/gen-bitcoin-conf.sh
+++ b/contrib/devtools/gen-bitcoin-conf.sh
@@ -72,9 +72,12 @@ cat >> "${EXAMPLE_CONF_FILE}" << 'EOF'
# Options for mainnet
[main]
-# Options for testnet
+# Options for testnet3
[test]
+# Options for testnet4
+[testnet4]
+
# Options for signet
[signet]
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 46f9ee915f..4c20685b51 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -7,6 +7,7 @@ Perform basic security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
'''
+import re
import sys
import lief
@@ -116,6 +117,25 @@ def check_ELF_CONTROL_FLOW(binary) -> bool:
return True
return False
+def check_ELF_FORTIFY(binary) -> bool:
+
+ # bitcoin-util does not currently contain any fortified functions
+ if 'Bitcoin Core bitcoin-util utility version ' in binary.strings:
+ return True
+
+ chk_funcs = set()
+
+ for sym in binary.imported_symbols:
+ match = re.search(r'__[a-z]*_chk', sym.name)
+ if match:
+ chk_funcs.add(match.group(0))
+
+ # ignore stack-protector and bdb
+ chk_funcs.discard('__stack_chk')
+ chk_funcs.discard('__db_chk')
+
+ return len(chk_funcs) >= 1
+
def check_PE_DYNAMIC_BASE(binary) -> bool:
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists
@@ -228,11 +248,11 @@ BASE_MACHO = [
CHECKS = {
lief.EXE_FORMATS.ELF: {
- lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_CONTROL_FLOW)],
- lief.ARCHITECTURES.ARM: BASE_ELF,
- lief.ARCHITECTURES.ARM64: BASE_ELF,
- lief.ARCHITECTURES.PPC: BASE_ELF,
- lief.ARCHITECTURES.RISCV: BASE_ELF,
+ lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_CONTROL_FLOW), ('FORTIFY', check_ELF_FORTIFY)],
+ lief.ARCHITECTURES.ARM: BASE_ELF + [('FORTIFY', check_ELF_FORTIFY)],
+ lief.ARCHITECTURES.ARM64: BASE_ELF + [('FORTIFY', check_ELF_FORTIFY)],
+ lief.ARCHITECTURES.PPC: BASE_ELF + [('FORTIFY', check_ELF_FORTIFY)],
+ lief.ARCHITECTURES.RISCV: BASE_ELF, # Skip FORTIFY. See https://github.com/lief-project/LIEF/issues/1082.
},
lief.EXE_FORMATS.PE: {
lief.ARCHITECTURES.X86: BASE_PE,
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 1722c7d290..3f6010280a 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -235,7 +235,7 @@ def check_MACHO_libraries(binary) -> bool:
return ok
def check_MACHO_min_os(binary) -> bool:
- if binary.build_version.minos == [11,0,0]:
+ if binary.build_version.minos == [13,0,0]:
return True
return False
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 4bec6bfe7c..99f171608d 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -59,17 +59,22 @@ class TestSecurityChecks(unittest.TestCase):
arch = get_arch(cxx, source, executable)
if arch == lief.ARCHITECTURES.X86:
- pass_flags = ['-Wl,-znoexecstack', '-Wl,-zrelro', '-Wl,-z,now', '-pie', '-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']
+ pass_flags = ['-D_FORTIFY_SOURCE=3', '-Wl,-znoexecstack', '-Wl,-zrelro', '-Wl,-z,now', '-pie', '-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-zexecstack']), (1, executable + ': failed NX'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-no-pie','-fno-PIE']), (1, executable + ': failed PIE'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-znorelro']), (1, executable + ': failed RELRO'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-z,noseparate-code']), (1, executable + ': failed SEPARATE_CODE'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-fcf-protection=none']), (1, executable + ': failed CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-U_FORTIFY_SOURCE']), (1, executable + ': failed FORTIFY'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags), (0, ''))
else:
- pass_flags = ['-Wl,-znoexecstack', '-Wl,-zrelro', '-Wl,-z,now', '-pie', '-fPIE', '-Wl,-z,separate-code']
+ pass_flags = ['-D_FORTIFY_SOURCE=3', '-Wl,-znoexecstack', '-Wl,-zrelro', '-Wl,-z,now', '-pie', '-fPIE', '-Wl,-z,separate-code']
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-zexecstack']), (1, executable + ': failed NX'))
- self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-no-pie','-fno-PIE']), (1, executable + ': failed PIE'))
+ # LIEF fails to parse RISC-V with no PIE correctly, and doesn't detect the fortified function,
+ # so skip this test for RISC-V (for now). See https://github.com/lief-project/LIEF/issues/1082.
+ if arch != lief.ARCHITECTURES.RISCV:
+ self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-no-pie','-fno-PIE']), (1, executable + ': failed PIE'))
+ self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-U_FORTIFY_SOURCE']), (1, executable + ': failed FORTIFY'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-znorelro']), (1, executable + ': failed RELRO'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags + ['-Wl,-z,noseparate-code']), (1, executable + ': failed SEPARATE_CODE'))
self.assertEqual(call_security_check(cxx, source, executable, pass_flags), (0, ''))
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py
index 454dbc6bd2..c75a5e1546 100755
--- a/contrib/devtools/test-symbol-check.py
+++ b/contrib/devtools/test-symbol-check.py
@@ -116,7 +116,7 @@ class TestSymbolChecks(unittest.TestCase):
}
''')
- self.assertEqual(call_symbol_check(cxx, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,11.0', '-Wl,11.4']),
+ self.assertEqual(call_symbol_check(cxx, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,13.0', '-Wl,11.4']),
(1, f'{executable}: failed SDK'))
def test_PE(self):
diff --git a/contrib/devtools/test_deterministic_coverage.sh b/contrib/devtools/test_deterministic_coverage.sh
index 23c260b529..885396bb25 100755
--- a/contrib/devtools/test_deterministic_coverage.sh
+++ b/contrib/devtools/test_deterministic_coverage.sh
@@ -81,7 +81,7 @@ if ! command -v gcovr > /dev/null; then
fi
if [[ ! -e ${TEST_BITCOIN_BINARY} ]]; then
- echo "Error: Executable ${TEST_BITCOIN_BINARY} not found. Run \"./configure --enable-lcov\" and compile."
+ echo "Error: Executable ${TEST_BITCOIN_BINARY} not found. Run \"cmake -B build -DCMAKE_BUILD_TYPE=Coverage\" and compile."
exit 1
fi
@@ -90,7 +90,7 @@ get_file_suffix_count() {
}
if [[ $(get_file_suffix_count gcno) == 0 ]]; then
- echo "Error: Could not find any *.gcno files. The *.gcno files are generated by the compiler. Run \"./configure --enable-lcov\" and re-compile."
+ echo "Error: Could not find any *.gcno files. The *.gcno files are generated by the compiler. Run \"cmake -B build -DCMAKE_BUILD_TYPE=Coverage\" and re-compile."
exit 1
fi
@@ -115,7 +115,7 @@ while [[ ${TEST_RUN_ID} -lt ${N_TEST_RUNS} ]]; do
fi
rm "${TEST_OUTPUT_TEMPFILE}"
if [[ $(get_file_suffix_count gcda) == 0 ]]; then
- echo "Error: Running the test suite did not create any *.gcda files. The gcda files are generated when the instrumented test programs are executed. Run \"./configure --enable-lcov\" and re-compile."
+ echo "Error: Running the test suite did not create any *.gcda files. The gcda files are generated when the instrumented test programs are executed. Run \"cmake -B build -DCMAKE_BUILD_TYPE=Coverage\" and re-compile."
exit 1
fi
GCOVR_TEMPFILE=$(mktemp)
diff --git a/contrib/devtools/test_utxo_snapshots.sh b/contrib/devtools/test_utxo_snapshots.sh
deleted file mode 100755
index ad948d4a14..0000000000
--- a/contrib/devtools/test_utxo_snapshots.sh
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/env bash
-# Demonstrate the creation and usage of UTXO snapshots.
-#
-# A server node starts up, IBDs up to a certain height, then generates a UTXO
-# snapshot at that point.
-#
-# The server then downloads more blocks (to create a diff from the snapshot).
-#
-# We bring a client up, load the UTXO snapshot, and we show the client sync to
-# the "network tip" and then start a background validation of the snapshot it
-# loaded. We see the background validation chainstate removed after validation
-# completes.
-#
-# The shellcheck rule SC2086 (quoted variables) disablements are necessary
-# since this rule needs to be violated in order to get bitcoind to pick up on
-# $EARLY_IBD_FLAGS for the script to work.
-
-export LC_ALL=C
-set -e
-
-BASE_HEIGHT=${1:-30000}
-INCREMENTAL_HEIGHT=20000
-FINAL_HEIGHT=$((BASE_HEIGHT + INCREMENTAL_HEIGHT))
-
-SERVER_DATADIR="$(pwd)/utxodemo-data-server-$BASE_HEIGHT"
-CLIENT_DATADIR="$(pwd)/utxodemo-data-client-$BASE_HEIGHT"
-UTXO_DAT_FILE="$(pwd)/utxo.$BASE_HEIGHT.dat"
-
-# Chosen to try to not interfere with any running bitcoind processes.
-SERVER_PORT=8633
-SERVER_RPC_PORT=8632
-
-CLIENT_PORT=8733
-CLIENT_RPC_PORT=8732
-
-SERVER_PORTS="-port=${SERVER_PORT} -rpcport=${SERVER_RPC_PORT}"
-CLIENT_PORTS="-port=${CLIENT_PORT} -rpcport=${CLIENT_RPC_PORT}"
-
-# Ensure the client exercises all indexes to test that snapshot use works
-# properly with indexes.
-ALL_INDEXES="-txindex -coinstatsindex -blockfilterindex=1"
-
-if ! command -v jq >/dev/null ; then
- echo "This script requires jq to parse JSON RPC output. Please install it."
- echo "(e.g. sudo apt install jq)"
- exit 1
-fi
-
-DUMP_OUTPUT="dumptxoutset-output-$BASE_HEIGHT.json"
-
-finish() {
- echo
- echo "Killing server and client PIDs ($SERVER_PID, $CLIENT_PID) and cleaning up datadirs"
- echo
- rm -f "$UTXO_DAT_FILE" "$DUMP_OUTPUT"
- rm -rf "$SERVER_DATADIR" "$CLIENT_DATADIR"
- kill -9 "$SERVER_PID" "$CLIENT_PID"
-}
-
-trap finish EXIT
-
-# Need to specify these to trick client into accepting server as a peer
-# it can IBD from, otherwise the default values prevent IBD from the server node.
-EARLY_IBD_FLAGS="-maxtipage=9223372036854775207 -minimumchainwork=0x00"
-
-server_rpc() {
- ./src/bitcoin-cli -rpcport=$SERVER_RPC_PORT -datadir="$SERVER_DATADIR" "$@"
-}
-client_rpc() {
- ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir="$CLIENT_DATADIR" "$@"
-}
-server_sleep_til_boot() {
- while ! server_rpc ping >/dev/null 2>&1; do sleep 0.1; done
-}
-client_sleep_til_boot() {
- while ! client_rpc ping >/dev/null 2>&1; do sleep 0.1; done
-}
-server_sleep_til_shutdown() {
- while server_rpc ping >/dev/null 2>&1; do sleep 0.1; done
-}
-
-mkdir -p "$SERVER_DATADIR" "$CLIENT_DATADIR"
-
-echo "Hi, welcome to the assumeutxo demo/test"
-echo
-echo "We're going to"
-echo
-echo " - start up a 'server' node, sync it via mainnet IBD to height ${BASE_HEIGHT}"
-echo " - create a UTXO snapshot at that height"
-echo " - IBD ${INCREMENTAL_HEIGHT} more blocks on top of that"
-echo
-echo "then we'll demonstrate assumeutxo by "
-echo
-echo " - starting another node (the 'client') and loading the snapshot in"
-echo " * first you'll have to modify the code slightly (chainparams) and recompile"
-echo " * don't worry, we'll make it easy"
-echo " - observing the client sync ${INCREMENTAL_HEIGHT} blocks on top of the snapshot from the server"
-echo " - observing the client validate the snapshot chain via background IBD"
-echo
-read -p "Press [enter] to continue" _
-
-echo
-echo "-- Starting the demo. You might want to run the two following commands in"
-echo " separate terminal windows:"
-echo
-echo " watch -n0.1 tail -n 30 $SERVER_DATADIR/debug.log"
-echo " watch -n0.1 tail -n 30 $CLIENT_DATADIR/debug.log"
-echo
-read -p "Press [enter] to continue" _
-
-echo
-echo "-- IBDing the blocks (height=$BASE_HEIGHT) required to the server node..."
-# shellcheck disable=SC2086
-./src/bitcoind -logthreadnames=1 $SERVER_PORTS \
- -datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -stopatheight="$BASE_HEIGHT" >/dev/null
-
-echo
-echo "-- Creating snapshot at ~ height $BASE_HEIGHT ($UTXO_DAT_FILE)..."
-server_sleep_til_shutdown # wait for stopatheight to be hit
-# shellcheck disable=SC2086
-./src/bitcoind -logthreadnames=1 $SERVER_PORTS \
- -datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -connect=0 -listen=0 >/dev/null &
-SERVER_PID="$!"
-
-server_sleep_til_boot
-server_rpc dumptxoutset "$UTXO_DAT_FILE" > "$DUMP_OUTPUT"
-cat "$DUMP_OUTPUT"
-kill -9 "$SERVER_PID"
-
-RPC_BASE_HEIGHT=$(jq -r .base_height < "$DUMP_OUTPUT")
-RPC_AU=$(jq -r .txoutset_hash < "$DUMP_OUTPUT")
-RPC_NCHAINTX=$(jq -r .nchaintx < "$DUMP_OUTPUT")
-RPC_BLOCKHASH=$(jq -r .base_hash < "$DUMP_OUTPUT")
-
-server_sleep_til_shutdown
-
-echo
-echo "-- Now: add the following to CMainParams::m_assumeutxo_data"
-echo " in src/kernel/chainparams.cpp, and recompile:"
-echo
-echo " {.height = ${RPC_BASE_HEIGHT}, .hash_serialized = AssumeutxoHash{uint256{\"${RPC_AU}\"}}, .m_chain_tx_count = ${RPC_NCHAINTX}, .blockhash = consteval_ctor(uint256{\"${RPC_BLOCKHASH}\"})},"
-echo
-echo
-echo "-- IBDing more blocks to the server node (height=$FINAL_HEIGHT) so there is a diff between snapshot and tip..."
-# shellcheck disable=SC2086
-./src/bitcoind $SERVER_PORTS -logthreadnames=1 -datadir="$SERVER_DATADIR" \
- $EARLY_IBD_FLAGS -stopatheight="$FINAL_HEIGHT" >/dev/null
-
-echo
-echo "-- Starting the server node to provide blocks to the client node..."
-# shellcheck disable=SC2086
-./src/bitcoind $SERVER_PORTS -logthreadnames=1 -debug=net -datadir="$SERVER_DATADIR" \
- $EARLY_IBD_FLAGS -connect=0 -listen=1 >/dev/null &
-SERVER_PID="$!"
-server_sleep_til_boot
-
-echo
-echo "-- Okay, what you're about to see is the client starting up and activating the snapshot."
-echo " I'm going to display the top 14 log lines from the client on top of an RPC called"
-echo " getchainstates, which is like getblockchaininfo but for both the snapshot and "
-echo " background validation chainstates."
-echo
-echo " You're going to first see the snapshot chainstate sync to the server's tip, then"
-echo " the background IBD chain kicks in to validate up to the base of the snapshot."
-echo
-echo " Once validation of the snapshot is done, you should see log lines indicating"
-echo " that we've deleted the background validation chainstate."
-echo
-echo " Once everything completes, exit the watch command with CTRL+C."
-echo
-read -p "When you're ready for all this, hit [enter]" _
-
-echo
-echo "-- Starting the client node to get headers from the server, then load the snapshot..."
-# shellcheck disable=SC2086
-./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" \
- -connect=0 -addnode=127.0.0.1:$SERVER_PORT -debug=net $EARLY_IBD_FLAGS >/dev/null &
-CLIENT_PID="$!"
-client_sleep_til_boot
-
-echo
-echo "-- Initial state of the client:"
-client_rpc getchainstates
-
-echo
-echo "-- Loading UTXO snapshot into client. Calling RPC in a loop..."
-while ! client_rpc loadtxoutset "$UTXO_DAT_FILE" ; do sleep 10; done
-
-watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat"
-
-echo
-echo "-- Okay, now I'm going to restart the client to make sure that the snapshot chain reloads "
-echo " as the main chain properly..."
-echo
-echo " Press CTRL+C after you're satisfied to exit the demo"
-echo
-read -p "Press [enter] to continue"
-
-client_sleep_til_boot
-# shellcheck disable=SC2086
-./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" -connect=0 \
- -addnode=127.0.0.1:$SERVER_PORT "$EARLY_IBD_FLAGS" >/dev/null &
-CLIENT_PID="$!"
-client_sleep_til_boot
-
-watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat"
-
-echo
-echo "-- Done!"
diff --git a/contrib/devtools/utxo_snapshot.sh b/contrib/devtools/utxo_snapshot.sh
deleted file mode 100755
index e8781d94d9..0000000000
--- a/contrib/devtools/utxo_snapshot.sh
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2019-2023 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#
-export LC_ALL=C
-
-set -ueo pipefail
-
-NETWORK_DISABLED=false
-
-if (( $# < 3 )); then
- echo 'Usage: utxo_snapshot.sh <generate-at-height> <snapshot-out-path> <bitcoin-cli-call ...>'
- echo
- echo " if <snapshot-out-path> is '-', don't produce a snapshot file but instead print the "
- echo " expected assumeutxo hash"
- echo
- echo 'Examples:'
- echo
- echo " ./contrib/devtools/utxo_snapshot.sh 570000 utxo.dat ./src/bitcoin-cli -datadir=\$(pwd)/testdata"
- echo ' ./contrib/devtools/utxo_snapshot.sh 570000 - ./src/bitcoin-cli'
- exit 1
-fi
-
-GENERATE_AT_HEIGHT="${1}"; shift;
-OUTPUT_PATH="${1}"; shift;
-# Most of the calls we make take a while to run, so pad with a lengthy timeout.
-BITCOIN_CLI_CALL="${*} -rpcclienttimeout=9999999"
-
-# Check if the node is pruned and get the pruned block height
-PRUNED=$( ${BITCOIN_CLI_CALL} getblockchaininfo | awk '/pruneheight/ {print $2}' | tr -d ',' )
-
-if (( GENERATE_AT_HEIGHT < PRUNED )); then
- echo "Error: The requested snapshot height (${GENERATE_AT_HEIGHT}) should be greater than the pruned block height (${PRUNED})."
- exit 1
-fi
-
-# Check current block height to ensure the node has synchronized past the required block
-CURRENT_BLOCK_HEIGHT=$(${BITCOIN_CLI_CALL} getblockcount)
-PIVOT_BLOCK_HEIGHT=$(( GENERATE_AT_HEIGHT + 1 ))
-
-if (( PIVOT_BLOCK_HEIGHT > CURRENT_BLOCK_HEIGHT )); then
- (>&2 echo "Error: The node has not yet synchronized to block height ${PIVOT_BLOCK_HEIGHT}.")
- (>&2 echo "Please wait until the node has synchronized past this block height and try again.")
- exit 1
-fi
-
-# Early exit if file at OUTPUT_PATH already exists
-if [[ -e "$OUTPUT_PATH" ]]; then
- (>&2 echo "Error: $OUTPUT_PATH already exists or is not a valid path.")
- exit 1
-fi
-
-# Validate that the path is correct
-if [[ "${OUTPUT_PATH}" != "-" && ! -d "$(dirname "${OUTPUT_PATH}")" ]]; then
- (>&2 echo "Error: The directory $(dirname "${OUTPUT_PATH}") does not exist.")
- exit 1
-fi
-
-function cleanup {
- (>&2 echo "Restoring chain to original height; this may take a while")
- ${BITCOIN_CLI_CALL} reconsiderblock "${PIVOT_BLOCKHASH}"
-
- if $NETWORK_DISABLED; then
- (>&2 echo "Restoring network activity")
- ${BITCOIN_CLI_CALL} setnetworkactive true
- fi
-}
-
-function early_exit {
- (>&2 echo "Exiting due to Ctrl-C")
- cleanup
- exit 1
-}
-
-# Prompt the user to disable network activity
-read -p "Do you want to disable network activity (setnetworkactive false) before running invalidateblock? (Y/n): " -r
-if [[ "$REPLY" =~ ^[Yy]*$ || -z "$REPLY" ]]; then
- # User input is "Y", "y", or Enter key, proceed with the action
- NETWORK_DISABLED=true
- (>&2 echo "Disabling network activity")
- ${BITCOIN_CLI_CALL} setnetworkactive false
-else
- (>&2 echo "Network activity remains enabled")
-fi
-
-# Block we'll invalidate/reconsider to rewind/fast-forward the chain.
-PIVOT_BLOCKHASH=$($BITCOIN_CLI_CALL getblockhash $(( GENERATE_AT_HEIGHT + 1 )) )
-
-# Trap for normal exit and Ctrl-C
-trap cleanup EXIT
-trap early_exit INT
-
-(>&2 echo "Rewinding chain back to height ${GENERATE_AT_HEIGHT} (by invalidating ${PIVOT_BLOCKHASH}); this may take a while")
-${BITCOIN_CLI_CALL} invalidateblock "${PIVOT_BLOCKHASH}"
-
-if [[ "${OUTPUT_PATH}" = "-" ]]; then
- (>&2 echo "Generating txoutset info...")
- ${BITCOIN_CLI_CALL} gettxoutsetinfo | grep hash_serialized_3 | sed 's/^.*: "\(.\+\)\+",/\1/g'
-else
- (>&2 echo "Generating UTXO snapshot...")
- ${BITCOIN_CLI_CALL} dumptxoutset "${OUTPUT_PATH}"
-fi
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 1ffc22a76b..3184cd4afe 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -230,8 +230,6 @@ case "$HOST" in
*mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;;
esac
-# Make $HOST-specific native binaries from depends available in $PATH
-export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}"
mkdir -p "$DISTSRC"
(
cd "$DISTSRC"
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 5f62765a65..3da98cf651 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -434,6 +434,7 @@ inspecting signatures in Mach-O binaries.")
"--enable-default-ssp=yes",
"--enable-default-pie=yes",
"--enable-standard-branch-protection=yes",
+ "--enable-cet=yes",
building-on)))
((#:phases phases)
`(modify-phases ,phases
diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md
index fe469aee9e..10945e5b68 100644
--- a/contrib/seeds/README.md
+++ b/contrib/seeds/README.md
@@ -9,7 +9,7 @@ changes its default return value, as those are the services which seeds are adde
to addrman with).
The seeds compiled into the release are created from sipa's, achow101's and luke-jr's
-DNS seed, virtu's crawler, and fjahr's community AS map data. Run the following commands
+DNS seed, virtu's crawler, and asmap community AS map data. Run the following commands
from the `/contrib/seeds` directory:
```
@@ -18,7 +18,7 @@ curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt
curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt
curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt
curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt
-curl https://raw.githubusercontent.com/fjahr/asmap-data/main/latest_asmap.dat > asmap-filled.dat
+curl https://raw.githubusercontent.com/asmap/asmap-data/main/latest_asmap.dat > asmap-filled.dat
python3 makeseeds.py -a asmap-filled.dat -s seeds_main.txt > nodes_main.txt
python3 makeseeds.py -a asmap-filled.dat -s seeds_test.txt > nodes_test.txt
# TODO: Uncomment when a seeder publishes seeds.txt.gz for testnet4
diff --git a/contrib/signet/README.md b/contrib/signet/README.md
index 706b296c54..5fcd8944e6 100644
--- a/contrib/signet/README.md
+++ b/contrib/signet/README.md
@@ -23,9 +23,8 @@ miner
You will first need to pick a difficulty target. Since signet chains are primarily protected by a signature rather than proof of work, there is no need to spend as much energy as possible mining; however, you may wish to spend more time than the absolute minimum. The calibrate subcommand can be used to pick a target appropriate for your hardware, e.g.:
- cd src/
- MINER="../contrib/signet/miner"
- GRIND="./bitcoin-util grind"
+ MINER="./contrib/signet/miner"
+ GRIND="./build/src/bitcoin-util grind"
$MINER calibrate --grind-cmd="$GRIND"
nbits=1e00f403 for 25s average mining time
@@ -33,7 +32,7 @@ It defaults to estimating an nbits value resulting in 25s average time to find a
To mine the first block in your custom chain, you can run:
- CLI="./bitcoin-cli -conf=mysignet.conf"
+ CLI="./build/src/bitcoin-cli -conf=mysignet.conf"
ADDR=$($CLI -signet getnewaddress)
NBITS=1e00f403
$MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=$NBITS
diff --git a/contrib/signet/miner b/contrib/signet/miner
index 4216ada5fa..3c90fe96a1 100755
--- a/contrib/signet/miner
+++ b/contrib/signet/miner
@@ -21,7 +21,7 @@ sys.path.insert(0, PATH_BASE_TEST_FUNCTIONAL)
from test_framework.blocktools import get_witness_script, script_BIP34_coinbase_height # noqa: E402
from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, from_binary, from_hex, ser_string, ser_uint256, tx_from_hex # noqa: E402
from test_framework.psbt import PSBT, PSBTMap, PSBT_GLOBAL_UNSIGNED_TX, PSBT_IN_FINAL_SCRIPTSIG, PSBT_IN_FINAL_SCRIPTWITNESS, PSBT_IN_NON_WITNESS_UTXO, PSBT_IN_SIGHASH_TYPE # noqa: E402
-from test_framework.script import CScriptOp # noqa: E402
+from test_framework.script import CScript, CScriptOp # noqa: E402
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
@@ -32,12 +32,6 @@ SIGNET_HEADER = b"\xec\xc7\xda\xa2"
PSBT_SIGNET_BLOCK = b"\xfc\x06signetb" # proprietary PSBT global field holding the block being signed
RE_MULTIMINER = re.compile(r"^(\d+)(-(\d+))?/(\d+)$")
-def create_coinbase(height, value, spk):
- cb = CTransaction()
- cb.vin = [CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff)]
- cb.vout = [CTxOut(value, spk)]
- return cb
-
def signet_txs(block, challenge):
# assumes signet solution has not been added yet so does not need
# to be removed
@@ -72,18 +66,7 @@ def signet_txs(block, challenge):
return spend, to_spend
-def do_createpsbt(block, signme, spendme):
- psbt = PSBT()
- psbt.g = PSBTMap( {PSBT_GLOBAL_UNSIGNED_TX: signme.serialize(),
- PSBT_SIGNET_BLOCK: block.serialize()
- } )
- psbt.i = [ PSBTMap( {PSBT_IN_NON_WITNESS_UTXO: spendme.serialize(),
- PSBT_IN_SIGHASH_TYPE: bytes([1,0,0,0])})
- ]
- psbt.o = [ PSBTMap() ]
- return psbt.to_base64()
-
-def do_decode_psbt(b64psbt):
+def decode_psbt(b64psbt):
psbt = PSBT.from_base64(b64psbt)
assert len(psbt.tx.vin) == 1
@@ -110,11 +93,17 @@ def finish_block(block, signet_solution, grind_cmd):
block.rehash()
return block
-def generate_psbt(tmpl, reward_spk, *, blocktime=None):
+def generate_psbt(tmpl, reward_spk, *, blocktime=None, poolid=None):
signet_spk = tmpl["signet_challenge"]
signet_spk_bin = bytes.fromhex(signet_spk)
- cbtx = create_coinbase(height=tmpl["height"], value=tmpl["coinbasevalue"], spk=reward_spk)
+ scriptSig = script_BIP34_coinbase_height(tmpl["height"])
+ if poolid is not None:
+ scriptSig = CScript(b"" + scriptSig + CScriptOp.encode_op_pushdata(poolid))
+
+ cbtx = CTransaction()
+ cbtx.vin = [CTxIn(COutPoint(0, 0xffffffff), scriptSig, 0xffffffff)]
+ cbtx.vout = [CTxOut(tmpl["coinbasevalue"], reward_spk)]
cbtx.vin[0].nSequence = 2**32-2
cbtx.rehash()
@@ -137,29 +126,23 @@ def generate_psbt(tmpl, reward_spk, *, blocktime=None):
signme, spendme = signet_txs(block, signet_spk_bin)
- return do_createpsbt(block, signme, spendme)
-
-def get_reward_address(args, height):
- if args.address is not None:
- return args.address
-
- if '*' not in args.descriptor:
- addr = json.loads(args.bcli("deriveaddresses", args.descriptor))[0]
- args.address = addr
- return addr
-
- remove = [k for k in args.derived_addresses.keys() if k+20 <= height]
- for k in remove:
- del args.derived_addresses[k]
-
- addr = args.derived_addresses.get(height, None)
- if addr is None:
- addrs = json.loads(args.bcli("deriveaddresses", args.descriptor, "[%d,%d]" % (height, height+20)))
- addr = addrs[0]
- for k, a in enumerate(addrs):
- args.derived_addresses[height+k] = a
+ psbt = PSBT()
+ psbt.g = PSBTMap( {PSBT_GLOBAL_UNSIGNED_TX: signme.serialize(),
+ PSBT_SIGNET_BLOCK: block.serialize()
+ } )
+ psbt.i = [ PSBTMap( {PSBT_IN_NON_WITNESS_UTXO: spendme.serialize(),
+ PSBT_IN_SIGHASH_TYPE: bytes([1,0,0,0])})
+ ]
+ psbt.o = [ PSBTMap() ]
+ return psbt.to_base64()
- return addr
+def get_poolid(args):
+ if args.poolid is not None:
+ return args.poolid.encode('utf8')
+ elif args.poolnum is not None:
+ return b"/signet:%d/" % (args.poolnum)
+ else:
+ return None
def get_reward_addr_spk(args, height):
assert args.address is not None or args.descriptor is not None
@@ -167,7 +150,20 @@ def get_reward_addr_spk(args, height):
if hasattr(args, "reward_spk"):
return args.address, args.reward_spk
- reward_addr = get_reward_address(args, height)
+ if args.address is not None:
+ reward_addr = args.address
+ elif '*' not in args.descriptor:
+ reward_addr = args.address = json.loads(args.bcli("deriveaddresses", args.descriptor))[0]
+ else:
+ remove = [k for k in args.derived_addresses.keys() if k+20 <= height]
+ for k in remove:
+ del args.derived_addresses[k]
+ if height not in args.derived_addresses:
+ addrs = json.loads(args.bcli("deriveaddresses", args.descriptor, "[%d,%d]" % (height, height+20)))
+ for k, a in enumerate(addrs):
+ args.derived_addresses[height+k] = a
+ reward_addr = args.derived_addresses[height]
+
reward_spk = bytes.fromhex(json.loads(args.bcli("getaddressinfo", reward_addr))["scriptPubKey"])
if args.address is not None:
# will always be the same, so cache
@@ -176,13 +172,14 @@ def get_reward_addr_spk(args, height):
return reward_addr, reward_spk
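
When a ranged descriptor pays the block reward, the code above keeps a rolling 21-address window rather than calling `deriveaddresses` once per block, evicting entries more than 20 blocks behind the requested height. A sketch of that caching pattern follows (not part of the patch); `derive()` is a made-up placeholder standing in for the `deriveaddresses` RPC:

```python
# Sketch only: rolling lookahead cache for descriptor-derived reward addresses.
def derive(descriptor: str, start: int, end: int) -> list[str]:
    return [f"{descriptor}#{i}" for i in range(start, end + 1)]  # placeholder for the RPC

cache: dict[int, str] = {}

def reward_address(descriptor: str, height: int) -> str:
    for k in [k for k in cache if k + 20 <= height]:  # evict stale entries
        del cache[k]
    if height not in cache:
        for offset, addr in enumerate(derive(descriptor, height, height + 20)):
            cache[height + offset] = addr
    return cache[height]

print(reward_address("wpkh(xpub.../0/*)", 500))
print(reward_address("wpkh(xpub.../0/*)", 510))  # served from the cache
```
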
def do_genpsbt(args):
+ poolid = get_poolid(args)
tmpl = json.load(sys.stdin)
_, reward_spk = get_reward_addr_spk(args, tmpl["height"])
- psbt = generate_psbt(tmpl, reward_spk)
+ psbt = generate_psbt(tmpl, reward_spk, poolid=poolid)
print(psbt)
def do_solvepsbt(args):
- block, signet_solution = do_decode_psbt(sys.stdin.read())
+ block, signet_solution = decode_psbt(sys.stdin.read())
block = finish_block(block, signet_solution, args.grind_cmd)
print(block.serialize().hex())
@@ -225,44 +222,122 @@ def seconds_to_hms(s):
out = "-" + out
return out
-def next_block_delta(last_nbits, last_hash, ultimate_target, do_poisson, max_interval):
- # strategy:
- # 1) work out how far off our desired target we are
- # 2) cap it to a factor of 4 since that's the best we can do in a single retarget period
- # 3) use that to work out the desired average interval in this retarget period
- # 4) if doing poisson, use the last hash to pick a uniformly random number in [0,1), and work out a random multiplier to vary the average by
- # 5) cap the resulting interval between 1 second and 1 hour to avoid extremes
-
+class Generate:
INTERVAL = 600.0*2016/2015 # 10 minutes, adjusted for the off-by-one bug
- current_target = nbits_to_target(last_nbits)
- retarget_factor = ultimate_target / current_target
- retarget_factor = max(0.25, min(retarget_factor, 4.0))
- avg_interval = INTERVAL * retarget_factor
+ def __init__(self, multiminer=None, ultimate_target=None, poisson=False, max_interval=1800,
+ standby_delay=0, backup_delay=0, set_block_time=None,
+ poolid=None):
+ if multiminer is None:
+ multiminer = (0, 1, 1)
+ (self.multi_low, self.multi_high, self.multi_period) = multiminer
+ self.ultimate_target = ultimate_target
+ self.poisson = poisson
+ self.max_interval = max_interval
+ self.standby_delay = standby_delay
+ self.backup_delay = backup_delay
+ self.set_block_time = set_block_time
+ self.poolid = poolid
+
+ def next_block_delta(self, last_nbits, last_hash):
+ # strategy:
+ # 1) work out how far off our desired target we are
+ # 2) cap it to a factor of 4 since that's the best we can do in a single retarget period
+ # 3) use that to work out the desired average interval in this retarget period
+ # 4) if doing poisson, use the last hash to pick a uniformly random number in [0,1), and work out a random multiplier to vary the average by
+ # 5) cap the resulting interval between 1 second and 1 hour to avoid extremes
+
+ current_target = nbits_to_target(last_nbits)
+ retarget_factor = self.ultimate_target / current_target
+ retarget_factor = max(0.25, min(retarget_factor, 4.0))
+
+ avg_interval = self.INTERVAL * retarget_factor
+
+ if self.poisson:
+ det_rand = int(last_hash[-8:], 16) * 2**-32
+ this_interval_variance = -math.log1p(-det_rand)
+ else:
+ this_interval_variance = 1
- if do_poisson:
- det_rand = int(last_hash[-8:], 16) * 2**-32
- this_interval_variance = -math.log1p(-det_rand)
- else:
- this_interval_variance = 1
+ this_interval = avg_interval * this_interval_variance
+ this_interval = max(1, min(this_interval, self.max_interval))
+
+ return this_interval
+
+ def next_block_is_mine(self, last_hash):
+ det_rand = int(last_hash[-16:-8], 16)
+ return self.multi_low <= (det_rand % self.multi_period) < self.multi_high
+
+ def next_block_time(self, now, bestheader, is_first_block):
+ if self.set_block_time is not None:
+ logging.debug("Setting start time to %d", self.set_block_time)
+ self.mine_time = self.set_block_time
+ self.action_time = now
+ self.is_mine = True
+ elif bestheader["height"] == 0:
+ time_delta = self.INTERVAL * 100 # plenty of time to mine 100 blocks
+ logging.info("Backdating time for first block to %d minutes ago" % (time_delta/60))
+ self.mine_time = now - time_delta
+ self.action_time = now
+ self.is_mine = True
+ else:
+ time_delta = self.next_block_delta(int(bestheader["bits"], 16), bestheader["hash"])
+ self.mine_time = bestheader["time"] + time_delta
+
+ self.is_mine = self.next_block_is_mine(bestheader["hash"])
+
+ self.action_time = self.mine_time
+ if not self.is_mine:
+ self.action_time += self.backup_delay
- this_interval = avg_interval * this_interval_variance
- this_interval = max(1, min(this_interval, max_interval))
+ if self.standby_delay > 0:
+ self.action_time += self.standby_delay
+ elif is_first_block:
+ # for non-standby, always mine immediately on startup,
+ # even if the next block shouldn't be ours
+ self.action_time = now
- return this_interval
+ # don't want fractional times so round down
+ self.mine_time = int(self.mine_time)
+ self.action_time = int(self.action_time)
-def next_block_is_mine(last_hash, my_blocks):
- det_rand = int(last_hash[-16:-8], 16)
- return my_blocks[0] <= (det_rand % my_blocks[2]) < my_blocks[1]
+ # can't mine a block 2h in the future; 1h55m for some safety
+ self.action_time = max(self.action_time, self.mine_time - 6900)
+
+ def gbt(self, bcli, bestblockhash, now):
+ tmpl = json.loads(bcli("getblocktemplate", '{"rules":["signet","segwit"]}'))
+ if tmpl["previousblockhash"] != bestblockhash:
+            logging.warning("GBT based off unexpected block (%s not %s), retrying", tmpl["previousblockhash"], bestblockhash)
+ time.sleep(1)
+ return None
+
+ if tmpl["mintime"] > self.mine_time:
+ logging.info("Updating block time from %d to %d", self.mine_time, tmpl["mintime"])
+ self.mine_time = tmpl["mintime"]
+ if self.mine_time > now:
+ logging.error("GBT mintime is in the future: %d is %d seconds later than %d", self.mine_time, (self.mine_time-now), now)
+ return None
+
+ return tmpl
+
+ def mine(self, bcli, grind_cmd, tmpl, reward_spk):
+ psbt = generate_psbt(tmpl, reward_spk, blocktime=self.mine_time, poolid=self.poolid)
+ input_stream = os.linesep.join([psbt, "true", "ALL"]).encode('utf8')
+ psbt_signed = json.loads(bcli("-stdin", "walletprocesspsbt", input=input_stream))
+ if not psbt_signed.get("complete",False):
+ logging.debug("Generated PSBT: %s" % (psbt,))
+ sys.stderr.write("PSBT signing failed\n")
+ return None
+ block, signet_solution = decode_psbt(psbt_signed["psbt"])
+ return finish_block(block, signet_solution, grind_cmd)
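
The `Generate` class gathers the deterministic scheduling that used to live in free functions: the next block's spacing is derived from how far the previous target sits from the configured `--nbits` target (capped to one retarget step), optionally jittered by an exponential variate taken from the previous block hash, and a separate slice of that hash decides whose `--multiminer` share the block belongs to. Here is a standalone sketch of that math with arbitrary example values (not part of the patch):

```python
import math

INTERVAL = 600.0 * 2016 / 2015  # 10-minute spacing, adjusted for the off-by-one

def nbits_to_target(nbits: int) -> int:
    shift = (nbits >> 24) & 0xff
    return (nbits & 0x00ffffff) * 2 ** (8 * (shift - 3))

def block_delta(last_nbits: int, last_hash: str, ultimate_target: int,
                poisson: bool = False, max_interval: int = 1800) -> float:
    retarget_factor = ultimate_target / nbits_to_target(last_nbits)
    retarget_factor = max(0.25, min(retarget_factor, 4.0))
    avg_interval = INTERVAL * retarget_factor
    if poisson:
        det_rand = int(last_hash[-8:], 16) * 2**-32   # uniform in [0, 1)
        variance = -math.log1p(-det_rand)             # exponential variate
    else:
        variance = 1
    return max(1, min(avg_interval * variance, max_interval))

def is_mine(last_hash: str, low: int = 0, high: int = 1, period: int = 1) -> bool:
    # a different slice of the hash picks which --multiminer share mines next
    return low <= (int(last_hash[-16:-8], 16) % period) < high

last_hash = "00" * 24 + "0123456789abcdef"
print(block_delta(0x1e0377ae, last_hash, nbits_to_target(0x1e0377ae), poisson=True))
print(is_mine(last_hash, low=0, high=2, period=5))
```
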
def do_generate(args):
- if args.max_blocks is not None:
- if args.ongoing:
- logging.error("Cannot specify both --ongoing and --max-blocks")
- return 1
+ if args.set_block_time is not None:
+ max_blocks = 1
+ elif args.max_blocks is not None:
if args.max_blocks < 1:
- logging.error("N must be a positive integer")
+            logging.error("--max-blocks must specify a positive integer")
return 1
max_blocks = args.max_blocks
elif args.ongoing:
@@ -270,17 +345,11 @@ def do_generate(args):
else:
max_blocks = 1
- if args.set_block_time is not None and max_blocks != 1:
- logging.error("Cannot specify --ongoing or --max-blocks > 1 when using --set-block-time")
- return 1
if args.set_block_time is not None and args.set_block_time < 0:
args.set_block_time = time.time()
logging.info("Treating negative block time as current time (%d)" % (args.set_block_time))
if args.min_nbits:
- if args.nbits is not None:
- logging.error("Cannot specify --nbits and --min-nbits")
- return 1
args.nbits = "1e0377ae"
logging.info("Using nbits=%s" % (args.nbits))
@@ -312,8 +381,13 @@ def do_generate(args):
logging.error("--max-interval must be at least 960 (16 minutes)")
return 1
+ poolid = get_poolid(args)
+
ultimate_target = nbits_to_target(int(args.nbits,16))
+ gen = Generate(multiminer=my_blocks, ultimate_target=ultimate_target, poisson=args.poisson, max_interval=args.max_interval,
+ standby_delay=args.standby_delay, backup_delay=args.backup_delay, set_block_time=args.set_block_time, poolid=poolid)
+
mined_blocks = 0
bestheader = {"hash": None}
lastheader = None
@@ -328,104 +402,55 @@ def do_generate(args):
if lastheader is None:
lastheader = bestheader["hash"]
elif bestheader["hash"] != lastheader:
- next_delta = next_block_delta(int(bestheader["bits"], 16), bestheader["hash"], ultimate_target, args.poisson, args.max_interval)
+ next_delta = gen.next_block_delta(int(bestheader["bits"], 16), bestheader["hash"])
next_delta += bestheader["time"] - time.time()
- next_is_mine = next_block_is_mine(bestheader["hash"], my_blocks)
+ next_is_mine = gen.next_block_is_mine(bestheader["hash"])
logging.info("Received new block at height %d; next in %s (%s)", bestheader["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup"))
lastheader = bestheader["hash"]
# when is the next block due to be mined?
now = time.time()
- if args.set_block_time is not None:
- logging.debug("Setting start time to %d", args.set_block_time)
- mine_time = args.set_block_time
- action_time = now
- is_mine = True
- elif bestheader["height"] == 0:
- time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson, args.max_interval)
- time_delta *= 100 # 100 blocks
- logging.info("Backdating time for first block to %d minutes ago" % (time_delta/60))
- mine_time = now - time_delta
- action_time = now
- is_mine = True
- else:
- time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson, args.max_interval)
- mine_time = bestheader["time"] + time_delta
-
- is_mine = next_block_is_mine(bci["bestblockhash"], my_blocks)
-
- action_time = mine_time
- if not is_mine:
- action_time += args.backup_delay
-
- if args.standby_delay > 0:
- action_time += args.standby_delay
- elif mined_blocks == 0:
- # for non-standby, always mine immediately on startup,
- # even if the next block shouldn't be ours
- action_time = now
-
- # don't want fractional times so round down
- mine_time = int(mine_time)
- action_time = int(action_time)
-
- # can't mine a block 2h in the future; 1h55m for some safety
- action_time = max(action_time, mine_time - 6900)
+ gen.next_block_time(now, bestheader, (mined_blocks == 0))
# ready to go? otherwise sleep and check for new block
- if now < action_time:
- sleep_for = min(action_time - now, 60)
- if mine_time < now:
+ if now < gen.action_time:
+ sleep_for = min(gen.action_time - now, 60)
+ if gen.mine_time < now:
# someone else might have mined the block,
# so check frequently, so we don't end up late
# mining the next block if it's ours
sleep_for = min(20, sleep_for)
- minestr = "mine" if is_mine else "backup"
- logging.debug("Sleeping for %s, next block due in %s (%s)" % (seconds_to_hms(sleep_for), seconds_to_hms(mine_time - now), minestr))
+ minestr = "mine" if gen.is_mine else "backup"
+ logging.debug("Sleeping for %s, next block due in %s (%s)" % (seconds_to_hms(sleep_for), seconds_to_hms(gen.mine_time - now), minestr))
time.sleep(sleep_for)
continue
# gbt
- tmpl = json.loads(args.bcli("getblocktemplate", '{"rules":["signet","segwit"]}'))
- if tmpl["previousblockhash"] != bci["bestblockhash"]:
- logging.warning("GBT based off unexpected block (%s not %s), retrying", tmpl["previousblockhash"], bci["bestblockhash"])
- time.sleep(1)
+ tmpl = gen.gbt(args.bcli, bci["bestblockhash"], now)
+ if tmpl is None:
continue
logging.debug("GBT template: %s", tmpl)
- if tmpl["mintime"] > mine_time:
- logging.info("Updating block time from %d to %d", mine_time, tmpl["mintime"])
- mine_time = tmpl["mintime"]
- if mine_time > now:
- logging.error("GBT mintime is in the future: %d is %d seconds later than %d", mine_time, (mine_time-now), now)
- return 1
-
# address for reward
reward_addr, reward_spk = get_reward_addr_spk(args, tmpl["height"])
# mine block
- logging.debug("Mining block delta=%s start=%s mine=%s", seconds_to_hms(mine_time-bestheader["time"]), mine_time, is_mine)
+ logging.debug("Mining block delta=%s start=%s mine=%s", seconds_to_hms(gen.mine_time-bestheader["time"]), gen.mine_time, gen.is_mine)
mined_blocks += 1
- psbt = generate_psbt(tmpl, reward_spk, blocktime=mine_time)
- input_stream = os.linesep.join([psbt, "true", "ALL"]).encode('utf8')
- psbt_signed = json.loads(args.bcli("-stdin", "walletprocesspsbt", input=input_stream))
- if not psbt_signed.get("complete",False):
- logging.debug("Generated PSBT: %s" % (psbt,))
- sys.stderr.write("PSBT signing failed\n")
+ block = gen.mine(args.bcli, args.grind_cmd, tmpl, reward_spk)
+ if block is None:
return 1
- block, signet_solution = do_decode_psbt(psbt_signed["psbt"])
- block = finish_block(block, signet_solution, args.grind_cmd)
# submit block
r = args.bcli("-stdin", "submitblock", input=block.serialize().hex().encode('utf8'))
# report
- bstr = "block" if is_mine else "backup block"
+ bstr = "block" if gen.is_mine else "backup block"
- next_delta = next_block_delta(block.nBits, block.hash, ultimate_target, args.poisson, args.max_interval)
+ next_delta = gen.next_block_delta(block.nBits, block.hash)
next_delta += block.nTime - time.time()
- next_is_mine = next_block_is_mine(block.hash, my_blocks)
+ next_is_mine = gen.next_block_is_mine(block.hash)
logging.debug("Block hash %s payout to %s", block.hash, reward_addr)
logging.info("Mined %s at height %d; next in %s (%s)", bstr, tmpl["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup"))
@@ -492,11 +517,13 @@ def main():
generate = cmds.add_parser("generate", help="Mine blocks")
generate.set_defaults(fn=do_generate)
- generate.add_argument("--ongoing", action="store_true", help="Keep mining blocks")
- generate.add_argument("--max-blocks", default=None, type=int, help="Max blocks to mine (default=1)")
- generate.add_argument("--set-block-time", default=None, type=int, help="Set block time (unix timestamp)")
- generate.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)")
- generate.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)")
+ howmany = generate.add_mutually_exclusive_group()
+ howmany.add_argument("--ongoing", action="store_true", help="Keep mining blocks")
+ howmany.add_argument("--max-blocks", default=None, type=int, help="Max blocks to mine (default=1)")
+ howmany.add_argument("--set-block-time", default=None, type=int, help="Set block time (unix timestamp); implies --max-blocks=1")
+ nbit_target = generate.add_mutually_exclusive_group()
+ nbit_target.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)")
+ nbit_target.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)")
generate.add_argument("--poisson", action="store_true", help="Simulate randomised block times")
generate.add_argument("--multiminer", default=None, type=str, help="Specify which set of blocks to mine (eg: 1-40/100 for the first 40%%, 2/3 for the second 3rd)")
generate.add_argument("--backup-delay", default=300, type=int, help="Seconds to delay before mining blocks reserved for other miners (default=300)")
@@ -505,12 +532,17 @@ def main():
calibrate = cmds.add_parser("calibrate", help="Calibrate difficulty")
calibrate.set_defaults(fn=do_calibrate)
- calibrate.add_argument("--nbits", type=str, default=None)
- calibrate.add_argument("--seconds", type=int, default=None)
+ calibrate_by = calibrate.add_mutually_exclusive_group()
+ calibrate_by.add_argument("--nbits", type=str, default=None)
+ calibrate_by.add_argument("--seconds", type=int, default=None)
for sp in [genpsbt, generate]:
- sp.add_argument("--address", default=None, type=str, help="Address for block reward payment")
- sp.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment")
+ payto = sp.add_mutually_exclusive_group(required=True)
+ payto.add_argument("--address", default=None, type=str, help="Address for block reward payment")
+ payto.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment")
+ pool = sp.add_mutually_exclusive_group()
+ pool.add_argument("--poolnum", default=None, type=int, help="Identify blocks that you mine")
+ pool.add_argument("--poolid", default=None, type=str, help="Identify blocks that you mine (eg: /signet:1/)")
for sp in [solvepsbt, generate, calibrate]:
sp.add_argument("--grind-cmd", default=None, type=str, required=(sp==calibrate), help="Command to grind a block header for proof-of-work")
@@ -520,12 +552,6 @@ def main():
args.bcli = lambda *a, input=b"", **kwargs: bitcoin_cli(args.cli.split(" "), list(a), input=input, **kwargs)
if hasattr(args, "address") and hasattr(args, "descriptor"):
- if args.address is None and args.descriptor is None:
- sys.stderr.write("Must specify --address or --descriptor\n")
- return 1
- elif args.address is not None and args.descriptor is not None:
- sys.stderr.write("Only specify one of --address or --descriptor\n")
- return 1
args.derived_addresses = {}
if args.debug:
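
The command-line handling above drops the hand-written "cannot specify both" checks in favour of argparse mutually exclusive groups, which reject conflicting options (and, with `required=True`, enforce that exactly one of `--address`/`--descriptor` is given) before any command function runs. A simplified, hypothetical parser showing the pattern (not the miner's real CLI):

```python
import argparse

parser = argparse.ArgumentParser()
payto = parser.add_mutually_exclusive_group(required=True)  # exactly one of the two
payto.add_argument("--address")
payto.add_argument("--descriptor")
howmany = parser.add_mutually_exclusive_group()             # at most one of the two
howmany.add_argument("--ongoing", action="store_true")
howmany.add_argument("--max-blocks", type=int)

print(parser.parse_args(["--address", "tb1q...", "--ongoing"]))
# parser.parse_args(["--address", "a", "--descriptor", "d"])  # would exit with an error
```
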
diff --git a/depends/Makefile b/depends/Makefile
index ad1fb2b049..f1dc300b7a 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -42,7 +42,6 @@ NO_WALLET ?=
NO_ZMQ ?=
NO_UPNP ?=
NO_USDT ?=
-NO_NATPMP ?=
MULTIPROCESS ?=
LTO ?=
NO_HARDEN ?=
@@ -159,13 +158,12 @@ sqlite_packages_$(NO_SQLITE) = $(sqlite_packages)
wallet_packages_$(NO_WALLET) = $(bdb_packages_) $(sqlite_packages_)
upnp_packages_$(NO_UPNP) = $(upnp_packages)
-natpmp_packages_$(NO_NATPMP) = $(natpmp_packages)
zmq_packages_$(NO_ZMQ) = $(zmq_packages)
multiprocess_packages_$(MULTIPROCESS) = $(multiprocess_packages)
usdt_packages_$(NO_USDT) = $(usdt_$(host_os)_packages)
-packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) $(boost_packages_) $(libevent_packages_) $(qt_packages_) $(wallet_packages_) $(upnp_packages_) $(natpmp_packages_) $(usdt_packages_)
+packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) $(boost_packages_) $(libevent_packages_) $(qt_packages_) $(wallet_packages_) $(upnp_packages_) $(usdt_packages_)
native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages)
ifneq ($(zmq_packages_),)
@@ -191,6 +189,7 @@ $(host_prefix)/.stamp_$(final_build_id): $(native_packages) $(packages)
echo copying packages: $^
echo to: $(@D)
cd $(@D); $(foreach package,$^, $(build_TAR) xf $($(package)_cached); )
+ echo To build Bitcoin Core with these packages, pass \'--toolchain $(@D)/toolchain.cmake\' to the first CMake invocation.
touch $@
ifeq ($(host),$(build))
@@ -233,7 +232,6 @@ $(host_prefix)/toolchain.cmake : toolchain.cmake.in $(host_prefix)/.stamp_$(fina
-e 's|@bdb_packages@|$(bdb_packages_)|' \
-e 's|@sqlite_packages@|$(sqlite_packages_)|' \
-e 's|@upnp_packages@|$(upnp_packages_)|' \
- -e 's|@natpmp_packages@|$(natpmp_packages_)|' \
-e 's|@usdt_packages@|$(usdt_packages_)|' \
-e 's|@no_harden@|$(NO_HARDEN)|' \
-e 's|@multiprocess@|$(MULTIPROCESS)|' \
diff --git a/depends/README.md b/depends/README.md
index 19d704a50c..4ef7247ea4 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -41,7 +41,7 @@ The paths are automatically configured and no other options are needed.
#### Common
- apt install automake bison cmake curl libtool make patch pkg-config python3 xz-utils
+ apt install bison cmake curl make patch pkg-config python3 xz-utils
#### For macOS cross compilation
@@ -54,7 +54,7 @@ For more information, see [SDK Extraction](../contrib/macdeploy/README.md#sdk-ex
#### For Win64 cross compilation
-- see [build-windows.md](../doc/build-windows.md#cross-compilation-for-ubuntu-and-windows-subsystem-for-linux)
+ apt install g++-mingw-w64-x86-64-posix
#### For linux (including i386, ARM) cross compilation
@@ -113,9 +113,8 @@ The following can be set when running make: `make FOO=bar`
- `NO_BDB`: Don't download/build/cache BerkeleyDB
- `NO_SQLITE`: Don't download/build/cache SQLite
- `NO_UPNP`: Don't download/build/cache packages needed for enabling UPnP
-- `NO_NATPMP`: Don't download/build/cache packages needed for enabling NAT-PMP
- `NO_USDT`: Don't download/build/cache packages needed for enabling USDT tracepoints
-- `MULTIPROCESS`: Build libmultiprocess (experimental, requires CMake)
+- `MULTIPROCESS`: Build libmultiprocess (experimental)
- `DEBUG`: Disable some optimizations and enable more runtime checking
- `HOST_ID_SALT`: Optional salt to use when generating host package ids
- `BUILD_ID_SALT`: Optional salt to use when generating build package ids
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index a27d8b323b..4659d52912 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -1,4 +1,4 @@
-OSX_MIN_VERSION=11.0
+OSX_MIN_VERSION=13.0
OSX_SDK_VERSION=14.0
XCODE_VERSION=15.0
XCODE_BUILD_ID=15A240d
diff --git a/depends/hosts/mingw32.mk b/depends/hosts/mingw32.mk
index c09f7b1e3a..755d7aebe4 100644
--- a/depends/hosts/mingw32.mk
+++ b/depends/hosts/mingw32.mk
@@ -1,3 +1,6 @@
+ifneq ($(shell $(SHELL) $(.SHELLFLAGS) "command -v $(host)-gcc-posix"),)
+mingw32_CC := $(host)-gcc-posix
+endif
ifneq ($(shell $(SHELL) $(.SHELLFLAGS) "command -v $(host)-g++-posix"),)
mingw32_CXX := $(host)-g++-posix
endif
diff --git a/depends/packages/capnp.mk b/depends/packages/capnp.mk
index 6d792db711..0c211cbc45 100644
--- a/depends/packages/capnp.mk
+++ b/depends/packages/capnp.mk
@@ -9,6 +9,7 @@ define $(package)_set_vars :=
$(package)_config_opts := -DBUILD_TESTING=OFF
$(package)_config_opts += -DWITH_OPENSSL=OFF
$(package)_config_opts += -DWITH_ZLIB=OFF
+ $(package)_cxxflags += -ffile-prefix-map=$$($(package)_extract_dir)=/usr
endef
define $(package)_config_cmds
diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk
index 24e940eaa0..4c05e8a0a7 100644
--- a/depends/packages/libevent.mk
+++ b/depends/packages/libevent.mk
@@ -14,6 +14,7 @@ define $(package)_set_vars
$(package)_config_opts=-DEVENT__DISABLE_BENCHMARK=ON -DEVENT__DISABLE_OPENSSL=ON
$(package)_config_opts+=-DEVENT__DISABLE_SAMPLES=ON -DEVENT__DISABLE_REGRESS=ON
$(package)_config_opts+=-DEVENT__DISABLE_TESTS=ON -DEVENT__LIBRARY_TYPE=STATIC
+ $(package)_cppflags += -D_GNU_SOURCE
$(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601
ifeq ($(NO_HARDEN),)
diff --git a/depends/packages/libmultiprocess.mk b/depends/packages/libmultiprocess.mk
index c292c49bfb..a181e05100 100644
--- a/depends/packages/libmultiprocess.mk
+++ b/depends/packages/libmultiprocess.mk
@@ -13,6 +13,7 @@ ifneq ($(host),$(build))
$(package)_config_opts := -DCAPNP_EXECUTABLE="$$(native_capnp_prefixbin)/capnp"
$(package)_config_opts += -DCAPNPC_CXX_EXECUTABLE="$$(native_capnp_prefixbin)/capnpc-c++"
endif
+$(package)_cxxflags += -ffile-prefix-map=$$($(package)_extract_dir)=/usr
endef
define $(package)_config_cmds
diff --git a/depends/packages/libnatpmp.mk b/depends/packages/libnatpmp.mk
deleted file mode 100644
index 5a573a18e7..0000000000
--- a/depends/packages/libnatpmp.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-package=libnatpmp
-$(package)_version=f2433bec24ca3d3f22a8a7840728a3ac177f94ba
-$(package)_download_path=https://github.com/miniupnp/libnatpmp/archive
-$(package)_file_name=$($(package)_version).tar.gz
-$(package)_sha256_hash=ef84979950dfb3556705b63c9cd6c95501b75e887fba466234b187f3c9029669
-$(package)_build_subdir=build
-
-define $(package)_config_cmds
- $($(package)_cmake) -S .. -B .
-endef
-
-define $(package)_build_cmds
- $(MAKE) natpmp
-endef
-
-define $(package)_stage_cmds
- mkdir -p $($(package)_staging_prefix_dir)/include $($(package)_staging_prefix_dir)/lib && \
- install ../natpmp.h ../natpmp_declspec.h $($(package)_staging_prefix_dir)/include && \
- install libnatpmp.a $($(package)_staging_prefix_dir)/lib
-endef
diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk
index 2e30be434c..3fa5faa4ba 100644
--- a/depends/packages/native_libmultiprocess.mk
+++ b/depends/packages/native_libmultiprocess.mk
@@ -1,8 +1,8 @@
package=native_libmultiprocess
-$(package)_version=6aca5f389bacf2942394b8738bbe15d6c9edfb9b
+$(package)_version=015e95f7ebaa47619a213a19801e7fffafc56864
$(package)_download_path=https://github.com/chaincodelabs/libmultiprocess/archive
$(package)_file_name=$($(package)_version).tar.gz
-$(package)_sha256_hash=2efeed53542bc1d8af3291f2b6f0e5d430d86a5e04e415ce33c136f2c226a51d
+$(package)_sha256_hash=4b1266b121337f3f6f37e1863fba91c1a5ee9ad126bcffc6fe6b9ca47ad050a1
$(package)_dependencies=native_capnp
define $(package)_config_cmds
diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk
index 01ed0d7a92..08a91cbcbd 100644
--- a/depends/packages/packages.mk
+++ b/depends/packages/packages.mk
@@ -18,7 +18,6 @@ sqlite_packages=sqlite
zmq_packages=zeromq
upnp_packages=miniupnpc
-natpmp_packages=libnatpmp
multiprocess_packages = libmultiprocess capnp
multiprocess_native_packages = native_libmultiprocess native_capnp
diff --git a/depends/toolchain.cmake.in b/depends/toolchain.cmake.in
index c733c81edf..735ebc8ea4 100644
--- a/depends/toolchain.cmake.in
+++ b/depends/toolchain.cmake.in
@@ -146,13 +146,6 @@ else()
set(WITH_MINIUPNPC ON CACHE BOOL "")
endif()
-set(natpmp_packages @natpmp_packages@)
-if("${natpmp_packages}" STREQUAL "")
- set(WITH_NATPMP OFF CACHE BOOL "")
-else()
- set(WITH_NATPMP ON CACHE BOOL "")
-endif()
-
set(usdt_packages @usdt_packages@)
if("${usdt_packages}" STREQUAL "")
set(WITH_USDT OFF CACHE BOOL "")
@@ -168,7 +161,8 @@ endif()
if("@multiprocess@" STREQUAL "1")
set(WITH_MULTIPROCESS ON CACHE BOOL "")
- set(LibmultiprocessNative_DIR "${CMAKE_FIND_ROOT_PATH}/native/lib/cmake/Libmultiprocess" CACHE PATH "")
+ set(Libmultiprocess_ROOT "${CMAKE_CURRENT_LIST_DIR}" CACHE PATH "")
+ set(LibmultiprocessNative_ROOT "${CMAKE_CURRENT_LIST_DIR}/native" CACHE PATH "")
else()
set(WITH_MULTIPROCESS OFF CACHE BOOL "")
endif()
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 61a7653e4a..310a90612b 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -6,7 +6,7 @@ find_package(Doxygen COMPONENTS dot)
if(DOXYGEN_FOUND)
set(doxyfile ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
- configure_file(Doxyfile.in ${doxyfile})
+ configure_file(Doxyfile.in ${doxyfile} USE_SOURCE_PERMISSIONS)
# In CMake 3.27, The FindDoxygen module's doxygen_add_docs()
# command gained a CONFIG_FILE option to specify a custom doxygen
diff --git a/doc/README.md b/doc/README.md
index 7f5db1b5bf..79ca53ce76 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -3,7 +3,7 @@ Bitcoin Core
Setup
---------------------
-Bitcoin Core is the original Bitcoin client and it builds the backbone of the network. It downloads and, by default, stores the entire history of Bitcoin transactions, which requires a few hundred gigabytes of disk space. Depending on the speed of your computer and network connection, the synchronization process can take anywhere from a few hours to a day or more.
+Bitcoin Core is the original Bitcoin client and it builds the backbone of the network. It downloads and, by default, stores the entire history of Bitcoin transactions, which requires several hundred gigabytes or more of disk space. Depending on the speed of your computer and network connection, the synchronization process can take anywhere from a few hours to several days or more.
To download Bitcoin Core, visit [bitcoincore.org](https://bitcoincore.org/en/download/).
diff --git a/doc/assumeutxo.md b/doc/assumeutxo.md
new file mode 100644
index 0000000000..17858de540
--- /dev/null
+++ b/doc/assumeutxo.md
@@ -0,0 +1,85 @@
+# Assumeutxo Usage
+
+Assumeutxo is a feature that allows fast bootstrapping of a validating bitcoind
+instance.
+
+For notes on the design of Assumeutxo, please refer to [the design doc](/doc/design/assumeutxo.md).
+
+## Loading a snapshot
+
+There is currently no canonical source for snapshots, but any downloaded snapshot
+will be checked against a hash that's been hardcoded in source code. If there is
+no source for the snapshot you need, you can generate it yourself using
+`dumptxoutset` on another node that is already synced (see
+[Generating a snapshot](#generating-a-snapshot)).
+
+Once you've obtained the snapshot, you can use the RPC command `loadtxoutset` to
+load it.
+
+```
+$ bitcoin-cli -rpcclienttimeout=0 loadtxoutset /path/to/input
+```
+
+After the snapshot has loaded, the syncing process of both the snapshot chain
+and the background IBD chain can be monitored with the `getchainstates` RPC.
+
+### Pruning
+
+A pruned node can load a snapshot. To save space, it's possible to delete the
+snapshot file as soon as `loadtxoutset` finishes.
+
+The minimum `-prune` setting is 550 MiB, but this functionality ignores that
+minimum and uses at least 1100 MiB.
+
+As the background sync continues there will be temporarily two chainstate
+directories, each multiple gigabytes in size (likely growing larger than the
+downloaded snapshot).
+
+### Indexes
+
+Indexes work but don't take advantage of this feature. They always start building
+from the genesis block and can only apply blocks in order. Once the background
+validation reaches the snapshot block, indexes will continue to build all the
+way to the tip.
+
+
+For indexes that support pruning, note that these indexes only allow blocks that
+were already indexed to be pruned. Blocks that are not indexed yet will also
+not be pruned.
+
+This means that, if the snapshot is old, then a lot of blocks after the snapshot
+block will need to be downloaded, and these blocks can't be pruned until they
+are indexed, so they could consume a lot of disk space until indexing catches up
+to the snapshot block.
+
+## Generating a snapshot
+
+The RPC command `dumptxoutset` can be used to generate a snapshot for the current
+tip (using type "latest") or a recent height (using type "rollback"). A generated
+snapshot from one node can then be loaded
+on any other node. However, keep in mind that the snapshot hash needs to be
+listed in the chainparams to make it usable. If there is no snapshot hash for
+the height you have chosen already, you will need to change the code there and
+re-compile.
+
+Using the type parameter "rollback", `dumptxoutset` can also be used to verify the
+hardcoded snapshot hash in the source code by regenerating the snapshot and
+comparing the hash.
+
+Example usage:
+
+```
+$ bitcoin-cli -rpcclienttimeout=0 dumptxoutset /path/to/output rollback
+```
+
+For most of the time that `dumptxoutset` is running, the node is in a temporary
+state that does not actually reflect reality, i.e. blocks are marked invalid
+although we know they are not invalid. Because of this, it is discouraged to
+interact with the node in any other way during this time, to avoid inconsistent
+results and race conditions, particularly with RPCs that interact with block
+storage. This inconsistent state is also why network activity is temporarily
+disabled, causing us to disconnect from all peers.
+
+`dumptxoutset` takes some time to complete, independent of hardware and
+what parameter is chosen. Because of that it is recommended to increase the RPC
+client timeout value (use `-rpcclienttimeout=0` for no timeout).
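
As a rough illustration of the monitoring step mentioned above, the following sketch polls `getchainstates` until the snapshot chainstate reports itself validated. It simply shells out to `bitcoin-cli`; the field names used (`headers`, `chainstates`, `blocks`, `validated`, `snapshot_blockhash`) match recent releases but should be confirmed against `bitcoin-cli help getchainstates` for your version:

```python
import json
import subprocess
import time

def chainstates():
    out = subprocess.check_output(["bitcoin-cli", "getchainstates"])
    return json.loads(out)

while True:
    info = chainstates()
    for cs in info["chainstates"]:
        tag = "snapshot" if "snapshot_blockhash" in cs else "ibd"
        print(f"{tag:8} height={cs['blocks']} validated={cs.get('validated', False)} "
              f"headers={info['headers']}")
    if all(cs.get("validated", False) for cs in info["chainstates"]):
        print("background validation has reached the snapshot block")
        break
    time.sleep(60)
```
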
diff --git a/doc/bitcoin-conf.md b/doc/bitcoin-conf.md
index 76711d0e7d..9b31879790 100644
--- a/doc/bitcoin-conf.md
+++ b/doc/bitcoin-conf.md
@@ -31,7 +31,7 @@ Comments may appear in two ways:
### Network specific options
Network specific options can be:
-- placed into sections with headers `[main]` (not `[mainnet]`), `[test]` (not `[testnet]`), `[signet]` or `[regtest]`;
+- placed into sections with headers `[main]` (not `[mainnet]`), `[test]` (not `[testnet]`, for testnet3), `[testnet4]`, `[signet]` or `[regtest]`;
- prefixed with a chain name; e.g., `regtest.maxmempool=100`.
Network specific options take precedence over non-network specific options.
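
For illustration, a hypothetical `bitcoin.conf` fragment combining both placement styles described above (all values are examples only):

```
# chain selection itself stays outside the sections
testnet4=1

[testnet4]
maxmempool=100          # applies only when running on testnet4

[main]
maxmempool=300          # applies only on mainnet

# prefix form, equivalent to putting the option in a [regtest] section
regtest.maxmempool=100
```
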
diff --git a/doc/build-freebsd.md b/doc/build-freebsd.md
index 6a25e9a834..8b3b10ab85 100644
--- a/doc/build-freebsd.md
+++ b/doc/build-freebsd.md
@@ -42,7 +42,7 @@ from ports. However, you can build DB 4.8 yourself [using depends](/depends).
```bash
pkg install gmake
-gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_NATPMP=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
+gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
```
When the build is complete, the Berkeley DB installation location will be displayed:
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index fafc91fc5f..908b750a8f 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -44,7 +44,7 @@ from ports. However you can build it yourself, [using depends](/depends).
Refer to [depends/README.md](/depends/README.md) for detailed instructions.
```bash
-gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_NATPMP=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
+gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
...
to: /path/to/bitcoin/depends/*-unknown-openbsd*
```
diff --git a/doc/build-osx.md b/doc/build-osx.md
index 600eebb6ff..cb8e82dae8 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -1,15 +1,15 @@
# macOS Build Guide
-**Updated for MacOS [14](https://www.apple.com/macos/sonoma/)**
+**Updated for MacOS [15](https://www.apple.com/macos/macos-sequoia/)**
-This guide describes how to build bitcoind, command-line utilities, and GUI on macOS
+This guide describes how to build bitcoind, command-line utilities, and GUI on macOS.
## Preparation
The commands in this guide should be executed in a Terminal application.
macOS comes with a built-in Terminal located in:
-```
+```bash
/Applications/Utilities/Terminal.app
```
@@ -51,20 +51,6 @@ To install, run the following from your terminal:
brew install cmake boost pkg-config libevent
```
-For macOS 11 (Big Sur) and 12 (Monterey) you need to install a more recent version of llvm.
-
-``` bash
-brew install llvm
-```
-
-And append the following to the configure commands below:
-
-``` bash
--DCMAKE_C_COMPILER="$(brew --prefix llvm)/bin/clang" -DCMAKE_CXX_COMPILER="$(brew --prefix llvm)/bin/clang++"
-```
-
-Try `llvm@17` if compilation fails with the default version of llvm.
-
### 4. Clone Bitcoin repository
`git` should already be installed by default on your system.
@@ -135,17 +121,6 @@ Skip if you do not need this functionality.
brew install miniupnpc
```
-###### libnatpmp
-
-libnatpmp may be used for NAT-PMP port mapping.
-Skip if you do not need this functionality.
-
-``` bash
-brew install libnatpmp
-```
-
-Check out the [further configuration](#further-configuration) section for more information.
-
---
#### ZMQ Dependencies
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 4c3c659bff..086731be5a 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -60,9 +60,9 @@ executables, which are based on BerkeleyDB 4.8. Otherwise, you can build Berkele
To build Bitcoin Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode)
-Optional port mapping libraries (see: `-DWITH_MINIUPNPC=ON` and `-DWITH_NATPMP=ON`):
+Optional port mapping library (see: `-DWITH_MINIUPNPC=ON`):
- sudo apt install libminiupnpc-dev libnatpmp-dev
+ sudo apt install libminiupnpc-dev
ZMQ dependencies (provides ZMQ API):
@@ -112,9 +112,9 @@ are based on Berkeley DB 4.8. Otherwise, you can build Berkeley DB [yourself](#b
To build Bitcoin Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode)
-Optional port mapping libraries (see: `-DWITH_MINIUPNPC=ON` and `-DWITH_NATPMP=ON`):
+Optional port mapping library (see: `-DWITH_MINIUPNPC=ON`):
- sudo dnf install miniupnpc-devel libnatpmp-devel
+ sudo dnf install miniupnpc-devel
ZMQ dependencies (provides ZMQ API):
@@ -153,7 +153,7 @@ The legacy wallet uses Berkeley DB. To ensure backwards compatibility it is
recommended to use Berkeley DB 4.8. If you have to build it yourself, and don't
want to use any other libraries built in depends, you can do:
```bash
-make -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_NATPMP=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
+make -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
...
to: /path/to/bitcoin/depends/x86_64-pc-linux-gnu
```
diff --git a/doc/build-windows-msvc.md b/doc/build-windows-msvc.md
index 1daca1f93e..80c2b77f1e 100644
--- a/doc/build-windows-msvc.md
+++ b/doc/build-windows-msvc.md
@@ -1,6 +1,6 @@
# Windows / MSVC Build Guide
-This guide describes how to build bitcoind, command-line utilities, and GUI on Windows using Micsrosoft Visual Studio.
+This guide describes how to build bitcoind, command-line utilities, and GUI on Windows using Microsoft Visual Studio.
For cross-compiling options, please see [`build-windows.md`](./build-windows.md).
@@ -42,27 +42,31 @@ Available presets can be listed as follows:
cmake --list-presets
```
+By default, all presets:
+- Set `BUILD_GUI` to `ON`.
+- Set `WITH_QRENCODE` to `OFF`, due to known build issues when using vcpkg's `libqrencode` package.
+
## Building
CMake will put the resulting object files, libraries, and executables into a dedicated build directory.
In the following instructions, the "Debug" configuration can be specified instead of the "Release" one.
-### 4. Building with Dynamic Linking with GUI
+### 4. Building with Static Linking with GUI
```
-cmake -B build --preset vs2022 -DBUILD_GUI=ON # It might take a while if the vcpkg binary cache is unpopulated or invalidated.
+cmake -B build --preset vs2022-static # It might take a while if the vcpkg binary cache is unpopulated or invalidated.
cmake --build build --config Release # Use "-j N" for N parallel jobs.
ctest --test-dir build --build-config Release # Use "-j N" for N parallel tests. Some tests are disabled if Python 3 is not available.
+cmake --install build --config Release # Optional.
```
-### 5. Building with Static Linking without GUI
+### 5. Building with Dynamic Linking without GUI
```
-cmake -B build --preset vs2022-static # It might take a while if the vcpkg binary cache is unpopulated or invalidated.
+cmake -B build --preset vs2022 -DBUILD_GUI=OFF # It might take a while if the vcpkg binary cache is unpopulated or invalidated.
cmake --build build --config Release # Use "-j N" for N parallel jobs.
ctest --test-dir build --build-config Release # Use "-j N" for N parallel tests. Some tests are disabled if Python 3 is not available.
-cmake --install build --config Release # Optional.
```
## Performance Notes
diff --git a/doc/build-windows.md b/doc/build-windows.md
index 2d47d120e3..0c1418bff9 100644
--- a/doc/build-windows.md
+++ b/doc/build-windows.md
@@ -28,36 +28,18 @@ The steps below can be performed on Ubuntu or WSL. The depends system
will also work on other Linux distributions, however the commands for
installing the toolchain will be different.
-First, install the general dependencies:
-
- sudo apt update
- sudo apt upgrade
- sudo apt install cmake curl g++ git make pkg-config
-
-A host toolchain (`g++`) is necessary because some dependency
-packages need to build host utilities that are used in the build process.
-
-See [dependencies.md](dependencies.md) for a complete overview.
+See [README.md](../depends/README.md) in the depends directory for which
+dependencies to install and [dependencies.md](dependencies.md) for a complete overview.
If you want to build the Windows installer using the `deploy` build target, you will need [NSIS](https://nsis.sourceforge.io/Main_Page):
- sudo apt install nsis
+ apt install nsis
Acquire the source in the usual way:
git clone https://github.com/bitcoin/bitcoin.git
cd bitcoin
-## Building for 64-bit Windows
-
-The first step is to install the mingw-w64 cross-compilation toolchain:
-
-```sh
-sudo apt install g++-mingw-w64-x86-64-posix
-```
-
-Once the toolchain is installed the build steps are common:
-
Note that for WSL the Bitcoin Core source path MUST be somewhere in the default mount file system, for
example /usr/src/bitcoin, AND not under /mnt/d/. If this is not the case the dependency autoconf scripts will fail.
This means you cannot use a directory that is located directly on the host Windows file system to perform the build.
@@ -67,7 +49,6 @@ Build using:
gmake -C depends HOST=x86_64-w64-mingw32 # Use "-j N" for N parallel jobs.
cmake -B build --toolchain depends/x86_64-w64-mingw32/toolchain.cmake
cmake --build build # Use "-j N" for N parallel jobs.
- ctest --test-dir build # Use "-j N" for N parallel tests. Some tests are disabled if Python 3 is not available.
## Depends system
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 4c620cf876..a3d42fc281 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -9,7 +9,7 @@ You can find installation instructions in the `build-*.md` file for your platfor
| [Clang](https://clang.llvm.org) | [16.0](https://github.com/bitcoin/bitcoin/pull/30263) |
| [CMake](https://cmake.org/) | [3.22](https://github.com/bitcoin/bitcoin/pull/30454) |
| [GCC](https://gcc.gnu.org) | [11.1](https://github.com/bitcoin/bitcoin/pull/29091) |
-| [Python](https://www.python.org) (scripts, tests) | [3.9](https://github.com/bitcoin/bitcoin/pull/28211) |
+| [Python](https://www.python.org) (scripts, tests) | [3.10](https://github.com/bitcoin/bitcoin/pull/30527) |
| [systemtap](https://sourceware.org/systemtap/) ([tracing](tracing.md))| N/A |
## Required
@@ -34,7 +34,6 @@ You can find installation instructions in the `build-*.md` file for your platfor
### Networking
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
-| [libnatpmp](../depends/packages/libnatpmp.mk) | [link](https://github.com/miniupnp/libnatpmp/) | commit [f2433be...](https://github.com/bitcoin/bitcoin/pull/29708) | | No |
| [MiniUPnPc](../depends/packages/miniupnpc.mk) | [link](https://miniupnp.tuxfamily.org/) | [2.2.7](https://github.com/bitcoin/bitcoin/pull/29707) | 2.1 | No |
### Notifications
diff --git a/doc/design/assumeutxo.md b/doc/design/assumeutxo.md
index a4980729d0..123c02ac13 100644
--- a/doc/design/assumeutxo.md
+++ b/doc/design/assumeutxo.md
@@ -1,47 +1,6 @@
-# assumeutxo
+# Assumeutxo Design
-Assumeutxo is a feature that allows fast bootstrapping of a validating bitcoind
-instance.
-
-## Loading a snapshot
-
-There is currently no canonical source for snapshots, but any downloaded snapshot
-will be checked against a hash that's been hardcoded in source code.
-
-Once you've obtained the snapshot, you can use the RPC command `loadtxoutset` to
-load it.
-
-### Pruning
-
-A pruned node can load a snapshot. To save space, it's possible to delete the
-snapshot file as soon as `loadtxoutset` finishes.
-
-The minimum `-prune` setting is 550 MiB, but this functionality ignores that
-minimum and uses at least 1100 MiB.
-
-As the background sync continues there will be temporarily two chainstate
-directories, each multiple gigabytes in size (likely growing larger than the
-downloaded snapshot).
-
-### Indexes
-
-Indexes work but don't take advantage of this feature. They always start building
-from the genesis block. Once the background validation reaches the snapshot block,
-indexes will continue to build all the way to the tip.
-
-For indexes that support pruning, note that no pruning will take place between
-the snapshot and the tip, until the background sync has completed - after which
-everything is pruned. Depending on how old the snapshot is, this may temporarily
-use a significant amount of disk space.
-
-## Generating a snapshot
-
-The RPC command `dumptxoutset` can be used to generate a snapshot. This can be used
-to create a snapshot on one node that you wish to load on another node.
-It can also be used to verify the hardcoded snapshot hash in the source code.
-
-The utility script
-`./contrib/devtools/utxo_snapshot.sh` may be of use.
+For notes on the usage of Assumeutxo, please refer to [the usage doc](/doc/assumeutxo.md).
## General background
@@ -79,7 +38,7 @@ data.
### "Normal" operation via initial block download
`ChainstateManager` manages a single Chainstate object, for which
-`m_snapshot_blockhash` is null. This chainstate is (maybe obviously)
+`m_from_snapshot_blockhash` is `std::nullopt`. This chainstate is (maybe obviously)
considered active. This is the "traditional" mode of operation for bitcoind.
| | |
diff --git a/doc/design/libraries.md b/doc/design/libraries.md
index 8a4ee31568..24185bf477 100644
--- a/doc/design/libraries.md
+++ b/doc/design/libraries.md
@@ -4,11 +4,11 @@
|--------------------------|-------------|
| *libbitcoin_cli* | RPC client functionality used by *bitcoin-cli* executable |
| *libbitcoin_common* | Home for common functionality shared by different executables and libraries. Similar to *libbitcoin_util*, but higher-level (see [Dependencies](#dependencies)). |
-| *libbitcoin_consensus* | Stable, backwards-compatible consensus functionality used by *libbitcoin_node* and *libbitcoin_wallet*. |
+| *libbitcoin_consensus* | Consensus functionality used by *libbitcoin_node* and *libbitcoin_wallet*. |
| *libbitcoin_crypto* | Hardware-optimized functions for data encryption, hashing, message authentication, and key derivation. |
| *libbitcoin_kernel* | Consensus engine and support library used for validation by *libbitcoin_node*. |
| *libbitcoinqt* | GUI functionality used by *bitcoin-qt* and *bitcoin-gui* executables. |
-| *libbitcoin_ipc* | IPC functionality used by *bitcoin-node*, *bitcoin-wallet*, *bitcoin-gui* executables to communicate when [`--enable-multiprocess`](multiprocess.md) is used. |
+| *libbitcoin_ipc* | IPC functionality used by *bitcoin-node*, *bitcoin-wallet*, *bitcoin-gui* executables to communicate when [`-DWITH_MULTIPROCESS=ON`](multiprocess.md) is used. |
| *libbitcoin_node* | P2P and RPC server functionality used by *bitcoind* and *bitcoin-qt* executables. |
| *libbitcoin_util* | Home for common functionality shared by different executables and libraries. Similar to *libbitcoin_common*, but lower-level (see [Dependencies](#dependencies)). |
| *libbitcoin_wallet* | Wallet functionality used by *bitcoind* and *bitcoin-wallet* executables. |
diff --git a/doc/design/multiprocess.md b/doc/design/multiprocess.md
index 49410a4213..a781da8d1b 100644
--- a/doc/design/multiprocess.md
+++ b/doc/design/multiprocess.md
@@ -81,7 +81,7 @@ This section describes the major components of the Inter-Process Communication (
- In the generated code, we have C++ client subclasses that inherit from the abstract classes in [`src/interfaces/`](../../src/interfaces/). These subclasses are the workhorses of the IPC mechanism.
- They implement all the methods of the interface, marshalling arguments into a structured format, sending them as requests to the IPC server via a UNIX socket, and handling the responses.
- These subclasses effectively mask the complexity of IPC, presenting a familiar C++ interface to developers.
-- Internally, the client subclasses generated by the `mpgen` tool wrap [client classes generated by Cap'n Proto](https://capnproto.org/cxxrpc.html#clients), and use them to send IPC requests.
+- Internally, the client subclasses generated by the `mpgen` tool wrap [client classes generated by Cap'n Proto](https://capnproto.org/cxxrpc.html#clients), and use them to send IPC requests. The Cap'n Proto client classes are low-level, with non-blocking methods that use asynchronous I/O and pass request and response objects, while mpgen client subclasses provide normal C++ methods that block while executing and convert between request/response objects and arguments/return values.
### C++ Server Classes in Generated Code
- On the server side, corresponding generated C++ classes receive IPC requests. These server classes are responsible for unmarshalling method arguments, invoking the corresponding methods in the local [`src/interfaces/`](../../src/interfaces/) objects, and creating the IPC response.
@@ -94,7 +94,7 @@ This section describes the major components of the Inter-Process Communication (
- **Asynchronous I/O and Thread Management**: The library is also responsible for managing I/O and threading. Particularly, it ensures that IPC requests never block each other and that new threads on either side of a connection can always make client calls. It also manages worker threads on the server side of calls, ensuring that calls from the same client thread always execute on the same server thread (to avoid locking issues and support nested callbacks).
### Type Hooks in [`src/ipc/capnp/*-types.h`](../../src/ipc/capnp/)
-- **Custom Type Conversions**: In [`src/ipc/capnp/*-types.h`](../../src/ipc/capnp/), function overloads of two `libmultiprocess` C++ functions, `mp::CustomReadField` and `mp::CustomBuildFields`, are defined. These overloads are used for customizing the conversion of specific C++ types to and from Cap’n Proto types.
+- **Custom Type Conversions**: In [`src/ipc/capnp/*-types.h`](../../src/ipc/capnp/), function overloads of `libmultiprocess` C++ functions, `mp::CustomReadField`, `mp::CustomBuildField`, `mp::CustomReadMessage` and `mp::CustomBuildMessage`, are defined. These overloads are used for customizing the conversion of specific C++ types to and from Cap’n Proto types.
- **Handling Special Cases**: The `mpgen` tool and `libmultiprocess` library can convert most C++ types to and from Cap’n Proto types automatically, including interface types, primitive C++ types, standard C++ types like `std::vector`, `std::set`, `std::map`, `std::tuple`, and `std::function`, as well as simple C++ structs that consist of aforementioned types and whose fields correspond 1:1 with Cap’n Proto struct fields. For other types, `*-types.h` files provide custom code to convert between C++ and Cap’n Proto data representations.
### Protocol-Agnostic IPC Code in [`src/ipc/`](../../src/ipc/)
@@ -197,7 +197,7 @@ sequenceDiagram
- Upon receiving the request, the Cap'n Proto dispatching code in the `bitcoin-node` process calls the `getBlockHash` method of the `Chain` [server class](#c-server-classes-in-generated-code).
- The server class is automatically generated by the `mpgen` tool from the [`chain.capnp`](https://github.com/ryanofsky/bitcoin/blob/pr/ipc/src/ipc/capnp/chain.capnp) file in [`src/ipc/capnp/`](../../src/ipc/capnp/).
- The `getBlockHash` method of the generated `Chain` server subclass in `bitcoin-wallet` receives a Cap’n Proto request object with the `height` parameter, and calls the `getBlockHash` method on its local `Chain` object with the provided `height`.
- - When the call returns, it encapsulates the return value in a Cap’n Proto response, which it sends back to the `bitcoin-wallet` process,
+ - When the call returns, it encapsulates the return value in a Cap’n Proto response, which it sends back to the `bitcoin-wallet` process.
5. **Response and Return**
- The `getBlockHash` method of the generated `Chain` client subclass in `bitcoin-wallet` which sent the request now receives the response.
@@ -232,7 +232,7 @@ This modularization represents an advancement in Bitcoin Core's architecture, of
- **Cap’n Proto struct**: A structured data format used in Cap’n Proto, similar to structs in C++, for organizing and transporting data across different processes.
-- **client class (in generated code)**: A C++ class generated from a Cap’n Proto interface which inherits from a Bitcoin core abstract class, and implements each virtual method to send IPC requests to another process. (see also [components section](#c-client-subclasses-in-generated-code))
+- **client class (in generated code)**: A C++ class generated from a Cap’n Proto interface which inherits from a Bitcoin Core abstract class, and implements each virtual method to send IPC requests to another process. (see also [components section](#c-client-subclasses-in-generated-code))
- **IPC (inter-process communication)**: Mechanisms that enable processes to exchange requests and data.
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index ae5513a222..952dbc77a0 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -354,8 +354,14 @@ Development tips and tricks
### Compiling for debugging
-Run configure with `--enable-debug` to add additional compiler flags that
-produce better debugging builds.
+When using the default build configuration by running `cmake -B build`, the
+`-DCMAKE_BUILD_TYPE` is set to `RelWithDebInfo`. This option adds debug symbols
+but also performs some compiler optimizations that may make debugging trickier
+as the code may not correspond directly to the source.
+
+If you need to build exclusively for debugging, set the `-DCMAKE_BUILD_TYPE`
+to `Debug` (i.e. `-DCMAKE_BUILD_TYPE=Debug`). You can always check the cmake
+build options of an existing build with `ccmake build`.
### Show sources in debugging
@@ -412,8 +418,8 @@ see [test/functional/](/test/functional) for tests that run in `-regtest` mode.
### DEBUG_LOCKORDER
Bitcoin Core is a multi-threaded application, and deadlocks or other
-multi-threading bugs can be very difficult to track down. The `--enable-debug`
-configure option adds `-DDEBUG_LOCKORDER` to the compiler flags. This inserts
+multi-threading bugs can be very difficult to track down. The `-DCMAKE_BUILD_TYPE=Debug`
+build option adds `-DDEBUG_LOCKORDER` to the compiler flags. This inserts
run-time checks to keep track of which locks are held and adds warnings to the
`debug.log` file if inconsistencies are detected.
@@ -423,9 +429,8 @@ Defining `DEBUG_LOCKCONTENTION` adds a "lock" logging category to the logging
RPC that, when enabled, logs the location and duration of each lock contention
to the `debug.log` file.
-The `--enable-debug` configure option adds `-DDEBUG_LOCKCONTENTION` to the
-compiler flags. You may also enable it manually for a non-debug build by running
-configure with `-DDEBUG_LOCKCONTENTION` added to your CPPFLAGS,
+The `-DCMAKE_BUILD_TYPE=Debug` build option adds `-DDEBUG_LOCKCONTENTION` to the
+compiler flags. You may also enable it manually by building with `-DDEBUG_LOCKCONTENTION` added to your CPPFLAGS,
i.e. `CPPFLAGS="-DDEBUG_LOCKCONTENTION"`, then build and run bitcoind.
You can then use the `-debug=lock` configuration option at bitcoind startup or
@@ -555,7 +560,7 @@ See the functional test documentation for how to invoke perf within tests.
Bitcoin Core can be compiled with various "sanitizers" enabled, which add
instrumentation for issues regarding things like memory safety, thread race
conditions, or undefined behavior. This is controlled with the
-`--with-sanitizers` configure flag, which should be a comma separated list of
+`-DSANITIZERS` cmake build flag, which should be a comma separated list of
sanitizers to enable. The sanitizer list should correspond to supported
`-fsanitize=` options in your compiler. These sanitizers have runtime overhead,
so they are most useful when testing changes or producing debugging builds.
@@ -564,16 +569,16 @@ Some examples:
```bash
# Enable both the address sanitizer and the undefined behavior sanitizer
-./configure --with-sanitizers=address,undefined
+cmake -B build -DSANITIZERS=address,undefined
# Enable the thread sanitizer
-./configure --with-sanitizers=thread
+cmake -B build -DSANITIZERS=thread
```
If you are compiling with GCC you will typically need to install corresponding
"san" libraries to actually compile with these flags, e.g. libasan for the
address sanitizer, libtsan for the thread sanitizer, and libubsan for the
-undefined sanitizer. If you are missing required libraries, the configure script
+undefined sanitizer. If you are missing required libraries, the build
will fail with a linker error when testing the sanitizer flags.
The test suite should pass cleanly with the `thread` and `undefined` sanitizers. You
@@ -589,7 +594,7 @@ See the CI config for more examples, and upstream documentation for more informa
about any additional options.
Not all sanitizer options can be enabled at the same time, e.g. trying to build
-with `--with-sanitizers=address,thread` will fail in the configure script as
+with `-DSANITIZERS=address,thread` will fail in the build as
these sanitizers are mutually incompatible. Refer to your compiler manual to
learn more about these options and which sanitizers are supported by your
compiler.
@@ -603,7 +608,6 @@ Additional resources:
* [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html)
* [GCC Instrumentation Options](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html)
* [Google Sanitizers Wiki](https://github.com/google/sanitizers/wiki)
- * [Issue #12691: Enable -fsanitize flags in Travis](https://github.com/bitcoin/bitcoin/issues/12691)
Locking/mutex usage notes
-------------------------
@@ -614,7 +618,7 @@ The code is multi-threaded and uses mutexes and the
Deadlocks due to inconsistent lock ordering (thread 1 locks `cs_main` and then
`cs_wallet`, while thread 2 locks them in the opposite order: result, deadlock
as each waits for the other to release its lock) are a problem. Compile with
-`-DDEBUG_LOCKORDER` (or use `--enable-debug`) to get lock order inconsistencies
+`-DDEBUG_LOCKORDER` (or use `-DCMAKE_BUILD_TYPE=Debug`) to get lock order inconsistencies
reported in the `debug.log` file.
Re-architecting the core code so there are better-defined interfaces
@@ -1057,8 +1061,8 @@ bool Chainstate::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
```
- Build and run tests with `-DDEBUG_LOCKORDER` to verify that no potential
- deadlocks are introduced. As of 0.12, this is defined by default when
- configuring with `--enable-debug`.
+ deadlocks are introduced. This is defined by default when
+ building with `-DCMAKE_BUILD_TYPE=Debug`.
- When using `LOCK`/`TRY_LOCK` be aware that the lock exists in the context of
the current scope, so surround the statement and the code that needs the lock
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index 4da9f5c3a2..927b0dc8d5 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -7,11 +7,7 @@ To quickly get started fuzzing Bitcoin Core using [libFuzzer](https://llvm.org/d
```sh
$ git clone https://github.com/bitcoin/bitcoin
$ cd bitcoin/
-$ cmake -B build_fuzz \
- -DCMAKE_C_COMPILER="clang" \
- -DCMAKE_CXX_COMPILER="clang++" \
- -DBUILD_FOR_FUZZING=ON \
- -DSANITIZERS=undefined,address,fuzzer
+$ cmake --preset=libfuzzer
# macOS users: If you have problem with this step then make sure to read "macOS hints for
# libFuzzer" on https://github.com/bitcoin/bitcoin/blob/master/doc/fuzzing.md#macos-hints-for-libfuzzer
$ cmake --build build_fuzz
@@ -19,6 +15,9 @@ $ FUZZ=process_message build_fuzz/src/test/fuzz/fuzz
# abort fuzzing using ctrl-c
```
+One can use `--preset=libfuzzer-nosan` to do the same without the common sanitizers enabled.
+See [the section on running without sanitizers](#run-without-sanitizers-for-increased-throughput) for more information.
+
There is also a runner script to execute all fuzz targets. Refer to
`./test/fuzz/test_runner.py --help` for more details.
@@ -80,7 +79,7 @@ of the test. Just make sure to use double-dash to distinguish them from the
fuzzer's own arguments:
```sh
-$ FUZZ=address_deserialize_v2 build_fuzz/src/test/fuzz/fuzz -runs=1 fuzz_seed_corpus/address_deserialize_v2 --checkaddrman=5 --printtoconsole=1
+$ FUZZ=address_deserialize_v2 build_fuzz/src/test/fuzz/fuzz -runs=1 fuzz_corpora/address_deserialize_v2 --checkaddrman=5 --printtoconsole=1
```
## Fuzzing corpora
@@ -91,11 +90,11 @@ To fuzz `process_message` using the [`bitcoin-core/qa-assets`](https://github.co
```sh
$ git clone https://github.com/bitcoin-core/qa-assets
-$ FUZZ=process_message build_fuzz/src/test/fuzz/fuzz qa-assets/fuzz_seed_corpus/process_message/
+$ FUZZ=process_message build_fuzz/src/test/fuzz/fuzz qa-assets/fuzz_corpora/process_message/
INFO: Seed: 1346407872
INFO: Loaded 1 modules (424174 inline 8-bit counters): 424174 [0x55d8a9004ab8, 0x55d8a906c3a6),
INFO: Loaded 1 PC tables (424174 PCs): 424174 [0x55d8a906c3a8,0x55d8a96e5288),
-INFO: 991 files found in qa-assets/fuzz_seed_corpus/process_message/
+INFO: 991 files found in qa-assets/fuzz_corpora/process_message/
INFO: -max_len is not provided; libFuzzer will not generate inputs larger than 4096 bytes
INFO: seed corpus: files: 991 min: 1b max: 1858b total: 288291b rss: 150Mb
#993 INITED cov: 7063 ft: 8236 corp: 25/3821b exec/s: 0 rss: 181Mb
@@ -107,8 +106,8 @@ INFO: seed corpus: files: 991 min: 1b max: 1858b total: 288291b rss: 150Mb
Fuzzing on a harness compiled with `-DSANITIZERS=address,fuzzer,undefined` is
good for finding bugs. However, the very slow execution even under libFuzzer
will limit the ability to find new coverage. A good approach is to perform
-occasional long runs without the additional bug-detectors (just
-`-DSANITIZERS=fuzzer`) and then merge new inputs into a corpus as described in
+occasional long runs without the additional bug-detectors
+(`--preset=libfuzzer-nosan`) and then merge new inputs into a corpus as described in
the qa-assets repo
(https://github.com/bitcoin-core/qa-assets/blob/main/.github/PULL_REQUEST_TEMPLATE.md).
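As a sketch, new inputs found during such a run can be merged back into a local corpus
checkout with libFuzzer's `-merge=1` mode (the paths below are illustrative):

```sh
# Merge coverage-increasing inputs from ./new_inputs into the corpus directory
FUZZ=process_message build_fuzz/src/test/fuzz/fuzz -merge=1 \
    qa-assets/fuzz_corpora/process_message/ new_inputs/
```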
Patience is useful; even with improved throughput, libFuzzer may need days and
@@ -124,7 +123,7 @@ Patience is useful; even with improved throughput, libFuzzer may need days and
quickly from a crash case)
- run the fuzzer with the case number appended to the seed corpus path:
`FUZZ=process_message build_fuzz/src/test/fuzz/fuzz
- qa-assets/fuzz_seed_corpus/process_message/1bc91feec9fc00b107d97dc225a9f2cdaa078eb6`
+ qa-assets/fuzz_corpora/process_message/1bc91feec9fc00b107d97dc225a9f2cdaa078eb6`
## Submit improved coverage
@@ -145,11 +144,9 @@ You may also need to take care of giving the correct path for `clang` and
Full configuration step that was tested on macOS with `brew` installed `llvm`:
```sh
-$ cmake -B build_fuzz \
+$ cmake --preset=libfuzzer \
-DCMAKE_C_COMPILER="$(brew --prefix llvm)/bin/clang" \
-DCMAKE_CXX_COMPILER="$(brew --prefix llvm)/bin/clang++" \
- -DBUILD_FOR_FUZZING=ON \
- -DSANITIZERS=undefined,address,fuzzer \
-DAPPEND_LDFLAGS=-Wl,-no_warn_duplicate_libraries
```
@@ -209,157 +206,14 @@ $ FUZZ=process_message ./honggfuzz/honggfuzz -i inputs/ -- build_fuzz/src/test/f
Read the [Honggfuzz documentation](https://github.com/google/honggfuzz/blob/master/docs/USAGE.md) for more information.
-## Fuzzing the Bitcoin Core P2P layer using Honggfuzz NetDriver
-
-Honggfuzz NetDriver allows for very easy fuzzing of TCP servers such as Bitcoin
-Core without having to write any custom fuzzing harness. The `bitcoind` server
-process is largely fuzzed without modification.
-
-This makes the fuzzing highly realistic: a bug reachable by the fuzzer is likely
-also remotely triggerable by an untrusted peer.
-
-To quickly get started fuzzing the P2P layer using Honggfuzz NetDriver:
-
-```sh
-$ mkdir bitcoin-honggfuzz-p2p/
-$ cd bitcoin-honggfuzz-p2p/
-$ git clone https://github.com/bitcoin/bitcoin
-$ cd bitcoin/
-$ git clone https://github.com/google/honggfuzz
-$ cd honggfuzz/
-$ make
-$ cd ..
-$ git apply << "EOF"
-diff --git a/src/compat/compat.h b/src/compat/compat.h
-index 8195bceaec..cce2b31ff0 100644
---- a/src/compat/compat.h
-+++ b/src/compat/compat.h
-@@ -90,8 +90,12 @@ typedef char* sockopt_arg_type;
- // building with a binutils < 2.36 is subject to this ld bug.
- #define MAIN_FUNCTION __declspec(dllexport) int main(int argc, char* argv[])
- #else
-+#ifdef HFND_FUZZING_ENTRY_FUNCTION_CXX
-+#define MAIN_FUNCTION HFND_FUZZING_ENTRY_FUNCTION_CXX(int argc, char* argv[])
-+#else
- #define MAIN_FUNCTION int main(int argc, char* argv[])
- #endif
-+#endif
-
- // Note these both should work with the current usage of poll, but best to be safe
- // WIN32 poll is broken https://daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken/
-diff --git a/src/net.cpp b/src/net.cpp
-index 7601a6ea84..702d0f56ce 100644
---- a/src/net.cpp
-+++ b/src/net.cpp
-@@ -727,7 +727,7 @@ int V1TransportDeserializer::readHeader(Span<const uint8_t> msg_bytes)
- }
-
- // Check start string, network magic
-- if (memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
-+ if (false && memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { // skip network magic checking
- LogDebug(BCLog::NET, "Header error: Wrong MessageStart %s received, peer=%d\n", HexStr(hdr.pchMessageStart), m_node_id);
- return -1;
- }
-@@ -788,7 +788,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const std::chrono::microseconds
- RandAddEvent(ReadLE32(hash.begin()));
-
- // Check checksum and header message type string
-- if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) {
-+ if (false && memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { // skip checksum checking
- LogDebug(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n",
- SanitizeString(msg.m_type), msg.m_message_size,
- HexStr(Span{hash}.first(CMessageHeader::CHECKSUM_SIZE)),
-EOF
-$ cmake -B build_fuzz \
- -DCMAKE_C_COMPILER="$(pwd)/honggfuzz/hfuzz_cc/hfuzz-clang" \
- -DCMAKE_CXX_COMPILER="$(pwd)/honggfuzz/hfuzz_cc/hfuzz-clang++" \
- -DENABLE_WALLET=OFF \
- -DBUILD_GUI=OFF \
- -DSANITIZERS=address,undefined
-$ cmake --build build_fuzz --target bitcoind
-$ mkdir -p inputs/
-$ ./honggfuzz/honggfuzz --exit_upon_crash --quiet --timeout 4 -n 1 -Q \
- -E HFND_TCP_PORT=18444 -f inputs/ -- \
- build_fuzz/src/bitcoind -regtest -discover=0 -dns=0 -dnsseed=0 -listenonion=0 \
- -nodebuglogfile -bind=127.0.0.1:18444 -logthreadnames \
- -debug
-```
-
-# Fuzzing Bitcoin Core using Eclipser (v1.x)
-
-## Quickstart guide
-
-To quickly get started fuzzing Bitcoin Core using [Eclipser v1.x](https://github.com/SoftSec-KAIST/Eclipser/tree/v1.x):
-
-```sh
-$ git clone https://github.com/bitcoin/bitcoin
-$ cd bitcoin/
-$ sudo vim /etc/apt/sources.list # Uncomment the lines starting with 'deb-src'.
-$ sudo apt-get update
-$ sudo apt-get build-dep qemu
-$ sudo apt-get install libtool libtool-bin wget automake autoconf bison gdb
-```
-
-At this point, you must install the .NET core. The process differs, depending on your Linux distribution.
-See [this link](https://learn.microsoft.com/en-us/dotnet/core/install/linux) for details.
-On Ubuntu 20.04, the following should work:
-
-```sh
-$ wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb
-$ sudo dpkg -i packages-microsoft-prod.deb
-$ rm packages-microsoft-prod.deb
-$ sudo apt-get update
-$ sudo apt-get install -y dotnet-sdk-2.1
-```
-
-You will also want to make sure Python is installed as `python` for the Eclipser install to succeed.
-
-```sh
-$ git clone https://github.com/SoftSec-KAIST/Eclipser.git
-$ cd Eclipser
-$ git checkout v1.x
-$ make
-$ cd ..
-$ cmake -B build_fuzz -DBUILD_FOR_FUZZING=ON
-$ mkdir -p outputs/
-$ FUZZ=bech32 dotnet ./Eclipser/build/Eclipser.dll fuzz -p build_fuzz/src/test/fuzz/fuzz -t 36000 -o outputs --src stdin
-```
-
-This will perform 10 hours of fuzzing.
-
-To make further use of the inputs generated by Eclipser, you
-must first decode them:
-
-```sh
-$ dotnet Eclipser/build/Eclipser.dll decode -i outputs/testcase -o decoded_outputs
-```
-This will place raw inputs in the directory `decoded_outputs/decoded_stdins`. Crashes are in the `outputs/crashes` directory, and must
-be decoded in the same way.
-
-Fuzzing with Eclipser will likely be much more effective if using an existing corpus:
-
-```sh
-$ git clone https://github.com/bitcoin-core/qa-assets
-$ FUZZ=bech32 dotnet Eclipser/build/Eclipser.dll fuzz -p build_fuzz/src/test/fuzz/fuzz -t 36000 -i qa-assets/fuzz_seed_corpus/bech32 outputs --src stdin
-```
-
-Note that fuzzing with Eclipser on certain targets (those that create 'full nodes', e.g. `process_message*`) will,
-for now, slowly fill `/tmp/` with improperly cleaned-up files, which will cause spurious crashes.
-See [this proposed patch](https://github.com/bitcoin/bitcoin/pull/22472) for more information.
-
-Read the [Eclipser documentation for v1.x](https://github.com/SoftSec-KAIST/Eclipser/tree/v1.x) for more details on using Eclipser.
-
-
# OSS-Fuzz
Bitcoin Core participates in Google's [OSS-Fuzz](https://github.com/google/oss-fuzz/tree/master/projects/bitcoin-core)
-program, which includes a dashboard of [publicly disclosed vulnerabilities](https://bugs.chromium.org/p/oss-fuzz/issues/list?q=bitcoin-core).
-Generally, we try to disclose vulnerabilities as soon as possible after they
-are fixed to give users the knowledge they need to be protected. However,
-because Bitcoin is a live P2P network, and not just standalone local software,
-we might not fully disclose every issue within Google's standard
+program, which includes a dashboard of [publicly disclosed vulnerabilities](https://issues.oss-fuzz.com/issues?q=bitcoin-core%20status:open).
+
+Bitcoin Core follows its [security disclosure policy](https://bitcoincore.org/en/security-advisories/),
+which may differ from Google's standard
[90-day disclosure window](https://google.github.io/oss-fuzz/getting-started/bug-disclosure-guidelines/)
-if a partial or delayed disclosure is important to protect users or the
-function of the network.
+.
OSS-Fuzz also produces [a fuzzing coverage report](https://oss-fuzz.com/coverage-report/job/libfuzzer_asan_bitcoin-core/latest).
diff --git a/doc/multiprocess.md b/doc/multiprocess.md
index 7ba89b3ff5..1757296eed 100644
--- a/doc/multiprocess.md
+++ b/doc/multiprocess.md
@@ -4,7 +4,7 @@ _This document describes usage of the multiprocess feature. For design informati
## Build Option
-On unix systems, the `--enable-multiprocess` build option can be passed to `./configure` to build new `bitcoin-node`, `bitcoin-wallet`, and `bitcoin-gui` executables alongside existing `bitcoind` and `bitcoin-qt` executables.
+On Unix systems, the `-DWITH_MULTIPROCESS=ON` build option can be passed to build the supplemental `bitcoin-node` and `bitcoin-gui` multiprocess executables.
## Debugging
@@ -17,15 +17,17 @@ The multiprocess feature requires [Cap'n Proto](https://capnproto.org/) and [lib
```
cd <BITCOIN_SOURCE_DIRECTORY>
make -C depends NO_QT=1 MULTIPROCESS=1
-CONFIG_SITE=$PWD/depends/x86_64-pc-linux-gnu/share/config.site ./configure
-make
-src/bitcoin-node -regtest -printtoconsole -debug=ipc
-BITCOIND=bitcoin-node test/functional/test_runner.py
+# Set HOST_PLATFORM to the output of gcc -dumpmachine or clang -dumpmachine, or check the depends/ directory for the generated subdirectory name
+HOST_PLATFORM="x86_64-pc-linux-gnu"
+cmake -B build --toolchain=depends/$HOST_PLATFORM/toolchain.cmake
+cmake --build build
+build/src/bitcoin-node -regtest -printtoconsole -debug=ipc
+BITCOIND=$(pwd)/build/src/bitcoin-node build/test/functional/test_runner.py
```
-The configure script will pick up settings and library locations from the depends directory, so there is no need to pass `--enable-multiprocess` as a separate flag when using the depends system (it's controlled by the `MULTIPROCESS=1` option).
+The `cmake` build will pick up settings and library locations from the depends directory, so there is no need to pass `-DWITH_MULTIPROCESS=ON` as a separate flag when using the depends system (it's controlled by the `MULTIPROCESS=1` option).
-Alternately, you can install [Cap'n Proto](https://capnproto.org/) and [libmultiprocess](https://github.com/chaincodelabs/libmultiprocess) packages on your system, and just run `./configure --enable-multiprocess` without using the depends system. The configure script will be able to locate the installed packages via [pkg-config](https://www.freedesktop.org/wiki/Software/pkg-config/). See [Installation](https://github.com/chaincodelabs/libmultiprocess/blob/master/doc/install.md) section of the libmultiprocess readme for install steps. See [build-unix.md](build-unix.md) and [build-osx.md](build-osx.md) for information about installing dependencies in general.
+Alternatively, you can install the [Cap'n Proto](https://capnproto.org/) and [libmultiprocess](https://github.com/chaincodelabs/libmultiprocess) packages on your system and just run `cmake -B build -DWITH_MULTIPROCESS=ON` without using the depends system. The `cmake` build will be able to locate the installed packages via [pkg-config](https://www.freedesktop.org/wiki/Software/pkg-config/). See the [Installation](https://github.com/chaincodelabs/libmultiprocess/blob/master/doc/install.md) section of the libmultiprocess readme for install steps, and see [build-unix.md](build-unix.md) and [build-osx.md](build-osx.md) for information about installing dependencies in general.
## Usage
diff --git a/doc/release-notes-28358.md b/doc/release-notes-28358.md
new file mode 100644
index 0000000000..336aaa59ed
--- /dev/null
+++ b/doc/release-notes-28358.md
@@ -0,0 +1,6 @@
+Updated settings
+------
+
+- The maximum allowed value for the `-dbcache` configuration option has been
+ dropped due to recent UTXO set growth. Note that before this change, large `-dbcache`
+ values were automatically reduced to 16 GiB (1 GiB on 32 bit systems). (#28358)
diff --git a/doc/release-notes-empty-template.md b/doc/release-notes-empty-template.md
index 96e28c3763..1ff55b5ccc 100644
--- a/doc/release-notes-empty-template.md
+++ b/doc/release-notes-empty-template.md
@@ -32,11 +32,18 @@ Upgrading directly from a version of Bitcoin Core that has reached its EOL is
possible, but it might take some time if the data directory needs to be migrated. Old
wallet versions of Bitcoin Core are generally supported.
+Running Bitcoin Core binaries on macOS requires self signing.
+```
+cd /path/to/bitcoin-core/bin
+xattr -d com.apple.quarantine bitcoin-cli bitcoin-qt bitcoin-tx bitcoin-util bitcoin-wallet bitcoind test_bitcoin
+codesign -s - bitcoin-cli bitcoin-qt bitcoin-tx bitcoin-util bitcoin-wallet bitcoind test_bitcoin
+```
+
Compatibility
==============
Bitcoin Core is supported and extensively tested on operating systems
-using the Linux Kernel 3.17+, macOS 11.0+, and Windows 7 and newer. Bitcoin
+using the Linux Kernel 3.17+, macOS 13.0+, and Windows 7 and newer. Bitcoin
Core should also work on most other Unix-like systems but is not as
frequently tested on them. It is not recommended to use Bitcoin Core on
unsupported systems.
diff --git a/doc/release-notes/release-notes-28.0.md b/doc/release-notes/release-notes-28.0.md
new file mode 100644
index 0000000000..d9e6a34d0f
--- /dev/null
+++ b/doc/release-notes/release-notes-28.0.md
@@ -0,0 +1,371 @@
+Bitcoin Core version 28.0 is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-28.0/>
+
+This release includes new features, various bug fixes and performance
+improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes in some cases), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on macOS)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+Upgrading directly from a version of Bitcoin Core that has reached its EOL is
+possible, but it might take some time if the data directory needs to be migrated. Old
+wallet versions of Bitcoin Core are generally supported.
+
+Running Bitcoin Core binaries on macOS requires self signing.
+```
+cd /path/to/bitcoin-28.0/bin
+xattr -d com.apple.quarantine bitcoin-cli bitcoin-qt bitcoin-tx bitcoin-util bitcoin-wallet bitcoind test_bitcoin
+codesign -s - bitcoin-cli bitcoin-qt bitcoin-tx bitcoin-util bitcoin-wallet bitcoind test_bitcoin
+```
+
+Compatibility
+==============
+
+Bitcoin Core is supported and extensively tested on operating systems
+using the Linux Kernel 3.17+, macOS 11.0+, and Windows 7 and newer. Bitcoin
+Core should also work on most other UNIX-like systems but is not as
+frequently tested on them. It is not recommended to use Bitcoin Core on
+unsupported systems.
+
+Notable changes
+===============
+
+Testnet4/BIP94 support
+-----
+
+Support for Testnet4 as specified in [BIP94](https://github.com/bitcoin/bips/blob/master/bip-0094.mediawiki)
+has been added. The network can be selected with the `-testnet4` option and
+the section header is also named `[testnet4]`.
+
+While the intention is to phase out support for Testnet3 in an upcoming
+version, support for it is still available via the known options in this
+release. (#29775)
+
+Windows Data Directory
+----------------------
+
+The default data directory on Windows has been moved from `C:\Users\Username\AppData\Roaming\Bitcoin`
+to `C:\Users\Username\AppData\Local\Bitcoin`. Bitcoin Core will check the existence
+of the old directory first and continue to use that directory for backwards
+compatibility if it is present. (#27064)
+
+JSON-RPC 2.0 Support
+--------------------
+
+The JSON-RPC server now recognizes JSON-RPC 2.0 requests and responds with
+strict adherence to the [specification](https://www.jsonrpc.org/specification).
+See [JSON-RPC-interface.md](https://github.com/bitcoin/bitcoin/blob/master/doc/JSON-RPC-interface.md#json-rpc-11-vs-20) for details. (#27101)
+
+JSON-RPC clients may need to be updated to be compatible with the JSON-RPC server.
+Please open an issue on GitHub if any compatibility issues are found.
+
+libbitcoinconsensus Removal
+---------------------------
+
+The libbitcoin-consensus library was deprecated in 27.0 and is now completely removed. (#29648)
+
+P2P and Network Changes
+-----------------------
+
+- Previously if Bitcoin Core was listening for P2P connections, either using
+ default settings or via `bind=addr:port` it would always also bind to
+ `127.0.0.1:8334` to listen for Tor connections. It was not possible to switch
+ this off, even if the node didn't use Tor. This has been changed and now
+ `bind=addr:port` results in binding on `addr:port` only. The default behavior
+ of binding to `0.0.0.0:8333` and `127.0.0.1:8334` has not been changed.
+
+ If you are using a `bind=...` configuration without `bind=...=onion` and rely
+ on the previous implied behavior to accept incoming Tor connections at
+ `127.0.0.1:8334`, you need to now make this explicit by using
+  `bind=... bind=127.0.0.1:8334=onion`; see the example configuration after this list. (#22729)
+
+- Bitcoin Core will now fail to start up if any of its P2P binds fail, rather
+ than the previous behaviour where it would only abort startup if all P2P
+ binds had failed. (#22729)
+
+- UNIX domain sockets can now be used for proxy connections. Set `-onion` or `-proxy`
+ to the local socket path with the prefix `unix:` (e.g. `-onion=unix:/home/me/torsocket`).
+ (#27375)
+
+- UNIX socket paths are now accepted for `-zmqpubrawblock` and `-zmqpubrawtx` with
+ the format `-zmqpubrawtx=unix:/path/to/file` (#27679)
+
+- Additional "in" and "out" flags have been added to `-whitelist` to control whether
+ permissions apply to inbound connections and/or manual ones (default: inbound only). (#27114)
+
+- Transactions having a feerate that is too low will be opportunistically paired with
+ their child transactions and submitted as a package, thus enabling the node to download
+ 1-parent-1-child packages using the existing transaction relay protocol. Combined with
+ other mempool policies, this change allows limited "package relay" when a parent transaction
+ is below the mempool minimum feerate. Topologically Restricted Until Confirmation (TRUC)
+ parents are additionally allowed to be below the minimum relay feerate (i.e., pay 0 fees).
+ Use the `submitpackage` RPC to submit packages directly to the node. Warning: this P2P
+ feature is limited (unlike the `submitpackage` interface, a child with multiple unconfirmed
+ parents is not supported) and not yet reliable under adversarial conditions. (#28970)
+
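+For illustration only, a sketch of a `bitcoin.conf` that makes the previously
+implied Tor listening socket explicit (the clearnet address is a placeholder):
+
+```
+# Keep listening for clearnet P2P connections
+bind=0.0.0.0:8333
+# Explicitly retain the Tor listening socket that was previously implied
+bind=127.0.0.1:8334=onion
+```
+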
+Mempool Policy Changes
+----------------------
+
+- Transactions with version number set to 3 are now treated as standard on all networks (#29496),
+ subject to opt-in Topologically Restricted Until Confirmation (TRUC) transaction policy as
+ described in [BIP 431](https://github.com/bitcoin/bips/blob/master/bip-0431.mediawiki). The
+ policy includes limits on spending unconfirmed outputs (#28948), eviction of a previous descendant
+ if a more incentive-compatible one is submitted (#29306), and a maximum transaction size of 10,000vB
+ (#29873). These restrictions simplify the assessment of incentive compatibility of accepting or
+ replacing TRUC transactions, thus ensuring any replacements are more profitable for the node and
+ making fee-bumping more reliable.
+
+- Pay To Anchor (P2A) is a new standard witness output type for spending,
+ a newly recognised output template. This allows for key-less anchor
+ outputs, with compact spending conditions for additional efficiencies on
+ top of an equivalent `sh(OP_TRUE)` output, in addition to the txid stability
+ of the spending transaction.
+ N.B. propagation of this output spending on the network will be limited
+ until a sufficient number of nodes on the network adopt this upgrade. (#30352)
+
+- Limited package RBF is now enabled, where the proposed conflicting package would result in
+ a connected component, aka cluster, of size 2 in the mempool. All clusters being conflicted
+ against must be of size 2 or lower. (#28984)
+
+- The default value of the `-mempoolfullrbf` configuration option has been changed from 0 to 1,
+ i.e. `mempoolfullrbf=1`. (#30493)
+
+Updated RPCs
+------------
+
+- The `dumptxoutset` RPC now returns the UTXO set dump in a new and
+ improved format. Correspondingly, the `loadtxoutset` RPC now expects
+ this new format in the dumps it tries to load. Dumps with the old
+ format are no longer supported and need to be recreated using the
+ new format to be usable. (#29612)
+
+- AssumeUTXO mainnet parameters have been added for height 840,000.
+ This means the `loadtxoutset` RPC can now be used on mainnet with
+ the matching UTXO set from that height. (#28553)
+
+- The `warnings` field in `getblockchaininfo`, `getmininginfo` and
+ `getnetworkinfo` now returns all the active node warnings as an array
+ of strings, instead of a single warning. The current behaviour
+ can be temporarily restored by running Bitcoin Core with the configuration
+ option `-deprecatedrpc=warnings`. (#29845)
+
+- Previously when using the `sendrawtransaction` RPC and specifying outputs
+ that are already in the UTXO set, an RPC error code of `-27` with the
+ message "Transaction already in block chain" was returned in response.
+ The error message has been changed to "Transaction outputs already in utxo set"
+ to more accurately describe the source of the issue. (#30212)
+
+- The default mode for the `estimatesmartfee` RPC has been updated from `conservative` to `economical`,
+ which is expected to reduce over-estimation for many users, particularly if Replace-by-Fee is an option.
+ For users that require high confidence in their fee estimates at the cost of potentially over-estimating,
+ the `conservative` mode remains available. (#30275)
+
+- RPC `scantxoutset` now returns 2 new fields in the "unspents" JSON array: `blockhash` and `confirmations`.
+ See the scantxoutset help for details. (#30515)
+
+- RPC `submitpackage` now allows 2 new arguments to be passed: `maxfeerate` and `maxburnamount`. See the
+  submitpackage help for details. (#28950)
+
+Changes to wallet-related RPCs can be found in the Wallet section below.
+
+Updated REST APIs
+-----------------
+- Parameter validation for `/rest/getutxos` has been improved by rejecting
+ truncated or overly large txids and malformed outpoint indices via raising
+ an HTTP_BAD_REQUEST "Parse error". These requests were previously handled
+ silently. (#30482, #30444)
+
+Build System
+------------
+
+- GCC 11.1 or later, or Clang 16.0 or later,
+are now required to compile Bitcoin Core. (#29091, #30263)
+
+- The minimum required glibc to run Bitcoin Core is now
+2.31. This means that RHEL 8 and Ubuntu 18.04 (Bionic)
+are no longer supported. (#29987)
+
+- `--enable-lcov-branch-coverage` has been removed, given
+incompatibilities between lcov version 1 & 2. `LCOV_OPTS`
+should be used to set any options instead. (#30192)
+
+Updated Settings
+----------------
+
+- When running with `-alertnotify`, an alert can now be raised multiple
+times instead of just once. Previously, it was only raised when unknown
+new consensus rules were activated. Its scope has now been increased to
+include all kernel warnings. Specifically, alerts will now also be raised
+when an invalid chain with a large amount of work has been detected.
+Additional warnings may be added in the future. (#30058)
+
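+As a hypothetical illustration (the command and path are placeholders), such a
+notification hook could be configured as:
+
+```
+# %s is replaced by the alert message
+alertnotify=echo "%s" >> /home/alice/bitcoin-alerts.log
+```
+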
+Changes to GUI or wallet related settings can be found in the GUI or Wallet section below.
+
+Wallet
+------
+
+- The wallet now detects when wallet transactions conflict with the mempool. Mempool-conflicting
+ transactions can be seen in the `"mempoolconflicts"` field of `gettransaction`. The inputs
+ of mempool-conflicted transactions can now be respent without manually abandoning the
+ transactions when the parent transaction is dropped from the mempool, which can cause wallet
+ balances to appear higher. (#27307)
+
+- A new `max_tx_weight` option has been added to the RPCs `fundrawtransaction`, `walletcreatefundedpsbt`, and `send`.
+It specifies the maximum transaction weight. If the limit is exceeded during funding, the transaction will not be built.
+The default value is 4,000,000 WU. (#29523)
+
+- A new `createwalletdescriptor` RPC allows users to add new automatically generated
+ descriptors to their wallet. This can be used to upgrade wallets created prior to the
+ introduction of a new standard descriptor, such as taproot. (#29130)
+
+- A new RPC `gethdkeys` lists all of the BIP32 HD keys in use by all of the descriptors in the wallet.
+ These keys can be used in conjunction with `createwalletdescriptor` to create and add single key
+ descriptors to the wallet for a particular key that the wallet already knows. (#29130)
+
+- The `sendall` RPC can now spend unconfirmed change and will include additional fees as necessary
+ for the resulting transaction to bump the unconfirmed transactions' feerates to the specified feerate. (#28979)
+
+- In RPC `bumpfee`, if a `fee_rate` is specified, the feerate is no longer restricted
+ to following the wallet's incremental feerate of 5 sat/vb. The feerate must still be
+ at least the sum of the original fee and the mempool's incremental feerate. (#27969)
+
+GUI Changes
+-----------
+
+- The "Migrate Wallet" menu allows users to migrate any legacy wallet in their wallet
+directory, regardless of the wallets loaded. (gui#824)
+
+- The "Information" window now displays the maximum mempool size along with the
+mempool usage. (gui#825)
+
+Low-level Changes
+=================
+
+Tests
+-----
+
+- The BIP94 timewarp attack mitigation is now active on the `regtest` network. (#30681)
+
+- A new `-testdatadir` option has been added to `test_bitcoin` to allow specifying the
+ location of unit test data directories. (#26564)
+
+Blockstorage
+------------
+
+- Block files are now XOR'd by default with a key stored in the blocksdir.
+Previous releases of Bitcoin Core or previous external software will not be able to read the blocksdir with a non-zero XOR-key.
+Refer to the `-blocksxor` help for more details. (#28052)
+
+Chainstate
+----------
+
+- The chainstate database flushes that occur when blocks are pruned will no longer
+empty the database cache. The cache will remain populated longer, which significantly
+reduces the time for initial block download to complete. (#28280)
+
+Dependencies
+------------
+
+- The dependency on Boost.Process has been replaced with cpp-subprocess, which is contained in source.
+Builders will no longer need Boost.Process to build with external signer support. (#28981)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+- 0xb10c
+- Alfonso Roman Zubeldia
+- Andrew Toth
+- AngusP
+- Anthony Towns
+- Antoine Poinsot
+- Anton A
+- Ava Chow
+- Ayush Singh
+- Ben Westgate
+- Brandon Odiwuor
+- brunoerg
+- bstin
+- Charlie
+- Christopher Bergqvist
+- Cory Fields
+- crazeteam
+- Daniela Brozzoni
+- David Gumberg
+- dergoegge
+- Edil Medeiros
+- Epic Curious
+- Fabian Jahr
+- fanquake
+- furszy
+- glozow
+- Greg Sanders
+- hanmz
+- Hennadii Stepanov
+- Hernan Marino
+- Hodlinator
+- ishaanam
+- ismaelsadeeq
+- Jadi
+- Jon Atack
+- josibake
+- jrakibi
+- kevkevin
+- kevkevinpal
+- Konstantin Akimov
+- laanwj
+- Larry Ruane
+- Lőrinc
+- Luis Schwab
+- Luke Dashjr
+- MarcoFalke
+- marcofleon
+- Marnix
+- Martin Saposnic
+- Martin Zumsande
+- Matt Corallo
+- Matthew Zipkin
+- Matt Whitlock
+- Max Edwards
+- Michael Dietz
+- Murch
+- nanlour
+- pablomartin4btc
+- Peter Todd
+- Pieter Wuille
+- @RandyMcMillan
+- RoboSchmied
+- Roman Zeyde
+- Ryan Ofsky
+- Sebastian Falbesoner
+- Sergi Delgado Segura
+- Sjors Provoost
+- spicyzboss
+- StevenMia
+- stickies-v
+- stratospher
+- Suhas Daftuar
+- sunerok
+- tdb3
+- TheCharlatan
+- umiumi
+- Vasil Dimov
+- virtu
+- willcl-ark
+
+As well as to everyone that helped with translations on
+[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
diff --git a/doc/translation_process.md b/doc/translation_process.md
index e5ed7f4e0a..f4f0add54f 100644
--- a/doc/translation_process.md
+++ b/doc/translation_process.md
@@ -18,8 +18,8 @@ We use automated scripts to help extract translations in both Qt, and non-Qt sou
To automatically regenerate the `bitcoin_en.ts` file, run the following commands:
```sh
-cd src/
-make translate
+cmake -B build --preset dev-mode -DWITH_BDB=ON -DBUILD_GUI=ON
+cmake --build build --target translate
```
**Example Qt translation**
diff --git a/doc/translation_strings_policy.md b/doc/translation_strings_policy.md
index 1931302dda..4aa4969209 100644
--- a/doc/translation_strings_policy.md
+++ b/doc/translation_strings_policy.md
@@ -1,10 +1,8 @@
-Translation Strings Policy
-===========================
+# Translation Strings Policy
This document provides guidelines for internationalization of the Bitcoin Core software.
-How to translate?
-------------------
+## How to translate?
To mark a message as translatable
@@ -14,8 +12,7 @@ To mark a message as translatable
No internationalization is used for e.g. developer scripts outside `src`.
-Strings to be translated
--------------------------
+## Strings to be translated
On a high level, these strings are to be translated:
@@ -27,8 +24,7 @@ Do not translate technical or extremely rare errors.
Anything else that appears to the user in the GUI is to be translated. This includes labels, menu items, button texts, tooltips and window titles.
This includes messages passed to the GUI through the UI interface through `InitMessage`, `ThreadSafeMessageBox` or `ShowProgress`.
-General recommendations
-------------------------
+## General recommendations
### Avoid unnecessary translation strings
@@ -97,4 +93,4 @@ The second example reduces the number of pluralized words that translators have
During a string freeze (often before a major release), no translation strings are to be added, modified or removed.
-This can be checked by executing `make translate` in the `src` directory, then verifying that `bitcoin_en.ts` remains unchanged.
+This can be checked by building the `translate` target with `cmake` ([instructions](translation_process.md)), then verifying that `bitcoin_en.ts` remains unchanged.
diff --git a/doc/zmq.md b/doc/zmq.md
index 07c340fb99..0a74d6eef9 100644
--- a/doc/zmq.md
+++ b/doc/zmq.md
@@ -46,11 +46,10 @@ operation.
## Enabling
-By default, the ZeroMQ feature is automatically compiled in if the
-necessary prerequisites are found. To disable, use --disable-zmq
-during the *configure* step of building bitcoind:
+By default, the ZeroMQ feature is not automatically compiled.
+To enable, use `-DWITH_ZMQ=ON` when configuring the build system:
- $ ./configure --disable-zmq (other options)
+ $ cmake -B build -DWITH_ZMQ=ON
To actually enable operation, one must set the appropriate options on
the command line or in the configuration file.
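For instance, a minimal sketch using placeholder endpoints:

    $ bitcoind -zmqpubrawblock=tcp://127.0.0.1:28332 -zmqpubrawtx=tcp://127.0.0.1:28333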
@@ -163,7 +162,7 @@ Note that for `*block` topics, when the block chain tip changes,
a reorganisation may occur and just the tip will be notified.
It is up to the subscriber to retrieve the chain from the last known
block to the new tip. Also note that no notification will occur if the tip
-was in the active chain--as would be the case after calling invalidateblock RPC.
+was in the active chain, as would be the case after calling the `invalidateblock` RPC.
In contrast, the `sequence` topic publishes all block connections and
disconnections.
diff --git a/libbitcoinkernel.pc.in b/libbitcoinkernel.pc.in
new file mode 100644
index 0000000000..a2cb7d3692
--- /dev/null
+++ b/libbitcoinkernel.pc.in
@@ -0,0 +1,11 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=${prefix}
+libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
+includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
+
+Name: @PACKAGE_NAME@ kernel library
+Description: Experimental library for the Bitcoin Core validation engine.
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -lbitcoinkernel
+Libs.private: -L${libdir} @LIBS_PRIVATE@
+Cflags: -I${includedir}
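
Once this file is installed, a downstream build could query the experimental kernel
library through pkg-config; a rough sketch, assuming the chosen install prefix is on
`PKG_CONFIG_PATH`:

```sh
# Print the compile and link flags exported by libbitcoinkernel.pc
pkg-config --cflags --libs libbitcoinkernel
```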
diff --git a/share/qt/Info.plist.in b/share/qt/Info.plist.in
index b4e6f6a150..5ff736152f 100644
--- a/share/qt/Info.plist.in
+++ b/share/qt/Info.plist.in
@@ -3,7 +3,7 @@
<plist version="0.9">
<dict>
<key>LSMinimumSystemVersion</key>
- <string>11</string>
+ <string>13</string>
<key>LSArchitecturePriority</key>
<array>
diff --git a/share/qt/extract_strings_qt.py b/share/qt/extract_strings_qt.py
index 39acec8942..4297143023 100755
--- a/share/qt/extract_strings_qt.py
+++ b/share/qt/extract_strings_qt.py
@@ -56,7 +56,7 @@ files = sys.argv[1:]
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
- print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
+ print('Please install package "gettext" and re-run \'cmake -B build\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','--from-code=utf-8','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
diff --git a/src/.clang-tidy b/src/.clang-tidy
index 3569dd04b1..1cf270833a 100644
--- a/src/.clang-tidy
+++ b/src/.clang-tidy
@@ -14,6 +14,7 @@ modernize-use-emplace,
modernize-use-equals-default,
modernize-use-noexcept,
modernize-use-nullptr,
+modernize-use-starts-ends-with,
performance-*,
-performance-avoid-endl,
-performance-enum-size,
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 18b2b5745d..4a86465bba 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -5,26 +5,18 @@
include(GNUInstallDirs)
include(AddWindowsResources)
-configure_file(${PROJECT_SOURCE_DIR}/cmake/bitcoin-config.h.in config/bitcoin-config.h @ONLY)
+configure_file(${PROJECT_SOURCE_DIR}/cmake/bitcoin-build-config.h.in bitcoin-build-config.h USE_SOURCE_PERMISSIONS @ONLY)
include_directories(${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
-# TODO: After the transition from Autotools to CMake, the obj/ subdirectory
-# could be dropped as its only purpose was to separate a generated header
-# from source files.
add_custom_target(generate_build_info
- BYPRODUCTS ${PROJECT_BINARY_DIR}/src/obj/build.h
- COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/src/obj
- COMMAND ${CMAKE_COMMAND} -DBUILD_INFO_HEADER_PATH=${PROJECT_BINARY_DIR}/src/obj/build.h -DSOURCE_DIR=${PROJECT_SOURCE_DIR} -P ${PROJECT_SOURCE_DIR}/cmake/script/GenerateBuildInfo.cmake
- COMMENT "Generating obj/build.h"
+ BYPRODUCTS ${PROJECT_BINARY_DIR}/src/bitcoin-build-info.h
+ COMMAND ${CMAKE_COMMAND} -DBUILD_INFO_HEADER_PATH=${PROJECT_BINARY_DIR}/src/bitcoin-build-info.h -DSOURCE_DIR=${PROJECT_SOURCE_DIR} -P ${PROJECT_SOURCE_DIR}/cmake/script/GenerateBuildInfo.cmake
+ COMMENT "Generating bitcoin-build-info.h"
VERBATIM
)
add_library(bitcoin_clientversion OBJECT EXCLUDE_FROM_ALL
clientversion.cpp
)
-target_compile_definitions(bitcoin_clientversion
- PRIVATE
- HAVE_BUILD_INFO
-)
target_link_libraries(bitcoin_clientversion
PRIVATE
core_interface
@@ -49,13 +41,21 @@ set(SECP256K1_ENABLE_MODULE_RECOVERY ON CACHE BOOL "" FORCE)
set(SECP256K1_BUILD_BENCHMARK OFF CACHE BOOL "" FORCE)
set(SECP256K1_BUILD_TESTS ${BUILD_TESTS} CACHE BOOL "" FORCE)
set(SECP256K1_BUILD_EXHAUSTIVE_TESTS ${BUILD_TESTS} CACHE BOOL "" FORCE)
+if(NOT BUILD_TESTS)
+  # Always skip the ctime tests if no other tests are being built.
+ # Otherwise, they are built if Valgrind is available. See SECP256K1_VALGRIND.
+ set(SECP256K1_BUILD_CTIME_TESTS ${BUILD_TESTS} CACHE BOOL "" FORCE)
+endif()
set(SECP256K1_BUILD_EXAMPLES OFF CACHE BOOL "" FORCE)
include(GetTargetInterface)
# -fsanitize and related flags apply to both C++ and C,
-# so we can pass them down to libsecp256k1 as CFLAGS.
+# so we can pass them down to libsecp256k1 as CFLAGS and LDFLAGS.
get_target_interface(core_sanitizer_cxx_flags "" sanitize_interface COMPILE_OPTIONS)
-set(SECP256K1_LATE_CFLAGS ${core_sanitizer_cxx_flags} CACHE STRING "" FORCE)
+set(SECP256K1_APPEND_CFLAGS ${core_sanitizer_cxx_flags} CACHE STRING "" FORCE)
unset(core_sanitizer_cxx_flags)
+get_target_interface(core_sanitizer_linker_flags "" sanitize_interface LINK_OPTIONS)
+set(SECP256K1_APPEND_LDFLAGS ${core_sanitizer_linker_flags} CACHE STRING "" FORCE)
+unset(core_sanitizer_linker_flags)
# We want to build libsecp256k1 with the most tested RelWithDebInfo configuration.
enable_language(C)
foreach(config IN LISTS CMAKE_BUILD_TYPE CMAKE_CONFIGURATION_TYPES)
@@ -75,7 +75,6 @@ add_subdirectory(secp256k1)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
string(APPEND CMAKE_C_COMPILE_OBJECT " ${APPEND_CPPFLAGS} ${APPEND_CFLAGS}")
-# Stable, backwards-compatible consensus functionality.
add_library(bitcoin_consensus STATIC EXCLUDE_FROM_ALL
arith_uint256.cpp
consensus/merkle.cpp
@@ -115,6 +114,8 @@ add_library(bitcoin_common STATIC EXCLUDE_FROM_ALL
common/init.cpp
common/interfaces.cpp
common/messages.cpp
+ common/netif.cpp
+ common/pcp.cpp
common/run_command.cpp
common/settings.cpp
common/signmessage.cpp
@@ -293,7 +294,6 @@ target_link_libraries(bitcoin_node
Boost::headers
$<TARGET_NAME_IF_EXISTS:libevent::libevent>
$<TARGET_NAME_IF_EXISTS:libevent::pthreads>
- $<TARGET_NAME_IF_EXISTS:NATPMP::NATPMP>
$<TARGET_NAME_IF_EXISTS:MiniUPnPc::MiniUPnPc>
$<TARGET_NAME_IF_EXISTS:bitcoin_zmq>
$<TARGET_NAME_IF_EXISTS:USDT::headers>
@@ -326,6 +326,22 @@ if(WITH_MULTIPROCESS)
$<TARGET_NAME_IF_EXISTS:bitcoin_wallet>
)
list(APPEND installable_targets bitcoin-node)
+
+ if(BUILD_TESTS)
+ # bitcoin_ipc_test library target is defined here in src/CMakeLists.txt
+ # instead of src/test/CMakeLists.txt so capnp files in src/test/ are able to
+ # reference capnp files in src/ipc/capnp/ by relative path. The Cap'n Proto
+ # compiler only allows importing by relative path when the importing and
+ # imported files are underneath the same compilation source prefix, so the
+ # source prefix must be src/, not src/test/
+ add_library(bitcoin_ipc_test STATIC EXCLUDE_FROM_ALL
+ test/ipc_test.cpp
+ )
+ target_capnp_sources(bitcoin_ipc_test ${PROJECT_SOURCE_DIR}
+ test/ipc_test.capnp
+ )
+ add_dependencies(bitcoin_ipc_test bitcoin_ipc_headers)
+ endif()
endif()
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index e9838d7222..4637906441 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <addrdb.h>
@@ -73,7 +73,7 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
remove(pathTmp);
return false;
}
- if (!FileCommit(fileout.Get())) {
+ if (!fileout.Commit()) {
fileout.fclose();
remove(pathTmp);
LogError("%s: Failed to flush file %s\n", __func__, fs::PathToString(pathTmp));
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 673d25efcf..358d4fc0a8 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <addrman.h>
#include <addrman_impl.h>
@@ -188,7 +188,7 @@ void AddrManImpl::Serialize(Stream& s_) const
int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
s << nUBuckets;
- std::unordered_map<int, int> mapUnkIds;
+ std::unordered_map<nid_type, int> mapUnkIds;
int nIds = 0;
for (const auto& entry : mapInfo) {
mapUnkIds[entry.first] = nIds;
@@ -398,7 +398,7 @@ void AddrManImpl::Unserialize(Stream& s_)
}
}
-AddrInfo* AddrManImpl::Find(const CService& addr, int* pnId)
+AddrInfo* AddrManImpl::Find(const CService& addr, nid_type* pnId)
{
AssertLockHeld(cs);
@@ -413,11 +413,11 @@ AddrInfo* AddrManImpl::Find(const CService& addr, int* pnId)
return nullptr;
}
-AddrInfo* AddrManImpl::Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId)
+AddrInfo* AddrManImpl::Create(const CAddress& addr, const CNetAddr& addrSource, nid_type* pnId)
{
AssertLockHeld(cs);
- int nId = nIdCount++;
+ nid_type nId = nIdCount++;
mapInfo[nId] = AddrInfo(addr, addrSource);
mapAddr[addr] = nId;
mapInfo[nId].nRandomPos = vRandom.size();
@@ -438,8 +438,8 @@ void AddrManImpl::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) const
assert(nRndPos1 < vRandom.size() && nRndPos2 < vRandom.size());
- int nId1 = vRandom[nRndPos1];
- int nId2 = vRandom[nRndPos2];
+ nid_type nId1 = vRandom[nRndPos1];
+ nid_type nId2 = vRandom[nRndPos2];
const auto it_1{mapInfo.find(nId1)};
const auto it_2{mapInfo.find(nId2)};
@@ -453,7 +453,7 @@ void AddrManImpl::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) const
vRandom[nRndPos2] = nId1;
}
-void AddrManImpl::Delete(int nId)
+void AddrManImpl::Delete(nid_type nId)
{
AssertLockHeld(cs);
@@ -476,7 +476,7 @@ void AddrManImpl::ClearNew(int nUBucket, int nUBucketPos)
// if there is an entry in the specified bucket, delete it.
if (vvNew[nUBucket][nUBucketPos] != -1) {
- int nIdDelete = vvNew[nUBucket][nUBucketPos];
+ nid_type nIdDelete = vvNew[nUBucket][nUBucketPos];
AddrInfo& infoDelete = mapInfo[nIdDelete];
assert(infoDelete.nRefCount > 0);
infoDelete.nRefCount--;
@@ -488,7 +488,7 @@ void AddrManImpl::ClearNew(int nUBucket, int nUBucketPos)
}
}
-void AddrManImpl::MakeTried(AddrInfo& info, int nId)
+void AddrManImpl::MakeTried(AddrInfo& info, nid_type nId)
{
AssertLockHeld(cs);
@@ -515,7 +515,7 @@ void AddrManImpl::MakeTried(AddrInfo& info, int nId)
// first make space to add it (the existing tried entry there is moved to new, deleting whatever is there).
if (vvTried[nKBucket][nKBucketPos] != -1) {
// find an item to evict
- int nIdEvict = vvTried[nKBucket][nKBucketPos];
+ nid_type nIdEvict = vvTried[nKBucket][nKBucketPos];
assert(mapInfo.count(nIdEvict) == 1);
AddrInfo& infoOld = mapInfo[nIdEvict];
@@ -554,7 +554,7 @@ bool AddrManImpl::AddSingle(const CAddress& addr, const CNetAddr& source, std::c
if (!addr.IsRoutable())
return false;
- int nId;
+ nid_type nId;
AddrInfo* pinfo = Find(addr, &nId);
// Do not set a penalty for a source's self-announcement
@@ -627,7 +627,7 @@ bool AddrManImpl::Good_(const CService& addr, bool test_before_evict, NodeSecond
{
AssertLockHeld(cs);
- int nId;
+ nid_type nId;
m_last_good = time;
@@ -710,7 +710,7 @@ void AddrManImpl::Attempt_(const CService& addr, bool fCountFailure, NodeSeconds
}
}
-std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::optional<Network> network) const
+std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, const std::unordered_set<Network>& networks) const
{
AssertLockHeld(cs);
@@ -719,13 +719,18 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::option
size_t new_count = nNew;
size_t tried_count = nTried;
- if (network.has_value()) {
- auto it = m_network_counts.find(*network);
- if (it == m_network_counts.end()) return {};
-
- auto counts = it->second;
- new_count = counts.n_new;
- tried_count = counts.n_tried;
+ if (!networks.empty()) {
+ new_count = 0;
+ tried_count = 0;
+ for (auto& network : networks) {
+ auto it = m_network_counts.find(network);
+ if (it == m_network_counts.end()) {
+ continue;
+ }
+ auto counts = it->second;
+ new_count += counts.n_new;
+ tried_count += counts.n_tried;
+ }
}
if (new_only && new_count == 0) return {};
@@ -753,14 +758,15 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::option
// Iterate over the positions of that bucket, starting at the initial one,
// and looping around.
- int i, position, node_id;
+ int i, position;
+ nid_type node_id;
for (i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) {
position = (initial_position + i) % ADDRMAN_BUCKET_SIZE;
node_id = GetEntry(search_tried, bucket, position);
if (node_id != -1) {
- if (network.has_value()) {
+ if (!networks.empty()) {
const auto it{mapInfo.find(node_id)};
- if (Assume(it != mapInfo.end()) && it->second.GetNetwork() == *network) break;
+ if (Assume(it != mapInfo.end()) && networks.contains(it->second.GetNetwork())) break;
} else {
break;
}
@@ -786,7 +792,7 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::Select_(bool new_only, std::option
}
}
-int AddrManImpl::GetEntry(bool use_tried, size_t bucket, size_t position) const
+nid_type AddrManImpl::GetEntry(bool use_tried, size_t bucket, size_t position) const
{
AssertLockHeld(cs);
@@ -849,7 +855,7 @@ std::vector<std::pair<AddrInfo, AddressPosition>> AddrManImpl::GetEntries_(bool
std::vector<std::pair<AddrInfo, AddressPosition>> infos;
for (int bucket = 0; bucket < bucket_count; ++bucket) {
for (int position = 0; position < ADDRMAN_BUCKET_SIZE; ++position) {
- int id = GetEntry(from_tried, bucket, position);
+ nid_type id = GetEntry(from_tried, bucket, position);
if (id >= 0) {
AddrInfo info = mapInfo.at(id);
AddressPosition location = AddressPosition(
@@ -904,8 +910,8 @@ void AddrManImpl::ResolveCollisions_()
{
AssertLockHeld(cs);
- for (std::set<int>::iterator it = m_tried_collisions.begin(); it != m_tried_collisions.end();) {
- int id_new = *it;
+ for (std::set<nid_type>::iterator it = m_tried_collisions.begin(); it != m_tried_collisions.end();) {
+ nid_type id_new = *it;
bool erase_collision = false;
@@ -923,7 +929,7 @@ void AddrManImpl::ResolveCollisions_()
} else if (vvTried[tried_bucket][tried_bucket_pos] != -1) { // The position in the tried bucket is not empty
// Get the to-be-evicted address that is being tested
- int id_old = vvTried[tried_bucket][tried_bucket_pos];
+ nid_type id_old = vvTried[tried_bucket][tried_bucket_pos];
AddrInfo& info_old = mapInfo[id_old];
const auto current_time{Now<NodeSeconds>()};
@@ -969,11 +975,11 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::SelectTriedCollision_()
if (m_tried_collisions.size() == 0) return {};
- std::set<int>::iterator it = m_tried_collisions.begin();
+ std::set<nid_type>::iterator it = m_tried_collisions.begin();
// Selects a random element from m_tried_collisions
std::advance(it, insecure_rand.randrange(m_tried_collisions.size()));
- int id_new = *it;
+ nid_type id_new = *it;
// If id_new not found in mapInfo remove it from m_tried_collisions
if (mapInfo.count(id_new) != 1) {
@@ -1058,15 +1064,15 @@ int AddrManImpl::CheckAddrman() const
LOG_TIME_MILLIS_WITH_CATEGORY_MSG_ONCE(
strprintf("new %i, tried %i, total %u", nNew, nTried, vRandom.size()), BCLog::ADDRMAN);
- std::unordered_set<int> setTried;
- std::unordered_map<int, int> mapNew;
+ std::unordered_set<nid_type> setTried;
+ std::unordered_map<nid_type, int> mapNew;
std::unordered_map<Network, NewTriedCount> local_counts;
if (vRandom.size() != (size_t)(nTried + nNew))
return -7;
for (const auto& entry : mapInfo) {
- int n = entry.first;
+ nid_type n = entry.first;
const AddrInfo& info = entry.second;
if (info.fInTried) {
if (!TicksSinceEpoch<std::chrono::seconds>(info.m_last_success)) {
@@ -1208,11 +1214,11 @@ std::pair<CAddress, NodeSeconds> AddrManImpl::SelectTriedCollision()
return ret;
}
-std::pair<CAddress, NodeSeconds> AddrManImpl::Select(bool new_only, std::optional<Network> network) const
+std::pair<CAddress, NodeSeconds> AddrManImpl::Select(bool new_only, const std::unordered_set<Network>& networks) const
{
LOCK(cs);
Check();
- auto addrRet = Select_(new_only, network);
+ auto addrRet = Select_(new_only, networks);
Check();
return addrRet;
}
@@ -1315,9 +1321,9 @@ std::pair<CAddress, NodeSeconds> AddrMan::SelectTriedCollision()
return m_impl->SelectTriedCollision();
}
-std::pair<CAddress, NodeSeconds> AddrMan::Select(bool new_only, std::optional<Network> network) const
+std::pair<CAddress, NodeSeconds> AddrMan::Select(bool new_only, const std::unordered_set<Network>& networks) const
{
- return m_impl->Select(new_only, network);
+ return m_impl->Select(new_only, networks);
}
std::vector<CAddress> AddrMan::GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered) const
diff --git a/src/addrman.h b/src/addrman.h
index be2ee8c2cb..ba6e13bf97 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -15,6 +15,7 @@
#include <cstdint>
#include <memory>
#include <optional>
+#include <unordered_set>
#include <utility>
#include <vector>
@@ -154,12 +155,12 @@ public:
* an address from the new table or an empty pair. Passing `false` will return an
* empty pair or an address from either the new or tried table (it does not
* guarantee a tried entry).
- * @param[in] network Select only addresses of this network (nullopt = all). Passing a network may
+ * @param[in] networks Select only addresses of these networks (empty = all). Passing networks may
* slow down the search.
* @return CAddress The record for the selected peer.
* seconds The last time we attempted to connect to that peer.
*/
- std::pair<CAddress, NodeSeconds> Select(bool new_only = false, std::optional<Network> network = std::nullopt) const;
+ std::pair<CAddress, NodeSeconds> Select(bool new_only = false, const std::unordered_set<Network>& networks = {}) const;
/**
* Return all or many randomly selected addresses, optionally by network.
diff --git a/src/addrman_impl.h b/src/addrman_impl.h
index dd7f7b318f..a0390b7154 100644
--- a/src/addrman_impl.h
+++ b/src/addrman_impl.h
@@ -33,6 +33,13 @@ static constexpr int32_t ADDRMAN_BUCKET_SIZE_LOG2{6};
static constexpr int ADDRMAN_BUCKET_SIZE{1 << ADDRMAN_BUCKET_SIZE_LOG2};
/**
+ * User-defined type for the internally used nIds
+ * This used to be int, making it feasible for attackers to cause an overflow,
+ * see https://bitcoincore.org/en/2024/07/31/disclose-addrman-int-overflow/
+ */
+using nid_type = int64_t;
+
+/**
* Extended statistics about a CAddress
*/
class AddrInfo : public CAddress
@@ -125,7 +132,7 @@ public:
std::pair<CAddress, NodeSeconds> SelectTriedCollision() EXCLUSIVE_LOCKS_REQUIRED(!cs);
- std::pair<CAddress, NodeSeconds> Select(bool new_only, std::optional<Network> network) const
+ std::pair<CAddress, NodeSeconds> Select(bool new_only, const std::unordered_set<Network>& networks) const
EXCLUSIVE_LOCKS_REQUIRED(!cs);
std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered = true) const
@@ -179,36 +186,36 @@ private:
static constexpr uint8_t INCOMPATIBILITY_BASE = 32;
//! last used nId
- int nIdCount GUARDED_BY(cs){0};
+ nid_type nIdCount GUARDED_BY(cs){0};
//! table with information about all nIds
- std::unordered_map<int, AddrInfo> mapInfo GUARDED_BY(cs);
+ std::unordered_map<nid_type, AddrInfo> mapInfo GUARDED_BY(cs);
//! find an nId based on its network address and port.
- std::unordered_map<CService, int, CServiceHash> mapAddr GUARDED_BY(cs);
+ std::unordered_map<CService, nid_type, CServiceHash> mapAddr GUARDED_BY(cs);
//! randomly-ordered vector of all nIds
//! This is mutable because it is unobservable outside the class, so any
//! changes to it (even in const methods) are also unobservable.
- mutable std::vector<int> vRandom GUARDED_BY(cs);
+ mutable std::vector<nid_type> vRandom GUARDED_BY(cs);
// number of "tried" entries
int nTried GUARDED_BY(cs){0};
//! list of "tried" buckets
- int vvTried[ADDRMAN_TRIED_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
+ nid_type vvTried[ADDRMAN_TRIED_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
//! number of (unique) "new" entries
int nNew GUARDED_BY(cs){0};
//! list of "new" buckets
- int vvNew[ADDRMAN_NEW_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
+ nid_type vvNew[ADDRMAN_NEW_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
//! last time Good was called (memory only). Initially set to 1 so that "never" is strictly worse.
NodeSeconds m_last_good GUARDED_BY(cs){1s};
//! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discipline used to resolve these collisions.
- std::set<int> m_tried_collisions;
+ std::set<nid_type> m_tried_collisions;
/** Perform consistency checks every m_consistency_check_ratio operations (if non-zero). */
const int32_t m_consistency_check_ratio;
@@ -225,22 +232,22 @@ private:
std::unordered_map<Network, NewTriedCount> m_network_counts GUARDED_BY(cs);
//! Find an entry.
- AddrInfo* Find(const CService& addr, int* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ AddrInfo* Find(const CService& addr, nid_type* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Create a new entry and add it to the internal data structures mapInfo, mapAddr and vRandom.
- AddrInfo* Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ AddrInfo* Create(const CAddress& addr, const CNetAddr& addrSource, nid_type* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Swap two elements in vRandom.
void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) const EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Delete an entry. It must not be in tried, and have refcount 0.
- void Delete(int nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void Delete(nid_type nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Clear a position in a "new" table. This is the only place where entries are actually deleted.
void ClearNew(int nUBucket, int nUBucketPos) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Move an entry from the "new" table(s) to the "tried" table
- void MakeTried(AddrInfo& info, int nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void MakeTried(AddrInfo& info, nid_type nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Attempt to add a single address to addrman's new table.
* @see AddrMan::Add() for parameters. */
@@ -252,13 +259,13 @@ private:
void Attempt_(const CService& addr, bool fCountFailure, NodeSeconds time) EXCLUSIVE_LOCKS_REQUIRED(cs);
- std::pair<CAddress, NodeSeconds> Select_(bool new_only, std::optional<Network> network) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ std::pair<CAddress, NodeSeconds> Select_(bool new_only, const std::unordered_set<Network>& networks) const EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Helper to generalize looking up an addrman entry from either table.
*
- * @return int The nid of the entry. If the addrman position is empty or not found, returns -1.
+ * @return nid_type The nid of the entry. If the addrman position is empty or not found, returns -1.
* */
- int GetEntry(bool use_tried, size_t bucket, size_t position) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ nid_type GetEntry(bool use_tried, size_t bucket, size_t position) const EXCLUSIVE_LOCKS_REQUIRED(cs);
std::vector<CAddress> GetAddr_(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered = true) const EXCLUSIVE_LOCKS_REQUIRED(cs);
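As a reading aid for the int -> int64_t widening above: a minimal sketch of the pattern, assuming (as the comments in this hunk suggest) that ids are handed out monotonically and never reused, so a 32-bit signed counter could eventually overflow. The names below are simplified stand-ins for illustration, not the actual AddrManImpl members.

#include <cstdint>
#include <unordered_map>
#include <vector>

using nid_type = int64_t; // one alias keeps every id-bearing container the same width

struct Entry { /* address metadata elided */ };

nid_type id_count{0};                       // monotonically increasing, never reused
std::unordered_map<nid_type, Entry> info;   // id -> entry
std::vector<nid_type> random_order;         // randomly-ordered ids

nid_type Create(const Entry& e)
{
    nid_type id = id_count++;               // at 64 bits, exhaustion is not a practical concern
    info.emplace(id, e);
    random_order.push_back(id);
    return id;
}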
diff --git a/src/bench/CMakeLists.txt b/src/bench/CMakeLists.txt
index 61a1126904..8a52980e07 100644
--- a/src/bench/CMakeLists.txt
+++ b/src/bench/CMakeLists.txt
@@ -3,12 +3,11 @@
# file COPYING or https://opensource.org/license/mit/.
include(GenerateHeaders)
-generate_header_from_raw(data/block413567.raw)
+generate_header_from_raw(data/block413567.raw benchmark::data)
add_executable(bench_bitcoin
bench_bitcoin.cpp
bench.cpp
- data.cpp
nanobench.cpp
${CMAKE_CURRENT_BINARY_DIR}/data/block413567.raw.h
# Benchmarks:
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
index c0ef7b2279..ceef6c29ab 100644
--- a/src/bench/addrman.cpp
+++ b/src/bench/addrman.cpp
@@ -133,7 +133,7 @@ static void AddrManSelectByNetwork(benchmark::Bench& bench)
FillAddrMan(addrman);
bench.run([&] {
- (void)addrman.Select(/*new_only=*/false, NET_I2P);
+ (void)addrman.Select(/*new_only=*/false, {NET_I2P});
});
}
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index 580265fc52..9558d64f19 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <bench/data.h>
+#include <bench/data/block413567.raw.h>
#include <chainparams.h>
#include <common/args.h>
#include <consensus/validation.h>
diff --git a/src/bench/cluster_linearize.cpp b/src/bench/cluster_linearize.cpp
index de85741909..7d011975dd 100644
--- a/src/bench/cluster_linearize.cpp
+++ b/src/bench/cluster_linearize.cpp
@@ -4,7 +4,9 @@
#include <bench/bench.h>
#include <cluster_linearize.h>
+#include <test/util/cluster_linearize.h>
#include <util/bitset.h>
+#include <util/strencodings.h>
#include <algorithm>
#include <cassert>
@@ -12,6 +14,7 @@
#include <vector>
using namespace cluster_linearize;
+using namespace util::hex_literals;
namespace {
@@ -25,7 +28,7 @@ DepGraph<SetType> MakeLinearGraph(ClusterIndex ntx)
DepGraph<SetType> depgraph;
for (ClusterIndex i = 0; i < ntx; ++i) {
depgraph.AddTransaction({-int32_t(i), 1});
- if (i > 0) depgraph.AddDependency(i - 1, i);
+ if (i > 0) depgraph.AddDependencies(SetType::Singleton(i - 1), i);
}
return depgraph;
}
@@ -40,13 +43,13 @@ DepGraph<SetType> MakeWideGraph(ClusterIndex ntx)
DepGraph<SetType> depgraph;
for (ClusterIndex i = 0; i < ntx; ++i) {
depgraph.AddTransaction({int32_t(i) + 1, 1});
- if (i > 0) depgraph.AddDependency(0, i);
+ if (i > 0) depgraph.AddDependencies(SetType::Singleton(0), i);
}
return depgraph;
}
-// Construct a difficult graph. These need at least sqrt(2^(n-1)) iterations in the best
-// known algorithms (purely empirically determined).
+// Construct a difficult graph. These need at least sqrt(2^(n-1)) iterations in the implemented
+// algorithm (purely empirically determined).
template<typename SetType>
DepGraph<SetType> MakeHardGraph(ClusterIndex ntx)
{
@@ -67,19 +70,19 @@ DepGraph<SetType> MakeHardGraph(ClusterIndex ntx)
depgraph.AddTransaction({1, 2});
} else if (i == 1) {
depgraph.AddTransaction({14, 2});
- depgraph.AddDependency(0, 1);
+ depgraph.AddDependencies(SetType::Singleton(0), 1);
} else if (i == 2) {
depgraph.AddTransaction({6, 1});
- depgraph.AddDependency(2, 1);
+ depgraph.AddDependencies(SetType::Singleton(2), 1);
} else if (i == 3) {
depgraph.AddTransaction({5, 1});
- depgraph.AddDependency(2, 3);
+ depgraph.AddDependencies(SetType::Singleton(2), 3);
} else if ((i & 1) == 0) {
depgraph.AddTransaction({7, 1});
- depgraph.AddDependency(i - 1, i);
+ depgraph.AddDependencies(SetType::Singleton(i - 1), i);
} else {
depgraph.AddTransaction({5, 1});
- depgraph.AddDependency(i, 4);
+ depgraph.AddDependencies(SetType::Singleton(i), 4);
}
} else {
// Even cluster size.
@@ -95,33 +98,34 @@ DepGraph<SetType> MakeHardGraph(ClusterIndex ntx)
depgraph.AddTransaction({1, 1});
} else if (i == 1) {
depgraph.AddTransaction({3, 1});
- depgraph.AddDependency(0, 1);
+ depgraph.AddDependencies(SetType::Singleton(0), 1);
} else if (i == 2) {
depgraph.AddTransaction({1, 1});
- depgraph.AddDependency(0, 2);
+ depgraph.AddDependencies(SetType::Singleton(0), 2);
} else if (i & 1) {
depgraph.AddTransaction({4, 1});
- depgraph.AddDependency(i - 1, i);
+ depgraph.AddDependencies(SetType::Singleton(i - 1), i);
} else {
depgraph.AddTransaction({0, 1});
- depgraph.AddDependency(i, 3);
+ depgraph.AddDependencies(SetType::Singleton(i), 3);
}
}
}
return depgraph;
}
-/** Benchmark that does search-based candidate finding with 10000 iterations.
+/** Benchmark that does search-based candidate finding with a specified number of iterations.
*
- * Its goal is measuring how much time every additional search iteration in linearization costs.
+ * Its goal is measuring how much time every additional search iteration in linearization costs,
+ * by running with a low and a high count, subtracting the results, and dividing by the
+ * difference in iteration count.
*/
template<typename SetType>
-void BenchLinearizePerIterWorstCase(ClusterIndex ntx, benchmark::Bench& bench)
+void BenchLinearizeWorstCase(ClusterIndex ntx, benchmark::Bench& bench, uint64_t iter_limit)
{
const auto depgraph = MakeHardGraph<SetType>(ntx);
- const auto iter_limit = std::min<uint64_t>(10000, uint64_t{1} << (ntx / 2 - 1));
uint64_t rng_seed = 0;
- bench.batch(iter_limit).unit("iters").run([&] {
+ bench.run([&] {
SearchCandidateFinder finder(depgraph, rng_seed++);
auto [candidate, iters_performed] = finder.FindCandidateSet(iter_limit, {});
assert(iters_performed == iter_limit);
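Purely as illustrative arithmetic for the sqrt(2^(n-1)) bound mentioned above (not part of the benchmark suite): the bound grows quickly with cluster size, which is presumably why the fixed iteration limits used below (20 up to 15000) never complete the search early and the assert on iters_performed holds.

#include <cmath>
#include <cstdio>

int main()
{
    // Lower bound on search iterations for the "hard" graphs at a few cluster sizes.
    for (int n : {16, 32, 48, 64}) {
        std::printf("n=%2d -> ~%.3g iterations\n", n, std::sqrt(std::pow(2.0, n - 1)));
    }
    // Prints roughly: n=16 -> 181, n=32 -> 4.63e+04, n=48 -> 1.19e+07, n=64 -> 3.04e+09.
}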
@@ -132,11 +136,12 @@ void BenchLinearizePerIterWorstCase(ClusterIndex ntx, benchmark::Bench& bench)
*
* Its goal is measuring how much time linearization may take without any search iterations.
*
- * If P is the resulting time of BenchLinearizePerIterWorstCase, and N is the resulting time of
- * BenchLinearizeNoItersWorstCase*, then an invocation of Linearize with max_iterations=m should
- * take no more than roughly N+m*P time. This may however be an overestimate, as the worst cases
- * do not coincide (the ones that are worst for linearization without any search happen to be ones
- * that do not need many search iterations).
+ * If P is the benchmarked per-iteration cost (obtained by running BenchLinearizeWorstCase for a
+ * high and a low iteration count, subtracting them, and dividing by the difference in count), and
+ * N is the resulting time of BenchLinearizeNoItersWorstCase*, then an invocation of Linearize with
+ * max_iterations=m should take no more than roughly N+m*P time. This may however be an
+ * overestimate, as the worst cases do not coincide (the ones that are worst for linearization
+ * without any search happen to be ones that do not need many search iterations).
*
* This benchmark exercises a worst case for AncestorCandidateFinder, but for which improvement is
* cheap.
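A worked example of the estimate described in the comments above, with made-up timings; the helper is purely illustrative and not part of the benchmark suite.

#include <cstdint>
#include <cstdio>

// P = (T_high - T_low) / (I_high - I_low), from two runs of the same worst-case graph.
double PerIterCostNs(double t_low_ms, double t_high_ms, uint64_t i_low, uint64_t i_high)
{
    return (t_high_ms - t_low_ms) * 1e6 / double(i_high - i_low);
}

int main()
{
    // Hypothetical numbers: 0.60 ms at 5000 iterations, 1.60 ms at 15000 iterations.
    double p_ns = PerIterCostNs(0.60, 1.60, 5000, 15000); // ~100 ns per search iteration
    std::printf("P ~= %.0f ns/iter\n", p_ns);
    // Combined with N from a NoIters benchmark, Linearize with max_iterations=m should then
    // take no more than roughly N + m*P, per the comment above.
}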
@@ -190,7 +195,7 @@ void BenchMergeLinearizationsWorstCase(ClusterIndex ntx, benchmark::Bench& bench
DepGraph<SetType> depgraph;
for (ClusterIndex i = 0; i < ntx; ++i) {
depgraph.AddTransaction({i, 1});
- if (i) depgraph.AddDependency(0, i);
+ if (i) depgraph.AddDependencies(SetType::Singleton(0), i);
}
std::vector<ClusterIndex> lin1;
std::vector<ClusterIndex> lin2;
@@ -205,14 +210,57 @@ void BenchMergeLinearizationsWorstCase(ClusterIndex ntx, benchmark::Bench& bench
});
}
+template<size_t N>
+void BenchLinearizeOptimally(benchmark::Bench& bench, const std::array<uint8_t, N>& serialized)
+{
+ // Determine how many transactions the serialized cluster has.
+ ClusterIndex num_tx{0};
+ {
+ SpanReader reader{serialized};
+ DepGraph<BitSet<128>> depgraph;
+ reader >> Using<DepGraphFormatter>(depgraph);
+ num_tx = depgraph.TxCount();
+ assert(num_tx < 128);
+ }
+
+ SpanReader reader{serialized};
+ auto runner_fn = [&]<typename SetType>() noexcept {
+ DepGraph<SetType> depgraph;
+ reader >> Using<DepGraphFormatter>(depgraph);
+ uint64_t rng_seed = 0;
+ bench.run([&] {
+ auto res = Linearize(depgraph, /*max_iterations=*/10000000, rng_seed++);
+ assert(res.second);
+ });
+ };
+
+ if (num_tx <= 32) {
+ runner_fn.template operator()<BitSet<32>>();
+ } else if (num_tx <= 64) {
+ runner_fn.template operator()<BitSet<64>>();
+ } else if (num_tx <= 96) {
+ runner_fn.template operator()<BitSet<96>>();
+ } else if (num_tx <= 128) {
+ runner_fn.template operator()<BitSet<128>>();
+ } else {
+ assert(false);
+ }
+}
+
} // namespace
-static void LinearizePerIter16TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<16>>(16, bench); }
-static void LinearizePerIter32TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<32>>(32, bench); }
-static void LinearizePerIter48TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<48>>(48, bench); }
-static void LinearizePerIter64TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<64>>(64, bench); }
-static void LinearizePerIter75TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<75>>(75, bench); }
-static void LinearizePerIter99TxWorstCase(benchmark::Bench& bench) { BenchLinearizePerIterWorstCase<BitSet<99>>(99, bench); }
+static void Linearize16TxWorstCase20Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<16>>(16, bench, 20); }
+static void Linearize16TxWorstCase120Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<16>>(16, bench, 120); }
+static void Linearize32TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<32>>(32, bench, 5000); }
+static void Linearize32TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<32>>(32, bench, 15000); }
+static void Linearize48TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<48>>(48, bench, 5000); }
+static void Linearize48TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<48>>(48, bench, 15000); }
+static void Linearize64TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<64>>(64, bench, 5000); }
+static void Linearize64TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<64>>(64, bench, 15000); }
+static void Linearize75TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<75>>(75, bench, 5000); }
+static void Linearize75TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<75>>(75, bench, 15000); }
+static void Linearize99TxWorstCase5000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<99>>(99, bench, 5000); }
+static void Linearize99TxWorstCase15000Iters(benchmark::Bench& bench) { BenchLinearizeWorstCase<BitSet<99>>(99, bench, 15000); }
static void LinearizeNoIters16TxWorstCaseAnc(benchmark::Bench& bench) { BenchLinearizeNoItersWorstCaseAnc<BitSet<16>>(16, bench); }
static void LinearizeNoIters32TxWorstCaseAnc(benchmark::Bench& bench) { BenchLinearizeNoItersWorstCaseAnc<BitSet<32>>(32, bench); }
@@ -242,12 +290,84 @@ static void MergeLinearizations64TxWorstCase(benchmark::Bench& bench) { BenchMer
static void MergeLinearizations75TxWorstCase(benchmark::Bench& bench) { BenchMergeLinearizationsWorstCase<BitSet<75>>(75, bench); }
static void MergeLinearizations99TxWorstCase(benchmark::Bench& bench) { BenchMergeLinearizationsWorstCase<BitSet<99>>(99, bench); }
-BENCHMARK(LinearizePerIter16TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter32TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter48TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter64TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter75TxWorstCase, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LinearizePerIter99TxWorstCase, benchmark::PriorityLevel::HIGH);
+// The following example clusters were constructed by replaying historical mempool activity, and
+// selecting for ones that take many iterations (after the introduction of some but not all
+// linearization algorithm optimizations).
+
+/* 2023-05-05T23:12:21Z 71, 521780, 543141,*/
+static constexpr auto BENCH_EXAMPLE_00 = "801081a5360092239efc6201810982ab58029b6b98c86803800eed7804800ecb7e058f2f878778068030d43407853e81902a08962a81d176098010b6620a8010b2280b8010da3a0c9f069da9580d800db11e0e9d719ad37a0f967897ed5210990e99fc0e11812c81982012804685823e0f0a893982b6040a10804682c146110a6e80db5c120a8010819806130a8079858f0c140a8054829a120c12803483a1760c116f81843c0d11718189000e11800d81ac2c0f11800d81e50e10117181c77c1111822e87f2601012815983d17211127180f2121212811584a21e1312800e80d1781412813c83e81815126f80ef5016126f80ff6c16126f80f66017126e80fd541812800d81942a1912800e80dd781a12800d81f96c1b12805282e7581b127180fd721c1271a918230b805fc11a220d8118a15a2d036f80e5002011817684d8241e346f80e1181c37805082fc04260024800d81f8621734803382b354270b12805182ca2e162f800e80d52e0d32803dc360201b850e818c400b318c49808a5a290210805181d65823142a800d81a34e0850800e81fb3c0851886994fc0a280b00082c805482d208032e28805e83ba380059801081cd4a0159811884f770002e0015e17280e49024300a0000000000000031803dcb48014200"_hex_u8;
+/* 2023-12-06T09:30:01Z 81, 141675, 647053,*/
+static constexpr auto BENCH_EXAMPLE_01 = "b348f1fc4000f365818a9e2c01b44cf7ca0002b004f0b02003b33ef8ae3004b334f9e87005800d81c85e06b368fae26007b05ef2e14208be1a8093a50409b15cf5ee500a802c80a1420b802dea440c802ce50a0d802cdc320e802cd7220f802dd72210805380f74a118174f370126e96b32812127182c4701312817389d26414128035848c221512800e82bf3816126f81e4341712801082b228181280518af57418128040859a0019127182d0401a12803e858b641b127182c4421c126f82b3481d12811486b6301e12821d89e7281f126e8a8b421f127182d6642012806284c12021126e81d34822126e86a76222126e86d8102212805187b6542312800d82fc002412803d848e0e2512801082d27a26126e8589642612800e83a9602712800e83bd0028126e81ef1a29116e858d7228126f82db5e2912801083843c2a127181c93c2b126e85d0162b127181c5622c126e84f8262c12800f8392202d12800e82b66c2e126e81d0082f12803282d50430126e84f9003012805f84be6c3112846e88df0e2b12804080d44c340a8b31898808350a800ed760350b801083a1182b517182817e2a51800e82b6582951803583cb52420030806284cb6c204f7181d300204f82688ce0303e001d800e82bb200f488010808a182822a3289cd63041000a6fcd100a408a7caaa7024800002f803584e0741e27288f3386dd783b001000802683f27e004b8c44bcd0763f0000000000000000000100000e00"_hex_u8;
+/* 2023-04-04T00:26:50Z 90, 99930, 529375,*/
+static constexpr auto BENCH_EXAMPLE_02 = "815b80b61e00800da63001cd378da70e028010991a03800e9d3e0480109708058010991a068010973a07da738fa72408de7491831009b35b88f0080a9d4485de180b71974e0c71974e0d80108e500eb27988a75a0f719632108061a56c11801087761280108a1413807893441480538c1415a606828806168010893e1780548c40188e4b80bb2c196eab3e1718805ed60e18188051c97a19188010cf781a1871b11e1b1871c5281c1880508080581d186e80b13c1e188035cf421f18805fe0482018804caa661f198035a9001f156e80cb701d1871a2281e1871ad281f18817380a16020186f98642118805ee04821198010b6702219800ea12623196eb67024198035808b0025196fa65c26198054ba1c2719807680bf7c28198053cd782919803d80b80429198051db5a2a198040d3742b19976584bb1c28196efc1c281971b21a29198052bc762a1971a2502b196eb73c2c19976381ab0c2a18806290543409862081c3423b00336fbc70224d80109e7c1c52805ebd5c1942800eb57016468034ba423405158118da28350416927480f4743000159f6a81c9462e00188051ec5e380e00800e9e420775800d9e26007c906c82f754251d0025870480f12c14280023800d9e26027e9e1385ed08102900001a804fac7a018001719856028001800da87e0180039b1a868b60064102246e9f42018005800da87e028005850d81d600026d862381a2200e0008230015831480a5480342000524803eeb32006e873582a4700a0100351300"_hex_u8;
+/* 2023-05-08T15:51:59Z 87, 76869, 505222,*/
+static constexpr auto BENCH_EXAMPLE_03 = "c040b9e15a00b10eac842601805f85931802c104bae17403ae50aaa336049d76a9bf7005c55bbeab6606ae2aa9c72c07805e81992e08af7dab817a096e80a7e4520909803e92bd780a097185c76c0b096e98e7380b09850bb9953c0c09803389f6260d096f859d620e09803f88d3000f0971829c6e1009837690f6481109806285931811097181f56814076ea09b74120980408eb73213096f87853214096f86e2701509803f8c860016098a6fe6c3721709814f92a204180980628a8a441909803285df681a0980348498661b096e8290781c096e978e081c097187da1a1d097186c05c1e097185893c1f09805f8ad9002009800d84e74e21097183a67a22097182e23423097184b53a23096ea393062309840faddd46240980618eb732250980548bee6a2609807986883c2709718298402809815388b6582909805384ec742a097181b9142b096e97b5262b096e85e14e2c0980518abb5c2d09805489e75a2e09803187e3382f097180eb1c34046f87c34a2f098309a5c54430097186911831098054899c083209801083bc1033097081e02a3409805f848f0c35096e80d4343a057180c37040006f80a22438097180a0503f03816f8381444003803f80ef003f05800580a4283f066ef72845016efb91663e09923d808d8216470041803584837c46012f9247dc86684501268267a09610450222862184db68440712803585ea40440113835d97887805800b8723c7a40a4b00022f81529ae2143c0c1f80548b8f381b311980408e955c055e802589dc10037e801083b54602658010848130006700"_hex_u8;
+/* 2023-05-01T19:32:10Z 35, 55747, 504128,*/
+static constexpr auto BENCH_EXAMPLE_04 = "801af95c00801af72801801af95c02873e85f2180202873e85f2180202873e85f21802028018fb2802068018fb2803068018fb2804068018fb2805068018fb2806068018fb2807068018fb2808068018fb2809068018fb280a068018fb280a058018fb280b058018fb280c058018fb280d058018fb280e058018fb280f058018fb2810058018fb2811058018fb2812058018fb2813058018fb2814058018fb2815058018fb2815048018fb2816048018fb2817048018fb2818048018fb2819048018fb281a048018fb281b04810d80d9481f00000100"_hex_u8;
+/* 2023-02-27T17:06:38Z 60, 55680, 502749,*/
+static constexpr auto BENCH_EXAMPLE_05 = "b5108ab56600b26d89f85601b07383b01602b22683c96003b34a83d82e04b12f83b53a05b20e83c75a066e80840a06068040be0007066fb10608066fb2120906800eba320a06842b80b05a0a066eff420b067199300b068124c3140c0680618085180d066faa1c0e068010b4440f068051af541006800da1781106857881946812066eee1613068052b31014068324808d361506806180885c150671b03216066ef11017068052b63218066ef3521806803f80865419066e93441a068035a13e1b0680628085181c06806ec4481d068117e72c1e06719c721f068077c42420068159808d1821066eef0c21058010b90022056f9908230571993024058010b00a25058010b00a260580608087402705803fc10027068032b42828068051b6322906800db11e212a8324808d361933803ff400192f826381a7141a2f8032ac08152a800db54c044e8323808d3630010002018158d84000042d821cea12002807853580d462002d01891181d022002e00"_hex_u8;
+/* 2023-04-20T22:25:49Z 99, 49100, 578622,*/
+static constexpr auto BENCH_EXAMPLE_06 = "bf3c87c14c008010955a01b21d85e07002800d946c036e8e3404b77f86c26605b33c85f55e06bd06879852078010970a08bd4b87cf00098123a7720ab2158687680b8054d4440b0a8062fa4c0c0a71ac400d0a80628081540e0a8010a2580f0a8054b676100a8032b85c110a6e9a40120a6e809012130a817f80c31e140a8175808674150a719d46160a8172d86415098033c1481609800da4181709800ada2e1809803dc85219098034b4041a096ef5501b098052d67c1c098051d3281d09800ebc4a1e098175808c641f098061c55020098078c85021096e8081141f0b6faf1e200b8061da68210b8062f000220b800ebc20230b8035d058240b8053de32250b8050b610250b6fad32260b803dc276270b803d80a610280b6ef812290b8052b6322a0b800eb57e2b0b8052bd062c0b719e522d0b71a3762e0b8010bb1e2f0b80109a78310a80109962320a8051a60c330a6f9f3e320b6e808b24330b719e40340b8117cc50350b803d80971a360b8051b930370b6f9e0a380b719b10390b8052a6003a0b6e808c76390a7195603a0a6f935c3b0a8054a31a3c0a803ce30c3b0b803fa3003c0b800dbe2a3d0b8f3480a84244058005851a44069d1bf824400b83098f284507719c723d4f6f9c1c3449719c722f4f6eb23c304f8061c5502e528061da682b4e8118bb724e022a8054b35028476e941c1d51815be02c4f01148557808e3a4f070e8104af464e001180329d364e010d805f9f6a421b9c3387aa744c0d4d71ac400b800881748098444710338173809b780b80008054d444292c12821dc040550403078b4682b4664517003f00"_hex_u8;
+/* 2023-06-05T19:56:12Z 52, 44896, 540514,*/
+static constexpr auto BENCH_EXAMPLE_07 = "b317998a4000b40098d53e01b45b99814802b7289b940003b3699a9d1204b6619a807a05814682cb78050571d854060571d8540705800e808d7a0805803480c06a09056e8189280a056ffd060b05800d80ea7a0c05803c80b80c0c03803e80d86e0d036ed2280e03811581804a0f036fd34e1003805380eb6811036e81f60e12038010ec101204805f80e83a13048033809534140471e00a15048010f95816046e81fa301704805180a74c1705800d808f1018056fd55c1905800e8091481a056e80a76e1b05805f80e2741c0571809b021c05826382c8401d0571df201e05800e809d2c1f05850083e87c1f05811580af68200571f20a21056ff9042205803e80df1e23056e81956c24056e9f542604805180e83829000e800e8080621325803380b0402a020d6ef8100e2c8c4889a96a2c000f803580ce4c2c000b6e9f54062a803480c96406260500"_hex_u8;
+/* 2023-12-05T23:48:44Z 69, 44283, 586734,*/
+static constexpr auto BENCH_EXAMPLE_08 = "83728ce80000b90befca1001806083b24002b40de6da3203b545e9c35c04b34beede3005b068e8883006d41c80b1e14c07b337e7841208b26beadb2e096e83892e090980518487380a096e82815c0a096e81ce3c0b097181db200c097181d4020d09810084ed600e096e96b0100f0971819a0210086e93da2e0f09803583ee5e1009803583c66c1109800d82bb6e1209800d81d56a1309803c82e622140971819f521509803d84a55c15057181d6161605806283ac5217056e949c5a18056e89e8641806815889e23419067181de321a066e8af2641a076e82a70a1b07803583f2081c076f81e76e1d076e81d33e1e07800d83b8761e086e82a5541f087181de302008805f84ad0021086e81c74022086e81bd3e23086e9288182408806184b3102409803283816025096e91ed662609830a88e70827096e81d14a27097181ce6028096e8cf03829097181883832016f81835c3103806181e0103203804180b8103204863584fe183304800de66434046e9e4c34056e81d6742f429213c0eb602e3d6483b06c283a6e81d73c263d6e82f9581831805485ab360e37805080c62609398b3189880838010603916db1f3583a03000110873199f8623c000000011100"_hex_u8;
+/* 2023-04-14T19:36:52Z 77, 20418, 501117,*/
+static constexpr auto BENCH_EXAMPLE_09 = "bf2989d00400815bca5c01af1e86f97602800d9d6c03800d8a3404b47988866e05b36287f92e0680109f68078010991a08805ecf1208076e80933e09078062d01c0a078054b6760b078053b6760c076f9c1c0d078054b6760e0771af260f0771b17e10078032f57011078035d56812078054e1581307886b83dc301407817480d13013068005a6001406803d80821a15066ef3201606800ea2181706800da628180671ab1219068054db0c1a06719b001b06815b80a11c1c068050b9301d066fac2a1e068033ab481f06719b1020068035ab721e07803dc2761f0771ae3c20078040f60e210771ce282207800ea4322307882a81a66024078035ad4625076efe7e26078162808e1827078118bb7228076eac7428088010bf58290871a04c2a0871bc722b086fa8382c08803d80a0142d088035d6282e088051c30c2f086efc623008800d9f6231086f986432088117bb7237028010a63034068010c84e2740800ea64c2237832c80933e1f3b830880c454390208813c80955c3905068032c73611348010a03c093c837a808a101b278050ac34093a8051ac34291b8f3b8187401d28881a82cb3a3a0a37977b86d20843000028996686a7083f030f8078d3761b27106e995a08499070839b5a1131000b00"_hex_u8;
+/* 2023-11-07T17:59:35Z 48, 4792, 498995,*/
+static constexpr auto BENCH_EXAMPLE_10 = "875f89aa1000b51ec09d7201c55cc7a72e02a11aa1fb3203b233a7f95204800ef56205b33ea9d13006803e80b26e07d90ec9dd4008b45eabbe6c09806080ca000a815984e8680a0a6f80925e0a0a803f80e1660c09937c94b7420d086e82f5640a086e80997e0b086f808d320c08800580a5640d086f8089100e08804080c9060f088115819a1c10086e82961a0f0a805f81bc0a100a6ff826110a6ef53e120a807584c60c110a6e818f32120a803c81c246130a805481d508140a8159838410150a7180a55c160a6f80821c170a6fe6101c066fe6101d06805080f854190a6e81b27c1a0a8155819c701e06805180ae0c21046e8b9a222501805180f53422001680f26880f8a62a220116803580da582007058153838e6e21000c800d80a712033a807681ae1c23000308834a82d36023020205815981e03a051a08001700"_hex_u8;
+/* 2023-11-16T10:47:08Z 77, 473962, 486863,*/
+static constexpr auto BENCH_EXAMPLE_11 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801980c06007801980c06008801980c06009801980c0600a801980c0600b801980c0600c801980c0600d801980c0600e801980c0600f801980c060108019d12c11800f80b1601111800f80b1601111801080b1601111800f80b160100e800f80b160100f801980c060110f800f80b160140d801180b1601111801180b160100d801180b160120c801180b1600f10801180b1600f11801980c0601011800f80b160140e800f80b160110f801980c060170a801180b1601210801980c060140f800f80b1601311801980c0602005801180b1601f07800f80b1601b0c800fca7c1611812081f9601638812081f9601637812081fb001636801080b160142f801980c0600e2a801080b1600f2a801180b1600d25801980c0600e25800f80b1600d27801980c0600e27801980c0600d27801180b1600e26812080b1500c27812081f960201025812081f960200f27812081fc201d101c812081fc201d101d812081fc201d0f1f812081fc201d0f20812081f9601b1016800f80b1600a35800f80b1600a36800f80b1600e32801080b160122f812081f960280040812081fc20121d1b812081f960112713812081f960160d37812081fc20140d2b812081f960130d2d812081fc20130c2c812081fb001b0157812081fb001a0245812081fc20140030812081fc20092747812081fb000b152500"_hex_u8;
+/* 2023-10-06T20:44:09Z 40, 341438, 341438,*/
+static constexpr auto BENCH_EXAMPLE_12 = "80318f4c0080318f4c0180318f4c0280318f4c0380318f4c0480318f4c0580318f4c0680318f4c078033a57807078033a57807078033a57807078033a57807078033a57807078033a57807078033a57807078033a578070780318f4c0e0180318f4c0d0380318f4c0c0580318f4c0b078033a57803128033a57803128033a57803128033a578031280318f4c0412810b9c28140300810c9c281303028033a57802188033a57802188033a5780218810c9c280b01108033a578001c810c9c2807050f8033a578001b810c98040700158033a578001c810c98040301158033a5780019806ca1240101118033a578001300"_hex_u8;
+/* 2023-11-15T21:40:46Z 96, 23608, 138286,*/
+static constexpr auto BENCH_EXAMPLE_13 = "8060829f4000b157bab07a01b27cc2b16802b22fbce54603826480a95804803da81a05bc7bcac93806800de55207800daf0608805bc71809805bc7180a800d9d4a0b805bbc700c8152d7180d805bb9380e850a8886260f800d80d33410bf38d3d55011b41dc4eb6012bd70d2ce2e138d3596af7812137180cd501313805e81f7281413718092001513803d81f90016136e8b916c1713801081861a17106e80cd2a18106f80cc3c19106e80cf161911800d80fe781b107180d87c1c106e80fb081d10803e8286701d11800d81c4781f10804082a6002010801081912e21107180ff0021116e81da4a2310850b8b864023116e89db3224116e84ff7e2610897c95993427106f80bb1a240b803581c272250b8032828c10260b6e80d42a270b804082b35a280b800d80fe3e290b805cc0282312821d8697022b0b6e8add562c0b805281c8063007811883f1082313800d80fe3e24137180c9142513800d8380102613803382c00e2713805eb32228136e8494542913800e8186742913806082b74c2a1380528285782b13800d818f7a2c136e84a5562d1380508286702e136f80a46e3e04803f8191364102805481ad4c3d076e809a5a3e077180fe4032136e838b7233138c4790cf384106853584ab624206805b80932a4801806280966c48028168ef04400b7181bd524903806282db5c375b9316acbf703a599c68c5a454385c6e81d63e364a6f80ff64334e817485a6784f023171819536234e800d81826e1e498053829a12420018834c87cb14291d2e840e8bc94c1d2825800d81b7220368811783fe0e271f1f811783e758380f001ecd55809edf6e56000000003a815984ba76008010d54d80aebb4e2c22000000000000002c807682f150007a00"_hex_u8;
+/* 2023-12-06T09:18:20Z 93, 68130, 122830,*/
+static constexpr auto BENCH_EXAMPLE_14 = "b26beadb2e00800d80ca0a01d41c80b1e14c02b068e8883003800d81af1604b34beede30056e80b14006b151f5d46c07b93e8085b02608b30cf98b1009b14ef6b3040ab176f6ab480bb7078082b8640c800d81c6460d802c80a8080e802c80a8080f802c80a14210802ce50a11802cd722127181ce6012126e81d14a13126e9b8b00141282428dd42c15128051828408150e6e81bd3e150f805f84ad00160f7181de30170f6e81c740180f800d83b876190f6e82a5541a0f6e81d33e1a106e82a70a1b106f81e76e1c10803583f2081d106e82d9401e106e96e4441f107181de321e12815889e2341f127182d60c20126e979d4e21126e8282262410800d82972c25106f838a5822126f82842a23127182d24a2412803e84bc2a2512800d83c81a26126e84f8142712805085a22c27126e889e6a2812801083aa50281280348598102912801082d5522a126e85865c2b127182c7602b1282468c82042c126e84972c2d12805485d93a2d12801083c7322e12815386e1582f126e84fb0c30126f82eb6c3011813a85b47a3111803f869f5c3211805181ed30370d6e84bf0a3411804180e1383809815883aa183a08815a8392203e05807681f140380c6e9e4c4005805485ab363255805183856030406e82f9582c45805185c1001b4f82418df1001a4e803283c50e430026800d83a6201a4b836886be3044010b8b318988084c0101803183a6120776800d828a1e087682338ae050301c33873199f8624d010032813986bc663c1034800d83a5220a6f800d82be52048000805183e364084907800d83cc4a018005815987b41e1832000017884b9dce72035035803284c11e00800885769d9538192f0000000002001000"_hex_u8;
+/* 2023-12-14T02:02:29Z 55, 247754, 247754,*/
+static constexpr auto BENCH_EXAMPLE_15 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801980c06007801980c06008801980c06009801980c0600a801980c0600b801980c0600c801980c0600d801980c0600e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600e0e801180b1600d07801180b1600f06801180b1600c0a801180b1600f08801180b1600c0c801180b1600c0d801180b1600c0e801180b160100b801180b1601309812081fc200e2a812081fc200e29812081fc200e28812081fc200e0e18812081fc200e0e17801980c060042e812081fc200e0d07812081fc200e0d08812081fc200e0c0a812081fc200e0d0a801980c060081e812081fc200f0c0c812081fc200f0c0d812081fc200f0c0e801180b160083a801180b1600426801980c0600b20801980c0600a22812081fc200f0b30801180b160022b801180b160022b812081fc20062422812081fc2006220b812081fc200c0a1e812081fc2012041a00"_hex_u8;
+/* 2023-12-14T15:17:20Z 76, 102600, 103935,*/
+static constexpr auto BENCH_EXAMPLE_16 = "801980c06000801980c06001801980c06002801980c06003801980c06004801180b1600404801180b1600404801180b1600404801980c0600504801980c0600802801980c0600803801180b1600704801980c0600804801280b1600804812081fc200810812081fc20080f812081fc20080e801180b160080c800f80b160080d801980c060090d801180b160090e801980c0600a0e812181fc200a0c801180b1600a0d812181fd400a0c801980c0600a1c801980c0600916801180b1600719801180b160061b801980c0600d15801980c0600717812081fc200718801980c0600716801180b160072d801180b1600722801180b1600525801980c060091b801980c060071e801080b160071f801280b160061d812081fc20063a812181f960160815801280b1600525801980c0600625801180b1600626801980c0600726801980c0600536801180b160032b801980c060042b801280b160032d801980c060033e801180b160043e812181fc20100c27801080b160042f801980c0600342801180b1600442812081fc20150d25800f80b1600245812081fd40120619812081fc20040243812081fc20120c2c812081fd40120a1d812181fb00100623812081fc20030347812081fc20072126801980c0600236812081fc20040d2b812081fc20120328801980c0600237801180b1600337812081fc20052230801180b1600239812081fc2008242c812081fd4005112d812081fb00070b32812081f96011034700"_hex_u8;
+/* 2023-12-15T07:12:29Z 98, 112693, 112730,*/
+static constexpr auto BENCH_EXAMPLE_17 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801180b1600606801180b1600606801180b1600606801180b1600606801280b1600606801180b1600606801180b1600606801980c0600d00801980c0600b03801980c0600b04801980c0600f01812081fc200a16812081fc200a15812081fc200a14812081fc200a13812081fd400a12812181fc200a11812181fc200a0f801180b1600a10801180b1600a10801980c0600a10801180b1600b10801180b1600b10801980c0600621801980c0600915801980c060041b801180b160051b801980c0600f12801980c0600f13801980c0600d15801980c0600c17801980c060072e800f80b160082e812181fc200d150e801980c0600922801180b1600923801980c0600823801180b1600623801180b1600a20801180b1600e1c801180b1600b20801180b1600b21801980c0600a3e800f80b1600b3e801980c0600931801180b1600a31812181fc20140325801180b1600a30801180b160054c801180b160043b801980c0600336812181fc200253812081f960090944812081fc2007003c801980c0600339801180b1600433801980c0600453801980c0600340801980c060033d801080b160043d812081f960070854801980c060045a801180b160055a801180b1600545801980c0600643801980c0600641801280b1600739801180b1600562812081fc20121f27812181fc20210137812181fc2016112f801980c0600259801980c0600156812181fc20053a31801180b160025c801180b1600257801980c0600357812081fc200d2d1e812181fc20102444812181fc20035a801180b160035b801980c0600751812181fc2007392a812181fc20025f801980c060045e801180b1600350812081fc20070f6f801180b1600263812181fc201b1322812181fc2011283b812081fc2002442100"_hex_u8;
+/* 2023-12-16T02:25:33Z 99, 112399, 112399,*/
+static constexpr auto BENCH_EXAMPLE_18 = "801980c06000801980c06001801980c06002801980c06003801980c06004801980c06005801980c06006801980c06007801180b16008801180b16009801180b1600a801180b1600a0a801180b1600a0a801180b1600a0a801180b1600a0a801980c0600d06801180b1600b09801980c0601005801180b1600c0a801980c0600d0a801980c0601106801180b1600e0a801980c0601207801980c0601207801180b160100a812081e668100a812081e668100a812081e668100a801980c0601407801980c0601606812081fc201226812081fc201225812081fc201224812081fc201223801180b1600e21801980c0600b1e801180b1600c1e801180b1601316801980c060091b801980c0601312801980c0600a1c801180b160190e801180b1601315801180b1600e1b801180b1601713801180b1600f1c801980c0600d34801980c0600d30801980c060102e801980c060122d801980c0600b2a801980c0600b2a801980c0600b2b801180b1601122801180b1600e26801180b1601025801180b1600f26812081fc20280032812081fc20270034812081fc20250034801180b1600d4b801980c0600d457a809a000d46801980c0601044801980c0600e46801180b1600f43801180b160123f801180b160123e801180b1601130801180b1601131801180b1601131812081fc20230a36801980c0600a5a801180b1600a5b801980c0600a5b801180b1600b5b801980c0600b5a801180b1600f57801180b1600d3f801980c0600669801980c0600568801980c0600466801180b1600945801180b1600649801180b1600945812081fc2018234b812081fc20142534812081fc20142532812081fc20142530801180b160074d801180b1600a4b801180b1600a4a812081fc20221662812081fc200c0472812081fc20072e42812081fc20062c23812081fc20100572812081fc200f036c812081fc2001345100"_hex_u8;
+/* 2023-03-31T19:24:02Z 78, 90393, 152832,*/
+static constexpr auto BENCH_EXAMPLE_19 = "800dd042008028b13c018028b13c028028b13c038029b13c048029b13c058029b13c0680299948078029b13c088029b13c09802899480a802899480b8028b13c0c80299e700d802899480e802999480f8029b13c10802999481180299948128028b13c138029b13c1480289e701580289948168028b13c1780289948188028994819802899481a802999481b802999481c802899481d802999481e8028b13c1f8029b13c20802999482180299948228028b13c2380298c242480289948258029b13c2680288c242780298c242880299e70298f5a80ea762a824780aa00292a82038090402429813fcf00152a8203809040142a813ff700112982038090402d002d813ff70028002c8203809040270024824780aa00270025820380904025002882038090401e022a82038090401d042782038090401c01298203809040190029813ff700170028813ff700140128807b9258120128841280f6402c01002e82038090402b00062b820380904027000031813ff70011192d82038090401d000129851981a9403a0000003b82038090400c182e813ff7000b0f2982038090401314141b807b925805192b84568190001121000334807bdd400149824780aa00001f2a813ff700003d0b8203809040050d1915807bdd4001498728828f400b010004050501000a050c851981a9400104050b061a0400"_hex_u8;
+
+static void LinearizeOptimallyExample00(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_00); }
+static void LinearizeOptimallyExample01(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_01); }
+static void LinearizeOptimallyExample02(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_02); }
+static void LinearizeOptimallyExample03(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_03); }
+static void LinearizeOptimallyExample04(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_04); }
+static void LinearizeOptimallyExample05(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_05); }
+static void LinearizeOptimallyExample06(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_06); }
+static void LinearizeOptimallyExample07(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_07); }
+static void LinearizeOptimallyExample08(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_08); }
+static void LinearizeOptimallyExample09(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_09); }
+static void LinearizeOptimallyExample10(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_10); }
+static void LinearizeOptimallyExample11(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_11); }
+static void LinearizeOptimallyExample12(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_12); }
+static void LinearizeOptimallyExample13(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_13); }
+static void LinearizeOptimallyExample14(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_14); }
+static void LinearizeOptimallyExample15(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_15); }
+static void LinearizeOptimallyExample16(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_16); }
+static void LinearizeOptimallyExample17(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_17); }
+static void LinearizeOptimallyExample18(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_18); }
+static void LinearizeOptimallyExample19(benchmark::Bench& bench) { BenchLinearizeOptimally(bench, BENCH_EXAMPLE_19); }
+
+BENCHMARK(Linearize16TxWorstCase20Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize16TxWorstCase120Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize32TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize32TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize48TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize48TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize64TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize64TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize75TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize75TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize99TxWorstCase5000Iters, benchmark::PriorityLevel::HIGH);
+BENCHMARK(Linearize99TxWorstCase15000Iters, benchmark::PriorityLevel::HIGH);
BENCHMARK(LinearizeNoIters16TxWorstCaseAnc, benchmark::PriorityLevel::HIGH);
BENCHMARK(LinearizeNoIters32TxWorstCaseAnc, benchmark::PriorityLevel::HIGH);
@@ -276,3 +396,24 @@ BENCHMARK(MergeLinearizations48TxWorstCase, benchmark::PriorityLevel::HIGH);
BENCHMARK(MergeLinearizations64TxWorstCase, benchmark::PriorityLevel::HIGH);
BENCHMARK(MergeLinearizations75TxWorstCase, benchmark::PriorityLevel::HIGH);
BENCHMARK(MergeLinearizations99TxWorstCase, benchmark::PriorityLevel::HIGH);
+
+BENCHMARK(LinearizeOptimallyExample00, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample01, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample02, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample03, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample04, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample05, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample06, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample07, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample08, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample09, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample10, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample11, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample12, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample13, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample14, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample15, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample16, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample17, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample18, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LinearizeOptimallyExample19, benchmark::PriorityLevel::HIGH);
diff --git a/src/bench/data.cpp b/src/bench/data.cpp
deleted file mode 100644
index 8c5bb13f75..0000000000
--- a/src/bench/data.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2019-2021 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <bench/data.h>
-
-#include <iterator>
-
-namespace benchmark {
-namespace data {
-
-#include <bench/data/block413567.raw.h>
-const std::vector<uint8_t> block413567{std::begin(block413567_raw), std::end(block413567_raw)};
-
-} // namespace data
-} // namespace benchmark
diff --git a/src/bench/data.h b/src/bench/data.h
deleted file mode 100644
index 5f13d766ea..0000000000
--- a/src/bench/data.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_BENCH_DATA_H
-#define BITCOIN_BENCH_DATA_H
-
-#include <cstdint>
-#include <vector>
-
-namespace benchmark {
-namespace data {
-
-extern const std::vector<uint8_t> block413567;
-
-} // namespace data
-} // namespace benchmark
-
-#endif // BITCOIN_BENCH_DATA_H
diff --git a/src/bench/load_external.cpp b/src/bench/load_external.cpp
index 2ed5a3979d..8f9399c60d 100644
--- a/src/bench/load_external.cpp
+++ b/src/bench/load_external.cpp
@@ -3,7 +3,7 @@
// file COPYING or https://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <bench/data.h>
+#include <bench/data/block413567.raw.h>
#include <chainparams.h>
#include <flatfile.h>
#include <node/blockstorage.h>
diff --git a/src/bench/logging.cpp b/src/bench/logging.cpp
index 86891af8fe..3bf2b7edb2 100644
--- a/src/bench/logging.cpp
+++ b/src/bench/logging.cpp
@@ -28,18 +28,6 @@ static void Logging(benchmark::Bench& bench, const std::vector<const char*>& ext
bench.run([&] { log(); });
}
-static void LogPrintLevelWithThreadNames(benchmark::Bench& bench)
-{
- Logging(bench, {"-logthreadnames=1", "-debug=net"}, [] {
- LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", "test"); });
-}
-
-static void LogPrintLevelWithoutThreadNames(benchmark::Bench& bench)
-{
- Logging(bench, {"-logthreadnames=0", "-debug=net"}, [] {
- LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", "test"); });
-}
-
static void LogWithDebug(benchmark::Bench& bench)
{
Logging(bench, {"-logthreadnames=0", "-debug=net"}, [] { LogDebug(BCLog::NET, "%s\n", "test"); });
@@ -50,45 +38,27 @@ static void LogWithoutDebug(benchmark::Bench& bench)
Logging(bench, {"-logthreadnames=0", "-debug=0"}, [] { LogDebug(BCLog::NET, "%s\n", "test"); });
}
-static void LogPrintfCategoryWithThreadNames(benchmark::Bench& bench)
-{
- Logging(bench, {"-logthreadnames=1", "-debug=net"}, [] {
- LogPrintfCategory(BCLog::NET, "%s\n", "test");
- });
-}
-
-static void LogPrintfCategoryWithoutThreadNames(benchmark::Bench& bench)
-{
- Logging(bench, {"-logthreadnames=0", "-debug=net"}, [] {
- LogPrintfCategory(BCLog::NET, "%s\n", "test");
- });
-}
-
-static void LogPrintfWithThreadNames(benchmark::Bench& bench)
+static void LogWithThreadNames(benchmark::Bench& bench)
{
- Logging(bench, {"-logthreadnames=1"}, [] { LogPrintf("%s\n", "test"); });
+ Logging(bench, {"-logthreadnames=1"}, [] { LogInfo("%s\n", "test"); });
}
-static void LogPrintfWithoutThreadNames(benchmark::Bench& bench)
+static void LogWithoutThreadNames(benchmark::Bench& bench)
{
- Logging(bench, {"-logthreadnames=0"}, [] { LogPrintf("%s\n", "test"); });
+ Logging(bench, {"-logthreadnames=0"}, [] { LogInfo("%s\n", "test"); });
}
static void LogWithoutWriteToFile(benchmark::Bench& bench)
{
// Disable writing the log to a file, as used for unit tests and fuzzing in `MakeNoLogFileContext`.
Logging(bench, {"-nodebuglogfile", "-debug=1"}, [] {
- LogPrintf("%s\n", "test");
+ LogInfo("%s\n", "test");
LogDebug(BCLog::NET, "%s\n", "test");
});
}
-BENCHMARK(LogPrintLevelWithThreadNames, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LogPrintLevelWithoutThreadNames, benchmark::PriorityLevel::HIGH);
BENCHMARK(LogWithDebug, benchmark::PriorityLevel::HIGH);
BENCHMARK(LogWithoutDebug, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LogPrintfCategoryWithThreadNames, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LogPrintfCategoryWithoutThreadNames, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LogPrintfWithThreadNames, benchmark::PriorityLevel::HIGH);
-BENCHMARK(LogPrintfWithoutThreadNames, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LogWithThreadNames, benchmark::PriorityLevel::HIGH);
+BENCHMARK(LogWithoutThreadNames, benchmark::PriorityLevel::HIGH);
BENCHMARK(LogWithoutWriteToFile, benchmark::PriorityLevel::HIGH);
diff --git a/src/bench/readblock.cpp b/src/bench/readblock.cpp
index 0b88663db6..058d953b4e 100644
--- a/src/bench/readblock.cpp
+++ b/src/bench/readblock.cpp
@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <bench/data.h>
+#include <bench/data/block413567.raw.h>
#include <flatfile.h>
#include <node/blockstorage.h>
#include <primitives/block.h>
diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp
index 54356598e7..7e3e2d8e48 100644
--- a/src/bench/rpc_blockchain.cpp
+++ b/src/bench/rpc_blockchain.cpp
@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <bench/data.h>
+#include <bench/data/block413567.raw.h>
#include <chain.h>
#include <core_io.h>
#include <primitives/block.h>
diff --git a/src/bench/streams_findbyte.cpp b/src/bench/streams_findbyte.cpp
index 5098262e9a..004bf8ffc9 100644
--- a/src/bench/streams_findbyte.cpp
+++ b/src/bench/streams_findbyte.cpp
@@ -19,7 +19,7 @@ static void FindByte(benchmark::Bench& bench)
uint8_t data[file_size] = {0};
data[file_size-1] = 1;
file << data;
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
BufferedFile bf{file, /*nBufSize=*/file_size + 1, /*nRewindIn=*/file_size};
bench.run([&] {
diff --git a/src/bench/strencodings.cpp b/src/bench/strencodings.cpp
index 72eb6b442b..dd5829caf2 100644
--- a/src/bench/strencodings.cpp
+++ b/src/bench/strencodings.cpp
@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <bench/data.h>
+#include <bench/data/block413567.raw.h>
#include <span.h>
#include <util/strencodings.h>
diff --git a/src/bench/wallet_create.cpp b/src/bench/wallet_create.cpp
index 43b5b5c91e..3b916d7c39 100644
--- a/src/bench/wallet_create.cpp
+++ b/src/bench/wallet_create.cpp
@@ -3,7 +3,7 @@
// file COPYING or https://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <random.h>
#include <support/allocators/secure.h>
#include <test/util/setup_common.h>
diff --git a/src/bench/wallet_ismine.cpp b/src/bench/wallet_ismine.cpp
index 29e370ce29..5343814ab2 100644
--- a/src/bench/wallet_ismine.cpp
+++ b/src/bench/wallet_ismine.cpp
@@ -4,7 +4,7 @@
#include <addresstype.h>
#include <bench/bench.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <key.h>
#include <key_io.h>
#include <script/descriptor.h>
diff --git a/src/bench/wallet_loading.cpp b/src/bench/wallet_loading.cpp
index 03459d37c1..5d92cfa0de 100644
--- a/src/bench/wallet_loading.cpp
+++ b/src/bench/wallet_loading.cpp
@@ -4,7 +4,7 @@
#include <addresstype.h>
#include <bench/bench.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <consensus/amount.h>
#include <outputtype.h>
#include <primitives/transaction.h>
diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp
index ebe013b638..9cbafa233d 100644
--- a/src/bitcoin-chainstate.cpp
+++ b/src/bitcoin-chainstate.cpp
@@ -283,8 +283,6 @@ int main(int argc, char* argv[])
epilogue:
// Without this precise shutdown sequence, there will be a lot of nullptr
// dereferencing and UB.
- if (chainman.m_thread_load.joinable()) chainman.m_thread_load.join();
-
validation_signals.FlushBackgroundCallbacks();
{
LOCK(cs_main);
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 934b5fb6dc..23136f92df 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chainparamsbase.h>
#include <clientversion.h>
@@ -87,10 +87,10 @@ static void SetupCliArgs(ArgsManager& argsman)
"arguments are number of blocks to generate (default: %s) and maximum iterations to try (default: %s), equivalent to "
"RPC generatetoaddress nblocks and maxtries arguments. Example: bitcoin-cli -generate 4 1000",
DEFAULT_NBLOCKS, DEFAULT_MAX_TRIES),
- ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-addrinfo", "Get the number of addresses known to the node, per network and total, after filtering for quality and recency. The total number of addresses known to the node may be higher.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the output of -getinfo is the result of multiple non-atomic requests. Some entries in the output may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-netinfo", "Get network peer connection information from the remote server. An optional integer argument from 0 to 4 can be passed for different peers listings (default: 0). Pass \"help\" for detailed help documentation.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ ArgsManager::ALLOW_ANY, OptionsCategory::CLI_COMMANDS);
+ argsman.AddArg("-addrinfo", "Get the number of addresses known to the node, per network and total, after filtering for quality and recency. The total number of addresses known to the node may be higher.", ArgsManager::ALLOW_ANY, OptionsCategory::CLI_COMMANDS);
+ argsman.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the output of -getinfo is the result of multiple non-atomic requests. Some entries in the output may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::CLI_COMMANDS);
+ argsman.AddArg("-netinfo", "Get network peer connection information from the remote server. An optional integer argument from 0 to 4 can be passed for different peers listings (default: 0). Pass \"help\" for detailed help documentation.", ArgsManager::ALLOW_ANY, OptionsCategory::CLI_COMMANDS);
SetupChainParamsBaseOptions(argsman);
argsman.AddArg("-color=<when>", strprintf("Color setting for CLI output (default: %s). Valid values: always, auto (add color codes when standard output is connected to a terminal and OS is not WIN32), never.", DEFAULT_COLOR_SETTING), ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_NEGATION, OptionsCategory::OPTIONS);
@@ -950,7 +950,8 @@ static void ParseError(const UniValue& error, std::string& strPrint, int& nRet)
strPrint += ("error message:\n" + err_msg.get_str());
}
if (err_code.isNum() && err_code.getInt<int>() == RPC_WALLET_NOT_SPECIFIED) {
- strPrint += "\nTry adding \"-rpcwallet=<filename>\" option to bitcoin-cli command line.";
+ strPrint += " Or for the CLI, specify the \"-rpcwallet=<walletname>\" option before the command";
+ strPrint += " (run \"bitcoin-cli -h\" for help or \"bitcoin-cli listwallets\" to see which wallets are currently loaded).";
}
} else {
strPrint = "error: " + error.write();
@@ -1212,6 +1213,7 @@ static int CommandLineRPC(int argc, char *argv[])
fputc('\n', stdout);
}
}
+ gArgs.CheckMultipleCLIArgs();
std::unique_ptr<BaseRequestHandler> rh;
std::string method;
if (gArgs.IsArgSet("-getinfo")) {
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 89c03c1647..b3329afba4 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chainparamsbase.h>
#include <clientversion.h>
diff --git a/src/bitcoin-util.cpp b/src/bitcoin-util.cpp
index c8f5bc5026..46ba136d81 100644
--- a/src/bitcoin-util.cpp
+++ b/src/bitcoin-util.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <arith_uint256.h>
#include <chain.h>
diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp
index 7d030abe97..00f39be794 100644
--- a/src/bitcoin-wallet.cpp
+++ b/src/bitcoin-wallet.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chainparams.h>
#include <chainparamsbase.h>
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index a09bb5c9da..192676a10b 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chainparams.h>
#include <clientversion.h>
@@ -109,10 +109,11 @@ int fork_daemon(bool nochdir, bool noclose, TokenPipeEnd& endpoint)
#endif
-static bool ParseArgs(ArgsManager& args, int argc, char* argv[])
+static bool ParseArgs(NodeContext& node, int argc, char* argv[])
{
+ ArgsManager& args{*Assert(node.args)};
// If Qt is used, parameters/bitcoin.conf are parsed in qt/bitcoin.cpp's main()
- SetupServerArgs(args);
+ SetupServerArgs(args, node.init->canListenIpc());
std::string error;
if (!args.ParseParameters(argc, argv, error)) {
return InitError(Untranslated(strprintf("Error parsing command line arguments: %s", error)));
@@ -268,12 +269,12 @@ MAIN_FUNCTION
// Interpret command line arguments
ArgsManager& args = *Assert(node.args);
- if (!ParseArgs(args, argc, argv)) return EXIT_FAILURE;
+ if (!ParseArgs(node, argc, argv)) return EXIT_FAILURE;
// Process early info return commands such as -help or -version
if (ProcessInitCommands(args)) return EXIT_SUCCESS;
// Start application
- if (!AppInit(node) || !Assert(node.shutdown)->wait()) {
+ if (!AppInit(node) || !Assert(node.shutdown_signal)->wait()) {
node.exit_status = EXIT_FAILURE;
}
Interrupt(node);
diff --git a/src/chain.h b/src/chain.h
index c46392c535..13f7582385 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -178,7 +178,7 @@ public:
//! Verification status of this block. See enum BlockStatus
//!
//! Note: this value is modified to show BLOCK_OPT_WITNESS during UTXO snapshot
- //! load to avoid the block index being spuriously rewound.
+ //! load to avoid a spurious startup failure requiring -reindex.
//! @sa NeedsRedownload
//! @sa ActivateSnapshot
uint32_t nStatus GUARDED_BY(::cs_main){0};
diff --git a/src/clientversion.cpp b/src/clientversion.cpp
index 017366543d..3943c4fb1d 100644
--- a/src/clientversion.cpp
+++ b/src/clientversion.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <clientversion.h>
#include <util/string.h>
@@ -23,14 +23,12 @@ using util::Join;
const std::string CLIENT_NAME("Satoshi");
-#ifdef HAVE_BUILD_INFO
-#include <obj/build.h>
-// The <obj/build.h>, which is generated by the build environment (cmake/script/GenerateBuildInfo.cmake),
+#include <bitcoin-build-info.h>
+// The <bitcoin-build-info.h>, which is generated by the build environment (cmake/script/GenerateBuildInfo.cmake),
// could contain only one line of the following:
// - "#define BUILD_GIT_TAG ...", if the top commit is tagged
// - "#define BUILD_GIT_COMMIT ...", if the top commit is not tagged
// - "// No build information available", if proper git information is not available
-#endif
//! git will put "#define GIT_COMMIT_ID ..." on the next line inside archives. $Format:%n#define GIT_COMMIT_ID "%H"$
diff --git a/src/clientversion.h b/src/clientversion.h
index 73aaf868e4..d1202b1259 100644
--- a/src/clientversion.h
+++ b/src/clientversion.h
@@ -7,11 +7,11 @@
#include <util/macros.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
// Check that required client information is defined
#if !defined(CLIENT_VERSION_MAJOR) || !defined(CLIENT_VERSION_MINOR) || !defined(CLIENT_VERSION_BUILD) || !defined(CLIENT_VERSION_IS_RELEASE) || !defined(COPYRIGHT_YEAR)
-#error Client version information missing: version is not defined by bitcoin-config.h or in any other way
+#error Client version information missing: version is not defined by bitcoin-build-config.h or in any other way
#endif
//! Copyright string used in Windows .rc files
diff --git a/src/cluster_linearize.h b/src/cluster_linearize.h
index 607ae681d2..757c81f108 100644
--- a/src/cluster_linearize.h
+++ b/src/cluster_linearize.h
@@ -19,14 +19,6 @@
namespace cluster_linearize {
-/** Data type to represent cluster input.
- *
- * cluster[i].first is tx_i's fee and size.
- * cluster[i].second[j] is true iff tx_i spends one or more of tx_j's outputs.
- */
-template<typename SetType>
-using Cluster = std::vector<std::pair<FeeFrac, SetType>>;
-
/** Data type to represent transaction indices in clusters. */
using ClusterIndex = uint32_t;
@@ -54,12 +46,23 @@ class DepGraph
Entry(const FeeFrac& f, const SetType& a, const SetType& d) noexcept : feerate(f), ancestors(a), descendants(d) {}
};
- /** Data for each transaction, in the same order as the Cluster it was constructed from. */
+ /** Data for each transaction. */
std::vector<Entry> entries;
+ /** Which positions are used. */
+ SetType m_used;
+
public:
/** Equality operator (primarily for testing purposes). */
- friend bool operator==(const DepGraph&, const DepGraph&) noexcept = default;
+ friend bool operator==(const DepGraph& a, const DepGraph& b) noexcept
+ {
+ if (a.m_used != b.m_used) return false;
+ // Only compare the used positions within the entries vector.
+ for (auto idx : a.m_used) {
+ if (a.entries[idx] != b.entries[idx]) return false;
+ }
+ return true;
+ }
// Default constructors.
DepGraph() noexcept = default;
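The hunk above replaces the defaulted equality operator because DepGraph now tracks occupied positions in m_used, and entries at unused positions ("holes") may hold stale data. A minimal standalone sketch of the same idea, using std::bitset and int payloads as stand-ins for SetType and Entry (names here are illustrative, not the project's):

    // Sketch only: equality over a position-indexed container that ignores
    // "holes", mirroring how the new DepGraph::operator== compares just the
    // positions tracked in m_used.
    #include <bitset>
    #include <cassert>
    #include <vector>

    struct SimpleGraph {
        std::vector<int> entries;  // stand-in for the per-position Entry data
        std::bitset<32> used;      // stand-in for SetType m_used

        friend bool operator==(const SimpleGraph& a, const SimpleGraph& b)
        {
            if (a.used != b.used) return false;
            for (unsigned i = 0; i < a.used.size(); ++i) {
                // Stale data in unused slots must not affect equality.
                if (a.used[i] && a.entries[i] != b.entries[i]) return false;
            }
            return true;
        }
    };

    int main()
    {
        SimpleGraph a{{7, 99, 3}, 0b101};  // position 1 is a hole
        SimpleGraph b{{7, -1, 3}, 0b101};  // hole contents differ, but are ignored
        assert(a == b);
    }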
@@ -68,58 +71,51 @@ public:
DepGraph& operator=(const DepGraph&) noexcept = default;
DepGraph& operator=(DepGraph&&) noexcept = default;
- /** Construct a DepGraph object for ntx transactions, with no dependencies.
+ /** Construct a DepGraph object given another DepGraph and a mapping from old to new.
*
- * Complexity: O(N) where N=ntx.
- **/
- explicit DepGraph(ClusterIndex ntx) noexcept
- {
- Assume(ntx <= SetType::Size());
- entries.resize(ntx);
- for (ClusterIndex i = 0; i < ntx; ++i) {
- entries[i].ancestors = SetType::Singleton(i);
- entries[i].descendants = SetType::Singleton(i);
- }
- }
-
- /** Construct a DepGraph object given a cluster.
+ * @param depgraph The original DepGraph that is being remapped.
+ *
+ * @param mapping A Span such that mapping[i] gives the position in the new DepGraph
+ * for position i in the old depgraph. Its size must be equal to
+ * depgraph.PositionRange(). The value of mapping[i] is ignored if
+ * position i is a hole in depgraph (i.e., if !depgraph.Positions()[i]).
+ *
+ * @param pos_range The PositionRange() for the new DepGraph. It must equal the largest
+ * value in mapping for any used position in depgraph plus 1, or 0 if
+ * depgraph.TxCount() == 0.
*
- * Complexity: O(N^2) where N=cluster.size().
+ * Complexity: O(N^2) where N=depgraph.TxCount().
*/
- explicit DepGraph(const Cluster<SetType>& cluster) noexcept : entries(cluster.size())
+ DepGraph(const DepGraph<SetType>& depgraph, Span<const ClusterIndex> mapping, ClusterIndex pos_range) noexcept : entries(pos_range)
{
- for (ClusterIndex i = 0; i < cluster.size(); ++i) {
+ Assume(mapping.size() == depgraph.PositionRange());
+ Assume((pos_range == 0) == (depgraph.TxCount() == 0));
+ for (ClusterIndex i : depgraph.Positions()) {
+ auto new_idx = mapping[i];
+ Assume(new_idx < pos_range);
+ // Add transaction.
+ entries[new_idx].ancestors = SetType::Singleton(new_idx);
+ entries[new_idx].descendants = SetType::Singleton(new_idx);
+ m_used.Set(new_idx);
// Fill in fee and size.
- entries[i].feerate = cluster[i].first;
- // Fill in direct parents as ancestors.
- entries[i].ancestors = cluster[i].second;
- // Make sure transactions are ancestors of themselves.
- entries[i].ancestors.Set(i);
- }
-
- // Propagate ancestor information.
- for (ClusterIndex i = 0; i < entries.size(); ++i) {
- // At this point, entries[a].ancestors[b] is true iff b is an ancestor of a and there
- // is a path from a to b through the subgraph consisting of {a, b} union
- // {0, 1, ..., (i-1)}.
- SetType to_merge = entries[i].ancestors;
- for (ClusterIndex j = 0; j < entries.size(); ++j) {
- if (entries[j].ancestors[i]) {
- entries[j].ancestors |= to_merge;
- }
- }
+ entries[new_idx].feerate = depgraph.entries[i].feerate;
}
-
- // Fill in descendant information by transposing the ancestor information.
- for (ClusterIndex i = 0; i < entries.size(); ++i) {
- for (auto j : entries[i].ancestors) {
- entries[j].descendants.Set(i);
- }
+ for (ClusterIndex i : depgraph.Positions()) {
+ // Fill in dependencies by mapping direct parents.
+ SetType parents;
+ for (auto j : depgraph.GetReducedParents(i)) parents.Set(mapping[j]);
+ AddDependencies(parents, mapping[i]);
}
+ // Verify that the provided pos_range was correct (no unused positions at the end).
+ Assume(m_used.None() ? (pos_range == 0) : (pos_range == m_used.Last() + 1));
}
+    /** Get the set of transaction positions in use. Complexity: O(1). */
+ const SetType& Positions() const noexcept { return m_used; }
+ /** Get the range of positions in this DepGraph. All entries in Positions() are in [0, PositionRange() - 1]. */
+ ClusterIndex PositionRange() const noexcept { return entries.size(); }
/** Get the number of transactions in the graph. Complexity: O(1). */
- auto TxCount() const noexcept { return entries.size(); }
+ auto TxCount() const noexcept { return m_used.Count(); }
/** Get the feerate of a given transaction i. Complexity: O(1). */
const FeeFrac& FeeRate(ClusterIndex i) const noexcept { return entries[i].feerate; }
/** Get the mutable feerate of a given transaction i. Complexity: O(1). */
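The new remapping constructor above moves every used position i of the source graph to mapping[i] and translates dependency sets through that same mapping. A small illustrative sketch of just the set-translation step (Set and RemapSet are hypothetical stand-ins; the real constructor also copies feerates and rebuilds ancestors/descendants via AddDependencies):

    // Sketch: translate a set of old positions into new positions via `mapping`,
    // ignoring holes, as the remapping DepGraph constructor does.
    #include <bitset>
    #include <cassert>
    #include <vector>

    using Set = std::bitset<32>;

    Set RemapSet(const Set& old_set, const std::vector<unsigned>& mapping)
    {
        Set out;
        for (unsigned i = 0; i < mapping.size(); ++i) {
            if (old_set[i]) out.set(mapping[i]);
        }
        return out;
    }

    int main()
    {
        // Old graph uses positions {0, 2, 3}; position 1 is a hole.
        Set old_used;
        old_used.set(0); old_used.set(2); old_used.set(3);
        // mapping[i] gives the new position for old position i; the value for
        // the hole at position 1 is ignored, as in the real constructor.
        std::vector<unsigned> mapping{2, 0, 0, 1};
        Set new_used = RemapSet(old_used, mapping);
        assert(new_used[0] && new_used[1] && new_used[2] && !new_used[3]);
    }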
@@ -129,39 +125,120 @@ public:
/** Get the descendants of a given transaction i. Complexity: O(1). */
const SetType& Descendants(ClusterIndex i) const noexcept { return entries[i].descendants; }
- /** Add a new unconnected transaction to this transaction graph (at the end), and return its
- * ClusterIndex.
+ /** Add a new unconnected transaction to this transaction graph (in the first available
+ * position), and return its ClusterIndex.
*
* Complexity: O(1) (amortized, due to resizing of backing vector).
*/
ClusterIndex AddTransaction(const FeeFrac& feefrac) noexcept
{
- Assume(TxCount() < SetType::Size());
- ClusterIndex new_idx = TxCount();
- entries.emplace_back(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
+ static constexpr auto ALL_POSITIONS = SetType::Fill(SetType::Size());
+ auto available = ALL_POSITIONS - m_used;
+ Assume(available.Any());
+ ClusterIndex new_idx = available.First();
+ if (new_idx == entries.size()) {
+ entries.emplace_back(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
+ } else {
+ entries[new_idx] = Entry(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
+ }
+ m_used.Set(new_idx);
return new_idx;
}
- /** Modify this transaction graph, adding a dependency between a specified parent and child.
+ /** Remove the specified positions from this DepGraph.
+ *
+ * The specified positions will no longer be part of Positions(), and dependencies with them are
+ * removed. Note that due to DepGraph only tracking ancestors/descendants (and not direct
+ * dependencies), if a parent is removed while a grandparent remains, the grandparent will
+ * remain an ancestor.
*
* Complexity: O(N) where N=TxCount().
- **/
- void AddDependency(ClusterIndex parent, ClusterIndex child) noexcept
+ */
+ void RemoveTransactions(const SetType& del) noexcept
+ {
+ m_used -= del;
+ // Remove now-unused trailing entries.
+ while (!entries.empty() && !m_used[entries.size() - 1]) {
+ entries.pop_back();
+ }
+ // Remove the deleted transactions from ancestors/descendants of other transactions. Note
+ // that the deleted positions will retain old feerate and dependency information. This does
+ // not matter as they will be overwritten by AddTransaction if they get used again.
+ for (auto& entry : entries) {
+ entry.ancestors &= m_used;
+ entry.descendants &= m_used;
+ }
+ }
+
+ /** Modify this transaction graph, adding multiple parents to a specified child.
+ *
+ * Complexity: O(N) where N=TxCount().
+ */
+ void AddDependencies(const SetType& parents, ClusterIndex child) noexcept
{
- // Bail out if dependency is already implied.
- if (entries[child].ancestors[parent]) return;
- // To each ancestor of the parent, add as descendants the descendants of the child.
+ Assume(m_used[child]);
+ Assume(parents.IsSubsetOf(m_used));
+ // Compute the ancestors of parents that are not already ancestors of child.
+ SetType par_anc;
+ for (auto par : parents - Ancestors(child)) {
+ par_anc |= Ancestors(par);
+ }
+ par_anc -= Ancestors(child);
+ // Bail out if there are no such ancestors.
+ if (par_anc.None()) return;
+ // To each such ancestor, add as descendants the descendants of the child.
const auto& chl_des = entries[child].descendants;
- for (auto anc_of_par : Ancestors(parent)) {
+ for (auto anc_of_par : par_anc) {
entries[anc_of_par].descendants |= chl_des;
}
- // To each descendant of the child, add as ancestors the ancestors of the parent.
- const auto& par_anc = entries[parent].ancestors;
+ // To each descendant of the child, add those ancestors.
for (auto dec_of_chl : Descendants(child)) {
entries[dec_of_chl].ancestors |= par_anc;
}
}
+ /** Compute the (reduced) set of parents of node i in this graph.
+ *
+ * This returns the minimal subset of the parents of i whose ancestors together equal all of
+ * i's ancestors (unless i is part of a cycle of dependencies). Note that DepGraph does not
+ * store the set of parents; this information is inferred from the ancestor sets.
+ *
+ * Complexity: O(N) where N=Ancestors(i).Count() (which is bounded by TxCount()).
+ */
+ SetType GetReducedParents(ClusterIndex i) const noexcept
+ {
+ SetType parents = Ancestors(i);
+ parents.Reset(i);
+ for (auto parent : parents) {
+ if (parents[parent]) {
+ parents -= Ancestors(parent);
+ parents.Set(parent);
+ }
+ }
+ return parents;
+ }
+
+ /** Compute the (reduced) set of children of node i in this graph.
+ *
+ * This returns the minimal subset of the children of i whose descendants together equal all of
+ * i's descendants (unless i is part of a cycle of dependencies). Note that DepGraph does not
+ * store the set of children; this information is inferred from the descendant sets.
+ *
+ * Complexity: O(N) where N=Descendants(i).Count() (which is bounded by TxCount()).
+ */
+ SetType GetReducedChildren(ClusterIndex i) const noexcept
+ {
+ SetType children = Descendants(i);
+ children.Reset(i);
+ for (auto child : children) {
+ if (children[child]) {
+ children -= Descendants(child);
+ children.Set(child);
+ }
+ }
+ return children;
+ }
+
/** Compute the aggregate feerate of a set of nodes in this graph.
*
* Complexity: O(N) where N=elems.Count().
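GetReducedParents() above recovers direct parents from ancestor sets by stripping, for each kept candidate, that candidate's own ancestors. A standalone sketch with hard-coded ancestor bitsets (the real code derives them from the DepGraph entries):

    // Sketch of parent reduction: start from all ancestors of i (minus i) and
    // remove, for every remaining candidate, that candidate's ancestors.
    #include <bitset>
    #include <cassert>
    #include <vector>

    using Set = std::bitset<8>;

    Set ReducedParents(unsigned i, const std::vector<Set>& ancestors)
    {
        Set parents = ancestors[i];
        parents.reset(i);
        for (unsigned p = 0; p < parents.size(); ++p) {
            if (!parents[p]) continue;
            parents &= ~ancestors[p];  // drop p's ancestors...
            parents.set(p);            // ...but keep p itself
        }
        return parents;
    }

    int main()
    {
        // Chain 0 <- 1 <- 2 (2 spends 1, 1 spends 0); ancestor sets include self.
        std::vector<Set> anc(3);
        anc[0] = Set("001");  // {0}
        anc[1] = Set("011");  // {0,1}
        anc[2] = Set("111");  // {0,1,2}
        // 2's ancestors are {0,1}, but its reduced parent set is just {1},
        // since 0 is already reachable through 1.
        assert(ReducedParents(2, anc) == Set("010"));
    }

The same reduction applied to descendant sets yields GetReducedChildren().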
@@ -215,7 +292,7 @@ public:
*
* Complexity: O(TxCount()).
*/
- bool IsConnected() const noexcept { return IsConnected(SetType::Fill(TxCount())); }
+ bool IsConnected() const noexcept { return IsConnected(m_used); }
/** Append the entries of select to list in a topologically valid order.
*
@@ -257,6 +334,14 @@ struct SetInfo
explicit SetInfo(const DepGraph<SetType>& depgraph, const SetType& txn) noexcept :
transactions(txn), feerate(depgraph.FeeRate(txn)) {}
+ /** Add a transaction to this SetInfo (which must not yet be in it). */
+ void Set(const DepGraph<SetType>& depgraph, ClusterIndex pos) noexcept
+ {
+ Assume(!transactions[pos]);
+ transactions.Set(pos);
+ feerate += depgraph.FeeRate(pos);
+ }
+
/** Add the transactions of other to this SetInfo (no overlap allowed). */
SetInfo& operator|=(const SetInfo& other) noexcept
{
@@ -457,11 +542,11 @@ public:
*/
AncestorCandidateFinder(const DepGraph<SetType>& depgraph LIFETIMEBOUND) noexcept :
m_depgraph(depgraph),
- m_todo{SetType::Fill(depgraph.TxCount())},
- m_ancestor_set_feerates(depgraph.TxCount())
+ m_todo{depgraph.Positions()},
+ m_ancestor_set_feerates(depgraph.PositionRange())
{
// Precompute ancestor-set feerates.
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
+ for (ClusterIndex i : m_depgraph.Positions()) {
/** The remaining ancestors for transaction i. */
SetType anc_to_add = m_depgraph.Ancestors(i);
FeeFrac anc_feerate;
@@ -506,6 +591,12 @@ public:
return m_todo.None();
}
+ /** Count the number of remaining unlinearized transactions. */
+ ClusterIndex NumRemaining() const noexcept
+ {
+ return m_todo.Count();
+ }
+
/** Find the best (highest-feerate, smallest among those in case of a tie) ancestor set
* among the remaining transactions. Requires !AllDone().
*
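For context on the precomputation above: an ancestor-set feerate is the combined fee and size of a transaction together with all of its remaining ancestors, compared as a fraction. A toy sketch (FeeFracLike is a stand-in for the project's FeeFrac, without its overflow care):

    #include <cassert>
    #include <cstdint>

    struct FeeFracLike {
        int64_t fee;
        int32_t size;
    };

    // a has a strictly higher feerate than b iff a.fee/a.size > b.fee/b.size,
    // evaluated by cross-multiplication to avoid rounding.
    bool HigherFeerate(const FeeFracLike& a, const FeeFracLike& b)
    {
        return a.fee * b.size > b.fee * a.size;
    }

    int main()
    {
        // Child pays 500 sat for 100 vB; its only ancestor pays 100 sat for 200 vB.
        FeeFracLike child_alone{500, 100};
        FeeFracLike ancestor_set{500 + 100, 100 + 200};  // child together with its ancestor
        // The ancestor set (600 sat / 300 vB = 2 sat/vB) is worse than the child
        // on its own (5 sat/vB); such ancestor-set feerates are what the finder
        // precomputes and keeps updated as transactions get linearized.
        assert(HigherFeerate(child_alone, ancestor_set));
    }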
@@ -541,23 +632,64 @@ class SearchCandidateFinder
{
/** Internal RNG. */
InsecureRandomContext m_rng;
- /** Internal dependency graph for the cluster. */
- const DepGraph<SetType>& m_depgraph;
- /** Which transactions are left to do (sorted indices). */
+ /** m_sorted_to_original[i] is the original position that sorted transaction position i had. */
+ std::vector<ClusterIndex> m_sorted_to_original;
+ /** m_original_to_sorted[i] is the sorted position original transaction position i has. */
+ std::vector<ClusterIndex> m_original_to_sorted;
+ /** Internal dependency graph for the cluster (with transactions in decreasing individual
+ * feerate order). */
+ DepGraph<SetType> m_sorted_depgraph;
+ /** Which transactions are left to do (indices in m_sorted_depgraph's order). */
SetType m_todo;
+ /** Given a set of transactions with sorted indices, get their original indices. */
+ SetType SortedToOriginal(const SetType& arg) const noexcept
+ {
+ SetType ret;
+ for (auto pos : arg) ret.Set(m_sorted_to_original[pos]);
+ return ret;
+ }
+
+ /** Given a set of transactions with original indices, get their sorted indices. */
+ SetType OriginalToSorted(const SetType& arg) const noexcept
+ {
+ SetType ret;
+ for (auto pos : arg) ret.Set(m_original_to_sorted[pos]);
+ return ret;
+ }
+
public:
/** Construct a candidate finder for a graph.
*
* @param[in] depgraph Dependency graph for the to-be-linearized cluster.
* @param[in] rng_seed A random seed to control the search order.
*
- * Complexity: O(1).
+     * Complexity: O(N^2) where N=depgraph.TxCount().
*/
- SearchCandidateFinder(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed) noexcept :
+ SearchCandidateFinder(const DepGraph<SetType>& depgraph, uint64_t rng_seed) noexcept :
m_rng(rng_seed),
- m_depgraph(depgraph),
- m_todo(SetType::Fill(depgraph.TxCount())) {}
+ m_sorted_to_original(depgraph.TxCount()),
+ m_original_to_sorted(depgraph.PositionRange())
+ {
+        // Determine reordering mapping, by sorting by decreasing feerate. Unused positions are
+ // not included, as they will never be looked up anyway.
+ ClusterIndex sorted_pos{0};
+ for (auto i : depgraph.Positions()) {
+ m_sorted_to_original[sorted_pos++] = i;
+ }
+ std::sort(m_sorted_to_original.begin(), m_sorted_to_original.end(), [&](auto a, auto b) {
+ auto feerate_cmp = depgraph.FeeRate(a) <=> depgraph.FeeRate(b);
+ if (feerate_cmp == 0) return a < b;
+ return feerate_cmp > 0;
+ });
+ // Compute reverse mapping.
+ for (ClusterIndex i = 0; i < m_sorted_to_original.size(); ++i) {
+ m_original_to_sorted[m_sorted_to_original[i]] = i;
+ }
+ // Compute reordered dependency graph.
+ m_sorted_depgraph = DepGraph(depgraph, m_original_to_sorted, m_sorted_to_original.size());
+ m_todo = m_sorted_depgraph.Positions();
+ }
/** Check whether any unlinearized transactions remain. */
bool AllDone() const noexcept
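The constructor above builds a bijection between original and feerate-sorted positions. A standalone sketch of the two mappings, using plain doubles for feerates (illustrative only; the real code sorts DepGraph positions and then remaps the whole graph):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<double> feerate{1.0, 5.0, 3.0};           // per original position
        std::vector<unsigned> sorted_to_original{0, 1, 2};
        std::sort(sorted_to_original.begin(), sorted_to_original.end(),
                  [&](unsigned a, unsigned b) {
                      if (feerate[a] == feerate[b]) return a < b;
                      return feerate[a] > feerate[b];          // decreasing feerate
                  });
        // Derive the inverse mapping.
        std::vector<unsigned> original_to_sorted(feerate.size());
        for (unsigned i = 0; i < sorted_to_original.size(); ++i) {
            original_to_sorted[sorted_to_original[i]] = i;
        }
        assert((sorted_to_original == std::vector<unsigned>{1, 2, 0}));
        assert((original_to_sorted == std::vector<unsigned>{2, 0, 1}));
    }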
@@ -580,12 +712,15 @@ public:
* be <= max_iterations. If strictly < max_iterations, the
* returned subset is optimal.
*
- * Complexity: O(N * min(max_iterations, 2^N)) where N=depgraph.TxCount().
+ * Complexity: possibly O(N * min(max_iterations, sqrt(2^N))) where N=depgraph.TxCount().
*/
std::pair<SetInfo<SetType>, uint64_t> FindCandidateSet(uint64_t max_iterations, SetInfo<SetType> best) noexcept
{
Assume(!AllDone());
+ // Convert the provided best to internal sorted indices.
+ best.transactions = OriginalToSorted(best.transactions);
+
/** Type for work queue items. */
struct WorkItem
{
@@ -596,16 +731,27 @@ public:
/** Set of undecided transactions. This must be a subset of m_todo, and have no overlap
* with inc. The set (inc | und) must be topologically valid. */
SetType und;
+ /** (Only when inc is not empty) The best feerate of any superset of inc that is also a
+ * subset of (inc | und), without requiring it to be topologically valid. It forms a
+ * conservative upper bound on how good a set this work item can give rise to.
+ * Transactions whose feerate is below best's are ignored when determining this value,
+ * which means it may technically be an underestimate, but if so, this work item
+ * cannot result in something that beats best anyway. */
+ FeeFrac pot_feerate;
/** Construct a new work item. */
- WorkItem(SetInfo<SetType>&& i, SetType&& u) noexcept :
- inc(std::move(i)), und(std::move(u)) {}
+ WorkItem(SetInfo<SetType>&& i, SetType&& u, FeeFrac&& p_f) noexcept :
+ inc(std::move(i)), und(std::move(u)), pot_feerate(std::move(p_f))
+ {
+ Assume(pot_feerate.IsEmpty() == inc.feerate.IsEmpty());
+ }
/** Swap two WorkItems. */
void Swap(WorkItem& other) noexcept
{
swap(inc, other.inc);
swap(und, other.und);
+ swap(pot_feerate, other.pot_feerate);
}
};
@@ -613,39 +759,111 @@ public:
VecDeque<WorkItem> queue;
queue.reserve(std::max<size_t>(256, 2 * m_todo.Count()));
- // Create an initial entry with m_todo as undecided. Also use it as best if not provided,
- // so that during the work processing loop below, and during the add_fn/split_fn calls, we
- // do not need to deal with the best=empty case.
- if (best.feerate.IsEmpty()) best = SetInfo(m_depgraph, m_todo);
- queue.emplace_back(SetInfo<SetType>{}, SetType{m_todo});
+ // Create initial entries per connected component of m_todo. While clusters themselves are
+ // generally connected, this is not necessarily true after some parts have already been
+ // removed from m_todo. Without this, effort can be wasted on searching "inc" sets that
+ // span multiple components.
+ auto to_cover = m_todo;
+ do {
+ auto component = m_sorted_depgraph.FindConnectedComponent(to_cover);
+ to_cover -= component;
+ // If best is not provided, set it to the first component, so that during the work
+ // processing loop below, and during the add_fn/split_fn calls, we do not need to deal
+ // with the best=empty case.
+ if (best.feerate.IsEmpty()) best = SetInfo(m_sorted_depgraph, component);
+ queue.emplace_back(/*inc=*/SetInfo<SetType>{},
+ /*und=*/std::move(component),
+ /*pot_feerate=*/FeeFrac{});
+ } while (to_cover.Any());
/** Local copy of the iteration limit. */
uint64_t iterations_left = max_iterations;
+ /** The set of transactions in m_todo which have feerate > best's. */
+ SetType imp = m_todo;
+ while (imp.Any()) {
+ ClusterIndex check = imp.Last();
+ if (m_sorted_depgraph.FeeRate(check) >> best.feerate) break;
+ imp.Reset(check);
+ }
+
/** Internal function to add an item to the queue of elements to explore if there are any
- * transactions left to split on, and to update best.
+ * transactions left to split on, possibly improving it before doing so, and to update
+ * best/imp.
*
* - inc: the "inc" value for the new work item (must be topological).
* - und: the "und" value for the new work item ((inc | und) must be topological).
*/
auto add_fn = [&](SetInfo<SetType> inc, SetType und) noexcept {
+ /** SetInfo object with the set whose feerate will become the new work item's
+ * pot_feerate. It starts off equal to inc. */
+ auto pot = inc;
if (!inc.feerate.IsEmpty()) {
+ // Add entries to pot. We iterate over all undecided transactions whose feerate is
+ // higher than best. While undecided transactions of lower feerate may improve pot,
+ // the resulting pot feerate cannot possibly exceed best's (and this item will be
+ // skipped in split_fn anyway).
+ for (auto pos : imp & und) {
+ // Determine if adding transaction pos to pot (ignoring topology) would improve
+ // it. If not, we're done updating pot. This relies on the fact that
+ // m_sorted_depgraph, and thus the transactions iterated over, are in decreasing
+ // individual feerate order.
+ if (!(m_sorted_depgraph.FeeRate(pos) >> pot.feerate)) break;
+ pot.Set(m_sorted_depgraph, pos);
+ }
+
+ // The "jump ahead" optimization: whenever pot has a topologically-valid subset,
+ // that subset can be added to inc. Any subset of (pot - inc) has the property that
+ // its feerate exceeds that of any set compatible with this work item (superset of
+ // inc, subset of (inc | und)). Thus, if T is a topological subset of pot, and B is
+ // the best topologically-valid set compatible with this work item, and (T - B) is
+ // non-empty, then (T | B) is better than B and also topological. This is in
+ // contradiction with the assumption that B is best. Thus, (T - B) must be empty,
+ // or T must be a subset of B.
+ //
+ // See https://delvingbitcoin.org/t/how-to-linearize-your-cluster/303 section 2.4.
+ const auto init_inc = inc.transactions;
+ for (auto pos : pot.transactions - inc.transactions) {
+ // If the transaction's ancestors are a subset of pot, we can add it together
+ // with its ancestors to inc. Just update the transactions here; the feerate
+ // update happens below.
+ auto anc_todo = m_sorted_depgraph.Ancestors(pos) & m_todo;
+ if (anc_todo.IsSubsetOf(pot.transactions)) inc.transactions |= anc_todo;
+ }
+ // Finally update und and inc's feerate to account for the added transactions.
+ und -= inc.transactions;
+ inc.feerate += m_sorted_depgraph.FeeRate(inc.transactions - init_inc);
+
// If inc's feerate is better than best's, remember it as our new best.
if (inc.feerate > best.feerate) {
best = inc;
+ // See if we can remove any entries from imp now.
+ while (imp.Any()) {
+ ClusterIndex check = imp.Last();
+ if (m_sorted_depgraph.FeeRate(check) >> best.feerate) break;
+ imp.Reset(check);
+ }
}
+
+ // If no potential transactions exist beyond the already included ones, no
+ // improvement is possible anymore.
+ if (pot.feerate.size == inc.feerate.size) return;
+ // At this point und must be non-empty. If it were empty then pot would equal inc.
+ Assume(und.Any());
} else {
Assume(inc.transactions.None());
+ // If inc is empty, we just make sure there are undecided transactions left to
+ // split on.
+ if (und.None()) return;
}
- // Make sure there are undecided transactions left to split on.
- if (und.None()) return;
-
// Actually construct a new work item on the queue. Due to the switch to DFS when queue
// space runs out (see below), we know that no reallocation of the queue should ever
// occur.
Assume(queue.size() < queue.capacity());
- queue.emplace_back(std::move(inc), std::move(und));
+ queue.emplace_back(/*inc=*/std::move(inc),
+ /*und=*/std::move(und),
+ /*pot_feerate=*/std::move(pot.feerate));
};
/** Internal process function. It takes an existing work item, and splits it in two: one
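For the pot_feerate bookkeeping introduced above, a simplified standalone sketch: undecided transactions are considered in decreasing individual feerate order and added, topology ignored, only while they still raise the combined feerate, giving an upper bound that lets work items be pruned. The real code additionally restricts this to transactions whose feerate beats the current best (the imp set); the numbers below are made up:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Frac { int64_t fee; int32_t size; };

    // true iff a has a strictly higher feerate than b (cross-multiplied).
    bool Higher(const Frac& a, const Frac& b) { return a.fee * b.size > b.fee * a.size; }

    Frac PotentialBound(Frac inc, const std::vector<Frac>& und_by_decreasing_feerate)
    {
        for (const Frac& tx : und_by_decreasing_feerate) {
            if (!Higher(tx, inc)) break;  // can no longer improve the bound
            inc.fee += tx.fee;
            inc.size += tx.size;
        }
        return inc;
    }

    int main()
    {
        Frac inc{1000, 500};                                        // 2 sat/vB included so far
        std::vector<Frac> und{{900, 100}, {400, 100}, {100, 100}};  // 9, 4, 1 sat/vB
        Frac pot = PotentialBound(inc, und);
        // 9 and 4 sat/vB both raise the running feerate; 1 sat/vB does not.
        assert(pot.fee == 2300 && pot.size == 700);
    }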
@@ -659,18 +877,66 @@ public:
Assume(elem.inc.transactions.IsSubsetOf(m_todo) && elem.und.IsSubsetOf(m_todo));
// Included transactions cannot be undecided.
Assume(!elem.inc.transactions.Overlaps(elem.und));
+ // If pot is empty, then so is inc.
+ Assume(elem.inc.feerate.IsEmpty() == elem.pot_feerate.IsEmpty());
+
+ const ClusterIndex first = elem.und.First();
+ if (!elem.inc.feerate.IsEmpty()) {
+ // If no undecided transactions remain with feerate higher than best, this entry
+ // cannot be improved beyond best.
+ if (!elem.und.Overlaps(imp)) return;
+ // We can ignore any queue item whose potential feerate isn't better than the best
+ // seen so far.
+ if (elem.pot_feerate <= best.feerate) return;
+ } else {
+ // In case inc is empty use a simpler alternative check.
+ if (m_sorted_depgraph.FeeRate(first) <= best.feerate) return;
+ }
- // Pick the first undecided transaction as the one to split on.
- const ClusterIndex split = elem.und.First();
+ // Decide which transaction to split on. Splitting is how new work items are added, and
+ // how progress is made. One split transaction is chosen among the queue item's
+ // undecided ones, and:
+ // - A work item is (potentially) added with that transaction plus its remaining
+ // descendants excluded (removed from the und set).
+ // - A work item is (potentially) added with that transaction plus its remaining
+ // ancestors included (added to the inc set).
+ //
+ // To decide what to split on, consider the undecided ancestors of the highest
+ // individual feerate undecided transaction. Pick the one which reduces the search space
+ // most. Let I(t) be the size of the undecided set after including t, and E(t) the size
+ // of the undecided set after excluding t. Then choose the split transaction t such
+ // that 2^I(t) + 2^E(t) is minimal, tie-breaking by highest individual feerate for t.
+ ClusterIndex split = 0;
+ const auto select = elem.und & m_sorted_depgraph.Ancestors(first);
+ Assume(select.Any());
+ std::optional<std::pair<ClusterIndex, ClusterIndex>> split_counts;
+ for (auto t : select) {
+ // Call max = max(I(t), E(t)) and min = min(I(t), E(t)). Let counts = {max,min}.
+ // Sorting by the tuple counts is equivalent to sorting by 2^I(t) + 2^E(t). This
+ // expression is equal to 2^max + 2^min = 2^max * (1 + 1/2^(max - min)). The second
+ // factor (1 + 1/2^(max - min)) there is in (1,2]. Thus increasing max will always
+ // increase it, even when min decreases. Because of this, we can first sort by max.
+ std::pair<ClusterIndex, ClusterIndex> counts{
+ (elem.und - m_sorted_depgraph.Ancestors(t)).Count(),
+ (elem.und - m_sorted_depgraph.Descendants(t)).Count()};
+ if (counts.first < counts.second) std::swap(counts.first, counts.second);
+ // Remember the t with the lowest counts.
+ if (!split_counts.has_value() || counts < *split_counts) {
+ split = t;
+ split_counts = counts;
+ }
+ }
+ // Since there was at least one transaction in select, we must always find one.
+ Assume(split_counts.has_value());
// Add a work item corresponding to exclusion of the split transaction.
- const auto& desc = m_depgraph.Descendants(split);
+ const auto& desc = m_sorted_depgraph.Descendants(split);
add_fn(/*inc=*/elem.inc,
/*und=*/elem.und - desc);
// Add a work item corresponding to inclusion of the split transaction.
- const auto anc = m_depgraph.Ancestors(split) & m_todo;
- add_fn(/*inc=*/elem.inc.Add(m_depgraph, anc),
+ const auto anc = m_sorted_depgraph.Ancestors(split) & m_todo;
+ add_fn(/*inc=*/elem.inc.Add(m_sorted_depgraph, anc),
/*und=*/elem.und - anc);
// Account for the performed split.
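A small worked sketch of the split-selection rule documented above: among candidate transactions, minimize 2^I(t) + 2^E(t), which the patch does by lexicographically comparing {max(I,E), min(I,E)} pairs rather than computing powers. The candidate values below are made up:

    #include <algorithm>
    #include <cassert>
    #include <utility>
    #include <vector>

    int main()
    {
        // Hypothetical candidates with (I(t), E(t)) = undecided-set sizes after
        // including respectively excluding t.
        struct Cand { unsigned idx; unsigned inc_left; unsigned exc_left; };
        std::vector<Cand> cands{{0, 6, 2}, {1, 4, 4}, {2, 5, 3}};

        unsigned best_idx = 0;
        std::pair<unsigned, unsigned> best_counts{~0u, ~0u};
        for (const Cand& c : cands) {
            std::pair<unsigned, unsigned> counts{std::max(c.inc_left, c.exc_left),
                                                 std::min(c.inc_left, c.exc_left)};
            if (counts < best_counts) {  // lexicographic: sort by max, then min
                best_counts = counts;
                best_idx = c.idx;
            }
        }
        // 2^4 + 2^4 = 32 beats 2^5 + 2^3 = 40 and 2^6 + 2^2 = 68.
        assert(best_idx == 1);
    }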
@@ -713,7 +979,9 @@ public:
split_fn(std::move(elem));
}
- // Return the found best set and the number of iterations performed.
+ // Return the found best set (converted to the original transaction indices), and the
+ // number of iterations performed.
+ best.transactions = SortedToOriginal(best.transactions);
return {std::move(best), max_iterations - iterations_left};
}
@@ -723,9 +991,10 @@ public:
*/
void MarkDone(const SetType& done) noexcept
{
- Assume(done.Any());
- Assume(done.IsSubsetOf(m_todo));
- m_todo -= done;
+ const auto done_sorted = OriginalToSorted(done);
+ Assume(done_sorted.Any());
+ Assume(done_sorted.IsSubsetOf(m_todo));
+ m_todo -= done_sorted;
}
};
@@ -744,7 +1013,7 @@ public:
* - A boolean indicating whether the result is guaranteed to be
* optimal.
*
- * Complexity: O(N * min(max_iterations + N, 2^N)) where N=depgraph.TxCount().
+ * Complexity: possibly O(N * min(max_iterations + N, sqrt(2^N))) where N=depgraph.TxCount().
*/
template<typename SetType>
std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& depgraph, uint64_t max_iterations, uint64_t rng_seed, Span<const ClusterIndex> old_linearization = {}) noexcept
@@ -756,10 +1025,20 @@ std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& de
std::vector<ClusterIndex> linearization;
AncestorCandidateFinder anc_finder(depgraph);
- SearchCandidateFinder src_finder(depgraph, rng_seed);
+ std::optional<SearchCandidateFinder<SetType>> src_finder;
linearization.reserve(depgraph.TxCount());
bool optimal = true;
+ // Treat the initialization of SearchCandidateFinder as taking N^2/64 (rounded up) iterations
+ // (largely due to the cost of constructing the internal sorted-by-feerate DepGraph inside
+    // SearchCandidateFinder), a rough approximation based on benchmarks. If we don't have that
+ // many, don't start it.
+ uint64_t start_iterations = (uint64_t{depgraph.TxCount()} * depgraph.TxCount() + 63) / 64;
+ if (iterations_left > start_iterations) {
+ iterations_left -= start_iterations;
+ src_finder.emplace(depgraph, rng_seed);
+ }
+
/** Chunking of what remains of the old linearization. */
LinearizationChunking old_chunking(depgraph, old_linearization);
@@ -772,12 +1051,22 @@ std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& de
auto best = anc_finder.FindCandidateSet();
if (!best_prefix.feerate.IsEmpty() && best_prefix.feerate >= best.feerate) best = best_prefix;
- // Invoke bounded search to update best, with up to half of our remaining iterations as
- // limit.
- uint64_t max_iterations_now = (iterations_left + 1) / 2;
uint64_t iterations_done_now = 0;
- std::tie(best, iterations_done_now) = src_finder.FindCandidateSet(max_iterations_now, best);
- iterations_left -= iterations_done_now;
+ uint64_t max_iterations_now = 0;
+ if (src_finder) {
+ // Treat the invocation of SearchCandidateFinder::FindCandidateSet() as costing N/4
+ // up-front (rounded up) iterations (largely due to the cost of connected-component
+ // splitting), a rough approximation based on benchmarks.
+ uint64_t base_iterations = (anc_finder.NumRemaining() + 3) / 4;
+ if (iterations_left > base_iterations) {
+ // Invoke bounded search to update best, with up to half of our remaining
+ // iterations as limit.
+ iterations_left -= base_iterations;
+ max_iterations_now = (iterations_left + 1) / 2;
+ std::tie(best, iterations_done_now) = src_finder->FindCandidateSet(max_iterations_now, best);
+ iterations_left -= iterations_done_now;
+ }
+ }
if (iterations_done_now == max_iterations_now) {
optimal = false;
@@ -795,7 +1084,7 @@ std::pair<std::vector<ClusterIndex>, bool> Linearize(const DepGraph<SetType>& de
// Update state to reflect best is no longer to be linearized.
anc_finder.MarkDone(best.transactions);
if (anc_finder.AllDone()) break;
- src_finder.MarkDone(best.transactions);
+ if (src_finder) src_finder->MarkDone(best.transactions);
if (old_chunking.NumChunksLeft() > 0) {
old_chunking.MarkDone(best.transactions);
}
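A toy walkthrough of the iteration accounting introduced above, with made-up numbers: constructing the search finder is charged ceil(N^2/64) iterations, each FindCandidateSet() call is charged ceil(remaining/4) up front, and the call then receives half of the remaining budget:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint64_t iterations_left = 5000;
        uint64_t tx_count = 32;

        // Charge for constructing SearchCandidateFinder (sorting + remapped graph).
        uint64_t start_iterations = (tx_count * tx_count + 63) / 64;  // ceil(1024/64) = 16
        assert(start_iterations == 16);
        iterations_left -= start_iterations;                          // 4984 left

        // Charge for one FindCandidateSet() invocation with 32 transactions remaining.
        uint64_t base_iterations = (tx_count + 3) / 4;                 // ceil(32/4) = 8
        iterations_left -= base_iterations;                            // 4976 left
        uint64_t max_iterations_now = (iterations_left + 1) / 2;       // half the remainder
        assert(max_iterations_now == 2488);
    }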
@@ -911,7 +1200,7 @@ void PostLinearize(const DepGraph<SetType>& depgraph, Span<ClusterIndex> lineari
// During an even pass, the diagram above would correspond to linearization [2,3,0,1], with
// groups [2] and [3,0,1].
- std::vector<TxEntry> entries(linearization.size() + 1);
+ std::vector<TxEntry> entries(depgraph.PositionRange() + 1);
// Perform two passes over the linearization.
for (int pass = 0; pass < 2; ++pass) {
diff --git a/src/common/args.cpp b/src/common/args.cpp
index a37a16b62b..f59d2b8f0f 100644
--- a/src/common/args.cpp
+++ b/src/common/args.cpp
@@ -16,6 +16,7 @@
#include <util/fs.h>
#include <util/fs_helpers.h>
#include <util/strencodings.h>
+#include <util/string.h>
#ifdef WIN32
#include <codecvt> /* for codecvt_utf8_utf16 */
@@ -588,6 +589,23 @@ void ArgsManager::AddHiddenArgs(const std::vector<std::string>& names)
}
}
+void ArgsManager::CheckMultipleCLIArgs() const
+{
+ LOCK(cs_args);
+ std::vector<std::string> found{};
+ auto cmds = m_available_args.find(OptionsCategory::CLI_COMMANDS);
+ if (cmds != m_available_args.end()) {
+ for (const auto& [cmd, argspec] : cmds->second) {
+ if (IsArgSet(cmd)) {
+ found.push_back(cmd);
+ }
+ }
+ if (found.size() > 1) {
+ throw std::runtime_error(strprintf("Only one of %s may be specified.", util::Join(found, ", ")));
+ }
+ }
+}
+
std::string ArgsManager::GetHelpMessage() const
{
const bool show_debug = GetBoolArg("-help-debug", false);
@@ -617,6 +635,9 @@ std::string ArgsManager::GetHelpMessage() const
case OptionsCategory::RPC:
usage += HelpMessageGroup("RPC server options:");
break;
+ case OptionsCategory::IPC:
+ usage += HelpMessageGroup("IPC interprocess connection options:");
+ break;
case OptionsCategory::WALLET:
usage += HelpMessageGroup("Wallet options:");
break;
@@ -635,6 +656,9 @@ std::string ArgsManager::GetHelpMessage() const
case OptionsCategory::REGISTER_COMMANDS:
usage += HelpMessageGroup("Register Commands:");
break;
+ case OptionsCategory::CLI_COMMANDS:
+ usage += HelpMessageGroup("CLI Commands:");
+ break;
default:
break;
}
diff --git a/src/common/args.h b/src/common/args.h
index 323a86d8dc..8d9daf5f65 100644
--- a/src/common/args.h
+++ b/src/common/args.h
@@ -63,6 +63,8 @@ enum class OptionsCategory {
GUI,
COMMANDS,
REGISTER_COMMANDS,
+ CLI_COMMANDS,
+ IPC,
HIDDEN // Always the last option to avoid printing these in the help
};
@@ -364,6 +366,13 @@ protected:
}
/**
+ * Check CLI command args
+ *
+ * @throws std::runtime_error when multiple CLI_COMMAND arguments are specified
+ */
+ void CheckMultipleCLIArgs() const;
+
+ /**
* Get the help string
*/
std::string GetHelpMessage() const;
diff --git a/src/common/netif.cpp b/src/common/netif.cpp
new file mode 100644
index 0000000000..08f034a412
--- /dev/null
+++ b/src/common/netif.cpp
@@ -0,0 +1,303 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or https://www.opensource.org/licenses/mit-license.php.
+
+#include <bitcoin-build-config.h> // IWYU pragma: keep
+
+#include <common/netif.h>
+
+#include <logging.h>
+#include <netbase.h>
+#include <util/check.h>
+#include <util/sock.h>
+#include <util/syserror.h>
+
+#if defined(__linux__)
+#include <linux/rtnetlink.h>
+#elif defined(__FreeBSD__)
+#include <osreldate.h>
+#if __FreeBSD_version >= 1400000
+// Workaround https://github.com/freebsd/freebsd-src/pull/1070.
+#define typeof __typeof
+#include <netlink/netlink.h>
+#include <netlink/netlink_route.h>
+#endif
+#elif defined(WIN32)
+#include <iphlpapi.h>
+#elif defined(__APPLE__)
+#include <net/route.h>
+#include <sys/sysctl.h>
+#endif
+
+namespace {
+
+// Linux and FreeBSD 14.0+. For FreeBSD 13.2 the code can be compiled but
+// running it requires loading a special kernel module, otherwise socket(AF_NETLINK,...)
+// will fail, so we skip that.
+#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 1400000)
+
+std::optional<CNetAddr> QueryDefaultGatewayImpl(sa_family_t family)
+{
+ // Create a netlink socket.
+ auto sock{CreateSock(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE)};
+ if (!sock) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "socket(AF_NETLINK): %s\n", NetworkErrorString(errno));
+ return std::nullopt;
+ }
+
+ // Send request.
+ struct {
+ nlmsghdr hdr; ///< Request header.
+ rtmsg data; ///< Request data, a "route message".
+ nlattr dst_hdr; ///< One attribute, conveying the route destination address.
+ char dst_data[16]; ///< Route destination address. To query the default route we use 0.0.0.0/0 or [::]/0. For IPv4 the first 4 bytes are used.
+ } request{};
+
+ // Whether to use the first 4 or 16 bytes from request.dst_data.
+ const size_t dst_data_len = family == AF_INET ? 4 : 16;
+
+ request.hdr.nlmsg_type = RTM_GETROUTE;
+ request.hdr.nlmsg_flags = NLM_F_REQUEST;
+#ifdef __linux__
+ // Linux IPv4 / IPv6 - this must be present, otherwise no gateway is found
+ // FreeBSD IPv4 - does not matter, the gateway is found with or without this
+ // FreeBSD IPv6 - this must be absent, otherwise no gateway is found
+ request.hdr.nlmsg_flags |= NLM_F_DUMP;
+#endif
+ request.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(rtmsg) + sizeof(nlattr) + dst_data_len);
+ request.hdr.nlmsg_seq = 0; // Sequence number, used to match which reply is to which request. Irrelevant for us because we send just one request.
+ request.data.rtm_family = family;
+ request.data.rtm_dst_len = 0; // Prefix length.
+#ifdef __FreeBSD__
+    // Linux IPv4 / IPv6 - this must be absent, otherwise no gateway is found
+ // FreeBSD IPv4 - does not matter, the gateway is found with or without this
+ // FreeBSD IPv6 - this must be present, otherwise no gateway is found
+ request.data.rtm_flags = RTM_F_PREFIX;
+#endif
+ request.dst_hdr.nla_type = RTA_DST;
+ request.dst_hdr.nla_len = sizeof(nlattr) + dst_data_len;
+
+ if (sock->Send(&request, request.hdr.nlmsg_len, 0) != static_cast<ssize_t>(request.hdr.nlmsg_len)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "send() to netlink socket: %s\n", NetworkErrorString(errno));
+ return std::nullopt;
+ }
+
+ // Receive response.
+ char response[4096];
+ int64_t recv_result;
+ do {
+ recv_result = sock->Recv(response, sizeof(response), 0);
+ } while (recv_result < 0 && (errno == EINTR || errno == EAGAIN));
+ if (recv_result < 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "recv() from netlink socket: %s\n", NetworkErrorString(errno));
+ return std::nullopt;
+ }
+
+ for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, recv_result); hdr = NLMSG_NEXT(hdr, recv_result)) {
+ rtmsg* r = (rtmsg*)NLMSG_DATA(hdr);
+ int remaining_len = RTM_PAYLOAD(hdr);
+
+ // Iterate over the attributes.
+ rtattr *rta_gateway = nullptr;
+ int scope_id = 0;
+ for (rtattr* attr = RTM_RTA(r); RTA_OK(attr, remaining_len); attr = RTA_NEXT(attr, remaining_len)) {
+ if (attr->rta_type == RTA_GATEWAY) {
+ rta_gateway = attr;
+ } else if (attr->rta_type == RTA_OIF && sizeof(int) == RTA_PAYLOAD(attr)) {
+ std::memcpy(&scope_id, RTA_DATA(attr), sizeof(scope_id));
+ }
+ }
+
+ // Found gateway?
+ if (rta_gateway != nullptr) {
+ if (family == AF_INET && sizeof(in_addr) == RTA_PAYLOAD(rta_gateway)) {
+ in_addr gw;
+ std::memcpy(&gw, RTA_DATA(rta_gateway), sizeof(gw));
+ return CNetAddr(gw);
+ } else if (family == AF_INET6 && sizeof(in6_addr) == RTA_PAYLOAD(rta_gateway)) {
+ in6_addr gw;
+ std::memcpy(&gw, RTA_DATA(rta_gateway), sizeof(gw));
+ return CNetAddr(gw, scope_id);
+ }
+ }
+ }
+
+ return std::nullopt;
+}
+
+#elif defined(WIN32)
+
+std::optional<CNetAddr> QueryDefaultGatewayImpl(sa_family_t family)
+{
+ NET_LUID interface_luid = {};
+ SOCKADDR_INET destination_address = {};
+ MIB_IPFORWARD_ROW2 best_route = {};
+ SOCKADDR_INET best_source_address = {};
+ DWORD best_if_idx = 0;
+ DWORD status = 0;
+
+ // Pass empty destination address of the requested type (:: or 0.0.0.0) to get interface of default route.
+ destination_address.si_family = family;
+ status = GetBestInterfaceEx((sockaddr*)&destination_address, &best_if_idx);
+ if (status != NO_ERROR) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get best interface for default route: %s\n", NetworkErrorString(status));
+ return std::nullopt;
+ }
+
+ // Get best route to default gateway.
+ // Leave interface_luid at all-zeros to use interface index instead.
+ status = GetBestRoute2(&interface_luid, best_if_idx, nullptr, &destination_address, 0, &best_route, &best_source_address);
+ if (status != NO_ERROR) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get best route for default route for interface index %d: %s\n",
+ best_if_idx, NetworkErrorString(status));
+ return std::nullopt;
+ }
+
+ Assume(best_route.NextHop.si_family == family);
+ if (family == AF_INET) {
+ return CNetAddr(best_route.NextHop.Ipv4.sin_addr);
+    } else if (family == AF_INET6) {
+ return CNetAddr(best_route.NextHop.Ipv6.sin6_addr, best_route.InterfaceIndex);
+ }
+ return std::nullopt;
+}
+
+#elif defined(__APPLE__)
+
+#define ROUNDUP32(a) \
+ ((a) > 0 ? (1 + (((a) - 1) | (sizeof(uint32_t) - 1))) : sizeof(uint32_t))
+
+std::optional<CNetAddr> FromSockAddr(const struct sockaddr* addr)
+{
+ // Check valid length. Note that sa_len is not part of POSIX, and exists on MacOS and some BSDs only, so we can't
+ // do this check in SetSockAddr.
+ if (!(addr->sa_family == AF_INET && addr->sa_len == sizeof(struct sockaddr_in)) &&
+ !(addr->sa_family == AF_INET6 && addr->sa_len == sizeof(struct sockaddr_in6))) {
+ return std::nullopt;
+ }
+
+ // Fill in a CService from the sockaddr, then drop the port part.
+ CService service;
+ if (service.SetSockAddr(addr)) {
+ return (CNetAddr)service;
+ }
+ return std::nullopt;
+}
+
+//! MacOS: Get default gateway from route table. See route(4) for the format.
+std::optional<CNetAddr> QueryDefaultGatewayImpl(sa_family_t family)
+{
+ // net.route.0.inet[6].flags.gateway
+ int mib[] = {CTL_NET, PF_ROUTE, 0, family, NET_RT_FLAGS, RTF_GATEWAY};
+ // The size of the available data is determined by calling sysctl() with oldp=nullptr. See sysctl(3).
+ size_t l = 0;
+ if (sysctl(/*name=*/mib, /*namelen=*/sizeof(mib) / sizeof(int), /*oldp=*/nullptr, /*oldlenp=*/&l, /*newp=*/nullptr, /*newlen=*/0) < 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get sysctl length of routing table: %s\n", SysErrorString(errno));
+ return std::nullopt;
+ }
+ std::vector<std::byte> buf(l);
+ if (sysctl(/*name=*/mib, /*namelen=*/sizeof(mib) / sizeof(int), /*oldp=*/buf.data(), /*oldlenp=*/&l, /*newp=*/nullptr, /*newlen=*/0) < 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get sysctl data of routing table: %s\n", SysErrorString(errno));
+ return std::nullopt;
+ }
+ // Iterate over messages (each message is a routing table entry).
+ for (size_t msg_pos = 0; msg_pos < buf.size(); ) {
+ if ((msg_pos + sizeof(rt_msghdr)) > buf.size()) return std::nullopt;
+ const struct rt_msghdr* rt = (const struct rt_msghdr*)(buf.data() + msg_pos);
+ const size_t next_msg_pos = msg_pos + rt->rtm_msglen;
+ if (rt->rtm_msglen < sizeof(rt_msghdr) || next_msg_pos > buf.size()) return std::nullopt;
+ // Iterate over addresses within message, get destination and gateway (if present).
+ // Address data starts after header.
+ size_t sa_pos = msg_pos + sizeof(struct rt_msghdr);
+ std::optional<CNetAddr> dst, gateway;
+ for (int i = 0; i < RTAX_MAX; i++) {
+ if (rt->rtm_addrs & (1 << i)) {
+ // 2 is just sa_len + sa_family, the theoretical minimum size of a socket address.
+ if ((sa_pos + 2) > next_msg_pos) return std::nullopt;
+ const struct sockaddr* sa = (const struct sockaddr*)(buf.data() + sa_pos);
+ if ((sa_pos + sa->sa_len) > next_msg_pos) return std::nullopt;
+ if (i == RTAX_DST) {
+ dst = FromSockAddr(sa);
+ } else if (i == RTAX_GATEWAY) {
+ gateway = FromSockAddr(sa);
+ }
+ // Skip sockaddr entries for bit flags we're not interested in,
+ // move cursor.
+ sa_pos += ROUNDUP32(sa->sa_len);
+ }
+ }
+ // Found default gateway?
+ if (dst && gateway && dst->IsBindAny()) { // Route to 0.0.0.0 or :: ?
+ return *gateway;
+ }
+ // Skip to next message.
+ msg_pos = next_msg_pos;
+ }
+ return std::nullopt;
+}
+
+#else
+
+// Dummy implementation.
+std::optional<CNetAddr> QueryDefaultGatewayImpl(sa_family_t)
+{
+ return std::nullopt;
+}
+
+#endif
+
+}
+
+std::optional<CNetAddr> QueryDefaultGateway(Network network)
+{
+ Assume(network == NET_IPV4 || network == NET_IPV6);
+
+ sa_family_t family;
+ if (network == NET_IPV4) {
+ family = AF_INET;
+    } else if (network == NET_IPV6) {
+ family = AF_INET6;
+ } else {
+ return std::nullopt;
+ }
+
+ std::optional<CNetAddr> ret = QueryDefaultGatewayImpl(family);
+
+ // It's possible for the default gateway to be 0.0.0.0 or ::0 on at least Windows
+ // for some routing strategies. If so, return as if no default gateway was found.
+ if (ret && !ret->IsBindAny()) {
+ return ret;
+ } else {
+ return std::nullopt;
+ }
+}
+
+std::vector<CNetAddr> GetLocalAddresses()
+{
+ std::vector<CNetAddr> addresses;
+#ifdef WIN32
+ char pszHostName[256] = "";
+ if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) {
+ addresses = LookupHost(pszHostName, 0, true);
+ }
+#elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS)
+ struct ifaddrs* myaddrs;
+ if (getifaddrs(&myaddrs) == 0) {
+ for (struct ifaddrs* ifa = myaddrs; ifa != nullptr; ifa = ifa->ifa_next)
+ {
+ if (ifa->ifa_addr == nullptr) continue;
+ if ((ifa->ifa_flags & IFF_UP) == 0) continue;
+ if ((ifa->ifa_flags & IFF_LOOPBACK) != 0) continue;
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
+ addresses.emplace_back(s4->sin_addr);
+ } else if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
+ addresses.emplace_back(s6->sin6_addr);
+ }
+ }
+ freeifaddrs(myaddrs);
+ }
+#endif
+ return addresses;
+}
diff --git a/src/common/netif.h b/src/common/netif.h
new file mode 100644
index 0000000000..55bc023be6
--- /dev/null
+++ b/src/common/netif.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or https://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_COMMON_NETIF_H
+#define BITCOIN_COMMON_NETIF_H
+
+#include <netaddress.h>
+
+#include <optional>
+
+//! Query the OS for the default gateway for `network`. This only makes sense for NET_IPV4 and NET_IPV6.
+//! Returns std::nullopt if it cannot be found, or there is no support for this OS.
+std::optional<CNetAddr> QueryDefaultGateway(Network network);
+
+//! Return all local non-loopback IPv4 and IPv6 network addresses.
+std::vector<CNetAddr> GetLocalAddresses();
+
+#endif // BITCOIN_COMMON_NETIF_H
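As a usage illustration only (not part of the patch), the two helpers declared in this header could be exercised roughly as follows, assuming the surrounding Bitcoin Core headers; GetNetworkName and LogPrintf come from the existing codebase:

    #include <common/netif.h>
    #include <logging.h>
    #include <netaddress.h>

    static void LogGatewaysAndAddresses()
    {
        for (Network net : {NET_IPV4, NET_IPV6}) {
            if (const auto gw{QueryDefaultGateway(net)}) {
                LogPrintf("Default gateway (%s): %s\n", GetNetworkName(net), gw->ToStringAddr());
            } else {
                LogPrintf("No default gateway found for %s\n", GetNetworkName(net));
            }
        }
        for (const CNetAddr& addr : GetLocalAddresses()) {
            LogPrintf("Local address: %s\n", addr.ToStringAddr());
        }
    }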
diff --git a/src/common/pcp.cpp b/src/common/pcp.cpp
new file mode 100644
index 0000000000..3cc1cba924
--- /dev/null
+++ b/src/common/pcp.cpp
@@ -0,0 +1,524 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or https://www.opensource.org/licenses/mit-license.php.
+
+#include <common/pcp.h>
+
+#include <common/netif.h>
+#include <crypto/common.h>
+#include <logging.h>
+#include <netaddress.h>
+#include <netbase.h>
+#include <random.h>
+#include <span.h>
+#include <util/check.h>
+#include <util/readwritefile.h>
+#include <util/sock.h>
+#include <util/strencodings.h>
+
+namespace {
+
+// RFC6886 NAT-PMP and RFC6887 Port Control Protocol (PCP) implementation.
+// NAT-PMP and PCP use network byte order (big-endian).
+
+// NAT-PMP (v0) protocol constants.
+//! NAT-PMP uses a fixed server port number (RFC6887 section 1.1).
+constexpr uint16_t NATPMP_SERVER_PORT = 5351;
+//! Version byte for NATPMP (RFC6886 1.1)
+constexpr uint8_t NATPMP_VERSION = 0;
+//! Request opcode base (RFC6886 3).
+constexpr uint8_t NATPMP_REQUEST = 0x00;
+//! Response opcode base (RFC6886 3).
+constexpr uint8_t NATPMP_RESPONSE = 0x80;
+//! Get external address (RFC6886 3.2)
+constexpr uint8_t NATPMP_OP_GETEXTERNAL = 0x00;
+//! Map TCP port (RFC6886 3.3)
+constexpr uint8_t NATPMP_OP_MAP_TCP = 0x02;
+//! Shared request header size in bytes.
+constexpr size_t NATPMP_REQUEST_HDR_SIZE = 2;
+//! Shared response header (minimum) size in bytes.
+constexpr size_t NATPMP_RESPONSE_HDR_SIZE = 8;
+//! GETEXTERNAL request size in bytes, including header (RFC6886 3.2).
+constexpr size_t NATPMP_GETEXTERNAL_REQUEST_SIZE = NATPMP_REQUEST_HDR_SIZE + 0;
+//! GETEXTERNAL response size in bytes, including header (RFC6886 3.2).
+constexpr size_t NATPMP_GETEXTERNAL_RESPONSE_SIZE = NATPMP_RESPONSE_HDR_SIZE + 4;
+//! MAP request size in bytes, including header (RFC6886 3.3).
+constexpr size_t NATPMP_MAP_REQUEST_SIZE = NATPMP_REQUEST_HDR_SIZE + 10;
+//! MAP response size in bytes, including header (RFC6886 3.3).
+constexpr size_t NATPMP_MAP_RESPONSE_SIZE = NATPMP_RESPONSE_HDR_SIZE + 8;
+
+// Shared header offsets (RFC6886 3.2, 3.3), relative to start of packet.
+//! Offset of version field in packets.
+constexpr size_t NATPMP_HDR_VERSION_OFS = 0;
+//! Offset of opcode field in packets
+constexpr size_t NATPMP_HDR_OP_OFS = 1;
+//! Offset of result code in packets. Result codes are 16 bit in NAT-PMP instead of 8 bit in PCP.
+constexpr size_t NATPMP_RESPONSE_HDR_RESULT_OFS = 2;
+
+// GETEXTERNAL response offsets (RFC6886 3.2), relative to start of packet.
+//! Returned external address
+constexpr size_t NATPMP_GETEXTERNAL_RESPONSE_IP_OFS = 8;
+
+// MAP request offsets (RFC6886 3.3), relative to start of packet.
+//! Internal port to be mapped.
+constexpr size_t NATPMP_MAP_REQUEST_INTERNAL_PORT_OFS = 4;
+//! Suggested external port for mapping.
+constexpr size_t NATPMP_MAP_REQUEST_EXTERNAL_PORT_OFS = 6;
+//! Requested port mapping lifetime in seconds.
+constexpr size_t NATPMP_MAP_REQUEST_LIFETIME_OFS = 8;
+
+// MAP response offsets (RFC6886 3.3), relative to start of packet.
+//! Internal port for mapping (will match internal port of request).
+constexpr size_t NATPMP_MAP_RESPONSE_INTERNAL_PORT_OFS = 8;
+//! External port for mapping.
+constexpr size_t NATPMP_MAP_RESPONSE_EXTERNAL_PORT_OFS = 10;
+//! Created port mapping lifetime in seconds.
+constexpr size_t NATPMP_MAP_RESPONSE_LIFETIME_OFS = 12;
+
+// Relevant NAT-PMP result codes (RFC6886 3.5).
+//! Result code representing success status.
+constexpr uint8_t NATPMP_RESULT_SUCCESS = 0;
+//! Result code representing unsupported version.
+constexpr uint8_t NATPMP_RESULT_UNSUPP_VERSION = 1;
+//! Result code representing lack of resources.
+constexpr uint8_t NATPMP_RESULT_NO_RESOURCES = 4;
+
+//! Mapping of NATPMP result code to string (RFC6886 3.5). Result codes <=2 match PCP.
+const std::map<uint8_t, std::string> NATPMP_RESULT_STR{
+ {0, "SUCCESS"},
+ {1, "UNSUPP_VERSION"},
+ {2, "NOT_AUTHORIZED"},
+ {3, "NETWORK_FAILURE"},
+ {4, "NO_RESOURCES"},
+ {5, "UNSUPP_OPCODE"},
+};
+
+// PCP (v2) protocol constants.
+//! Maximum packet size in bytes (RFC6887 section 7).
+constexpr size_t PCP_MAX_SIZE = 1100;
+//! PCP uses a fixed server port number (RFC6887 section 19.1). Shared with NAT-PMP.
+constexpr uint16_t PCP_SERVER_PORT = NATPMP_SERVER_PORT;
+//! Version byte. 0 is NAT-PMP (RFC6886), 1 is forbidden, 2 for PCP (RFC6887).
+constexpr uint8_t PCP_VERSION = 2;
+//! PCP Request Header. See RFC6887 section 7.1. Shared with NAT-PMP.
+constexpr uint8_t PCP_REQUEST = NATPMP_REQUEST; // R = 0
+//! PCP Response Header. See RFC6887 section 7.2. Shared with NAT-PMP.
+constexpr uint8_t PCP_RESPONSE = NATPMP_RESPONSE; // R = 1
+//! Map opcode. See RFC6887 section 19.2
+constexpr uint8_t PCP_OP_MAP = 0x01;
+//! TCP protocol number (IANA).
+constexpr uint16_t PCP_PROTOCOL_TCP = 6;
+//! Request and response header size in bytes (RFC6887 section 7.1).
+constexpr size_t PCP_HDR_SIZE = 24;
+//! Map request and response size in bytes (RFC6887 section 11.1).
+constexpr size_t PCP_MAP_SIZE = 36;
+
+// Header offsets shared between request and responses (RFC6887 7.1, 7.2), relative to start of packet.
+//! Version field (1 byte).
+constexpr size_t PCP_HDR_VERSION_OFS = NATPMP_HDR_VERSION_OFS;
+//! Opcode field (1 byte).
+constexpr size_t PCP_HDR_OP_OFS = NATPMP_HDR_OP_OFS;
+//! Requested lifetime (request), granted lifetime (response) (4 bytes).
+constexpr size_t PCP_HDR_LIFETIME_OFS = 4;
+
+// Request header offsets (RFC6887 7.1), relative to start of packet.
+//! PCP client's IP address (16 bytes).
+constexpr size_t PCP_REQUEST_HDR_IP_OFS = 8;
+
+// Response header offsets (RFC6887 7.2), relative to start of packet.
+//! Result code (1 byte).
+constexpr size_t PCP_RESPONSE_HDR_RESULT_OFS = 3;
+
+// MAP request/response offsets (RFC6887 11.1), relative to start of opcode-specific data.
+//! Mapping nonce (12 bytes).
+constexpr size_t PCP_MAP_NONCE_OFS = 0;
+//! Protocol (1 byte).
+constexpr size_t PCP_MAP_PROTOCOL_OFS = 12;
+//! Internal port for mapping (2 bytes).
+constexpr size_t PCP_MAP_INTERNAL_PORT_OFS = 16;
+//! Suggested external port (request), assigned external port (response) (2 bytes).
+constexpr size_t PCP_MAP_EXTERNAL_PORT_OFS = 18;
+//! Suggested external IP (request), assigned external IP (response) (16 bytes).
+constexpr size_t PCP_MAP_EXTERNAL_IP_OFS = 20;
+
+//! Result code representing success (RFC6887 7.4), shared with NAT-PMP.
+constexpr uint8_t PCP_RESULT_SUCCESS = NATPMP_RESULT_SUCCESS;
+//! Result code representing lack of resources (RFC6887 7.4).
+constexpr uint8_t PCP_RESULT_NO_RESOURCES = 8;
+
+//! Mapping of PCP result code to string (RFC6887 7.4). Result codes <=2 match NAT-PMP.
+const std::map<uint8_t, std::string> PCP_RESULT_STR{
+ {0, "SUCCESS"},
+ {1, "UNSUPP_VERSION"},
+ {2, "NOT_AUTHORIZED"},
+ {3, "MALFORMED_REQUEST"},
+ {4, "UNSUPP_OPCODE"},
+ {5, "UNSUPP_OPTION"},
+ {6, "MALFORMED_OPTION"},
+ {7, "NETWORK_FAILURE"},
+ {8, "NO_RESOURCES"},
+ {9, "UNSUPP_PROTOCOL"},
+ {10, "USER_EX_QUOTA"},
+ {11, "CANNOT_PROVIDE_EXTERNAL"},
+ {12, "ADDRESS_MISMATCH"},
+ {13, "EXCESSIVE_REMOTE_PEER"},
+};
+
+//! Return human-readable string from NATPMP result code.
+std::string NATPMPResultString(uint8_t result_code)
+{
+ auto result_i = NATPMP_RESULT_STR.find(result_code);
+ return strprintf("%s (code %d)", result_i == NATPMP_RESULT_STR.end() ? "(unknown)" : result_i->second, result_code);
+}
+
+//! Return human-readable string from PCP result code.
+std::string PCPResultString(uint8_t result_code)
+{
+ auto result_i = PCP_RESULT_STR.find(result_code);
+ return strprintf("%s (code %d)", result_i == PCP_RESULT_STR.end() ? "(unknown)" : result_i->second, result_code);
+}
+
+//! Wrap address in IPv6 according to RFC6887. wrapped_addr needs to be able to store 16 bytes.
+[[nodiscard]] bool PCPWrapAddress(Span<uint8_t> wrapped_addr, const CNetAddr &addr)
+{
+ Assume(wrapped_addr.size() == ADDR_IPV6_SIZE);
+ if (addr.IsIPv4()) {
+ struct in_addr addr4;
+ if (!addr.GetInAddr(&addr4)) return false;
+ // Section 5: "When the address field holds an IPv4 address, an IPv4-mapped IPv6 address [RFC4291] is used (::ffff:0:0/96)."
+ std::memcpy(wrapped_addr.data(), IPV4_IN_IPV6_PREFIX.data(), IPV4_IN_IPV6_PREFIX.size());
+ std::memcpy(wrapped_addr.data() + IPV4_IN_IPV6_PREFIX.size(), &addr4, ADDR_IPV4_SIZE);
+ return true;
+ } else if (addr.IsIPv6()) {
+ struct in6_addr addr6;
+ if (!addr.GetIn6Addr(&addr6)) return false;
+ std::memcpy(wrapped_addr.data(), &addr6, ADDR_IPV6_SIZE);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+//! Unwrap PCP-encoded address according to RFC6887.
+CNetAddr PCPUnwrapAddress(Span<const uint8_t> wrapped_addr)
+{
+ Assume(wrapped_addr.size() == ADDR_IPV6_SIZE);
+ if (util::HasPrefix(wrapped_addr, IPV4_IN_IPV6_PREFIX)) {
+ struct in_addr addr4;
+ std::memcpy(&addr4, wrapped_addr.data() + IPV4_IN_IPV6_PREFIX.size(), ADDR_IPV4_SIZE);
+ return CNetAddr(addr4);
+ } else {
+ struct in6_addr addr6;
+ std::memcpy(&addr6, wrapped_addr.data(), ADDR_IPV6_SIZE);
+ return CNetAddr(addr6);
+ }
+}
+
+//! PCP or NAT-PMP send-receive loop.
+std::optional<std::vector<uint8_t>> PCPSendRecv(Sock &sock, const std::string &protocol, Span<const uint8_t> request, int num_tries,
+ std::chrono::milliseconds timeout_per_try,
+ std::function<bool(Span<const uint8_t>)> check_packet)
+{
+ using namespace std::chrono;
+ // UDP is a potentially lossy protocol, so we try to send again a few times.
+ uint8_t response[PCP_MAX_SIZE];
+ bool got_response = false;
+ int recvsz = 0;
+ for (int ntry = 0; !got_response && ntry < num_tries; ++ntry) {
+ if (ntry > 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Retrying (%d)\n", protocol, ntry);
+ }
+ // Dispatch packet to gateway.
+ if (sock.Send(request.data(), request.size(), 0) != static_cast<ssize_t>(request.size())) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not send request: %s\n", protocol, NetworkErrorString(WSAGetLastError()));
+ return std::nullopt; // Network-level error, probably no use retrying.
+ }
+
+ // Wait for response(s) until we get a valid response, a network error, or time out.
+ auto cur_time = time_point_cast<milliseconds>(steady_clock::now());
+ auto deadline = cur_time + timeout_per_try;
+ while ((cur_time = time_point_cast<milliseconds>(steady_clock::now())) < deadline) {
+ Sock::Event occurred = 0;
+ if (!sock.Wait(deadline - cur_time, Sock::RECV, &occurred)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not wait on socket: %s\n", protocol, NetworkErrorString(WSAGetLastError()));
+ return std::nullopt; // Network-level error, probably no use retrying.
+ }
+ if (!occurred) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Timeout\n", protocol);
+ break; // Retry.
+ }
+
+ // Receive response.
+ recvsz = sock.Recv(response, sizeof(response), MSG_DONTWAIT);
+ if (recvsz < 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not receive response: %s\n", protocol, NetworkErrorString(WSAGetLastError()));
+ return std::nullopt; // Network-level error, probably no use retrying.
+ }
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Received response of %d bytes: %s\n", protocol, recvsz, HexStr(Span(response, recvsz)));
+
+ if (check_packet(Span<uint8_t>(response, recvsz))) {
+ got_response = true; // Got expected response, break from receive loop as well as from retry loop.
+ break;
+ }
+ }
+ }
+ if (!got_response) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Giving up after %d tries\n", protocol, num_tries);
+ return std::nullopt;
+ }
+ return std::vector<uint8_t>(response, response + recvsz);
+}
+
+}
+
+std::variant<MappingResult, MappingError> NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, int num_tries, std::chrono::milliseconds timeout_per_try)
+{
+ struct sockaddr_storage dest_addr;
+ socklen_t dest_addrlen = sizeof(struct sockaddr_storage);
+
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "natpmp: Requesting port mapping port %d from gateway %s\n", port, gateway.ToStringAddr());
+
+ // Validate gateway, make sure it's IPv4. NAT-PMP does not support IPv6.
+ if (!CService(gateway, PCP_SERVER_PORT).GetSockAddr((struct sockaddr*)&dest_addr, &dest_addrlen)) return MappingError::NETWORK_ERROR;
+ if (dest_addr.ss_family != AF_INET) return MappingError::NETWORK_ERROR;
+
+ // Create IPv4 UDP socket
+ auto sock{CreateSock(AF_INET, SOCK_DGRAM, IPPROTO_UDP)};
+ if (!sock) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Could not create UDP socket: %s\n", NetworkErrorString(WSAGetLastError()));
+ return MappingError::NETWORK_ERROR;
+ }
+
+ // Associate UDP socket to gateway.
+ if (sock->Connect((struct sockaddr*)&dest_addr, dest_addrlen) != 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Could not connect to gateway: %s\n", NetworkErrorString(WSAGetLastError()));
+ return MappingError::NETWORK_ERROR;
+ }
+
+ // Use getsockname to get the address toward the default gateway (the internal address).
+ struct sockaddr_in internal;
+ socklen_t internal_addrlen = sizeof(struct sockaddr_in);
+ if (sock->GetSockName((struct sockaddr*)&internal, &internal_addrlen) != 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Could not get sock name: %s\n", NetworkErrorString(WSAGetLastError()));
+ return MappingError::NETWORK_ERROR;
+ }
+
+ // Request external IP address (RFC6886 section 3.2).
+ std::vector<uint8_t> request(NATPMP_GETEXTERNAL_REQUEST_SIZE);
+ request[NATPMP_HDR_VERSION_OFS] = NATPMP_VERSION;
+ request[NATPMP_HDR_OP_OFS] = NATPMP_REQUEST | NATPMP_OP_GETEXTERNAL;
+
+ auto recv_res = PCPSendRecv(*sock, "natpmp", request, num_tries, timeout_per_try,
+ [&](const Span<const uint8_t> response) -> bool {
+ if (response.size() < NATPMP_GETEXTERNAL_RESPONSE_SIZE) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response too small\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ if (response[NATPMP_HDR_VERSION_OFS] != NATPMP_VERSION || response[NATPMP_HDR_OP_OFS] != (NATPMP_RESPONSE | NATPMP_OP_GETEXTERNAL)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response to wrong command\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ return true;
+ });
+
+ struct in_addr external_addr;
+ if (recv_res) {
+ const std::span<const uint8_t> response = *recv_res;
+
+ Assume(response.size() >= NATPMP_GETEXTERNAL_RESPONSE_SIZE);
+ uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS);
+ if (result_code != NATPMP_RESULT_SUCCESS) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Getting external address failed with result %s\n", NATPMPResultString(result_code));
+ return MappingError::PROTOCOL_ERROR;
+ }
+
+ std::memcpy(&external_addr, response.data() + NATPMP_GETEXTERNAL_RESPONSE_IP_OFS, ADDR_IPV4_SIZE);
+ } else {
+ return MappingError::NETWORK_ERROR;
+ }
+
+ // Create TCP mapping request (RFC6886 section 3.3).
+ request = std::vector<uint8_t>(NATPMP_MAP_REQUEST_SIZE);
+ request[NATPMP_HDR_VERSION_OFS] = NATPMP_VERSION;
+ request[NATPMP_HDR_OP_OFS] = NATPMP_REQUEST | NATPMP_OP_MAP_TCP;
+ WriteBE16(request.data() + NATPMP_MAP_REQUEST_INTERNAL_PORT_OFS, port);
+ WriteBE16(request.data() + NATPMP_MAP_REQUEST_EXTERNAL_PORT_OFS, port);
+ WriteBE32(request.data() + NATPMP_MAP_REQUEST_LIFETIME_OFS, lifetime);
+
+ recv_res = PCPSendRecv(*sock, "natpmp", request, num_tries, timeout_per_try,
+ [&](const Span<const uint8_t> response) -> bool {
+ if (response.size() < NATPMP_MAP_RESPONSE_SIZE) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response too small\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ if (response[0] != NATPMP_VERSION || response[1] != (NATPMP_RESPONSE | NATPMP_OP_MAP_TCP)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response to wrong command\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ uint16_t internal_port = ReadBE16(response.data() + NATPMP_MAP_RESPONSE_INTERNAL_PORT_OFS);
+ if (internal_port != port) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response port doesn't match request\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ return true;
+ });
+
+ if (recv_res) {
+ const std::span<uint8_t> response = *recv_res;
+
+ Assume(response.size() >= NATPMP_MAP_RESPONSE_SIZE);
+ uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS);
+ if (result_code != NATPMP_RESULT_SUCCESS) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code));
+ if (result_code == NATPMP_RESULT_NO_RESOURCES) {
+ return MappingError::NO_RESOURCES;
+ }
+ return MappingError::PROTOCOL_ERROR;
+ }
+
+ uint32_t lifetime_ret = ReadBE32(response.data() + NATPMP_MAP_RESPONSE_LIFETIME_OFS);
+ uint16_t external_port = ReadBE16(response.data() + NATPMP_MAP_RESPONSE_EXTERNAL_PORT_OFS);
+ return MappingResult(NATPMP_VERSION, CService(internal.sin_addr, port), CService(external_addr, external_port), lifetime_ret);
+ } else {
+ return MappingError::NETWORK_ERROR;
+ }
+}
+
+std::variant<MappingResult, MappingError> PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, int num_tries, std::chrono::milliseconds timeout_per_try)
+{
+ struct sockaddr_storage dest_addr, bind_addr;
+ socklen_t dest_addrlen = sizeof(struct sockaddr_storage), bind_addrlen = sizeof(struct sockaddr_storage);
+
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "pcp: Requesting port mapping for addr %s port %d from gateway %s\n", bind.ToStringAddr(), port, gateway.ToStringAddr());
+
+ // Validate addresses, make sure they're the same network family.
+ if (!CService(gateway, PCP_SERVER_PORT).GetSockAddr((struct sockaddr*)&dest_addr, &dest_addrlen)) return MappingError::NETWORK_ERROR;
+ if (!CService(bind, 0).GetSockAddr((struct sockaddr*)&bind_addr, &bind_addrlen)) return MappingError::NETWORK_ERROR;
+ if (dest_addr.ss_family != bind_addr.ss_family) return MappingError::NETWORK_ERROR;
+
+ // Create UDP socket (IPv4 or IPv6 based on provided gateway).
+ auto sock{CreateSock(dest_addr.ss_family, SOCK_DGRAM, IPPROTO_UDP)};
+ if (!sock) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not create UDP socket: %s\n", NetworkErrorString(WSAGetLastError()));
+ return MappingError::NETWORK_ERROR;
+ }
+
+ // Make sure that we send from the requested source (bind) address; anything else will be
+ // rejected by a security-conscious router.
+ if (sock->Bind((struct sockaddr*)&bind_addr, bind_addrlen) != 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not bind to address: %s\n", NetworkErrorString(WSAGetLastError()));
+ return MappingError::NETWORK_ERROR;
+ }
+
+ // Associate UDP socket to gateway.
+ if (sock->Connect((struct sockaddr*)&dest_addr, dest_addrlen) != 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not connect to gateway: %s\n", NetworkErrorString(WSAGetLastError()));
+ return MappingError::NETWORK_ERROR;
+ }
+
+ // Use getsockname to get the address toward the default gateway (the internal address),
+ // in case we don't know what address to map
+ // (this is only needed if bind is INADDR_ANY, but it doesn't hurt as an extra check).
+ struct sockaddr_storage internal_addr;
+ socklen_t internal_addrlen = sizeof(struct sockaddr_storage);
+ if (sock->GetSockName((struct sockaddr*)&internal_addr, &internal_addrlen) != 0) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not get sock name: %s\n", NetworkErrorString(WSAGetLastError()));
+ return MappingError::NETWORK_ERROR;
+ }
+ CService internal;
+ if (!internal.SetSockAddr((struct sockaddr*)&internal_addr)) return MappingError::NETWORK_ERROR;
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "pcp: Internal address after connect: %s\n", internal.ToStringAddr());
+
+ // Build request packet. Make sure the packet is zeroed so that reserved fields are zero
+ // as required by the spec (and so that no data is accidentally leaked).
+ // Make sure there's space for the request header and MAP specific request data.
+ std::vector<uint8_t> request(PCP_HDR_SIZE + PCP_MAP_SIZE);
+ // Fill in the request header; see RFC6887 Figure 2.
+ size_t ofs = 0;
+ request[ofs + PCP_HDR_VERSION_OFS] = PCP_VERSION;
+ request[ofs + PCP_HDR_OP_OFS] = PCP_REQUEST | PCP_OP_MAP;
+ WriteBE32(request.data() + ofs + PCP_HDR_LIFETIME_OFS, lifetime);
+ if (!PCPWrapAddress(Span(request).subspan(ofs + PCP_REQUEST_HDR_IP_OFS, ADDR_IPV6_SIZE), internal)) return MappingError::NETWORK_ERROR;
+
+ ofs += PCP_HDR_SIZE;
+
+ // Fill in the MAP request packet; see RFC6887 Figure 9.
+ // Copy in the caller-provided mapping nonce (it is echoed in the response so that
+ // requests and responses can be correlated, and it authenticates later changes to the mapping).
+ std::memcpy(request.data() + ofs + PCP_MAP_NONCE_OFS, nonce.data(), PCP_MAP_NONCE_SIZE);
+ request[ofs + PCP_MAP_PROTOCOL_OFS] = PCP_PROTOCOL_TCP;
+ WriteBE16(request.data() + ofs + PCP_MAP_INTERNAL_PORT_OFS, port);
+ WriteBE16(request.data() + ofs + PCP_MAP_EXTERNAL_PORT_OFS, port);
+ if (!PCPWrapAddress(Span(request).subspan(ofs + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE), bind)) return MappingError::NETWORK_ERROR;
+
+ ofs += PCP_MAP_SIZE;
+ Assume(ofs == request.size());
+
+ // Receive loop.
+ bool is_natpmp = false;
+ auto recv_res = PCPSendRecv(*sock, "pcp", request, num_tries, timeout_per_try,
+ [&](const Span<const uint8_t> response) -> bool {
+ // Unsupported version according to RFC6887 appendix A and RFC6886 section 3.5, can fall back to NAT-PMP.
+ if (response.size() == NATPMP_RESPONSE_HDR_SIZE && response[PCP_HDR_VERSION_OFS] == NATPMP_VERSION && response[PCP_RESPONSE_HDR_RESULT_OFS] == NATPMP_RESULT_UNSUPP_VERSION) {
+ is_natpmp = true;
+ return true; // Let it through to caller.
+ }
+ if (response.size() < (PCP_HDR_SIZE + PCP_MAP_SIZE)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Response too small\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ if (response[PCP_HDR_VERSION_OFS] != PCP_VERSION || response[PCP_HDR_OP_OFS] != (PCP_RESPONSE | PCP_OP_MAP)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Response to wrong command\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ // Handle MAP opcode response. See RFC6887 Figure 10.
+ // Check that returned mapping nonce matches our request.
+ if (!std::ranges::equal(response.subspan(PCP_HDR_SIZE + PCP_MAP_NONCE_OFS, PCP_MAP_NONCE_SIZE), nonce)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Mapping nonce mismatch\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ uint8_t protocol = response[PCP_HDR_SIZE + 12];
+ uint16_t internal_port = ReadBE16(response.data() + PCP_HDR_SIZE + 16);
+ if (protocol != PCP_PROTOCOL_TCP || internal_port != port) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Response protocol or port doesn't match request\n");
+ return false; // Wasn't response to what we expected, try receiving next packet.
+ }
+ return true;
+ });
+
+ if (!recv_res) {
+ return MappingError::NETWORK_ERROR;
+ }
+ if (is_natpmp) {
+ return MappingError::UNSUPP_VERSION;
+ }
+
+ const std::span<const uint8_t> response = *recv_res;
+ // If we get here, we got a valid MAP response to our request.
+ // Check to see if we got the result we expected.
+ Assume(response.size() >= (PCP_HDR_SIZE + PCP_MAP_SIZE));
+ uint8_t result_code = response[PCP_RESPONSE_HDR_RESULT_OFS];
+ uint32_t lifetime_ret = ReadBE32(response.data() + PCP_HDR_LIFETIME_OFS);
+ uint16_t external_port = ReadBE16(response.data() + PCP_HDR_SIZE + PCP_MAP_EXTERNAL_PORT_OFS);
+ CNetAddr external_addr{PCPUnwrapAddress(response.subspan(PCP_HDR_SIZE + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE))};
+ if (result_code != PCP_RESULT_SUCCESS) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Mapping failed with result %s\n", PCPResultString(result_code));
+ if (result_code == PCP_RESULT_NO_RESOURCES) {
+ return MappingError::NO_RESOURCES;
+ }
+ return MappingError::PROTOCOL_ERROR;
+ }
+
+ return MappingResult(PCP_VERSION, CService(internal, port), CService(external_addr, external_port), lifetime_ret);
+}
+
+std::string MappingResult::ToString()
+{
+ Assume(version == NATPMP_VERSION || version == PCP_VERSION);
+ return strprintf("%s:%s -> %s (for %ds)",
+ version == NATPMP_VERSION ? "natpmp" : "pcp",
+ external.ToStringAddrPort(),
+ internal.ToStringAddrPort(),
+ lifetime
+ );
+}
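
As an illustration of the address handling above (not part of the patch): a minimal, standard-library-only sketch of the RFC 6887 round trip that PCPWrapAddress()/PCPUnwrapAddress() perform, with the 12-byte ::ffff:0:0/96 prefix written out by hand rather than taken from IPV4_IN_IPV6_PREFIX.

    #include <array>
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main()
    {
        const std::array<uint8_t, 12> prefix{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff};
        const std::array<uint8_t, 4> ipv4{192, 0, 2, 1}; // 192.0.2.1 (TEST-NET-1)

        // Wrap: prefix first, then the 4 IPv4 bytes, giving ::ffff:192.0.2.1.
        std::array<uint8_t, 16> wrapped{};
        std::memcpy(wrapped.data(), prefix.data(), prefix.size());
        std::memcpy(wrapped.data() + prefix.size(), ipv4.data(), ipv4.size());

        // Unwrap: recognise the prefix and take the trailing 4 bytes, as PCPUnwrapAddress() does.
        assert(std::memcmp(wrapped.data(), prefix.data(), prefix.size()) == 0);
        std::array<uint8_t, 4> unwrapped{};
        std::memcpy(unwrapped.data(), wrapped.data() + prefix.size(), unwrapped.size());
        assert(unwrapped == ipv4);
        return 0;
    }
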
diff --git a/src/common/pcp.h b/src/common/pcp.h
new file mode 100644
index 0000000000..ce2273e140
--- /dev/null
+++ b/src/common/pcp.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or https://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_COMMON_PCP_H
+#define BITCOIN_COMMON_PCP_H
+
+#include <netaddress.h>
+
+#include <variant>
+
+// RFC6886 NAT-PMP and RFC6887 Port Control Protocol (PCP) implementation.
+// NAT-PMP and PCP use network byte order (big-endian).
+
+//! Mapping nonce size in bytes (see RFC6887 section 11.1).
+constexpr size_t PCP_MAP_NONCE_SIZE = 12;
+
+//! PCP mapping nonce. Arbitrary data chosen by the client to identify a mapping.
+typedef std::array<uint8_t, PCP_MAP_NONCE_SIZE> PCPMappingNonce;
+
+//! Unsuccessful response to a port mapping.
+enum class MappingError {
+ NETWORK_ERROR, ///< Any kind of network-level error.
+ PROTOCOL_ERROR, ///< Any kind of protocol-level error, except unsupported version or no resources.
+ UNSUPP_VERSION, ///< Unsupported protocol version.
+ NO_RESOURCES, ///< No resources available (port probably already mapped).
+};
+
+//! Successful response to a port mapping.
+struct MappingResult {
+ MappingResult(uint8_t version, const CService &internal_in, const CService &external_in, uint32_t lifetime_in):
+ version(version), internal(internal_in), external(external_in), lifetime(lifetime_in) {}
+ //! Protocol version, one of NATPMP_VERSION or PCP_VERSION.
+ uint8_t version;
+ //! Internal host:port.
+ CService internal;
+ //! External host:port.
+ CService external;
+ //! Granted lifetime of binding (seconds).
+ uint32_t lifetime;
+
+ //! Format mapping as string for logging.
+ std::string ToString();
+};
+
+//! Try to open a port using RFC 6886 NAT-PMP. IPv4 only.
+//!
+//! * gateway: Destination address for PCP requests (usually the default gateway).
+//! * port: Internal port, and desired external port.
+//! * lifetime: Requested lifetime in seconds for the mapping. The server may assign a shorter or longer lifetime. A lifetime of 0 deletes the mapping.
+//! * num_tries: Number of tries in case of no response.
+//!
+//! Returns the external_ip:external_port of the mapping if successful, otherwise a MappingError.
+std::variant<MappingResult, MappingError> NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000));
+
+//! Try to open a port using RFC 6887 Port Control Protocol (PCP). Handles IPv4 and IPv6.
+//!
+//! * nonce: Mapping cookie. Keep this the same over renewals.
+//! * gateway: Destination address for PCP requests (usually the default gateway).
+//! * bind: Specific local bind address for IPv6 pinholing. Set this to INADDR_ANY for IPv4.
+//! * port: Internal port, and desired external port.
+//! * lifetime: Requested lifetime in seconds for the mapping. The server may assign a shorter or longer lifetime. A lifetime of 0 deletes the mapping.
+//! * num_tries: Number of tries in case of no response.
+//!
+//! Returns the external_ip:external_port of the mapping if successful, otherwise a MappingError.
+std::variant<MappingResult, MappingError> PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000));
+
+#endif // BITCOIN_COMMON_PCP_H
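
A hedged usage sketch for the API declared above (not part of the patch). The gateway and local-address discovery, and the use of GetRandBytes() to fill the nonce, are assumptions made for illustration; only the two request functions and the result types come from this header.

    #include <common/pcp.h>
    #include <netaddress.h>
    #include <random.h>

    #include <optional>
    #include <variant>

    // Request a TCP mapping for `port`, preferring PCP and falling back to
    // NAT-PMP when the gateway reports an unsupported version.
    std::optional<MappingResult> TryMapPort(const CNetAddr& gateway, const CNetAddr& local_addr, uint16_t port)
    {
        PCPMappingNonce nonce;
        GetRandBytes(nonce); // keep the same nonce when later renewing this mapping
        auto res = PCPRequestPortMap(nonce, gateway, local_addr, port, /*lifetime=*/1200);
        if (const auto* err = std::get_if<MappingError>(&res); err && *err == MappingError::UNSUPP_VERSION) {
            res = NATPMPRequestPortMap(gateway, port, /*lifetime=*/1200);
        }
        if (const auto* ok = std::get_if<MappingResult>(&res)) return *ok;
        return std::nullopt;
    }
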
diff --git a/src/common/run_command.cpp b/src/common/run_command.cpp
index 67608b985f..1f6d51b4f4 100644
--- a/src/common/run_command.cpp
+++ b/src/common/run_command.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <common/run_command.h>
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index c1520dacd2..0b11e246c6 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -4,7 +4,7 @@
#include <common/settings.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <tinyformat.h>
#include <univalue.h>
diff --git a/src/common/system.cpp b/src/common/system.cpp
index 6d04c8a7bc..6a9463a0a5 100644
--- a/src/common/system.cpp
+++ b/src/common/system.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <common/system.h>
diff --git a/src/common/system.h b/src/common/system.h
index d9115d3b33..a4b56be9ac 100644
--- a/src/common/system.h
+++ b/src/common/system.h
@@ -6,7 +6,7 @@
#ifndef BITCOIN_COMMON_SYSTEM_H
#define BITCOIN_COMMON_SYSTEM_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <cstdint>
#include <string>
diff --git a/src/consensus/merkle.cpp b/src/consensus/merkle.cpp
index af01902c92..dc32f0ab80 100644
--- a/src/consensus/merkle.cpp
+++ b/src/consensus/merkle.cpp
@@ -83,3 +83,106 @@ uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated)
return ComputeMerkleRoot(std::move(leaves), mutated);
}
+/* This implements a constant-space merkle root/path calculator, limited to 2^32 leaves. */
+static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot, bool* pmutated, uint32_t branchpos, std::vector<uint256>* pbranch) {
+ if (pbranch) pbranch->clear();
+ if (leaves.size() == 0) {
+ if (pmutated) *pmutated = false;
+ if (proot) *proot = uint256();
+ return;
+ }
+ bool mutated = false;
+ // count is the number of leaves processed so far.
+ uint32_t count = 0;
+ // inner is an array of eagerly computed subtree hashes, indexed by tree
+ // level (0 being the leaves).
+ // For example, when count is 25 (11001 in binary), inner[4] is the hash of
+ // the first 16 leaves, inner[3] of the next 8 leaves, and inner[0] equal to
+ // the last leaf. The other inner entries are undefined.
+ uint256 inner[32];
+ // Which position in inner is a hash that depends on the matching leaf.
+ int matchlevel = -1;
+ // First process all leaves into 'inner' values.
+ while (count < leaves.size()) {
+ uint256 h = leaves[count];
+ bool matchh = count == branchpos;
+ count++;
+ int level;
+ // For each of the lower bits in count that are 0, do 1 step. Each
+ // corresponds to an inner value that existed before processing the
+ // current leaf, and each needs a hash to combine it.
+ for (level = 0; !(count & ((uint32_t{1}) << level)); level++) {
+ if (pbranch) {
+ if (matchh) {
+ pbranch->push_back(inner[level]);
+ } else if (matchlevel == level) {
+ pbranch->push_back(h);
+ matchh = true;
+ }
+ }
+ mutated |= (inner[level] == h);
+ h = Hash(inner[level], h);
+ }
+ // Store the resulting hash at inner position level.
+ inner[level] = h;
+ if (matchh) {
+ matchlevel = level;
+ }
+ }
+ // Do a final 'sweep' over the rightmost branch of the tree to process
+ // odd levels, and reduce everything to a single top value.
+ // Level is the level (counted from the bottom) up to which we've swept.
+ int level = 0;
+ // As long as bit number level in count is zero, skip it. It means there
+ // is nothing left at this level.
+ while (!(count & ((uint32_t{1}) << level))) {
+ level++;
+ }
+ uint256 h = inner[level];
+ bool matchh = matchlevel == level;
+ while (count != ((uint32_t{1}) << level)) {
+ // If we reach this point, h is an inner value that is not the top.
+ // We combine it with itself (Bitcoin's special rule for odd levels in
+ // the tree) to produce a higher level one.
+ if (pbranch && matchh) {
+ pbranch->push_back(h);
+ }
+ h = Hash(h, h);
+ // Increment count to the value it would have if two entries at this
+ // level had existed.
+ count += ((uint32_t{1}) << level);
+ level++;
+ // And propagate the result upwards accordingly.
+ while (!(count & ((uint32_t{1}) << level))) {
+ if (pbranch) {
+ if (matchh) {
+ pbranch->push_back(inner[level]);
+ } else if (matchlevel == level) {
+ pbranch->push_back(h);
+ matchh = true;
+ }
+ }
+ h = Hash(inner[level], h);
+ level++;
+ }
+ }
+ // Return result.
+ if (pmutated) *pmutated = mutated;
+ if (proot) *proot = h;
+}
+
+static std::vector<uint256> ComputeMerkleBranch(const std::vector<uint256>& leaves, uint32_t position) {
+ std::vector<uint256> ret;
+ MerkleComputation(leaves, nullptr, nullptr, position, &ret);
+ return ret;
+}
+
+std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position)
+{
+ std::vector<uint256> leaves;
+ leaves.resize(block.vtx.size());
+ for (size_t s = 0; s < block.vtx.size(); s++) {
+ leaves[s] = block.vtx[s]->GetHash();
+ }
+ return ComputeMerkleBranch(leaves, position);
+}
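
Not part of the patch, but as a sketch of how a path returned by BlockMerkleBranch() is conventionally folded back to the root; the two-argument Hash() is the same double-SHA256 helper used in MerkleComputation() above, and the helper name here is hypothetical.

    #include <hash.h>
    #include <uint256.h>

    #include <cstdint>
    #include <vector>

    // At each level, bit i of `position` says whether our node was the right
    // child (bit set) or the left child (bit clear), i.e. which side the sibling goes on.
    uint256 FoldMerkleBranch(const uint256& leaf, const std::vector<uint256>& branch, uint32_t position)
    {
        uint256 h = leaf;
        for (const uint256& sibling : branch) {
            h = (position & 1) ? Hash(sibling, h) : Hash(h, sibling);
            position >>= 1;
        }
        return h; // equals BlockMerkleRoot() when leaf, position and branch are consistent
    }

For the coinbase (position 0) every step hashes h on the left, which matches the default argument added in the header change that follows.
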
diff --git a/src/consensus/merkle.h b/src/consensus/merkle.h
index 4ae5a5b897..363f68039c 100644
--- a/src/consensus/merkle.h
+++ b/src/consensus/merkle.h
@@ -24,4 +24,14 @@ uint256 BlockMerkleRoot(const CBlock& block, bool* mutated = nullptr);
*/
uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated = nullptr);
+/**
+ * Compute merkle path to the specified transaction
+ *
+ * @param[in] block the block
+ * @param[in] position transaction for which to calculate the merkle path, defaults to coinbase
+ *
+ * @return merkle path ordered from the deepest
+ */
+std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position = 0);
+
#endif // BITCOIN_CONSENSUS_MERKLE_H
diff --git a/src/crypto/common.h b/src/crypto/common.h
index 1dc4f3f55c..d45459b1f6 100644
--- a/src/crypto/common.h
+++ b/src/crypto/common.h
@@ -70,6 +70,12 @@ uint64_t static inline ReadBE64(const unsigned char* ptr)
return be64toh_internal(x);
}
+void static inline WriteBE16(unsigned char* ptr, uint16_t x)
+{
+ uint16_t v = htobe16_internal(x);
+ memcpy(ptr, &v, 2);
+}
+
void static inline WriteBE32(unsigned char* ptr, uint32_t x)
{
uint32_t v = htobe32_internal(x);
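
A quick sanity sketch for the new big-endian writer (assuming the ReadBE16() counterpart that src/common/pcp.cpp above relies on is available from the same header); illustration only, not part of the patch.

    #include <crypto/common.h>

    #include <cassert>

    int main()
    {
        unsigned char buf[2];
        WriteBE16(buf, 0x1234);
        assert(buf[0] == 0x12 && buf[1] == 0x34); // most significant byte first (network order)
        assert(ReadBE16(buf) == 0x1234);          // round-trips through the reader used in pcp.cpp
        return 0;
    }
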
diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp
index deedc0a6d1..09c5d3123e 100644
--- a/src/crypto/sha256.cpp
+++ b/src/crypto/sha256.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <crypto/sha256.h>
#include <crypto/common.h>
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 05767a253f..69dd821dc0 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -314,8 +314,9 @@ static bool InitRPCAuthentication()
LogPrintf("Config options rpcuser and rpcpassword will soon be deprecated. Locally-run instances may remove rpcuser to use cookie-based auth, or may be replaced with rpcauth. Please see share/rpcauth for rpcauth auth generation.\n");
strRPCUserColonPass = gArgs.GetArg("-rpcuser", "") + ":" + gArgs.GetArg("-rpcpassword", "");
}
- if (gArgs.GetArg("-rpcauth", "") != "") {
- LogPrintf("Using rpcauth authentication.\n");
+
+ if (!gArgs.GetArgs("-rpcauth").empty()) {
+ LogInfo("Using rpcauth authentication.\n");
for (const std::string& rpcauth : gArgs.GetArgs("-rpcauth")) {
std::vector<std::string> fields{SplitString(rpcauth, ':')};
const std::vector<std::string> salt_hmac{SplitString(fields.back(), '$')};
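
For context on the splitting above: an -rpcauth entry has the form <user>:<salt>$<hmac>. A hedged, standard-library-only sketch (ParseRpcAuth is a hypothetical helper, not part of the patch, and it mirrors rather than reuses SplitString):

    #include <optional>
    #include <string>
    #include <tuple>

    std::optional<std::tuple<std::string, std::string, std::string>> ParseRpcAuth(const std::string& entry)
    {
        const size_t colon = entry.find(':');
        const size_t dollar = entry.rfind('$');
        if (colon == std::string::npos || dollar == std::string::npos || dollar < colon) return std::nullopt;
        return std::make_tuple(entry.substr(0, colon),                      // user
                               entry.substr(colon + 1, dollar - colon - 1), // salt
                               entry.substr(dollar + 1));                   // HMAC-SHA256 of the password, hex
    }
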
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 2044be56a6..b8772ed852 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -2,12 +2,13 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <httpserver.h>
#include <chainparamsbase.h>
#include <common/args.h>
+#include <common/messages.h>
#include <compat/compat.h>
#include <logging.h>
#include <netbase.h>
@@ -43,6 +44,8 @@
#include <support/events.h>
+using common::InvalidPortErrMsg;
+
/** Maximum size of http request (request line + headers) */
static const size_t MAX_HEADERS_SIZE = 8192;
@@ -315,7 +318,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
if (i->exactMatch)
match = (strURI == i->prefix);
else
- match = (strURI.substr(0, i->prefix.size()) == i->prefix);
+ match = strURI.starts_with(i->prefix);
if (match) {
path = strURI.substr(i->prefix.size());
break;
@@ -374,7 +377,10 @@ static bool HTTPBindAddresses(struct evhttp* http)
for (const std::string& strRPCBind : gArgs.GetArgs("-rpcbind")) {
uint16_t port{http_port};
std::string host;
- SplitHostPort(strRPCBind, port, host);
+ if (!SplitHostPort(strRPCBind, port, host)) {
+ LogError("%s\n", InvalidPortErrMsg("-rpcbind", strRPCBind).original);
+ return false;
+ }
endpoints.emplace_back(host, port);
}
}
@@ -388,6 +394,12 @@ static bool HTTPBindAddresses(struct evhttp* http)
if (i->first.empty() || (addr.has_value() && addr->IsBindAny())) {
LogPrintf("WARNING: the RPC server is not safe to expose to untrusted networks such as the public internet\n");
}
+ // Set the no-delay option (disable Nagle's algorithm) on the TCP socket.
+ evutil_socket_t fd = evhttp_bound_socket_get_fd(bind_handle);
+ int one = 1;
+ if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (sockopt_arg_type)&one, sizeof(one)) == SOCKET_ERROR) {
+ LogInfo("WARNING: Unable to set TCP_NODELAY on RPC server socket, continuing anyway\n");
+ }
boundSockets.push_back(bind_handle);
} else {
LogPrintf("Binding RPC on address %s port %i failed.\n", i->first, i->second);
diff --git a/src/index/base.cpp b/src/index/base.cpp
index 955d7b67c9..1a7eb9cd5e 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2022 The Bitcoin Core developers
+// Copyright (c) 2017-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -14,6 +14,7 @@
#include <node/database_args.h>
#include <node/interface_ui.h>
#include <tinyformat.h>
+#include <util/string.h>
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h> // For g_chainman
@@ -27,10 +28,10 @@ constexpr auto SYNC_LOG_INTERVAL{30s};
constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};
template <typename... Args>
-void BaseIndex::FatalErrorf(const char* fmt, const Args&... args)
+void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
{
auto message = tfm::format(fmt, args...);
- node::AbortNode(m_chain->context()->shutdown, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
+ node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
}
CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
@@ -112,7 +113,7 @@ bool BaseIndex::Init()
// Child init
const CBlockIndex* start_block = m_best_block_index.load();
- if (!CustomInit(start_block ? std::make_optional(interfaces::BlockKey{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
+ if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
return false;
}
diff --git a/src/index/base.h b/src/index/base.h
index 0eb1d9ca3b..fbd9069a51 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2022 The Bitcoin Core developers
+// Copyright (c) 2017-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -7,6 +7,8 @@
#include <dbwrapper.h>
#include <interfaces/chain.h>
+#include <interfaces/types.h>
+#include <util/string.h>
#include <util/threadinterrupt.h>
#include <validationinterface.h>
@@ -94,7 +96,7 @@ private:
virtual bool AllowPrune() const = 0;
template <typename... Args>
- void FatalErrorf(const char* fmt, const Args&... args);
+ void FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args);
protected:
std::unique_ptr<interfaces::Chain> m_chain;
@@ -106,7 +108,7 @@ protected:
void ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) override;
/// Initialize internal state from the database and block index.
- [[nodiscard]] virtual bool CustomInit(const std::optional<interfaces::BlockKey>& block) { return true; }
+ [[nodiscard]] virtual bool CustomInit(const std::optional<interfaces::BlockRef>& block) { return true; }
/// Write update index entries for a newly connected block.
[[nodiscard]] virtual bool CustomAppend(const interfaces::BlockInfo& block) { return true; }
@@ -117,7 +119,7 @@ protected:
/// Rewind index to an earlier chain tip during a chain reorg. The tip must
/// be an ancestor of the current best block.
- [[nodiscard]] virtual bool CustomRewind(const interfaces::BlockKey& current_tip, const interfaces::BlockKey& new_tip) { return true; }
+ [[nodiscard]] virtual bool CustomRewind(const interfaces::BlockRef& current_tip, const interfaces::BlockRef& new_tip) { return true; }
virtual DB& GetDB() const = 0;
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index 41bdca9df5..a808cc9085 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -112,7 +112,7 @@ BlockFilterIndex::BlockFilterIndex(std::unique_ptr<interfaces::Chain> chain, Blo
m_filter_fileseq = std::make_unique<FlatFileSeq>(std::move(path), "fltr", FLTR_FILE_CHUNK_SIZE);
}
-bool BlockFilterIndex::CustomInit(const std::optional<interfaces::BlockKey>& block)
+bool BlockFilterIndex::CustomInit(const std::optional<interfaces::BlockRef>& block)
{
if (!m_db->Read(DB_FILTER_POS, m_next_filter_pos)) {
// Check that the cause of the read failure is that the key does not exist. Any other errors
@@ -151,7 +151,7 @@ bool BlockFilterIndex::CustomCommit(CDBBatch& batch)
LogError("%s: Failed to open filter file %d\n", __func__, pos.nFile);
return false;
}
- if (!FileCommit(file.Get())) {
+ if (!file.Commit()) {
LogError("%s: Failed to commit filter file %d\n", __func__, pos.nFile);
return false;
}
@@ -201,11 +201,11 @@ size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos& pos, const BlockFilter&
LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile);
return 0;
}
- if (!TruncateFile(last_file.Get(), pos.nPos)) {
+ if (!last_file.Truncate(pos.nPos)) {
LogPrintf("%s: Failed to truncate filter file %d\n", __func__, pos.nFile);
return 0;
}
- if (!FileCommit(last_file.Get())) {
+ if (!last_file.Commit()) {
LogPrintf("%s: Failed to commit filter file %d\n", __func__, pos.nFile);
return 0;
}
@@ -316,7 +316,7 @@ bool BlockFilterIndex::Write(const BlockFilter& filter, uint32_t block_height, c
return true;
}
-bool BlockFilterIndex::CustomRewind(const interfaces::BlockKey& current_tip, const interfaces::BlockKey& new_tip)
+bool BlockFilterIndex::CustomRewind(const interfaces::BlockRef& current_tip, const interfaces::BlockRef& new_tip)
{
CDBBatch batch(*m_db);
std::unique_ptr<CDBIterator> db_it(m_db->NewIterator());
diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h
index cdb9563fb8..ccb4845ef5 100644
--- a/src/index/blockfilterindex.h
+++ b/src/index/blockfilterindex.h
@@ -52,13 +52,13 @@ private:
std::optional<uint256> ReadFilterHeader(int height, const uint256& expected_block_hash);
protected:
- bool CustomInit(const std::optional<interfaces::BlockKey>& block) override;
+ bool CustomInit(const std::optional<interfaces::BlockRef>& block) override;
bool CustomCommit(CDBBatch& batch) override;
bool CustomAppend(const interfaces::BlockInfo& block) override;
- bool CustomRewind(const interfaces::BlockKey& current_tip, const interfaces::BlockKey& new_tip) override;
+ bool CustomRewind(const interfaces::BlockRef& current_tip, const interfaces::BlockRef& new_tip) override;
BaseIndex::DB& GetDB() const LIFETIMEBOUND override { return *m_db; }
diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp
index dff8e50a4e..c950a18f3f 100644
--- a/src/index/coinstatsindex.cpp
+++ b/src/index/coinstatsindex.cpp
@@ -265,7 +265,7 @@ bool CoinStatsIndex::CustomAppend(const interfaces::BlockInfo& block)
return true;
}
-bool CoinStatsIndex::CustomRewind(const interfaces::BlockKey& current_tip, const interfaces::BlockKey& new_tip)
+bool CoinStatsIndex::CustomRewind(const interfaces::BlockRef& current_tip, const interfaces::BlockRef& new_tip)
{
CDBBatch batch(*m_db);
std::unique_ptr<CDBIterator> db_it(m_db->NewIterator());
@@ -304,7 +304,7 @@ bool CoinStatsIndex::CustomRewind(const interfaces::BlockKey& current_tip, const
return true;
}
-static bool LookUpOne(const CDBWrapper& db, const interfaces::BlockKey& block, DBVal& result)
+static bool LookUpOne(const CDBWrapper& db, const interfaces::BlockRef& block, DBVal& result)
{
// First check if the result is stored under the height index and the value
// there matches the block hash. This should be the case if the block is on
@@ -350,7 +350,7 @@ std::optional<CCoinsStats> CoinStatsIndex::LookUpStats(const CBlockIndex& block_
return stats;
}
-bool CoinStatsIndex::CustomInit(const std::optional<interfaces::BlockKey>& block)
+bool CoinStatsIndex::CustomInit(const std::optional<interfaces::BlockRef>& block)
{
if (!m_db->Read(DB_MUHASH, m_muhash)) {
// Check that the cause of the read failure is that the key does not
diff --git a/src/index/coinstatsindex.h b/src/index/coinstatsindex.h
index d6322bfa7c..885b9e0a86 100644
--- a/src/index/coinstatsindex.h
+++ b/src/index/coinstatsindex.h
@@ -43,13 +43,13 @@ private:
bool AllowPrune() const override { return true; }
protected:
- bool CustomInit(const std::optional<interfaces::BlockKey>& block) override;
+ bool CustomInit(const std::optional<interfaces::BlockRef>& block) override;
bool CustomCommit(CDBBatch& batch) override;
bool CustomAppend(const interfaces::BlockInfo& block) override;
- bool CustomRewind(const interfaces::BlockKey& current_tip, const interfaces::BlockKey& new_tip) override;
+ bool CustomRewind(const interfaces::BlockRef& current_tip, const interfaces::BlockRef& new_tip) override;
BaseIndex::DB& GetDB() const override { return *m_db; }
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 80f615ed0e..425a7f00a0 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -87,10 +87,7 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe
CBlockHeader header;
try {
file >> header;
- if (fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) {
- LogError("%s: fseek(...) failed\n", __func__);
- return false;
- }
+ file.seek(postx.nTxOffset, SEEK_CUR);
file >> TX_WITH_WITNESS(tx);
} catch (const std::exception& e) {
LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what());
diff --git a/src/init.cpp b/src/init.cpp
index e60feecf10..ab53cb851d 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <init.h>
@@ -29,6 +29,7 @@
#include <init/common.h>
#include <interfaces/chain.h>
#include <interfaces/init.h>
+#include <interfaces/ipc.h>
#include <interfaces/mining.h>
#include <interfaces/node.h>
#include <kernel/context.h>
@@ -122,17 +123,19 @@ using node::ApplyArgsManOptions;
using node::BlockManager;
using node::CacheSizes;
using node::CalculateCacheSizes;
+using node::ChainstateLoadResult;
+using node::ChainstateLoadStatus;
using node::DEFAULT_PERSIST_MEMPOOL;
using node::DEFAULT_PRINT_MODIFIED_FEE;
using node::DEFAULT_STOPATHEIGHT;
using node::DumpMempool;
-using node::LoadMempool;
+using node::ImportBlocks;
using node::KernelNotifications;
using node::LoadChainstate;
+using node::LoadMempool;
using node::MempoolPath;
using node::NodeContext;
using node::ShouldPersistMempool;
-using node::ImportBlocks;
using node::VerifyLoadedChainstate;
using util::Join;
using util::ReplaceAll;
@@ -147,11 +150,12 @@ static constexpr bool DEFAULT_STOPAFTERBLOCKIMPORT{false};
// Win32 LevelDB doesn't use filedescriptors, and the ones used for
// accessing block files don't count towards the fd_set size limit
// anyway.
-#define MIN_CORE_FILEDESCRIPTORS 0
+#define MIN_LEVELDB_FDS 0
#else
-#define MIN_CORE_FILEDESCRIPTORS 150
+#define MIN_LEVELDB_FDS 150
#endif
+static constexpr int MIN_CORE_FDS = MIN_LEVELDB_FDS + NUM_FDS_MESSAGE_CAPTURE;
static const char* DEFAULT_ASMAP_FILENAME="ip_asn.map";
/**
@@ -203,7 +207,14 @@ void InitContext(NodeContext& node)
g_shutdown.emplace();
node.args = &gArgs;
- node.shutdown = &*g_shutdown;
+ node.shutdown_signal = &*g_shutdown;
+ node.shutdown_request = [&node] {
+ assert(node.shutdown_signal);
+ if (!(*node.shutdown_signal)()) return false;
+ // Wake any threads that may be waiting for the tip to change.
+ if (node.notifications) WITH_LOCK(node.notifications->m_tip_block_mutex, node.notifications->m_tip_block_cv.notify_all());
+ return true;
+ };
}
//////////////////////////////////////////////////////////////////////////////
@@ -231,7 +242,7 @@ void InitContext(NodeContext& node)
bool ShutdownRequested(node::NodeContext& node)
{
- return bool{*Assert(node.shutdown)};
+ return bool{*Assert(node.shutdown_signal)};
}
#if HAVE_SYSTEM
@@ -296,7 +307,7 @@ void Shutdown(NodeContext& node)
StopTorControl();
- if (node.chainman && node.chainman->m_thread_load.joinable()) node.chainman->m_thread_load.join();
+ if (node.background_init_thread.joinable()) node.background_init_thread.join();
// After everything has been shut down, but before things get flushed, stop the
// the scheduler. After this point, SyncWithValidationInterfaceQueue() should not be called anymore
// as this would prevent the shutdown from completing.
@@ -427,21 +438,7 @@ static void registerSignalHandler(int signal, void(*handler)(int))
}
#endif
-static boost::signals2::connection rpc_notify_block_change_connection;
-static void OnRPCStarted()
-{
- rpc_notify_block_change_connection = uiInterface.NotifyBlockTip_connect(std::bind(RPCNotifyBlockChange, std::placeholders::_2));
-}
-
-static void OnRPCStopped()
-{
- rpc_notify_block_change_connection.disconnect();
- RPCNotifyBlockChange(nullptr);
- g_best_block_cv.notify_all();
- LogDebug(BCLog::RPC, "RPC stopped.\n");
-}
-
-void SetupServerArgs(ArgsManager& argsman)
+void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc)
{
SetupHelpOptions(argsman);
argsman.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now
@@ -488,7 +485,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location (only useable from command line, not configuration file) (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (minimum %d, default: %d). Make sure you have enough RAM. In addition, unused memory allocated to the mempool is shared with this cache (see -maxmempool).", nMinDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-allowignoredconf", strprintf("For backwards compatibility, treat an unused %s file in the datadir as a warning, not an error.", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -569,11 +566,7 @@ void SetupServerArgs(ArgsManager& argsman)
#else
hidden_args.emplace_back("-upnp");
#endif
-#ifdef USE_NATPMP
- argsman.AddArg("-natpmp", strprintf("Use NAT-PMP to map the listening port (default: %u)", DEFAULT_NATPMP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
-#else
- hidden_args.emplace_back("-natpmp");
-#endif // USE_NATPMP
+ argsman.AddArg("-natpmp", strprintf("Use PCP or NAT-PMP to map the listening port (default: %u)", DEFAULT_NATPMP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-whitebind=<[permissions@]addr>", "Bind to the given address and add permission flags to the peers connecting to it. "
"Use [host]:port notation for IPv6. Allowed permissions: " + Join(NET_PERMISSIONS_DOC, ", ") + ". "
"Specify multiple permissions separated by commas (default: download,noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -676,6 +669,9 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
argsman.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
argsman.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ if (can_listen_ipc) {
+ argsman.AddArg("-ipcbind=<address>", "Bind to Unix socket address and listen for incoming connections. Valid address values are \"unix\" to listen on the default path, <datadir>/node.sock, or \"unix:/custom/path\" to specify a custom path. Can be specified multiple times to listen on multiple paths. Default behavior is not to listen on any path. If relative paths are specified, they are interpreted relative to the network data directory. If paths include any parent directory components and the parent directories do not exist, they will be created.", ArgsManager::ALLOW_ANY, OptionsCategory::IPC);
+ }
#if HAVE_DECL_FORK
argsman.AddArg("-daemon", strprintf("Run in the background as a daemon and accept commands (default: %d)", DEFAULT_DAEMON), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -689,21 +685,6 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddHiddenArgs(hidden_args);
}
-static bool fHaveGenesis = false;
-static GlobalMutex g_genesis_wait_mutex;
-static std::condition_variable g_genesis_wait_cv;
-
-static void BlockNotifyGenesisWait(const CBlockIndex* pBlockIndex)
-{
- if (pBlockIndex != nullptr) {
- {
- LOCK(g_genesis_wait_mutex);
- fHaveGenesis = true;
- }
- g_genesis_wait_cv.notify_all();
- }
-}
-
#if HAVE_SYSTEM
static void StartupNotify(const ArgsManager& args)
{
@@ -718,9 +699,7 @@ static void StartupNotify(const ArgsManager& args)
static bool AppInitServers(NodeContext& node)
{
const ArgsManager& args = *Assert(node.args);
- RPCServer::OnStarted(&OnRPCStarted);
- RPCServer::OnStopped(&OnRPCStopped);
- if (!InitHTTPServer(*Assert(node.shutdown))) {
+ if (!InitHTTPServer(*Assert(node.shutdown_signal))) {
return false;
}
StartRPC();
@@ -834,9 +813,8 @@ void InitLogging(const ArgsManager& args)
namespace { // Variables internal to initialization process only
int nMaxConnections;
-int nUserMaxConnections;
-int nFD;
-ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS);
+int available_fds;
+ServiceFlags g_local_services = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS);
int64_t peer_connect_timeout;
std::set<BlockFilterType> g_enabled_filter_types;
@@ -951,7 +929,7 @@ bool AppInitParameterInteraction(const ArgsManager& args)
// Signal NODE_P2P_V2 if BIP324 v2 transport is enabled.
if (args.GetBoolArg("-v2transport", DEFAULT_V2_TRANSPORT)) {
- nLocalServices = ServiceFlags(nLocalServices | NODE_P2P_V2);
+ g_local_services = ServiceFlags(g_local_services | NODE_P2P_V2);
}
// Signal NODE_COMPACT_FILTERS if peerblockfilters and basic filters index are both enabled.
@@ -960,7 +938,7 @@ bool AppInitParameterInteraction(const ArgsManager& args)
return InitError(_("Cannot set -peerblockfilters without -blockfilterindex."));
}
- nLocalServices = ServiceFlags(nLocalServices | NODE_COMPACT_FILTERS);
+ g_local_services = ServiceFlags(g_local_services | NODE_COMPACT_FILTERS);
}
if (args.GetIntArg("-prune", 0)) {
@@ -987,27 +965,33 @@ bool AppInitParameterInteraction(const ArgsManager& args)
return InitError(Untranslated("Cannot set -listen=0 together with -listenonion=1"));
}
- // Make sure enough file descriptors are available
- int nBind = std::max(nUserBind, size_t(1));
- nUserMaxConnections = args.GetIntArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
- nMaxConnections = std::max(nUserMaxConnections, 0);
-
- nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS + nBind + NUM_FDS_MESSAGE_CAPTURE);
+ // Make sure enough file descriptors are available. We need to reserve enough FDs to account for the bare minimum,
+ // plus all manual connections and all bound interfaces. Any remainder will be available for connection sockets.
-#ifdef USE_POLL
- int fd_max = nFD;
-#else
- int fd_max = FD_SETSIZE;
+ // Number of bound interfaces (we have at least one)
+ int nBind = std::max(nUserBind, size_t(1));
+ // Maximum number of connections with other nodes; this accounts for all types of outbound and inbound connections except manual ones
+ int user_max_connection = args.GetIntArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
+ if (user_max_connection < 0) {
+ return InitError(Untranslated("-maxconnections must be greater or equal than zero"));
+ }
+ // Reserve enough FDs to account for the bare minimum, plus any manual connections, plus the bound interfaces
+ int min_required_fds = MIN_CORE_FDS + MAX_ADDNODE_CONNECTIONS + nBind;
+
+ // Try raising the FD limit to what we need (available_fds may be smaller than the requested amount if this fails)
+ available_fds = RaiseFileDescriptorLimit(user_max_connection + min_required_fds);
+ // If we are using select instead of poll, our actual limit may be even smaller
+#ifndef USE_POLL
+ available_fds = std::min(FD_SETSIZE, available_fds);
#endif
+ if (available_fds < min_required_fds)
+ return InitError(strprintf(_("Not enough file descriptors available. %d available, %d required."), available_fds, min_required_fds));
+
// Trim requested connection counts, to fit into system limitations
- // <int> in std::min<int>(...) to work around FreeBSD compilation issue described in #2695
- nMaxConnections = std::max(std::min<int>(nMaxConnections, fd_max - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS - NUM_FDS_MESSAGE_CAPTURE), 0);
- if (nFD < MIN_CORE_FILEDESCRIPTORS)
- return InitError(_("Not enough file descriptors available."));
- nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS - NUM_FDS_MESSAGE_CAPTURE, nMaxConnections);
+ nMaxConnections = std::min(available_fds - min_required_fds, user_max_connection);
- if (nMaxConnections < nUserMaxConnections)
- InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, because of system limitations."), nUserMaxConnections, nMaxConnections));
+ if (nMaxConnections < user_max_connection)
+ InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, because of system limitations."), user_max_connection, nMaxConnections));
// ********************************************************* Step 3: parameter-to-internal-flags
if (auto result{init::SetLoggingCategories(args)}; !result) return InitError(util::ErrorString(result));
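
To make the descriptor accounting in the hunk above concrete, a small worked example; the numeric constants are illustrative stand-ins for MIN_LEVELDB_FDS, NUM_FDS_MESSAGE_CAPTURE, MAX_ADDNODE_CONNECTIONS and the -maxconnections value, not the exact build-time values.

    #include <algorithm>
    #include <cassert>

    int main()
    {
        const int min_core_fds = 150 + 1;        // MIN_LEVELDB_FDS + NUM_FDS_MESSAGE_CAPTURE (illustrative)
        const int max_addnode = 8;               // MAX_ADDNODE_CONNECTIONS (illustrative)
        const int n_bind = 1;                    // at least one bound interface
        const int user_max_connection = 125;     // -maxconnections
        const int min_required_fds = min_core_fds + max_addnode + n_bind; // 160

        const int available_fds = 200;           // what RaiseFileDescriptorLimit() managed to grant
        assert(available_fds >= min_required_fds);                        // otherwise init errors out
        const int n_max_connections = std::min(available_fds - min_required_fds, user_max_connection);
        assert(n_max_connections == 40);         // trimmed from 125, with an InitWarning
        return 0;
    }
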
@@ -1039,7 +1023,7 @@ bool AppInitParameterInteraction(const ArgsManager& args)
SetMockTime(args.GetIntArg("-mocktime", 0)); // SetMockTime(0) is a no-op
if (args.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
- nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
+ g_local_services = ServiceFlags(g_local_services | NODE_BLOOM);
if (args.IsArgSet("-test")) {
if (chainparams.GetChainType() != ChainType::REGTEST) {
@@ -1078,6 +1062,13 @@ bool AppInitParameterInteraction(const ArgsManager& args)
if (!blockman_result) {
return InitError(util::ErrorString(blockman_result));
}
+ CTxMemPool::Options mempool_opts{
+ .check_ratio = chainparams.DefaultConsistencyChecks() ? 1 : 0,
+ };
+ auto mempool_result{ApplyArgsManOptions(args, chainparams, mempool_opts)};
+ if (!mempool_result) {
+ return InitError(util::ErrorString(mempool_result));
+ }
}
return true;
@@ -1135,6 +1126,151 @@ bool AppInitInterfaces(NodeContext& node)
return true;
}
+bool CheckHostPortOptions(const ArgsManager& args) {
+ for (const std::string port_option : {
+ "-port",
+ "-rpcport",
+ }) {
+ if (args.IsArgSet(port_option)) {
+ const std::string port = args.GetArg(port_option, "");
+ uint16_t n;
+ if (!ParseUInt16(port, &n) || n == 0) {
+ return InitError(InvalidPortErrMsg(port_option, port));
+ }
+ }
+ }
+
+ for ([[maybe_unused]] const auto& [arg, unix] : std::vector<std::pair<std::string, bool>>{
+ // arg name UNIX socket support
+ {"-i2psam", false},
+ {"-onion", true},
+ {"-proxy", true},
+ {"-rpcbind", false},
+ {"-torcontrol", false},
+ {"-whitebind", false},
+ {"-zmqpubhashblock", true},
+ {"-zmqpubhashtx", true},
+ {"-zmqpubrawblock", true},
+ {"-zmqpubrawtx", true},
+ {"-zmqpubsequence", true},
+ }) {
+ for (const std::string& socket_addr : args.GetArgs(arg)) {
+ std::string host_out;
+ uint16_t port_out{0};
+ if (!SplitHostPort(socket_addr, port_out, host_out)) {
+#ifdef HAVE_SOCKADDR_UN
+ // Allow unix domain sockets for some options e.g. unix:/some/file/path
+ if (!unix || !socket_addr.starts_with(ADDR_PREFIX_UNIX)) {
+ return InitError(InvalidPortErrMsg(arg, socket_addr));
+ }
+#else
+ return InitError(InvalidPortErrMsg(arg, socket_addr));
+#endif
+ }
+ }
+ }
+
+ return true;
+}
+
+// A GUI user may opt to retry once if there is a failure during chainstate initialization.
+// The function therefore has to support re-entry.
+static ChainstateLoadResult InitAndLoadChainstate(
+ NodeContext& node,
+ bool do_reindex,
+ const bool do_reindex_chainstate,
+ CacheSizes& cache_sizes,
+ const ArgsManager& args)
+{
+ const CChainParams& chainparams = Params();
+ CTxMemPool::Options mempool_opts{
+ .check_ratio = chainparams.DefaultConsistencyChecks() ? 1 : 0,
+ .signals = node.validation_signals.get(),
+ };
+ Assert(ApplyArgsManOptions(args, chainparams, mempool_opts)); // no error can happen, already checked in AppInitParameterInteraction
+ bilingual_str mempool_error;
+ node.mempool = std::make_unique<CTxMemPool>(mempool_opts, mempool_error);
+ if (!mempool_error.empty()) {
+ return {ChainstateLoadStatus::FAILURE_FATAL, mempool_error};
+ }
+ LogPrintf("* Using %.1f MiB for in-memory UTXO set (plus up to %.1f MiB of unused mempool space)\n", cache_sizes.coins * (1.0 / 1024 / 1024), mempool_opts.max_size_bytes * (1.0 / 1024 / 1024));
+ ChainstateManager::Options chainman_opts{
+ .chainparams = chainparams,
+ .datadir = args.GetDataDirNet(),
+ .notifications = *node.notifications,
+ .signals = node.validation_signals.get(),
+ };
+ Assert(ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction
+ BlockManager::Options blockman_opts{
+ .chainparams = chainman_opts.chainparams,
+ .blocks_dir = args.GetBlocksDirPath(),
+ .notifications = chainman_opts.notifications,
+ };
+ Assert(ApplyArgsManOptions(args, blockman_opts)); // no error can happen, already checked in AppInitParameterInteraction
+ try {
+ node.chainman = std::make_unique<ChainstateManager>(*Assert(node.shutdown_signal), chainman_opts, blockman_opts);
+ } catch (std::exception& e) {
+ return {ChainstateLoadStatus::FAILURE_FATAL, strprintf(Untranslated("Failed to initialize ChainstateManager: %s"), e.what())};
+ }
+ ChainstateManager& chainman = *node.chainman;
+ // This is defined and set here instead of inline in validation.h to avoid a hard
+ // dependency between validation and index/base, since the latter is not in
+ // libbitcoinkernel.
+ chainman.snapshot_download_completed = [&node]() {
+ if (!node.chainman->m_blockman.IsPruneMode()) {
+ LogPrintf("[snapshot] re-enabling NODE_NETWORK services\n");
+ node.connman->AddLocalServices(NODE_NETWORK);
+ }
+ LogPrintf("[snapshot] restarting indexes\n");
+ // Drain the validation interface queue to ensure that the old indexes
+ // don't have any pending work.
+ Assert(node.validation_signals)->SyncWithValidationInterfaceQueue();
+ for (auto* index : node.indexes) {
+ index->Interrupt();
+ index->Stop();
+ if (!(index->Init() && index->StartBackgroundSync())) {
+ LogPrintf("[snapshot] WARNING failed to restart index %s on snapshot chain\n", index->GetName());
+ }
+ }
+ };
+ node::ChainstateLoadOptions options;
+ options.mempool = Assert(node.mempool.get());
+ options.wipe_block_tree_db = do_reindex;
+ options.wipe_chainstate_db = do_reindex || do_reindex_chainstate;
+ options.prune = chainman.m_blockman.IsPruneMode();
+ options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
+ options.check_level = args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
+ options.require_full_verification = args.IsArgSet("-checkblocks") || args.IsArgSet("-checklevel");
+ options.coins_error_cb = [] {
+ uiInterface.ThreadSafeMessageBox(
+ _("Error reading from database, shutting down."),
+ "", CClientUIInterface::MSG_ERROR);
+ };
+ uiInterface.InitMessage(_("Loading block index…").translated);
+ const auto load_block_index_start_time{SteadyClock::now()};
+ auto catch_exceptions = [](auto&& f) {
+ try {
+ return f();
+ } catch (const std::exception& e) {
+ LogError("%s\n", e.what());
+ return std::make_tuple(node::ChainstateLoadStatus::FAILURE, _("Error opening block database"));
+ }
+ };
+ auto [status, error] = catch_exceptions([&] { return LoadChainstate(chainman, cache_sizes, options); });
+ if (status == node::ChainstateLoadStatus::SUCCESS) {
+ uiInterface.InitMessage(_("Verifying blocks…").translated);
+ if (chainman.m_blockman.m_have_pruned && options.check_blocks > MIN_BLOCKS_TO_KEEP) {
+ LogWarning("pruned datadir may not have more than %d blocks; only checking available blocks\n",
+ MIN_BLOCKS_TO_KEEP);
+ }
+ std::tie(status, error) = catch_exceptions([&] { return VerifyLoadedChainstate(chainman, options); });
+ if (status == node::ChainstateLoadStatus::SUCCESS) {
+ LogPrintf(" block index %15dms\n", Ticks<std::chrono::milliseconds>(SteadyClock::now() - load_block_index_start_time));
+ }
+ }
+ return {status, error};
+};
+
bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
{
const ArgsManager& args = *Assert(node.args);
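
The hunk above ends the new InitAndLoadChainstate helper, which reports failures as a {status, error} pair and funnels exceptions from the loaders through a small adapter lambda instead of calling InitError directly. A minimal, self-contained sketch of that adapter pattern, using toy status and loader types rather than Bitcoin Core's:

    #include <exception>
    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <tuple>

    enum class Status { SUCCESS, FAILURE, FAILURE_FATAL };

    // Toy stand-in for LoadChainstate(): may throw on unexpected I/O errors.
    std::tuple<Status, std::string> LoadSomething(bool throw_error)
    {
        if (throw_error) throw std::runtime_error("disk read failed");
        return {Status::SUCCESS, ""};
    }

    int main()
    {
        // Adapter in the spirit of the catch_exceptions lambda above: convert any
        // exception escaping the loader into a generic FAILURE status plus message.
        auto catch_exceptions = [](auto&& f) {
            try {
                return f();
            } catch (const std::exception& e) {
                return std::make_tuple(Status::FAILURE, std::string{"Error opening block database: "} + e.what());
            }
        };

        auto [status, error] = catch_exceptions([&] { return LoadSomething(/*throw_error=*/true); });
        std::cout << (status == Status::SUCCESS ? "ok" : error) << "\n";
    }
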
@@ -1155,7 +1291,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
return false;
}
- LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD);
+ LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, available_fds);
// Warn about relative -datadir path.
if (args.IsArgSet("-datadir") && !args.GetPathArg("-datadir").is_absolute()) {
@@ -1183,7 +1319,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
constexpr uint64_t min_disk_space = 50 << 20; // 50 MB
if (!CheckDiskSpace(args.GetBlocksDirPath(), min_disk_space)) {
LogError("Shutting down due to lack of disk space!\n");
- if (!(*Assert(node.shutdown))()) {
+ if (!(Assert(node.shutdown_request))()) {
LogError("Failed to send shutdown signal after disk space check\n");
}
}
@@ -1200,6 +1336,17 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
g_wallet_init_interface.Construct(node);
uiInterface.InitWallet();
+ if (interfaces::Ipc* ipc = node.init->ipc()) {
+ for (std::string address : gArgs.GetArgs("-ipcbind")) {
+ try {
+ ipc->listenAddress(address);
+ } catch (const std::exception& e) {
+ return InitError(strprintf(Untranslated("Unable to bind to IPC address '%s'. %s"), address, e.what()));
+ }
+ LogPrintf("Listening for IPC requests on address %s\n", address);
+ }
+ }
+
/* Register RPC commands regardless of -server setting so they will be
* available in the GUI RPC console even if external calls are disabled.
*/
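
The -ipcbind handling added above treats any listen failure as fatal to initialization rather than continuing with a partially bound node. A small sketch of that shape with a stub listener; ListenOn and BindAll are hypothetical names, not Bitcoin Core functions:

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for ipc->listenAddress(): throws if the address is unusable.
    void ListenOn(const std::string& address)
    {
        if (address.empty()) throw std::runtime_error("empty address");
        // ... bind + listen would happen here ...
    }

    // Returns false on the first address that cannot be bound, mirroring the
    // "fail init instead of running half-configured" choice above.
    bool BindAll(const std::vector<std::string>& addresses)
    {
        for (const std::string& address : addresses) {
            try {
                ListenOn(address);
            } catch (const std::exception& e) {
                std::cerr << "Unable to bind to IPC address '" << address << "': " << e.what() << "\n";
                return false;
            }
            std::cout << "Listening for IPC requests on address " << address << "\n";
        }
        return true;
    }

    int main() { return BindAll({"unix", ""}) ? 0 : 1; }
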
@@ -1211,6 +1358,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
RegisterZMQRPCCommands(tableRPC);
#endif
+ // Check port numbers
+ if (!CheckHostPortOptions(args)) return false;
+
/* Start the RPC server already. It will be started in "warmup" mode
* and not really process calls already (but it will signify connections
* that the server is there and will be ready later). Warmup mode will
@@ -1301,50 +1451,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
validation_signals.RegisterValidationInterface(fee_estimator);
}
- // Check port numbers
- for (const std::string port_option : {
- "-port",
- "-rpcport",
- }) {
- if (args.IsArgSet(port_option)) {
- const std::string port = args.GetArg(port_option, "");
- uint16_t n;
- if (!ParseUInt16(port, &n) || n == 0) {
- return InitError(InvalidPortErrMsg(port_option, port));
- }
- }
- }
-
- for ([[maybe_unused]] const auto& [arg, unix] : std::vector<std::pair<std::string, bool>>{
- // arg name UNIX socket support
- {"-i2psam", false},
- {"-onion", true},
- {"-proxy", true},
- {"-rpcbind", false},
- {"-torcontrol", false},
- {"-whitebind", false},
- {"-zmqpubhashblock", true},
- {"-zmqpubhashtx", true},
- {"-zmqpubrawblock", true},
- {"-zmqpubrawtx", true},
- {"-zmqpubsequence", true},
- }) {
- for (const std::string& socket_addr : args.GetArgs(arg)) {
- std::string host_out;
- uint16_t port_out{0};
- if (!SplitHostPort(socket_addr, port_out, host_out)) {
-#ifdef HAVE_SOCKADDR_UN
- // Allow unix domain sockets for some options e.g. unix:/some/file/path
- if (!unix || socket_addr.find(ADDR_PREFIX_UNIX) != 0) {
- return InitError(InvalidPortErrMsg(arg, socket_addr));
- }
-#else
- return InitError(InvalidPortErrMsg(arg, socket_addr));
-#endif
- }
- }
- }
-
for (const std::string& socket_addr : args.GetArgs("-bind")) {
std::string host_out;
uint16_t port_out{0};
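
The inline port checks removed above now live behind the earlier CheckHostPortOptions(args) call; the underlying validation is still "parse a 16-bit port and reject zero". A self-contained sketch of that kind of check using std::from_chars; ParsePort here is illustrative, not the actual ParseUInt16 helper:

    #include <charconv>
    #include <cstdint>
    #include <iostream>
    #include <string>

    // Accepts only a full decimal string in [1, 65535], similar in spirit to the
    // ParseUInt16 + "n == 0" check that used to live inline here.
    bool ParsePort(const std::string& s, uint16_t& out)
    {
        unsigned value{};
        auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
        if (ec != std::errc{} || ptr != s.data() + s.size()) return false;
        if (value == 0 || value > 65535) return false;
        out = static_cast<uint16_t>(value);
        return true;
    }

    int main()
    {
        uint16_t port;
        for (const std::string arg : {"8333", "0", "not-a-port", "70000"}) {
            std::cout << arg << " -> " << (ParsePort(arg, port) ? "ok" : "invalid") << "\n";
        }
    }
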
@@ -1494,22 +1600,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// ********************************************************* Step 7: load block chain
- node.notifications = std::make_unique<KernelNotifications>(*Assert(node.shutdown), node.exit_status, *Assert(node.warnings));
- ReadNotificationArgs(args, *node.notifications);
- ChainstateManager::Options chainman_opts{
- .chainparams = chainparams,
- .datadir = args.GetDataDirNet(),
- .notifications = *node.notifications,
- .signals = &validation_signals,
- };
- Assert(ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction
-
- BlockManager::Options blockman_opts{
- .chainparams = chainman_opts.chainparams,
- .blocks_dir = args.GetBlocksDirPath(),
- .notifications = chainman_opts.notifications,
- };
- Assert(ApplyArgsManOptions(args, blockman_opts)); // no error can happen, already checked in AppInitParameterInteraction
+ node.notifications = std::make_unique<KernelNotifications>(Assert(node.shutdown_request), node.exit_status, *Assert(node.warnings));
+ auto& kernel_notifications{*node.notifications};
+ ReadNotificationArgs(args, kernel_notifications);
// cache size calculations
CacheSizes cache_sizes = CalculateCacheSizes(args, g_enabled_filter_types.size());
@@ -1528,114 +1621,39 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
assert(!node.mempool);
assert(!node.chainman);
- CTxMemPool::Options mempool_opts{
- .check_ratio = chainparams.DefaultConsistencyChecks() ? 1 : 0,
- .signals = &validation_signals,
- };
- auto result{ApplyArgsManOptions(args, chainparams, mempool_opts)};
- if (!result) {
- return InitError(util::ErrorString(result));
- }
-
bool do_reindex{args.GetBoolArg("-reindex", false)};
const bool do_reindex_chainstate{args.GetBoolArg("-reindex-chainstate", false)};
- for (bool fLoaded = false; !fLoaded && !ShutdownRequested(node);) {
- bilingual_str mempool_error;
- node.mempool = std::make_unique<CTxMemPool>(mempool_opts, mempool_error);
- if (!mempool_error.empty()) {
- return InitError(mempool_error);
- }
- LogPrintf("* Using %.1f MiB for in-memory UTXO set (plus up to %.1f MiB of unused mempool space)\n", cache_sizes.coins * (1.0 / 1024 / 1024), mempool_opts.max_size_bytes * (1.0 / 1024 / 1024));
-
- try {
- node.chainman = std::make_unique<ChainstateManager>(*Assert(node.shutdown), chainman_opts, blockman_opts);
- } catch (std::exception& e) {
- return InitError(strprintf(Untranslated("Failed to initialize ChainstateManager: %s"), e.what()));
- }
- ChainstateManager& chainman = *node.chainman;
-
- // This is defined and set here instead of inline in validation.h to avoid a hard
- // dependency between validation and index/base, since the latter is not in
- // libbitcoinkernel.
- chainman.restart_indexes = [&node]() {
- LogPrintf("[snapshot] restarting indexes\n");
-
- // Drain the validation interface queue to ensure that the old indexes
- // don't have any pending work.
- Assert(node.validation_signals)->SyncWithValidationInterfaceQueue();
-
- for (auto* index : node.indexes) {
- index->Interrupt();
- index->Stop();
- if (!(index->Init() && index->StartBackgroundSync())) {
- LogPrintf("[snapshot] WARNING failed to restart index %s on snapshot chain\n", index->GetName());
- }
- }
- };
-
- node::ChainstateLoadOptions options;
- options.mempool = Assert(node.mempool.get());
- options.wipe_block_tree_db = do_reindex;
- options.wipe_chainstate_db = do_reindex || do_reindex_chainstate;
- options.prune = chainman.m_blockman.IsPruneMode();
- options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
- options.check_level = args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
- options.require_full_verification = args.IsArgSet("-checkblocks") || args.IsArgSet("-checklevel");
- options.coins_error_cb = [] {
- uiInterface.ThreadSafeMessageBox(
- _("Error reading from database, shutting down."),
- "", CClientUIInterface::MSG_ERROR);
- };
-
- uiInterface.InitMessage(_("Loading block index…").translated);
- const auto load_block_index_start_time{SteadyClock::now()};
- auto catch_exceptions = [](auto&& f) {
- try {
- return f();
- } catch (const std::exception& e) {
- LogError("%s\n", e.what());
- return std::make_tuple(node::ChainstateLoadStatus::FAILURE, _("Error opening block database"));
- }
- };
- auto [status, error] = catch_exceptions([&]{ return LoadChainstate(chainman, cache_sizes, options); });
- if (status == node::ChainstateLoadStatus::SUCCESS) {
- uiInterface.InitMessage(_("Verifying blocks…").translated);
- if (chainman.m_blockman.m_have_pruned && options.check_blocks > MIN_BLOCKS_TO_KEEP) {
- LogWarning("pruned datadir may not have more than %d blocks; only checking available blocks\n",
- MIN_BLOCKS_TO_KEEP);
- }
- std::tie(status, error) = catch_exceptions([&]{ return VerifyLoadedChainstate(chainman, options);});
- if (status == node::ChainstateLoadStatus::SUCCESS) {
- fLoaded = true;
- LogPrintf(" block index %15dms\n", Ticks<std::chrono::milliseconds>(SteadyClock::now() - load_block_index_start_time));
- }
- }
-
- if (status == node::ChainstateLoadStatus::FAILURE_FATAL || status == node::ChainstateLoadStatus::FAILURE_INCOMPATIBLE_DB || status == node::ChainstateLoadStatus::FAILURE_INSUFFICIENT_DBCACHE) {
- return InitError(error);
+ // Chainstate initialization and loading may be retried once with reindexing by GUI users
+ auto [status, error] = InitAndLoadChainstate(
+ node,
+ do_reindex,
+ do_reindex_chainstate,
+ cache_sizes,
+ args);
+ if (status == ChainstateLoadStatus::FAILURE && !do_reindex && !ShutdownRequested(node)) {
+ // suggest a reindex
+ bool do_retry = uiInterface.ThreadSafeQuestion(
+ error + Untranslated(".\n\n") + _("Do you want to rebuild the block database now?"),
+ error.original + ".\nPlease restart with -reindex or -reindex-chainstate to recover.",
+ "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT);
+ if (!do_retry) {
+ LogError("Aborted block database rebuild. Exiting.\n");
+ return false;
}
-
- if (!fLoaded && !ShutdownRequested(node)) {
- // first suggest a reindex
- if (!do_reindex) {
- bool fRet = uiInterface.ThreadSafeQuestion(
- error + Untranslated(".\n\n") + _("Do you want to rebuild the block database now?"),
- error.original + ".\nPlease restart with -reindex or -reindex-chainstate to recover.",
- "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT);
- if (fRet) {
- do_reindex = true;
- if (!Assert(node.shutdown)->reset()) {
- LogError("Internal error: failed to reset shutdown signal.\n");
- }
- } else {
- LogError("Aborted block database rebuild. Exiting.\n");
- return false;
- }
- } else {
- return InitError(error);
- }
+ do_reindex = true;
+ if (!Assert(node.shutdown_signal)->reset()) {
+ LogError("Internal error: failed to reset shutdown signal.\n");
}
+ std::tie(status, error) = InitAndLoadChainstate(
+ node,
+ do_reindex,
+ do_reindex_chainstate,
+ cache_sizes,
+ args);
+ }
+ if (status != ChainstateLoadStatus::SUCCESS && status != ChainstateLoadStatus::INTERRUPTED) {
+ return InitError(error);
}
// As LoadBlockIndex can take several minutes, it's possible the user
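
With the retry handled outside the loader, the old for(!fLoaded) loop becomes a straight-line "try, ask the user, retry once with reindex" flow. A toy sketch of that control flow under stated assumptions; Load and AskUserToReindex are hypothetical stand-ins, not Bitcoin Core calls:

    #include <iostream>
    #include <string>
    #include <tuple>
    #include <utility>

    enum class Status { SUCCESS, FAILURE, INTERRUPTED };

    // Hypothetical loader: fails unless reindexing was requested.
    std::pair<Status, std::string> Load(bool reindex)
    {
        if (!reindex) return {Status::FAILURE, "corrupt block database"};
        return {Status::SUCCESS, ""};
    }

    bool AskUserToReindex(const std::string& error)
    {
        std::cout << error << " - rebuild the block database? (assuming yes)\n";
        return true;
    }

    int main()
    {
        bool do_reindex = false;
        auto [status, error] = Load(do_reindex);
        if (status == Status::FAILURE && !do_reindex) {
            if (!AskUserToReindex(error)) return 1; // user aborted
            do_reindex = true;
            std::tie(status, error) = Load(do_reindex); // single retry, no loop
        }
        if (status != Status::SUCCESS && status != Status::INTERRUPTED) {
            std::cerr << error << "\n";
            return 1;
        }
        std::cout << "chainstate loaded\n";
    }
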
@@ -1695,8 +1713,13 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}
}
} else {
- LogPrintf("Setting NODE_NETWORK on non-prune mode\n");
- nLocalServices = ServiceFlags(nLocalServices | NODE_NETWORK);
+ // Prior to setting NODE_NETWORK, check if we can provide historical blocks.
+ if (!WITH_LOCK(chainman.GetMutex(), return chainman.BackgroundSyncInProgress())) {
+ LogPrintf("Setting NODE_NETWORK on non-prune mode\n");
+ g_local_services = ServiceFlags(g_local_services | NODE_NETWORK);
+ } else {
+ LogPrintf("Running node in NODE_NETWORK_LIMITED mode until snapshot background sync completes\n");
+ }
}
// ********************************************************* Step 11: import blocks
@@ -1731,15 +1754,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}
}
- // Either install a handler to notify us when genesis activates, or set fHaveGenesis directly.
- // No locking, as this happens before any background thread is started.
- boost::signals2::connection block_notify_genesis_wait_connection;
- if (WITH_LOCK(chainman.GetMutex(), return chainman.ActiveChain().Tip() == nullptr)) {
- block_notify_genesis_wait_connection = uiInterface.NotifyBlockTip_connect(std::bind(BlockNotifyGenesisWait, std::placeholders::_2));
- } else {
- fHaveGenesis = true;
- }
-
#if HAVE_SYSTEM
const std::string block_notify = args.GetArg("-blocknotify", "");
if (!block_notify.empty()) {
@@ -1758,13 +1772,13 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
vImportFiles.push_back(fs::PathFromString(strFile));
}
- chainman.m_thread_load = std::thread(&util::TraceThread, "initload", [=, &chainman, &args, &node] {
+ node.background_init_thread = std::thread(&util::TraceThread, "initload", [=, &chainman, &args, &node] {
ScheduleBatchPriority();
// Import blocks
ImportBlocks(chainman, vImportFiles);
if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) {
LogPrintf("Stopping after block import\n");
- if (!(*Assert(node.shutdown))()) {
+ if (!(Assert(node.shutdown_request))()) {
LogError("Failed to send shutdown signal after finishing block import\n");
}
return;
@@ -1784,15 +1798,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
});
// Wait for genesis block to be processed
- {
- WAIT_LOCK(g_genesis_wait_mutex, lock);
- // We previously could hang here if shutdown was requested prior to
- // ImportBlocks getting started, so instead we just wait on a timer to
- // check ShutdownRequested() regularly.
- while (!fHaveGenesis && !ShutdownRequested(node)) {
- g_genesis_wait_cv.wait_for(lock, std::chrono::milliseconds(500));
- }
- block_notify_genesis_wait_connection.disconnect();
+ if (WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip() == nullptr)) {
+ WAIT_LOCK(kernel_notifications.m_tip_block_mutex, lock);
+ kernel_notifications.m_tip_block_cv.wait(lock, [&]() EXCLUSIVE_LOCKS_REQUIRED(kernel_notifications.m_tip_block_mutex) {
+ return !kernel_notifications.m_tip_block.IsNull() || ShutdownRequested(node);
+ });
}
if (ShutdownRequested(node)) {
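
The genesis-wait signal handler and 500 ms polling loop are replaced above by a wait on the kernel notifications' tip-block condition variable, with shutdown folded into the predicate. A minimal standard-library sketch of that wait pattern, using toy globals rather than the KernelNotifications members:

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex g_tip_mutex;
    std::condition_variable g_tip_cv;
    bool g_have_tip{false};
    std::atomic<bool> g_shutdown{false};

    int main()
    {
        // Producer: simulates the first block (genesis) being connected.
        std::thread producer([] {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
            {
                std::lock_guard<std::mutex> lock(g_tip_mutex);
                g_have_tip = true;
            }
            g_tip_cv.notify_all();
        });

        // Consumer: block until a tip exists or shutdown is requested; the
        // predicate makes spurious wakeups and shutdown both safe, so no timed
        // polling loop is needed.
        {
            std::unique_lock<std::mutex> lock(g_tip_mutex);
            g_tip_cv.wait(lock, [] { return g_have_tip || g_shutdown.load(); });
        }
        std::cout << (g_shutdown ? "shutdown requested\n" : "tip connected\n");
        producer.join();
    }
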
@@ -1801,17 +1811,17 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// ********************************************************* Step 12: start node
- //// debug print
int64_t best_block_time{};
{
- LOCK(cs_main);
+ LOCK(chainman.GetMutex());
+ const auto& tip{*Assert(chainman.ActiveTip())};
LogPrintf("block tree size = %u\n", chainman.BlockIndex().size());
- chain_active_height = chainman.ActiveChain().Height();
- best_block_time = chainman.ActiveChain().Tip() ? chainman.ActiveChain().Tip()->GetBlockTime() : chainman.GetParams().GenesisBlock().GetBlockTime();
+ chain_active_height = tip.nHeight;
+ best_block_time = tip.GetBlockTime();
if (tip_info) {
tip_info->block_height = chain_active_height;
tip_info->block_time = best_block_time;
- tip_info->verification_progress = GuessVerificationProgress(chainman.GetParams().TxData(), chainman.ActiveChain().Tip());
+ tip_info->verification_progress = GuessVerificationProgress(chainman.GetParams().TxData(), &tip);
}
if (tip_info && chainman.m_best_header) {
tip_info->header_height = chainman.m_best_header->nHeight;
@@ -1821,11 +1831,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
LogPrintf("nBestHeight = %d\n", chain_active_height);
if (node.peerman) node.peerman->SetBestBlock(chain_active_height, std::chrono::seconds{best_block_time});
- // Map ports with UPnP or NAT-PMP.
+ // Map ports with UPnP or NAT-PMP
StartMapPort(args.GetBoolArg("-upnp", DEFAULT_UPNP), args.GetBoolArg("-natpmp", DEFAULT_NATPMP));
CConnman::Options connOptions;
- connOptions.nLocalServices = nLocalServices;
+ connOptions.m_local_services = g_local_services;
connOptions.m_max_automatic_connections = nMaxConnections;
connOptions.uiInterface = &uiInterface;
connOptions.m_banman = node.banman.get();
@@ -1980,11 +1990,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// cannot yet be called. Before we make it callable, we need to make sure
// that the RPC's view of the best block is valid and consistent with
// ChainstateManager's active tip.
- //
- // If we do not do this, RPC's view of the best block will be height=0 and
- // hash=0x0. This will lead to erroroneous responses for things like
- // waitforblockheight.
- RPCNotifyBlockChange(WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()));
SetRPCWarmupFinished();
uiInterface.InitMessage(_("Done loading").translated);
diff --git a/src/init.h b/src/init.h
index 40a5da3c0b..6d8a35d80e 100644
--- a/src/init.h
+++ b/src/init.h
@@ -74,7 +74,7 @@ bool AppInitMain(node::NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip
/**
* Register all arguments with the ArgsManager
*/
-void SetupServerArgs(ArgsManager& argsman);
+void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc=false);
/** Validates requirements to run the indexes and spawns each index initial sync thread */
bool StartIndexBackgroundSync(node::NodeContext& node);
diff --git a/src/init/bitcoin-gui.cpp b/src/init/bitcoin-gui.cpp
index aceff1e40f..eae30bc995 100644
--- a/src/init/bitcoin-gui.cpp
+++ b/src/init/bitcoin-gui.cpp
@@ -34,6 +34,11 @@ public:
}
std::unique_ptr<interfaces::Echo> makeEcho() override { return interfaces::MakeEcho(); }
interfaces::Ipc* ipc() override { return m_ipc.get(); }
+ // bitcoin-gui accepts -ipcbind option even though it does not use it
+ // directly. It just returns true here to accept the option because
+ // bitcoin-node accepts the option, and bitcoin-gui accepts all bitcoin-node
+ // options and will start the node with those options.
+ bool canListenIpc() override { return true; }
node::NodeContext m_node;
std::unique_ptr<interfaces::Ipc> m_ipc;
};
diff --git a/src/init/bitcoin-node.cpp b/src/init/bitcoin-node.cpp
index 00a3822791..3f8c50b8d6 100644
--- a/src/init/bitcoin-node.cpp
+++ b/src/init/bitcoin-node.cpp
@@ -37,6 +37,7 @@ public:
}
std::unique_ptr<interfaces::Echo> makeEcho() override { return interfaces::MakeEcho(); }
interfaces::Ipc* ipc() override { return m_ipc.get(); }
+ bool canListenIpc() override { return true; }
node::NodeContext& m_node;
std::unique_ptr<interfaces::Ipc> m_ipc;
};
diff --git a/src/init/common.cpp b/src/init/common.cpp
index 36142c2b9a..dd8ca020d2 100644
--- a/src/init/common.cpp
+++ b/src/init/common.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <clientversion.h>
#include <common/args.h>
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index be596b1765..4e858d1f89 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -41,12 +41,6 @@ namespace interfaces {
class Handler;
class Wallet;
-//! Hash/height pair to help track and identify blocks.
-struct BlockKey {
- uint256 hash;
- int height = -1;
-};
-
//! Helper for findBlock to selectively return pieces of block data. If block is
//! found, data will be returned by setting specified output variables. If block
//! is not found, output variables will keep their previous values.
@@ -356,15 +350,22 @@ public:
virtual common::SettingsValue getRwSetting(const std::string& name) = 0;
//! Updates a setting in <datadir>/settings.json.
+ //! Null can be passed to erase the setting. There is intentionally no
+ //! support for writing null values to settings.json.
//! Depending on the action returned by the update function, this will either
//! update the setting in memory or write the updated settings to disk.
virtual bool updateRwSetting(const std::string& name, const SettingsUpdate& update_function) = 0;
//! Replace a setting in <datadir>/settings.json with a new value.
- virtual bool overwriteRwSetting(const std::string& name, common::SettingsValue& value, bool write = true) = 0;
+ //! Null can be passed to erase the setting.
+ //! This method provides a simpler alternative to updateRwSetting when
+ //! atomically reading and updating the setting is not required.
+ virtual bool overwriteRwSetting(const std::string& name, common::SettingsValue value, SettingsAction action = SettingsAction::WRITE) = 0;
//! Delete a given setting in <datadir>/settings.json.
- virtual bool deleteRwSettings(const std::string& name, bool write = true) = 0;
+ //! This method provides a simpler alternative to overwriteRwSetting when
+ //! erasing a setting, for ease of use and readability.
+ virtual bool deleteRwSettings(const std::string& name, SettingsAction action = SettingsAction::WRITE) = 0;
//! Synchronously send transactionAddedToMempool notifications about all
//! current mempool transactions to the specified handler and return after
diff --git a/src/interfaces/init.h b/src/interfaces/init.h
index 094ead399d..b496ada05f 100644
--- a/src/interfaces/init.h
+++ b/src/interfaces/init.h
@@ -37,6 +37,7 @@ public:
virtual std::unique_ptr<WalletLoader> makeWalletLoader(Chain& chain) { return nullptr; }
virtual std::unique_ptr<Echo> makeEcho() { return nullptr; }
virtual Ipc* ipc() { return nullptr; }
+ virtual bool canListenIpc() { return false; }
};
//! Return implementation of Init interface for the node process. If the argv
diff --git a/src/interfaces/ipc.h b/src/interfaces/ipc.h
index 963649fc9a..fb340552c5 100644
--- a/src/interfaces/ipc.h
+++ b/src/interfaces/ipc.h
@@ -41,6 +41,11 @@ class Init;
//! to make other proxy objects calling other remote interfaces. It can also
//! destroy the initial interfaces::Init object to close the connection and
//! shut down the spawned process.
+//!
+//! When connecting to an existing process, the steps are similar to spawning a
+//! new process, except a socket is created instead of a socketpair, and
+//! destroying an Init interface doesn't end the process, since there can be
+//! multiple connections.
class Ipc
{
public:
@@ -54,6 +59,17 @@ public:
//! true. If this is not a spawned child process, return false.
virtual bool startSpawnedProcess(int argc, char* argv[], int& exit_status) = 0;
+ //! Connect to a socket address and make a client interface proxy object
+ //! using provided callback. connectAddress returns an interface pointer if
+ //! the connection was established, returns null if address is empty ("") or
+ //! disabled ("0") or if a connection was refused but not required ("auto"),
+ //! and throws an exception if there was an unexpected error.
+ virtual std::unique_ptr<Init> connectAddress(std::string& address) = 0;
+
+ //! Connect to a socket address and make a client interface proxy object
+ //! using provided callback. Throws an exception if there was an error.
+ virtual void listenAddress(std::string& address) = 0;
+
//! Add cleanup callback to remote interface that will run when the
//! interface is deleted.
template<typename Interface>
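
connectAddress assigns distinct meanings to a few special values: empty and "0" disable IPC, "auto" falls back to running without a connection if nothing is listening, and any explicit address is a hard requirement. A toy sketch of that disposition logic with a stubbed transport; the names are illustrative, not the real Ipc implementation:

    #include <iostream>
    #include <memory>
    #include <stdexcept>
    #include <string>

    struct Connection {}; // stand-in for an interfaces::Init proxy

    // Hypothetical transport: pretend no server is listening anywhere.
    std::unique_ptr<Connection> TryConnect(const std::string&)
    {
        throw std::runtime_error("connection refused");
    }

    std::unique_ptr<Connection> ConnectAddress(std::string address)
    {
        if (address.empty() || address == "0") return nullptr; // IPC disabled
        const bool optional{address == "auto"};
        if (optional) address = "unix"; // "auto" behaves like "unix"...
        try {
            return TryConnect(address);
        } catch (const std::exception&) {
            if (optional) return nullptr; // ...except failure is not fatal
            throw;                        // explicit address: propagate the error
        }
    }

    int main()
    {
        std::cout << (ConnectAddress("auto") ? "connected" : "running standalone") << "\n";
        try {
            ConnectAddress("unix:/tmp/node.sock");
        } catch (const std::exception& e) {
            std::cout << "explicit address failed: " << e.what() << "\n";
        }
    }
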
diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h
index cebe97edb7..c77f3c30a2 100644
--- a/src/interfaces/mining.h
+++ b/src/interfaces/mining.h
@@ -5,26 +5,61 @@
#ifndef BITCOIN_INTERFACES_MINING_H
#define BITCOIN_INTERFACES_MINING_H
-#include <node/types.h>
-#include <uint256.h>
-
-#include <memory>
-#include <optional>
+#include <consensus/amount.h> // for CAmount
+#include <interfaces/types.h> // for BlockRef
+#include <node/types.h> // for BlockCreateOptions
+#include <primitives/block.h> // for CBlock, CBlockHeader
+#include <primitives/transaction.h> // for CTransactionRef
+#include <stdint.h> // for int64_t
+#include <uint256.h> // for uint256
+#include <util/time.h> // for MillisecondsDouble
+
+#include <memory> // for unique_ptr, shared_ptr
+#include <optional> // for optional
+#include <vector> // for vector
namespace node {
-struct CBlockTemplate;
struct NodeContext;
} // namespace node
class BlockValidationState;
-class CBlock;
class CScript;
namespace interfaces {
+//! Block template interface
+class BlockTemplate
+{
+public:
+ virtual ~BlockTemplate() = default;
+
+ virtual CBlockHeader getBlockHeader() = 0;
+ virtual CBlock getBlock() = 0;
+
+ virtual std::vector<CAmount> getTxFees() = 0;
+ virtual std::vector<int64_t> getTxSigops() = 0;
+
+ virtual CTransactionRef getCoinbaseTx() = 0;
+ virtual std::vector<unsigned char> getCoinbaseCommitment() = 0;
+ virtual int getWitnessCommitmentIndex() = 0;
+
+ /**
+ * Compute merkle path to the coinbase transaction
+ *
+ * @return merkle path ordered from the deepest
+ */
+ virtual std::vector<uint256> getCoinbaseMerklePath() = 0;
+
+ /**
+ * Construct and broadcast the block.
+ *
+ * @returns if the block was processed, independent of block validity
+ */
+ virtual bool submitSolution(uint32_t version, uint32_t timestamp, uint32_t nonce, CMutableTransaction coinbase) = 0;
+};
+
//! Interface giving clients (RPC, Stratum v2 Template Provider in the future)
//! ability to create block templates.
-
class Mining
{
public:
@@ -36,8 +71,19 @@ public:
//! Returns whether IBD is still in progress.
virtual bool isInitialBlockDownload() = 0;
- //! Returns the hash for the tip of this chain
- virtual std::optional<uint256> getTipHash() = 0;
+ //! Returns the hash and height for the tip of this chain
+ virtual std::optional<BlockRef> getTip() = 0;
+
+ /**
+ * Waits for the connected tip to change. If the tip was not connected on
+ * startup, this will wait.
+ *
+ * @param[in] current_tip block hash of the current chain tip. Function waits
+ * for the chain tip to differ from this.
+ * @param[in] timeout how long to wait for a new tip
+ * @returns Hash and height of the current chain tip after this call.
+ */
+ virtual BlockRef waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0;
/**
* Construct a new block template
@@ -46,7 +92,7 @@ public:
* @param[in] options options for creating the block
* @returns a block template
*/
- virtual std::unique_ptr<node::CBlockTemplate> createNewBlock(const CScript& script_pub_key, const node::BlockCreateOptions& options={}) = 0;
+ virtual std::unique_ptr<BlockTemplate> createNewBlock(const CScript& script_pub_key, const node::BlockCreateOptions& options = {}) = 0;
/**
* Processes new block. A valid new block is automatically relayed to peers.
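
The getTip/waitTipChanged pair lets a template provider block until the chain advances instead of polling the tip hash. A self-contained sketch of that usage shape over a toy chain object; ToyChain and integer heights are hypothetical stand-ins for the interfaces::Mining API and BlockRef:

    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    // Toy chain with a height-only "tip"; illustrates the wait-for-change shape
    // of waitTipChanged(current_tip, timeout) rather than Bitcoin Core's types.
    class ToyChain
    {
    public:
        int getTip()
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            return m_tip;
        }
        // Blocks until the tip differs from current_tip or the timeout expires,
        // then returns whatever the tip is at that point.
        int waitTipChanged(int current_tip, std::chrono::milliseconds timeout)
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            m_cv.wait_for(lock, timeout, [&] { return m_tip != current_tip; });
            return m_tip;
        }
        void connectBlock()
        {
            {
                std::lock_guard<std::mutex> lock(m_mutex);
                ++m_tip;
            }
            m_cv.notify_all();
        }

    private:
        std::mutex m_mutex;
        std::condition_variable m_cv;
        int m_tip{0};
    };

    int main()
    {
        ToyChain chain;
        std::thread miner([&] {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            chain.connectBlock();
        });
        int tip = chain.getTip();
        // Template provider loop body: rebuild work whenever the tip moves.
        tip = chain.waitTipChanged(tip, std::chrono::seconds(1));
        std::cout << "new tip height: " << tip << "\n";
        miner.join();
    }
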
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index 81844c6185..91a623a65d 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -7,6 +7,7 @@
#include <common/settings.h>
#include <consensus/amount.h> // For CAmount
+#include <logging.h> // For BCLog::CategoryMask
#include <net.h> // For NodeId
#include <net_types.h> // For banmap_t
#include <netaddress.h> // For Network
@@ -84,7 +85,7 @@ public:
virtual int getExitStatus() = 0;
// Get log flags.
- virtual uint32_t getLogCategories() = 0;
+ virtual BCLog::CategoryMask getLogCategories() = 0;
//! Initialize app dependencies.
virtual bool baseInitialize() = 0;
@@ -120,7 +121,7 @@ public:
virtual void resetSettings() = 0;
//! Map port.
- virtual void mapPort(bool use_upnp, bool use_natpmp) = 0;
+ virtual void mapPort(bool use_upnp, bool use_pcp) = 0;
//! Get proxy.
virtual bool getProxy(Network net, Proxy& proxy_info) = 0;
diff --git a/src/interfaces/types.h b/src/interfaces/types.h
new file mode 100644
index 0000000000..e5edd301a7
--- /dev/null
+++ b/src/interfaces/types.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_INTERFACES_TYPES_H
+#define BITCOIN_INTERFACES_TYPES_H
+
+#include <uint256.h>
+
+namespace interfaces {
+
+//! Hash/height pair to help track and identify blocks.
+struct BlockRef {
+ uint256 hash;
+ int height = -1;
+};
+
+} // namespace interfaces
+
+#endif // BITCOIN_INTERFACES_TYPES_H
diff --git a/src/ipc/CMakeLists.txt b/src/ipc/CMakeLists.txt
index 94b1ceb54e..904d72f56e 100644
--- a/src/ipc/CMakeLists.txt
+++ b/src/ipc/CMakeLists.txt
@@ -3,16 +3,21 @@
# file COPYING or https://opensource.org/license/mit/.
add_library(bitcoin_ipc STATIC EXCLUDE_FROM_ALL
+ capnp/mining.cpp
capnp/protocol.cpp
interfaces.cpp
process.cpp
)
target_capnp_sources(bitcoin_ipc ${PROJECT_SOURCE_DIR}
- capnp/echo.capnp capnp/init.capnp
+ capnp/common.capnp
+ capnp/echo.capnp
+ capnp/init.capnp
+ capnp/mining.capnp
)
target_link_libraries(bitcoin_ipc
PRIVATE
core_interface
+ univalue
)
diff --git a/src/ipc/capnp/common-types.h b/src/ipc/capnp/common-types.h
index 39e368491b..51af6a5f0a 100644
--- a/src/ipc/capnp/common-types.h
+++ b/src/ipc/capnp/common-types.h
@@ -6,6 +6,9 @@
#define BITCOIN_IPC_CAPNP_COMMON_TYPES_H
#include <clientversion.h>
+#include <interfaces/types.h>
+#include <primitives/transaction.h>
+#include <serialize.h>
#include <streams.h>
#include <univalue.h>
@@ -16,33 +19,24 @@
namespace ipc {
namespace capnp {
-//! Use SFINAE to define Serializeable<T> trait which is true if type T has a
-//! Serialize(stream) method, false otherwise.
-template <typename T>
-struct Serializable {
-private:
- template <typename C>
- static std::true_type test(decltype(std::declval<C>().Serialize(std::declval<std::nullptr_t&>()))*);
- template <typename>
- static std::false_type test(...);
-
-public:
- static constexpr bool value = decltype(test<T>(nullptr))::value;
-};
+//! Construct a ParamStream wrapping a data stream with serialization parameters
+//! needed to pass transaction objects between bitcoin processes.
+//! In the future, more params may be added here to serialize other objects that
+//! require serialization parameters. Params should just be chosen to serialize
+//! objects completely and ensure that serializing and deserializing objects
+//! with the specified parameters produces equivalent objects. It's also
+//! harmless to specify serialization parameters here that are not used.
+template <typename S>
+auto Wrap(S& s)
+{
+ return ParamsStream{s, TX_WITH_WITNESS};
+}
-//! Use SFINAE to define Unserializeable<T> trait which is true if type T has
-//! an Unserialize(stream) method, false otherwise.
+//! Detect if type has a deserialize_type constructor, which is
+//! used to deserialize types like CTransaction that can't be unserialized into
+//! existing objects because they are immutable.
template <typename T>
-struct Unserializable {
-private:
- template <typename C>
- static std::true_type test(decltype(std::declval<C>().Unserialize(std::declval<std::nullptr_t&>()))*);
- template <typename>
- static std::false_type test(...);
-
-public:
- static constexpr bool value = decltype(test<T>(nullptr))::value;
-};
+concept Deserializable = std::is_constructible_v<T, ::deserialize_type, ::DataStream&>;
} // namespace capnp
} // namespace ipc
@@ -50,42 +44,78 @@ public:
namespace mp {
//! Overload multiprocess library's CustomBuildField hook to allow any
//! serializable object to be stored in a capnproto Data field or passed to a
-//! canproto interface. Use Priority<1> so this hook has medium priority, and
+//! capnproto interface. Use Priority<1> so this hook has medium priority, and
//! higher priority hooks could take precedence over this one.
template <typename LocalType, typename Value, typename Output>
-void CustomBuildField(
- TypeList<LocalType>, Priority<1>, InvokeContext& invoke_context, Value&& value, Output&& output,
- // Enable if serializeable and if LocalType is not cv or reference
- // qualified. If LocalType is cv or reference qualified, it is important to
- // fall back to lower-priority Priority<0> implementation of this function
- // that strips cv references, to prevent this CustomBuildField overload from
- // taking precedence over more narrow overloads for specific LocalTypes.
- std::enable_if_t<ipc::capnp::Serializable<LocalType>::value &&
- std::is_same_v<LocalType, std::remove_cv_t<std::remove_reference_t<LocalType>>>>* enable = nullptr)
+void CustomBuildField(TypeList<LocalType>, Priority<1>, InvokeContext& invoke_context, Value&& value, Output&& output)
+// Enable if serializable and if LocalType is not cv or reference qualified. If
+// LocalType is cv or reference qualified, it is important to fall back to
+// lower-priority Priority<0> implementation of this function that strips cv
+// references, to prevent this CustomBuildField overload from taking precedence
+// over more narrow overloads for specific LocalTypes.
+requires Serializable<LocalType, DataStream> && std::is_same_v<LocalType, std::remove_cv_t<std::remove_reference_t<LocalType>>>
{
DataStream stream;
- value.Serialize(stream);
+ auto wrapper{ipc::capnp::Wrap(stream)};
+ value.Serialize(wrapper);
auto result = output.init(stream.size());
memcpy(result.begin(), stream.data(), stream.size());
}
//! Overload multiprocess library's CustomReadField hook to allow any object
//! with an Unserialize method to be read from a capnproto Data field or
-//! returned from canproto interface. Use Priority<1> so this hook has medium
+//! returned from capnproto interface. Use Priority<1> so this hook has medium
//! priority, and higher priority hooks could take precedence over this one.
template <typename LocalType, typename Input, typename ReadDest>
-decltype(auto)
-CustomReadField(TypeList<LocalType>, Priority<1>, InvokeContext& invoke_context, Input&& input, ReadDest&& read_dest,
- std::enable_if_t<ipc::capnp::Unserializable<LocalType>::value>* enable = nullptr)
+decltype(auto) CustomReadField(TypeList<LocalType>, Priority<1>, InvokeContext& invoke_context, Input&& input, ReadDest&& read_dest)
+requires Unserializable<LocalType, DataStream> && (!ipc::capnp::Deserializable<LocalType>)
{
return read_dest.update([&](auto& value) {
if (!input.has()) return;
auto data = input.get();
SpanReader stream({data.begin(), data.end()});
- value.Unserialize(stream);
+ auto wrapper{ipc::capnp::Wrap(stream)};
+ value.Unserialize(wrapper);
});
}
+//! Overload multiprocess library's CustomReadField hook to allow any object
+//! with a deserialize constructor to be read from a capnproto Data field or
+//! returned from capnproto interface. Use Priority<1> so this hook has medium
+//! priority, and higher priority hooks could take precedence over this one.
+template <typename LocalType, typename Input, typename ReadDest>
+decltype(auto) CustomReadField(TypeList<LocalType>, Priority<1>, InvokeContext& invoke_context, Input&& input, ReadDest&& read_dest)
+requires ipc::capnp::Deserializable<LocalType>
+{
+ assert(input.has());
+ auto data = input.get();
+ SpanReader stream({data.begin(), data.end()});
+ auto wrapper{ipc::capnp::Wrap(stream)};
+ return read_dest.construct(::deserialize, wrapper);
+}
+
+//! Overload CustomBuildField and CustomReadField to serialize std::chrono
+//! parameters and return values as numbers.
+template <class Rep, class Period, typename Value, typename Output>
+void CustomBuildField(TypeList<std::chrono::duration<Rep, Period>>, Priority<1>, InvokeContext& invoke_context, Value&& value,
+ Output&& output)
+{
+ static_assert(std::numeric_limits<decltype(output.get())>::lowest() <= std::numeric_limits<Rep>::lowest(),
+ "capnp type does not have enough range to hold lowest std::chrono::duration value");
+ static_assert(std::numeric_limits<decltype(output.get())>::max() >= std::numeric_limits<Rep>::max(),
+ "capnp type does not have enough range to hold highest std::chrono::duration value");
+ output.set(value.count());
+}
+
+template <class Rep, class Period, typename Input, typename ReadDest>
+decltype(auto) CustomReadField(TypeList<std::chrono::duration<Rep, Period>>, Priority<1>, InvokeContext& invoke_context,
+ Input&& input, ReadDest&& read_dest)
+{
+ return read_dest.construct(input.get());
+}
+
+//! Overload CustomBuildField and CustomReadField to serialize UniValue
+//! parameters and return values as JSON strings.
template <typename Value, typename Output>
void CustomBuildField(TypeList<UniValue>, Priority<1>, InvokeContext& invoke_context, Value&& value, Output&& output)
{
@@ -103,6 +133,33 @@ decltype(auto) CustomReadField(TypeList<UniValue>, Priority<1>, InvokeContext& i
value.read(std::string_view{data.begin(), data.size()});
});
}
+
+//! Generic ::capnp::Data field builder for any C++ type that can be converted
+//! to a span of bytes, like std::vector<char> or std::array<uint8_t>, or custom
+//! blob types like uint256 or PKHash with data() and size() methods pointing to
+//! bytes.
+//!
+//! Note: it might make sense to move this function into libmultiprocess, since
+//! it is fairly generic. However this would require decreasing its priority so
+//! it can be overridden, which would require more changes inside
+//! libmultiprocess to avoid conflicting with the Priority<1> CustomBuildField
+//! function it already provides for std::vector. Also, it might make sense to
+//! provide a CustomReadField counterpart to this function, which could be
+//! called to read C++ types that can be constructed from spans of bytes from
+//! ::capnp::Data fields. But so far there hasn't been a need for this.
+template <typename LocalType, typename Value, typename Output>
+void CustomBuildField(TypeList<LocalType>, Priority<2>, InvokeContext& invoke_context, Value&& value, Output&& output)
+requires
+ (std::is_same_v<decltype(output.get()), ::capnp::Data::Builder>) &&
+ (std::convertible_to<Value, std::span<const std::byte>> ||
+ std::convertible_to<Value, std::span<const char>> ||
+ std::convertible_to<Value, std::span<const unsigned char>> ||
+ std::convertible_to<Value, std::span<const signed char>>)
+{
+ auto data = std::span{value};
+ auto result = output.init(data.size());
+ memcpy(result.begin(), data.data(), data.size());
+}
} // namespace mp
#endif // BITCOIN_IPC_CAPNP_COMMON_TYPES_H
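
The SFINAE Serializable/Unserializable traits above give way to requires-clauses plus a Deserializable concept keyed on a deserialize_type constructor. A stand-alone illustration of that overload-selection style with toy stream and value types, not the mp hook machinery itself:

    #include <iostream>
    #include <string>
    #include <type_traits>

    struct deserialize_type {};            // tag type, as in Bitcoin Core's serialize.h
    inline constexpr deserialize_type deserialize{};

    struct Stream { std::string data; };   // toy stream

    // Type that can be read into an existing object.
    struct Mutable {
        int n{0};
        void Unserialize(Stream& s) { n = static_cast<int>(s.data.size()); }
    };

    // Immutable type that can only be constructed from a stream.
    struct Immutable {
        int n;
        Immutable(deserialize_type, Stream& s) : n(static_cast<int>(s.data.size())) {}
    };

    template <typename T>
    concept Deserializable = std::is_constructible_v<T, deserialize_type, Stream&>;

    template <typename T>
    concept Unserializable = requires(T t, Stream& s) { t.Unserialize(s); };

    // Two overloads, disambiguated the same way as the CustomReadField hooks:
    // the constructor-based path wins for types that have it.
    template <typename T> requires Unserializable<T> && (!Deserializable<T>)
    T Read(Stream& s) { T t; t.Unserialize(s); return t; }

    template <typename T> requires Deserializable<T>
    T Read(Stream& s) { return T{deserialize, s}; }

    int main()
    {
        Stream s{"abcde"};
        std::cout << Read<Mutable>(s).n << " " << Read<Immutable>(s).n << "\n";
    }
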
diff --git a/src/ipc/capnp/common.capnp b/src/ipc/capnp/common.capnp
new file mode 100644
index 0000000000..b3359f3f07
--- /dev/null
+++ b/src/ipc/capnp/common.capnp
@@ -0,0 +1,16 @@
+# Copyright (c) 2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+@0xcd2c6232cb484a28;
+
+using Cxx = import "/capnp/c++.capnp";
+$Cxx.namespace("ipc::capnp::messages");
+
+using Proxy = import "/mp/proxy.capnp";
+$Proxy.includeTypes("ipc/capnp/common-types.h");
+
+struct BlockRef $Proxy.wrap("interfaces::BlockRef") {
+ hash @0 :Data;
+ height @1 :Int32;
+}
diff --git a/src/ipc/capnp/init-types.h b/src/ipc/capnp/init-types.h
index 42031441b5..c3ddca27c0 100644
--- a/src/ipc/capnp/init-types.h
+++ b/src/ipc/capnp/init-types.h
@@ -6,5 +6,6 @@
#define BITCOIN_IPC_CAPNP_INIT_TYPES_H
#include <ipc/capnp/echo.capnp.proxy-types.h>
+#include <ipc/capnp/mining.capnp.proxy-types.h>
#endif // BITCOIN_IPC_CAPNP_INIT_TYPES_H
diff --git a/src/ipc/capnp/init.capnp b/src/ipc/capnp/init.capnp
index e6d358c665..1001ee5336 100644
--- a/src/ipc/capnp/init.capnp
+++ b/src/ipc/capnp/init.capnp
@@ -10,11 +10,14 @@ $Cxx.namespace("ipc::capnp::messages");
using Proxy = import "/mp/proxy.capnp";
$Proxy.include("interfaces/echo.h");
$Proxy.include("interfaces/init.h");
+$Proxy.include("interfaces/mining.h");
$Proxy.includeTypes("ipc/capnp/init-types.h");
using Echo = import "echo.capnp";
+using Mining = import "mining.capnp";
interface Init $Proxy.wrap("interfaces::Init") {
construct @0 (threadMap: Proxy.ThreadMap) -> (threadMap :Proxy.ThreadMap);
makeEcho @1 (context :Proxy.Context) -> (result :Echo.Echo);
+ makeMining @2 (context :Proxy.Context) -> (result :Mining.Mining);
}
diff --git a/src/ipc/capnp/mining-types.h b/src/ipc/capnp/mining-types.h
new file mode 100644
index 0000000000..2e60b43fcf
--- /dev/null
+++ b/src/ipc/capnp/mining-types.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_IPC_CAPNP_MINING_TYPES_H
+#define BITCOIN_IPC_CAPNP_MINING_TYPES_H
+
+#include <interfaces/mining.h>
+#include <ipc/capnp/common.capnp.proxy-types.h>
+#include <ipc/capnp/common-types.h>
+#include <ipc/capnp/mining.capnp.proxy.h>
+#include <node/miner.h>
+#include <node/types.h>
+#include <validation.h>
+
+namespace mp {
+// Custom serialization for BlockValidationState.
+void CustomBuildMessage(InvokeContext& invoke_context,
+ const BlockValidationState& src,
+ ipc::capnp::messages::BlockValidationState::Builder&& builder);
+void CustomReadMessage(InvokeContext& invoke_context,
+ const ipc::capnp::messages::BlockValidationState::Reader& reader,
+ BlockValidationState& dest);
+} // namespace mp
+
+#endif // BITCOIN_IPC_CAPNP_MINING_TYPES_H
diff --git a/src/ipc/capnp/mining.capnp b/src/ipc/capnp/mining.capnp
new file mode 100644
index 0000000000..5e0216acea
--- /dev/null
+++ b/src/ipc/capnp/mining.capnp
@@ -0,0 +1,52 @@
+# Copyright (c) 2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+@0xc77d03df6a41b505;
+
+using Cxx = import "/capnp/c++.capnp";
+$Cxx.namespace("ipc::capnp::messages");
+
+using Common = import "common.capnp";
+using Proxy = import "/mp/proxy.capnp";
+$Proxy.include("interfaces/mining.h");
+$Proxy.includeTypes("ipc/capnp/mining-types.h");
+
+interface Mining $Proxy.wrap("interfaces::Mining") {
+ isTestChain @0 (context :Proxy.Context) -> (result: Bool);
+ isInitialBlockDownload @1 (context :Proxy.Context) -> (result: Bool);
+ getTip @2 (context :Proxy.Context) -> (result: Common.BlockRef, hasResult: Bool);
+ waitTipChanged @3 (context :Proxy.Context, currentTip: Data, timeout: Float64) -> (result: Common.BlockRef);
+ createNewBlock @4 (scriptPubKey: Data, options: BlockCreateOptions) -> (result: BlockTemplate);
+ processNewBlock @5 (context :Proxy.Context, block: Data) -> (newBlock: Bool, result: Bool);
+ getTransactionsUpdated @6 (context :Proxy.Context) -> (result: UInt32);
+ testBlockValidity @7 (context :Proxy.Context, block: Data, checkMerkleRoot: Bool) -> (state: BlockValidationState, result: Bool);
+}
+
+interface BlockTemplate $Proxy.wrap("interfaces::BlockTemplate") {
+ getBlockHeader @0 (context: Proxy.Context) -> (result: Data);
+ getBlock @1 (context: Proxy.Context) -> (result: Data);
+ getTxFees @2 (context: Proxy.Context) -> (result: List(Int64));
+ getTxSigops @3 (context: Proxy.Context) -> (result: List(Int64));
+ getCoinbaseTx @4 (context: Proxy.Context) -> (result: Data);
+ getCoinbaseCommitment @5 (context: Proxy.Context) -> (result: Data);
+ getWitnessCommitmentIndex @6 (context: Proxy.Context) -> (result: Int32);
+ getCoinbaseMerklePath @7 (context: Proxy.Context) -> (result: List(Data));
+ submitSolution@8 (context: Proxy.Context, version: UInt32, timestamp: UInt32, nonce: UInt32, coinbase :Data) -> (result: Bool);
+}
+
+struct BlockCreateOptions $Proxy.wrap("node::BlockCreateOptions") {
+ useMempool @0 :Bool $Proxy.name("use_mempool");
+ coinbaseMaxAdditionalWeight @1 :UInt64 $Proxy.name("coinbase_max_additional_weight");
+ coinbaseOutputMaxAdditionalSigops @2 :UInt64 $Proxy.name("coinbase_output_max_additional_sigops");
+}
+
+# Note: serialization of the BlockValidationState C++ type is somewhat fragile
+# and using the struct can be awkward. It would be good if testBlockValidity
+# method were changed to return validity information in a simpler format.
+struct BlockValidationState {
+ mode @0 :Int32;
+ result @1 :Int32;
+ rejectReason @2 :Text;
+ debugMessage @3 :Text;
+}
diff --git a/src/ipc/capnp/mining.cpp b/src/ipc/capnp/mining.cpp
new file mode 100644
index 0000000000..0f9533c1c7
--- /dev/null
+++ b/src/ipc/capnp/mining.cpp
@@ -0,0 +1,47 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <ipc/capnp/mining-types.h>
+#include <ipc/capnp/mining.capnp.proxy-types.h>
+
+#include <mp/proxy-types.h>
+
+namespace mp {
+void CustomBuildMessage(InvokeContext& invoke_context,
+ const BlockValidationState& src,
+ ipc::capnp::messages::BlockValidationState::Builder&& builder)
+{
+ if (src.IsValid()) {
+ builder.setMode(0);
+ } else if (src.IsInvalid()) {
+ builder.setMode(1);
+ } else if (src.IsError()) {
+ builder.setMode(2);
+ } else {
+ assert(false);
+ }
+ builder.setResult(static_cast<int>(src.GetResult()));
+ builder.setRejectReason(src.GetRejectReason());
+ builder.setDebugMessage(src.GetDebugMessage());
+}
+
+void CustomReadMessage(InvokeContext& invoke_context,
+ const ipc::capnp::messages::BlockValidationState::Reader& reader,
+ BlockValidationState& dest)
+{
+ if (reader.getMode() == 0) {
+ assert(reader.getResult() == 0);
+ assert(reader.getRejectReason().size() == 0);
+ assert(reader.getDebugMessage().size() == 0);
+ } else if (reader.getMode() == 1) {
+ dest.Invalid(static_cast<BlockValidationResult>(reader.getResult()), reader.getRejectReason(), reader.getDebugMessage());
+ } else if (reader.getMode() == 2) {
+ assert(reader.getResult() == 0);
+ dest.Error(reader.getRejectReason());
+ assert(reader.getDebugMessage().size() == 0);
+ } else {
+ assert(false);
+ }
+}
+} // namespace mp
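
The custom Cap'n Proto mapping above flattens BlockValidationState into a small mode/result/reason record; the property that matters is that encoding followed by decoding reproduces an equivalent state. A toy round-trip sketch of that tri-state encoding using plain structs in place of the generated capnp classes:

    #include <cassert>
    #include <iostream>
    #include <string>

    enum class Mode { VALID, INVALID, ERROR_ };

    struct State {              // stand-in for BlockValidationState
        Mode mode{Mode::VALID};
        int result{0};
        std::string reject_reason;
    };

    struct Wire {               // stand-in for the capnp message
        int mode{0};
        int result{0};
        std::string reject_reason;
    };

    Wire Encode(const State& s)
    {
        Wire w;
        w.mode = s.mode == Mode::VALID ? 0 : s.mode == Mode::INVALID ? 1 : 2;
        w.result = s.result;
        w.reject_reason = s.reject_reason;
        return w;
    }

    State Decode(const Wire& w)
    {
        State s;
        if (w.mode == 0) {
            assert(w.result == 0 && w.reject_reason.empty()); // valid carries no payload
        } else if (w.mode == 1) {
            s.mode = Mode::INVALID;
            s.result = w.result;
            s.reject_reason = w.reject_reason;
        } else {
            s.mode = Mode::ERROR_;
            s.reject_reason = w.reject_reason;
        }
        return s;
    }

    int main()
    {
        State invalid{Mode::INVALID, 16, "bad-txns"};
        State round_trip{Decode(Encode(invalid))};
        std::cout << round_trip.reject_reason << " (" << round_trip.result << ")\n";
    }
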
diff --git a/src/ipc/capnp/protocol.cpp b/src/ipc/capnp/protocol.cpp
index 73276d6d90..4b67a5bd1e 100644
--- a/src/ipc/capnp/protocol.cpp
+++ b/src/ipc/capnp/protocol.cpp
@@ -23,6 +23,8 @@
#include <mutex>
#include <optional>
#include <string>
+#include <sys/socket.h>
+#include <system_error>
#include <thread>
namespace ipc {
@@ -51,11 +53,20 @@ public:
startLoop(exe_name);
return mp::ConnectStream<messages::Init>(*m_loop, fd);
}
- void serve(int fd, const char* exe_name, interfaces::Init& init) override
+ void listen(int listen_fd, const char* exe_name, interfaces::Init& init) override
+ {
+ startLoop(exe_name);
+ if (::listen(listen_fd, /*backlog=*/5) != 0) {
+ throw std::system_error(errno, std::system_category());
+ }
+ mp::ListenConnections<messages::Init>(*m_loop, listen_fd, init);
+ }
+ void serve(int fd, const char* exe_name, interfaces::Init& init, const std::function<void()>& ready_fn = {}) override
{
assert(!m_loop);
mp::g_thread_context.thread_name = mp::ThreadName(exe_name);
m_loop.emplace(exe_name, &IpcLogFn, &m_context);
+ if (ready_fn) ready_fn();
mp::ServeStream<messages::Init>(*m_loop, fd, init);
m_loop->loop();
m_loop.reset();
diff --git a/src/ipc/interfaces.cpp b/src/ipc/interfaces.cpp
index b409443f64..33555f05d4 100644
--- a/src/ipc/interfaces.cpp
+++ b/src/ipc/interfaces.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <common/args.h>
#include <common/system.h>
#include <interfaces/init.h>
#include <interfaces/ipc.h>
@@ -56,6 +57,35 @@ public:
exit_status = EXIT_SUCCESS;
return true;
}
+ std::unique_ptr<interfaces::Init> connectAddress(std::string& address) override
+ {
+ if (address.empty() || address == "0") return nullptr;
+ int fd;
+ if (address == "auto") {
+ // Treat "auto" the same as "unix" except don't treat it an as error
+ // if the connection is not accepted. Just return null so the caller
+ // can work offline without a connection, or spawn a new
+ // bitcoin-node process and connect to it.
+ address = "unix";
+ try {
+ fd = m_process->connect(gArgs.GetDataDirNet(), "bitcoin-node", address);
+ } catch (const std::system_error& e) {
+ // If connection type is auto and socket path isn't accepting connections, or doesn't exist, catch the error and return null;
+ if (e.code() == std::errc::connection_refused || e.code() == std::errc::no_such_file_or_directory) {
+ return nullptr;
+ }
+ throw;
+ }
+ } else {
+ fd = m_process->connect(gArgs.GetDataDirNet(), "bitcoin-node", address);
+ }
+ return m_protocol->connect(fd, m_exe_name);
+ }
+ void listenAddress(std::string& address) override
+ {
+ int fd = m_process->bind(gArgs.GetDataDirNet(), m_exe_name, address);
+ m_protocol->listen(fd, m_exe_name, m_init);
+ }
void addCleanup(std::type_index type, void* iface, std::function<void()> cleanup) override
{
m_protocol->addCleanup(type, iface, std::move(cleanup));
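
The "auto" connect path above distinguishes "nothing is listening" (connection refused, or the socket file does not exist) from unexpected failures by comparing the std::system_error code against portable std::errc conditions. A tiny self-contained demonstration of that comparison:

    #include <cerrno>
    #include <iostream>
    #include <system_error>

    int main()
    {
        try {
            // Simulate a failed ::connect() on a missing unix socket path.
            throw std::system_error(ENOENT, std::system_category());
        } catch (const std::system_error& e) {
            // A system-category error_code compares equal to the matching
            // portable std::errc condition.
            if (e.code() == std::errc::no_such_file_or_directory ||
                e.code() == std::errc::connection_refused) {
                std::cout << "no server available, continuing without IPC\n";
            } else {
                throw; // unexpected error: propagate
            }
        }
    }
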
diff --git a/src/ipc/process.cpp b/src/ipc/process.cpp
index 9657dcd092..432c365d8f 100644
--- a/src/ipc/process.cpp
+++ b/src/ipc/process.cpp
@@ -4,22 +4,28 @@
#include <ipc/process.h>
#include <ipc/protocol.h>
+#include <logging.h>
#include <mp/util.h>
#include <tinyformat.h>
#include <util/fs.h>
#include <util/strencodings.h>
+#include <util/syserror.h>
#include <cstdint>
#include <cstdlib>
+#include <errno.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string.h>
-#include <system_error>
+#include <sys/socket.h>
+#include <sys/un.h>
#include <unistd.h>
#include <utility>
#include <vector>
+using util::RemovePrefixView;
+
namespace ipc {
namespace {
class ProcessImpl : public Process
@@ -54,7 +60,95 @@ public:
}
return true;
}
+ int connect(const fs::path& data_dir,
+ const std::string& dest_exe_name,
+ std::string& address) override;
+ int bind(const fs::path& data_dir, const std::string& exe_name, std::string& address) override;
};
+
+static bool ParseAddress(std::string& address,
+ const fs::path& data_dir,
+ const std::string& dest_exe_name,
+ struct sockaddr_un& addr,
+ std::string& error)
+{
+ if (address.compare(0, 4, "unix") == 0 && (address.size() == 4 || address[4] == ':')) {
+ fs::path path;
+ if (address.size() <= 5) {
+ path = data_dir / fs::PathFromString(strprintf("%s.sock", RemovePrefixView(dest_exe_name, "bitcoin-")));
+ } else {
+ path = data_dir / fs::PathFromString(address.substr(5));
+ }
+ std::string path_str = fs::PathToString(path);
+ address = strprintf("unix:%s", path_str);
+ if (path_str.size() >= sizeof(addr.sun_path)) {
+ error = strprintf("Unix address path %s exceeded maximum socket path length", fs::quoted(fs::PathToString(path)));
+ return false;
+ }
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strncpy(addr.sun_path, path_str.c_str(), sizeof(addr.sun_path)-1);
+ return true;
+ }
+
+ error = strprintf("Unrecognized address '%s'", address);
+ return false;
+}
+
+int ProcessImpl::connect(const fs::path& data_dir,
+ const std::string& dest_exe_name,
+ std::string& address)
+{
+ struct sockaddr_un addr;
+ std::string error;
+ if (!ParseAddress(address, data_dir, dest_exe_name, addr, error)) {
+ throw std::invalid_argument(error);
+ }
+
+ int fd;
+ if ((fd = ::socket(addr.sun_family, SOCK_STREAM, 0)) == -1) {
+ throw std::system_error(errno, std::system_category());
+ }
+ if (::connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0) {
+ return fd;
+ }
+ int connect_error = errno;
+ if (::close(fd) != 0) {
+ LogPrintf("Error closing file descriptor %i '%s': %s\n", fd, address, SysErrorString(errno));
+ }
+ throw std::system_error(connect_error, std::system_category());
+}
+
+int ProcessImpl::bind(const fs::path& data_dir, const std::string& exe_name, std::string& address)
+{
+ struct sockaddr_un addr;
+ std::string error;
+ if (!ParseAddress(address, data_dir, exe_name, addr, error)) {
+ throw std::invalid_argument(error);
+ }
+
+ if (addr.sun_family == AF_UNIX) {
+ fs::path path = addr.sun_path;
+ if (path.has_parent_path()) fs::create_directories(path.parent_path());
+ if (fs::symlink_status(path).type() == fs::file_type::socket) {
+ fs::remove(path);
+ }
+ }
+
+ int fd;
+ if ((fd = ::socket(addr.sun_family, SOCK_STREAM, 0)) == -1) {
+ throw std::system_error(errno, std::system_category());
+ }
+
+ if (::bind(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0) {
+ return fd;
+ }
+ int bind_error = errno;
+ if (::close(fd) != 0) {
+ LogPrintf("Error closing file descriptor %i: %s\n", fd, SysErrorString(errno));
+ }
+ throw std::system_error(bind_error, std::system_category());
+}
} // namespace
std::unique_ptr<Process> MakeProcess() { return std::make_unique<ProcessImpl>(); }
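
ParseAddress above canonicalizes "unix" or "unix:<path>" into a sockaddr_un rooted in the data directory, and connect() then opens a SOCK_STREAM socket to it. A stripped-down POSIX sketch of the same flow, with fixed paths and none of the data-dir or logging handling in ProcessImpl:

    #include <cerrno>
    #include <cstring>
    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <system_error>
    #include <unistd.h>

    // Turn "unix" or "unix:<path>" into a filesystem socket path.
    std::string ParseUnixAddress(const std::string& address, const std::string& default_path)
    {
        if (address == "unix") return default_path;
        const std::string prefix{"unix:"};
        if (address.rfind(prefix, 0) == 0) return address.substr(prefix.size());
        throw std::invalid_argument("Unrecognized address '" + address + "'");
    }

    int ConnectUnix(const std::string& path)
    {
        sockaddr_un addr{};
        addr.sun_family = AF_UNIX;
        if (path.size() >= sizeof(addr.sun_path)) {
            throw std::invalid_argument("socket path too long: " + path);
        }
        std::strncpy(addr.sun_path, path.c_str(), sizeof(addr.sun_path) - 1);

        int fd = ::socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd == -1) throw std::system_error(errno, std::system_category());
        if (::connect(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) != 0) {
            int err = errno;
            ::close(fd);
            throw std::system_error(err, std::system_category());
        }
        return fd;
    }

    int main()
    {
        try {
            int fd = ConnectUnix(ParseUnixAddress("unix", "/tmp/node.sock"));
            std::cout << "connected, fd=" << fd << "\n";
            ::close(fd);
        } catch (const std::exception& e) {
            std::cerr << e.what() << "\n";
        }
    }
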
diff --git a/src/ipc/process.h b/src/ipc/process.h
index 40f2d2acf6..2ed8b73fab 100644
--- a/src/ipc/process.h
+++ b/src/ipc/process.h
@@ -34,6 +34,16 @@ public:
//! process. If so, return true and a file descriptor for communicating
//! with the parent process.
virtual bool checkSpawned(int argc, char* argv[], int& fd) = 0;
+
+ //! Canonicalize and connect to address, returning socket descriptor.
+ virtual int connect(const fs::path& data_dir,
+ const std::string& dest_exe_name,
+ std::string& address) = 0;
+
+ //! Create listening socket, bind and canonicalize address, and return socket descriptor.
+ virtual int bind(const fs::path& data_dir,
+ const std::string& exe_name,
+ std::string& address) = 0;
};
//! Constructor for Process interface. Implementation will vary depending on
diff --git a/src/ipc/protocol.h b/src/ipc/protocol.h
index 4cd892e411..b2ebf99e8c 100644
--- a/src/ipc/protocol.h
+++ b/src/ipc/protocol.h
@@ -25,12 +25,38 @@ public:
//! Return Init interface that forwards requests over given socket descriptor.
//! Socket communication is handled on a background thread.
+ //!
+ //! @note It could be potentially useful in the future to add
+ //! std::function<void()> on_disconnect callback argument here. But there
+ //! isn't an immediate need, because the protocol implementation can clean
+ //! up its own state (calling ProxyServer destructors, etc) on disconnect,
+ //! and any client calls will just throw ipc::Exception errors after a
+ //! disconnect.
virtual std::unique_ptr<interfaces::Init> connect(int fd, const char* exe_name) = 0;
+ //! Listen for connections on provided socket descriptor, accept them, and
+ //! handle requests on accepted connections. This method doesn't block, and
+ //! performs I/O on a background thread.
+ virtual void listen(int listen_fd, const char* exe_name, interfaces::Init& init) = 0;
+
//! Handle requests on provided socket descriptor, forwarding them to the
//! provided Init interface. Socket communication is handled on the
//! current thread, and this call blocks until the socket is closed.
- virtual void serve(int fd, const char* exe_name, interfaces::Init& init) = 0;
+ //!
+ //! @note: If this method is called, it needs be called before connect() or
+ //! listen() methods, because for ease of implementation it's inflexible and
+ //! always runs the event loop in the foreground thread. It can share its
+ //! event loop with the other methods but can't share an event loop that was
+ //! created by them. This isn't really a problem because serve() is only
+ //! called by spawned child processes that call it immediately to
+ //! communicate back with parent processes.
+ //
+ //! The optional `ready_fn` callback will be called after the event loop is
+ //! created but before it is started. This can be useful in tests to trigger
+ //! client connections from another thread as soon as the event loop is
+ //! available, but should not be necessary in normal code which starts
+ //! clients and servers independently.
+ virtual void serve(int fd, const char* exe_name, interfaces::Init& init, const std::function<void()>& ready_fn = {}) = 0;
//! Add cleanup callback to interface that will run when the interface is
//! deleted.
diff --git a/src/kernel/CMakeLists.txt b/src/kernel/CMakeLists.txt
index ffb1a857ac..7bf8efc516 100644
--- a/src/kernel/CMakeLists.txt
+++ b/src/kernel/CMakeLists.txt
@@ -84,6 +84,7 @@ target_link_libraries(bitcoinkernel
bitcoin_crypto
leveldb
secp256k1
+ $<TARGET_NAME_IF_EXISTS:USDT::headers>
PUBLIC
Boost::headers
)
@@ -98,9 +99,49 @@ set_target_properties(bitcoinkernel PROPERTIES
CXX_VISIBILITY_PRESET default
)
+# When building the static library, install all static libraries the
+# bitcoinkernel depends on.
+if(NOT BUILD_SHARED_LIBS)
+ # Recursively get all the static libraries a target depends on and put them in libs_out
+ function(get_target_static_link_libs target libs_out)
+ get_target_property(linked_libraries ${target} LINK_LIBRARIES)
+ foreach(dep ${linked_libraries})
+ if(TARGET ${dep})
+ get_target_property(dep_type ${dep} TYPE)
+ if(dep_type STREQUAL "STATIC_LIBRARY")
+ list(APPEND ${libs_out} ${dep})
+ get_target_static_link_libs(${dep} ${libs_out})
+ endif()
+ endif()
+ endforeach()
+ set(${libs_out} ${${libs_out}} PARENT_SCOPE)
+ endfunction()
+
+ set(all_kernel_static_link_libs "")
+ get_target_static_link_libs(bitcoinkernel all_kernel_static_link_libs)
+
+ # LIBS_PRIVATE is substituted in the pkg-config file.
+ set(LIBS_PRIVATE "")
+ foreach(lib ${all_kernel_static_link_libs})
+ install(TARGETS ${lib} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ string(APPEND LIBS_PRIVATE " -l${lib}")
+ endforeach()
+
+ string(STRIP "${LIBS_PRIVATE}" LIBS_PRIVATE)
+endif()
+
+configure_file(${PROJECT_SOURCE_DIR}/libbitcoinkernel.pc.in ${PROJECT_BINARY_DIR}/libbitcoinkernel.pc @ONLY)
+install(FILES ${PROJECT_BINARY_DIR}/libbitcoinkernel.pc DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+
include(GNUInstallDirs)
install(TARGETS bitcoinkernel
- RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
- LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
- ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ RUNTIME
+ DESTINATION ${CMAKE_INSTALL_BINDIR}
+ COMPONENT Kernel
+ LIBRARY
+ DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT Kernel
+ ARCHIVE
+ DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT Kernel
)
diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp
index 18c2026a88..0f128d4c56 100644
--- a/src/kernel/chainparams.cpp
+++ b/src/kernel/chainparams.cpp
@@ -73,7 +73,7 @@ static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesi
static CBlock CreateGenesisBlock(uint32_t nTime, uint32_t nNonce, uint32_t nBits, int32_t nVersion, const CAmount& genesisReward)
{
const char* pszTimestamp = "The Times 03/Jan/2009 Chancellor on brink of second bailout for banks";
- const CScript genesisOutputScript = CScript() << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"_hex_v_u8 << OP_CHECKSIG;
+ const CScript genesisOutputScript = CScript() << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"_hex << OP_CHECKSIG;
return CreateGenesisBlock(pszTimestamp, genesisOutputScript, nTime, nNonce, nBits, nVersion, genesisReward);
}
@@ -353,7 +353,7 @@ public:
m_assumed_chain_state_size = 0;
const char* testnet4_genesis_msg = "03/May/2024 000000000000000000001ebd58c244970b3aa9d783bb001011fbe8ea8e98e00e";
- const CScript testnet4_genesis_script = CScript() << "000000000000000000000000000000000000000000000000000000000000000000"_hex_v_u8 << OP_CHECKSIG;
+ const CScript testnet4_genesis_script = CScript() << "000000000000000000000000000000000000000000000000000000000000000000"_hex << OP_CHECKSIG;
genesis = CreateGenesisBlock(testnet4_genesis_msg,
testnet4_genesis_script,
1714777860,
diff --git a/src/logging.cpp b/src/logging.cpp
index 9a54a12b42..5f055566ef 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -1,10 +1,11 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2022 The Bitcoin Core developers
+// Copyright (c) 2009-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <logging.h>
#include <memusage.h>
+#include <util/check.h>
#include <util/fs.h>
#include <util/string.h>
#include <util/threadnames.h>
@@ -103,7 +104,6 @@ void BCLog::Logger::DisconnectTestLogger()
m_cur_buffer_memusage = 0;
m_buffer_lines_discarded = 0;
m_msgs_before_open.clear();
-
}
void BCLog::Logger::DisableLogging()
@@ -369,6 +369,8 @@ static size_t MemUsage(const BCLog::Logger::BufferedLog& buflog)
void BCLog::Logger::FormatLogStrInPlace(std::string& str, BCLog::LogFlags category, BCLog::Level level, std::string_view source_file, int source_line, std::string_view logging_function, std::string_view threadname, SystemClock::time_point now, std::chrono::seconds mocktime) const
{
+ if (!str.ends_with('\n')) str.push_back('\n');
+
str.insert(0, GetLogPrefix(category, level));
if (m_log_sourcelocations) {
@@ -392,21 +394,7 @@ void BCLog::Logger::LogPrintStr_(std::string_view str, std::string_view logging_
{
std::string str_prefixed = LogEscapeMessage(str);
- const bool starts_new_line = m_started_new_line;
- m_started_new_line = !str.empty() && str[str.size()-1] == '\n';
-
if (m_buffering) {
- if (!starts_new_line) {
- if (!m_msgs_before_open.empty()) {
- m_msgs_before_open.back().str += str_prefixed;
- m_cur_buffer_memusage += str_prefixed.size();
- return;
- } else {
- // unlikely edge case; add a marker that something was trimmed
- str_prefixed.insert(0, "[...] ");
- }
- }
-
{
BufferedLog buf{
.now=SystemClock::now(),
@@ -436,9 +424,7 @@ void BCLog::Logger::LogPrintStr_(std::string_view str, std::string_view logging_
return;
}
- if (starts_new_line) {
- FormatLogStrInPlace(str_prefixed, category, level, source_file, source_line, logging_function, util::ThreadGetInternalName(), SystemClock::now(), GetMockTime());
- }
+ FormatLogStrInPlace(str_prefixed, category, level, source_file, source_line, logging_function, util::ThreadGetInternalName(), SystemClock::now(), GetMockTime());
if (m_print_to_console) {
// print to console
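The logging.cpp change above makes FormatLogStrInPlace() newline-terminate every message, which is what allows the m_started_new_line bookkeeping to be dropped. A minimal sketch of that invariant, assuming only C++20 std::string::ends_with:

    // Sketch of the "every log line ends with '\n'" invariant introduced above (C++20).
    #include <cassert>
    #include <string>

    void EnsureTrailingNewline(std::string& str)
    {
        if (!str.ends_with('\n')) str.push_back('\n');
    }

    int main()
    {
        std::string a{"no newline"};
        std::string b{"has newline\n"};
        EnsureTrailingNewline(a);
        EnsureTrailingNewline(b);
        assert(a == "no newline\n");
        assert(b == "has newline\n"); // unchanged: at most one '\n' is appended
    }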
diff --git a/src/logging.h b/src/logging.h
index c522cdf348..fdc12c79b3 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2022 The Bitcoin Core developers
+// Copyright (c) 2009-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -37,40 +37,41 @@ struct LogCategory {
};
namespace BCLog {
- enum LogFlags : uint32_t {
- NONE = 0,
- NET = (1 << 0),
- TOR = (1 << 1),
- MEMPOOL = (1 << 2),
- HTTP = (1 << 3),
- BENCH = (1 << 4),
- ZMQ = (1 << 5),
- WALLETDB = (1 << 6),
- RPC = (1 << 7),
- ESTIMATEFEE = (1 << 8),
- ADDRMAN = (1 << 9),
- SELECTCOINS = (1 << 10),
- REINDEX = (1 << 11),
- CMPCTBLOCK = (1 << 12),
- RAND = (1 << 13),
- PRUNE = (1 << 14),
- PROXY = (1 << 15),
- MEMPOOLREJ = (1 << 16),
- LIBEVENT = (1 << 17),
- COINDB = (1 << 18),
- QT = (1 << 19),
- LEVELDB = (1 << 20),
- VALIDATION = (1 << 21),
- I2P = (1 << 22),
- IPC = (1 << 23),
+ using CategoryMask = uint64_t;
+ enum LogFlags : CategoryMask {
+ NONE = CategoryMask{0},
+ NET = (CategoryMask{1} << 0),
+ TOR = (CategoryMask{1} << 1),
+ MEMPOOL = (CategoryMask{1} << 2),
+ HTTP = (CategoryMask{1} << 3),
+ BENCH = (CategoryMask{1} << 4),
+ ZMQ = (CategoryMask{1} << 5),
+ WALLETDB = (CategoryMask{1} << 6),
+ RPC = (CategoryMask{1} << 7),
+ ESTIMATEFEE = (CategoryMask{1} << 8),
+ ADDRMAN = (CategoryMask{1} << 9),
+ SELECTCOINS = (CategoryMask{1} << 10),
+ REINDEX = (CategoryMask{1} << 11),
+ CMPCTBLOCK = (CategoryMask{1} << 12),
+ RAND = (CategoryMask{1} << 13),
+ PRUNE = (CategoryMask{1} << 14),
+ PROXY = (CategoryMask{1} << 15),
+ MEMPOOLREJ = (CategoryMask{1} << 16),
+ LIBEVENT = (CategoryMask{1} << 17),
+ COINDB = (CategoryMask{1} << 18),
+ QT = (CategoryMask{1} << 19),
+ LEVELDB = (CategoryMask{1} << 20),
+ VALIDATION = (CategoryMask{1} << 21),
+ I2P = (CategoryMask{1} << 22),
+ IPC = (CategoryMask{1} << 23),
#ifdef DEBUG_LOCKCONTENTION
- LOCK = (1 << 24),
+ LOCK = (CategoryMask{1} << 24),
#endif
- BLOCKSTORAGE = (1 << 25),
- TXRECONCILIATION = (1 << 26),
- SCAN = (1 << 27),
- TXPACKAGES = (1 << 28),
- ALL = ~(uint32_t)0,
+ BLOCKSTORAGE = (CategoryMask{1} << 25),
+ TXRECONCILIATION = (CategoryMask{1} << 26),
+ SCAN = (CategoryMask{1} << 27),
+ TXPACKAGES = (CategoryMask{1} << 28),
+ ALL = ~NONE,
};
enum class Level {
Trace = 0, // High-volume or detailed logging for development/debugging
@@ -104,13 +105,6 @@ namespace BCLog {
size_t m_cur_buffer_memusage GUARDED_BY(m_cs){0};
size_t m_buffer_lines_discarded GUARDED_BY(m_cs){0};
- /**
- * m_started_new_line is a state variable that will suppress printing of
- * the timestamp when multiple calls are made that don't end in a
- * newline.
- */
- std::atomic_bool m_started_new_line{true};
-
//! Category-specific log level. Overrides `m_log_level`.
std::unordered_map<LogFlags, Level> m_category_log_levels GUARDED_BY(m_cs);
@@ -119,7 +113,7 @@ namespace BCLog {
std::atomic<Level> m_log_level{DEFAULT_LOG_LEVEL};
/** Log categories bitfield. */
- std::atomic<uint32_t> m_categories{BCLog::NONE};
+ std::atomic<CategoryMask> m_categories{BCLog::NONE};
void FormatLogStrInPlace(std::string& str, LogFlags category, Level level, std::string_view source_file, int source_line, std::string_view logging_function, std::string_view threadname, SystemClock::time_point now, std::chrono::seconds mocktime) const;
@@ -204,7 +198,7 @@ namespace BCLog {
void SetLogLevel(Level level) { m_log_level = level; }
bool SetLogLevel(std::string_view level);
- uint32_t GetCategoryMask() const { return m_categories.load(); }
+ CategoryMask GetCategoryMask() const { return m_categories.load(); }
void EnableCategory(LogFlags flag);
bool EnableCategory(std::string_view str);
@@ -244,35 +238,32 @@ static inline bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level leve
/** Return true if str parses as a log category and set the flag */
bool GetLogCategory(BCLog::LogFlags& flag, std::string_view str);
-// Be conservative when using functions that
-// unconditionally log to debug.log! It should not be the case that an inbound
-// peer can fill up a user's disk with debug.log entries.
-
template <typename... Args>
-static inline void LogPrintf_(std::string_view logging_function, std::string_view source_file, const int source_line, const BCLog::LogFlags flag, const BCLog::Level level, const char* fmt, const Args&... args)
+inline void LogPrintFormatInternal(std::string_view logging_function, std::string_view source_file, const int source_line, const BCLog::LogFlags flag, const BCLog::Level level, util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
{
if (LogInstance().Enabled()) {
std::string log_msg;
try {
log_msg = tfm::format(fmt, args...);
} catch (tinyformat::format_error& fmterr) {
- /* Original format string will have newline so don't add one here */
- log_msg = "Error \"" + std::string(fmterr.what()) + "\" while formatting log message: " + fmt;
+ log_msg = "Error \"" + std::string{fmterr.what()} + "\" while formatting log message: " + fmt.fmt;
}
LogInstance().LogPrintStr(log_msg, logging_function, source_file, source_line, flag, level);
}
}
-#define LogPrintLevel_(category, level, ...) LogPrintf_(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__)
+#define LogPrintLevel_(category, level, ...) LogPrintFormatInternal(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__)
// Log unconditionally.
+// Be conservative when using functions that unconditionally log to debug.log!
+// It should not be the case that an inbound peer can fill up a user's storage
+// with debug.log entries.
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, __VA_ARGS__)
#define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, __VA_ARGS__)
#define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, __VA_ARGS__)
// Deprecated unconditional logging.
#define LogPrintf(...) LogInfo(__VA_ARGS__)
-#define LogPrintfCategory(category, ...) LogPrintLevel_(category, BCLog::Level::Info, __VA_ARGS__)
// Use a macro instead of a function for conditional logging to prevent
// evaluating arguments when logging for the category is not enabled.
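Widening the flag type to CategoryMask matters because a plain 1 << n is evaluated as a (typically 32-bit, signed) int, so a 32nd category would overflow; building each flag from CategoryMask{1} keeps the shift in 64-bit unsigned arithmetic, and ALL = ~NONE covers every bit regardless of how many categories exist. A standalone sketch of the same pattern with made-up flag names:

    // Sketch of a 64-bit flag enum following the CategoryMask pattern above.
    #include <cstdint>
    #include <iostream>

    using Mask = std::uint64_t;
    enum Flags : Mask {
        NONE  = Mask{0},
        FIRST = Mask{1} << 0,
        HIGH  = Mask{1} << 40, // would overflow with a plain `1 << 40`
        ALL   = ~NONE,         // all 64 bits, independent of how many flags are defined
    };

    int main()
    {
        Mask enabled = FIRST | HIGH;
        std::cout << std::boolalpha
                  << bool(enabled & HIGH) << ' '  // true
                  << bool(enabled & ALL) << '\n'; // true
    }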
diff --git a/src/mapport.cpp b/src/mapport.cpp
index 1920297be6..bdeda6da34 100644
--- a/src/mapport.cpp
+++ b/src/mapport.cpp
@@ -2,24 +2,22 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <mapport.h>
#include <clientversion.h>
+#include <common/netif.h>
+#include <common/pcp.h>
#include <common/system.h>
#include <logging.h>
#include <net.h>
#include <netaddress.h>
#include <netbase.h>
+#include <random.h>
#include <util/thread.h>
#include <util/threadinterrupt.h>
-#ifdef USE_NATPMP
-#include <compat/compat.h>
-#include <natpmp.h>
-#endif // USE_NATPMP
-
#ifdef USE_UPNP
#include <miniupnpc/miniupnpc.h>
#include <miniupnpc/upnpcommands.h>
@@ -36,7 +34,6 @@ static_assert(MINIUPNPC_API_VERSION >= 17, "miniUPnPc API version >= 17 assumed"
#include <string>
#include <thread>
-#if defined(USE_NATPMP) || defined(USE_UPNP)
static CThreadInterrupt g_mapport_interrupt;
static std::thread g_mapport_thread;
static std::atomic_uint g_mapport_enabled_protos{MapPortProtoFlag::NONE};
@@ -46,104 +43,96 @@ using namespace std::chrono_literals;
static constexpr auto PORT_MAPPING_REANNOUNCE_PERIOD{20min};
static constexpr auto PORT_MAPPING_RETRY_PERIOD{5min};
-#ifdef USE_NATPMP
-static uint16_t g_mapport_external_port = 0;
-static bool NatpmpInit(natpmp_t* natpmp)
+static bool ProcessPCP()
{
- const int r_init = initnatpmp(natpmp, /* detect gateway automatically */ 0, /* forced gateway - NOT APPLIED*/ 0);
- if (r_init == 0) return true;
- LogPrintf("natpmp: initnatpmp() failed with %d error.\n", r_init);
- return false;
-}
+ // The same nonce is used for all mappings; this is allowed by the spec and simplifies keeping track of them.
+ PCPMappingNonce pcp_nonce;
+ GetRandBytes(pcp_nonce);
-static bool NatpmpDiscover(natpmp_t* natpmp, struct in_addr& external_ipv4_addr)
-{
- const int r_send = sendpublicaddressrequest(natpmp);
- if (r_send == 2 /* OK */) {
- int r_read;
- natpmpresp_t response;
- do {
- r_read = readnatpmpresponseorretry(natpmp, &response);
- } while (r_read == NATPMP_TRYAGAIN);
-
- if (r_read == 0) {
- external_ipv4_addr = response.pnu.publicaddress.addr;
- return true;
- } else if (r_read == NATPMP_ERR_NOGATEWAYSUPPORT) {
- LogPrintf("natpmp: The gateway does not support NAT-PMP.\n");
+ bool ret = false;
+ bool no_resources = false;
+ const uint16_t private_port = GetListenPort();
+ // Multiply the reannounce period by two, as we'll try to renew approximately halfway through the mapping's lifetime.
+ const uint32_t requested_lifetime = std::chrono::seconds(PORT_MAPPING_REANNOUNCE_PERIOD * 2).count();
+ uint32_t actual_lifetime = 0;
+ std::chrono::milliseconds sleep_time;
+
+ // Local functor to handle result from PCP/NATPMP mapping.
+ auto handle_mapping = [&](std::variant<MappingResult, MappingError> &res) -> void {
+ if (MappingResult* mapping = std::get_if<MappingResult>(&res)) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Info, "portmap: Added mapping %s\n", mapping->ToString());
+ AddLocal(mapping->external, LOCAL_MAPPED);
+ ret = true;
+ actual_lifetime = std::min(actual_lifetime, mapping->lifetime);
+ } else if (MappingError *err = std::get_if<MappingError>(&res)) {
+ // Detailed error will already have been logged internally in respective Portmap function.
+ if (*err == MappingError::NO_RESOURCES) {
+ no_resources = true;
+ }
+ }
+ };
+
+ do {
+ actual_lifetime = requested_lifetime;
+ no_resources = false; // Set to true if there was any "no resources" error.
+ ret = false; // Set to true if any mapping succeeds.
+
+ // IPv4
+ std::optional<CNetAddr> gateway4 = QueryDefaultGateway(NET_IPV4);
+ if (!gateway4) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "portmap: Could not determine IPv4 default gateway\n");
} else {
- LogPrintf("natpmp: readnatpmpresponseorretry() for public address failed with %d error.\n", r_read);
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "portmap: gateway [IPv4]: %s\n", gateway4->ToStringAddr());
+
+ // Open a port mapping on whatever local address we have toward the gateway.
+ struct in_addr inaddr_any;
+ inaddr_any.s_addr = htonl(INADDR_ANY);
+ auto res = PCPRequestPortMap(pcp_nonce, *gateway4, CNetAddr(inaddr_any), private_port, requested_lifetime);
+ MappingError* pcp_err = std::get_if<MappingError>(&res);
+ if (pcp_err && *pcp_err == MappingError::UNSUPP_VERSION) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "portmap: Got unsupported PCP version response, falling back to NAT-PMP\n");
+ res = NATPMPRequestPortMap(*gateway4, private_port, requested_lifetime);
+ }
+ handle_mapping(res);
}
- } else {
- LogPrintf("natpmp: sendpublicaddressrequest() failed with %d error.\n", r_send);
- }
- return false;
-}
+ // IPv6
+ std::optional<CNetAddr> gateway6 = QueryDefaultGateway(NET_IPV6);
+ if (!gateway6) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "portmap: Could not determine IPv6 default gateway\n");
+ } else {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "portmap: gateway [IPv6]: %s\n", gateway6->ToStringAddr());
-static bool NatpmpMapping(natpmp_t* natpmp, const struct in_addr& external_ipv4_addr, uint16_t private_port, bool& external_ip_discovered)
-{
- const uint16_t suggested_external_port = g_mapport_external_port ? g_mapport_external_port : private_port;
- const int r_send = sendnewportmappingrequest(natpmp, NATPMP_PROTOCOL_TCP, private_port, suggested_external_port, 3600 /*seconds*/);
- if (r_send == 12 /* OK */) {
- int r_read;
- natpmpresp_t response;
- do {
- r_read = readnatpmpresponseorretry(natpmp, &response);
- } while (r_read == NATPMP_TRYAGAIN);
-
- if (r_read == 0) {
- auto pm = response.pnu.newportmapping;
- if (private_port == pm.privateport && pm.lifetime > 0) {
- g_mapport_external_port = pm.mappedpublicport;
- const CService external{external_ipv4_addr, pm.mappedpublicport};
- if (!external_ip_discovered && fDiscover) {
- AddLocal(external, LOCAL_MAPPED);
- external_ip_discovered = true;
- }
- LogPrintf("natpmp: Port mapping successful. External address = %s\n", external.ToStringAddrPort());
- return true;
- } else {
- LogPrintf("natpmp: Port mapping failed.\n");
+ // Try to open pinholes for all routable local IPv6 addresses.
+ for (const auto &addr: GetLocalAddresses()) {
+ if (!addr.IsRoutable() || !addr.IsIPv6()) continue;
+ auto res = PCPRequestPortMap(pcp_nonce, *gateway6, addr, private_port, requested_lifetime);
+ handle_mapping(res);
}
- } else if (r_read == NATPMP_ERR_NOGATEWAYSUPPORT) {
- LogPrintf("natpmp: The gateway does not support NAT-PMP.\n");
- } else {
- LogPrintf("natpmp: readnatpmpresponseorretry() for port mapping failed with %d error.\n", r_read);
}
- } else {
- LogPrintf("natpmp: sendnewportmappingrequest() failed with %d error.\n", r_send);
- }
-
- return false;
-}
-static bool ProcessNatpmp()
-{
- bool ret = false;
- natpmp_t natpmp;
- struct in_addr external_ipv4_addr;
- if (NatpmpInit(&natpmp) && NatpmpDiscover(&natpmp, external_ipv4_addr)) {
- bool external_ip_discovered = false;
- const uint16_t private_port = GetListenPort();
- do {
- ret = NatpmpMapping(&natpmp, external_ipv4_addr, private_port, external_ip_discovered);
- } while (ret && g_mapport_interrupt.sleep_for(PORT_MAPPING_REANNOUNCE_PERIOD));
- g_mapport_interrupt.reset();
+ // Log message if we got NO_RESOURCES.
+ if (no_resources) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "portmap: At least one mapping failed because of a NO_RESOURCES error. This usually indicates that the port is already used on the router. If this is the only instance of bitcoin running on the network, this will resolve itself automatically. Otherwise, you might want to choose a different P2P port to prevent this conflict.\n");
+ }
- const int r_send = sendnewportmappingrequest(&natpmp, NATPMP_PROTOCOL_TCP, private_port, g_mapport_external_port, /* remove a port mapping */ 0);
- g_mapport_external_port = 0;
- if (r_send == 12 /* OK */) {
- LogPrintf("natpmp: Port mapping removed successfully.\n");
- } else {
- LogPrintf("natpmp: sendnewportmappingrequest(0) failed with %d error.\n", r_send);
+ // Sanity-check returned lifetime.
+ if (actual_lifetime < 30) {
+ LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "portmap: Got impossibly short mapping lifetime of %d seconds\n", actual_lifetime);
+ return false;
}
- }
+ // RFC6887 11.2.1 recommends that clients send their first renewal packet at a time chosen with uniform random
+ // distribution in the range 1/2 to 5/8 of expiration time.
+ std::chrono::seconds sleep_time_min(actual_lifetime / 2);
+ std::chrono::seconds sleep_time_max(actual_lifetime * 5 / 8);
+ sleep_time = sleep_time_min + FastRandomContext().randrange<std::chrono::milliseconds>(sleep_time_max - sleep_time_min);
+ } while (ret && g_mapport_interrupt.sleep_for(sleep_time));
+
+ // We don't delete the mappings when the thread is interrupted, because doing so would add complexity;
+ // instead we just choose a fairly short expiry time.
- closenatpmp(&natpmp);
return ret;
}
-#endif // USE_NATPMP
#ifdef USE_UPNP
static bool ProcessUpnp()
@@ -223,23 +212,21 @@ static void ThreadMapPort()
do {
ok = false;
-#ifdef USE_UPNP
// High priority protocol.
- if (g_mapport_enabled_protos & MapPortProtoFlag::UPNP) {
- g_mapport_current_proto = MapPortProtoFlag::UPNP;
- ok = ProcessUpnp();
+ if (g_mapport_enabled_protos & MapPortProtoFlag::PCP) {
+ g_mapport_current_proto = MapPortProtoFlag::PCP;
+ ok = ProcessPCP();
if (ok) continue;
}
-#endif // USE_UPNP
-#ifdef USE_NATPMP
+#ifdef USE_UPNP
// Low priority protocol.
- if (g_mapport_enabled_protos & MapPortProtoFlag::NAT_PMP) {
- g_mapport_current_proto = MapPortProtoFlag::NAT_PMP;
- ok = ProcessNatpmp();
+ if (g_mapport_enabled_protos & MapPortProtoFlag::UPNP) {
+ g_mapport_current_proto = MapPortProtoFlag::UPNP;
+ ok = ProcessUpnp();
if (ok) continue;
}
-#endif // USE_NATPMP
+#endif // USE_UPNP
g_mapport_current_proto = MapPortProtoFlag::NONE;
if (g_mapport_enabled_protos == MapPortProtoFlag::NONE) {
@@ -281,7 +268,7 @@ static void DispatchMapPort()
assert(g_mapport_thread.joinable());
assert(!g_mapport_interrupt);
- // Interrupt a protocol-specific loop in the ThreadUpnp() or in the ThreadNatpmp()
+ // Interrupt a protocol-specific loop in the ThreadUpnp() or in the ThreadPCP()
// to force trying the next protocol in the ThreadMapPort() loop.
g_mapport_interrupt();
}
@@ -295,10 +282,10 @@ static void MapPortProtoSetEnabled(MapPortProtoFlag proto, bool enabled)
}
}
-void StartMapPort(bool use_upnp, bool use_natpmp)
+void StartMapPort(bool use_upnp, bool use_pcp)
{
MapPortProtoSetEnabled(MapPortProtoFlag::UPNP, use_upnp);
- MapPortProtoSetEnabled(MapPortProtoFlag::NAT_PMP, use_natpmp);
+ MapPortProtoSetEnabled(MapPortProtoFlag::PCP, use_pcp);
DispatchMapPort();
}
@@ -317,18 +304,3 @@ void StopMapPort()
g_mapport_interrupt.reset();
}
}
-
-#else // #if defined(USE_NATPMP) || defined(USE_UPNP)
-void StartMapPort(bool use_upnp, bool use_natpmp)
-{
- // Intentionally left blank.
-}
-void InterruptMapPort()
-{
- // Intentionally left blank.
-}
-void StopMapPort()
-{
- // Intentionally left blank.
-}
-#endif // #if defined(USE_NATPMP) || defined(USE_UPNP)
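The renewal scheduling in ProcessPCP() follows RFC 6887 section 11.2.1: sleep for a uniformly random time between 1/2 and 5/8 of the granted lifetime before renewing. A standalone sketch of that computation using <random> and <chrono> (the diff itself uses FastRandomContext instead):

    // Standalone sketch of the RFC 6887 renewal delay used above.
    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <random>

    std::chrono::milliseconds RenewalDelay(uint32_t lifetime_s)
    {
        using namespace std::chrono;
        const milliseconds lo{seconds{lifetime_s / 2}};     // 1/2 of the lifetime
        const milliseconds hi{seconds{lifetime_s * 5 / 8}}; // 5/8 of the lifetime
        static std::mt19937_64 rng{std::random_device{}()};
        std::uniform_int_distribution<int64_t> dist(lo.count(), hi.count());
        return milliseconds{dist(rng)};
    }

    int main()
    {
        // For a 2400 s mapping (2 * 20 min), renew somewhere between 1200 s and 1500 s.
        std::cout << RenewalDelay(2400).count() / 1000 << " s\n";
    }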
diff --git a/src/mapport.h b/src/mapport.h
index 6f55c46f6c..51202687f2 100644
--- a/src/mapport.h
+++ b/src/mapport.h
@@ -12,10 +12,10 @@ static constexpr bool DEFAULT_NATPMP = false;
enum MapPortProtoFlag : unsigned int {
NONE = 0x00,
UPNP = 0x01,
- NAT_PMP = 0x02,
+ PCP = 0x02, // PCP with NAT-PMP fallback.
};
-void StartMapPort(bool use_upnp, bool use_natpmp);
+void StartMapPort(bool use_upnp, bool use_pcp);
void InterruptMapPort();
void StopMapPort();
diff --git a/src/net.cpp b/src/net.cpp
index d4a7373725..477240cdf2 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <net.h>
@@ -12,6 +12,7 @@
#include <banman.h>
#include <clientversion.h>
#include <common/args.h>
+#include <common/netif.h>
#include <compat/compat.h>
#include <consensus/consensus.h>
#include <crypto/sha256.h>
@@ -64,6 +65,9 @@ static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15};
/** Number of DNS seeds to query when the number of connections is low. */
static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3;
+/** Number of outbound connections below which we will keep fetching from our address seeds. */
+static constexpr int SEED_OUTBOUND_CONNECTION_THRESHOLD = 2;
+
/** How long to delay before querying DNS seeds
*
* If we have more than THRESHOLD entries in addrman, then it's likely
@@ -1788,7 +1792,8 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
// The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is
// detected, so use it whenever we signal NODE_P2P_V2.
- const bool use_v2transport(nLocalServices & NODE_P2P_V2);
+ ServiceFlags local_services = GetLocalServices();
+ const bool use_v2transport(local_services & NODE_P2P_V2);
CNode* pnode = new CNode(id,
std::move(sock),
@@ -1806,7 +1811,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
.use_v2transport = use_v2transport,
});
pnode->AddRef();
- m_msgproc->InitializeNode(*pnode, nLocalServices);
+ m_msgproc->InitializeNode(*pnode, local_services);
{
LOCK(m_nodes_mutex);
m_nodes.push_back(pnode);
@@ -2179,7 +2184,6 @@ void CConnman::WakeMessageHandler()
void CConnman::ThreadDNSAddressSeed()
{
- constexpr int TARGET_OUTBOUND_CONNECTIONS = 2;
int outbound_connection_count = 0;
if (gArgs.IsArgSet("-seednode")) {
@@ -2198,7 +2202,7 @@ void CConnman::ThreadDNSAddressSeed()
}
outbound_connection_count = GetFullOutboundConnCount();
- if (outbound_connection_count >= TARGET_OUTBOUND_CONNECTIONS) {
+ if (outbound_connection_count >= SEED_OUTBOUND_CONNECTION_THRESHOLD) {
LogPrintf("P2P peers available. Finished fetching data from seed nodes.\n");
break;
}
@@ -2221,7 +2225,7 @@ void CConnman::ThreadDNSAddressSeed()
}
// Proceed with dnsseeds if seednodes hasn't reached the target or if forcednsseed is set
- if (outbound_connection_count < TARGET_OUTBOUND_CONNECTIONS || seeds_right_now) {
+ if (outbound_connection_count < SEED_OUTBOUND_CONNECTION_THRESHOLD || seeds_right_now) {
// goal: only query DNS seed if address need is acute
// * If we have a reasonable number of peers in addrman, spend
// some time trying them first. This improves user privacy by
@@ -2252,7 +2256,7 @@ void CConnman::ThreadDNSAddressSeed()
if (!interruptNet.sleep_for(w)) return;
to_wait -= w;
- if (GetFullOutboundConnCount() >= TARGET_OUTBOUND_CONNECTIONS) {
+ if (GetFullOutboundConnCount() >= SEED_OUTBOUND_CONNECTION_THRESHOLD) {
if (found > 0) {
LogPrintf("%d addresses found from DNS seeds\n", found);
LogPrintf("P2P peers available. Finished DNS seeding.\n");
@@ -2447,7 +2451,7 @@ bool CConnman::MaybePickPreferredNetwork(std::optional<Network>& network)
return false;
}
-void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
+void CConnman::ThreadOpenConnections(const std::vector<std::string> connect, Span<const std::string> seed_nodes)
{
AssertLockNotHeld(m_unused_i2p_sessions_mutex);
AssertLockNotHeld(m_reconnections_mutex);
@@ -2487,12 +2491,28 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
const bool use_seednodes{gArgs.IsArgSet("-seednode")};
+ auto seed_node_timer = NodeClock::now();
+ bool add_addr_fetch{addrman.Size() == 0 && !seed_nodes.empty()};
+ constexpr std::chrono::seconds ADD_NEXT_SEEDNODE = 10s;
+
if (!add_fixed_seeds) {
LogPrintf("Fixed seeds are disabled\n");
}
while (!interruptNet)
{
+ if (add_addr_fetch) {
+ add_addr_fetch = false;
+ const auto& seed{SpanPopBack(seed_nodes)};
+ AddAddrFetch(seed);
+
+ if (addrman.Size() == 0) {
+ LogInfo("Empty addrman, adding seednode (%s) to addrfetch\n", seed);
+ } else {
+ LogInfo("Couldn't connect to peers from addrman after %d seconds. Adding seednode (%s) to addrfetch\n", ADD_NEXT_SEEDNODE.count(), seed);
+ }
+ }
+
ProcessAddrFetch();
if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
@@ -2593,6 +2613,13 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
}
}
+ if (!seed_nodes.empty() && nOutboundFullRelay < SEED_OUTBOUND_CONNECTION_THRESHOLD) {
+ if (NodeClock::now() > seed_node_timer + ADD_NEXT_SEEDNODE) {
+ seed_node_timer = NodeClock::now();
+ add_addr_fetch = true;
+ }
+ }
+
ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
auto now = GetTime<std::chrono::microseconds>();
bool anchor = false;
@@ -2667,6 +2694,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
const auto current_time{NodeClock::now()};
int nTries = 0;
+ const auto reachable_nets{g_reachable_nets.All()};
+
while (!interruptNet)
{
if (anchor && !m_anchors.empty()) {
@@ -2698,7 +2727,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
if (!addr.IsValid()) {
// No tried table collisions. Select a new table address
// for our feeler.
- std::tie(addr, addr_last_try) = addrman.Select(true);
+ std::tie(addr, addr_last_try) = addrman.Select(true, reachable_nets);
} else if (AlreadyConnectedToAddress(addr)) {
// If test-before-evict logic would have us connect to a
// peer that we're already connected to, just mark that
@@ -2707,14 +2736,16 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// a currently-connected peer.
addrman.Good(addr);
// Select a new table address for our feeler instead.
- std::tie(addr, addr_last_try) = addrman.Select(true);
+ std::tie(addr, addr_last_try) = addrman.Select(true, reachable_nets);
}
} else {
// Not a feeler
// If preferred_net has a value set, pick an extra outbound
// peer from that network. The eviction logic in net_processing
// ensures that a peer from another network will be evicted.
- std::tie(addr, addr_last_try) = addrman.Select(false, preferred_net);
+ std::tie(addr, addr_last_try) = preferred_net.has_value()
+ ? addrman.Select(false, {*preferred_net})
+ : addrman.Select(false, reachable_nets);
}
// Require outbound IPv4/IPv6 connections, other than feelers, to be to distinct network groups
@@ -2919,7 +2950,7 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai
return;
pnode->grantOutbound = std::move(grant_outbound);
- m_msgproc->InitializeNode(*pnode, nLocalServices);
+ m_msgproc->InitializeNode(*pnode, m_local_services);
{
LOCK(m_nodes_mutex);
m_nodes.push_back(pnode);
@@ -3088,46 +3119,10 @@ void Discover()
if (!fDiscover)
return;
-#ifdef WIN32
- // Get local host IP
- char pszHostName[256] = "";
- if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR)
- {
- const std::vector<CNetAddr> addresses{LookupHost(pszHostName, 0, true)};
- for (const CNetAddr& addr : addresses)
- {
- if (AddLocal(addr, LOCAL_IF))
- LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToStringAddr());
- }
- }
-#elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS)
- // Get local host ip
- struct ifaddrs* myaddrs;
- if (getifaddrs(&myaddrs) == 0)
- {
- for (struct ifaddrs* ifa = myaddrs; ifa != nullptr; ifa = ifa->ifa_next)
- {
- if (ifa->ifa_addr == nullptr) continue;
- if ((ifa->ifa_flags & IFF_UP) == 0) continue;
- if ((ifa->ifa_flags & IFF_LOOPBACK) != 0) continue;
- if (ifa->ifa_addr->sa_family == AF_INET)
- {
- struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
- CNetAddr addr(s4->sin_addr);
- if (AddLocal(addr, LOCAL_IF))
- LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToStringAddr());
- }
- else if (ifa->ifa_addr->sa_family == AF_INET6)
- {
- struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
- CNetAddr addr(s6->sin6_addr);
- if (AddLocal(addr, LOCAL_IF))
- LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToStringAddr());
- }
- }
- freeifaddrs(myaddrs);
+ for (const CNetAddr &addr: GetLocalAddresses()) {
+ if (AddLocal(addr, LOCAL_IF))
+ LogPrintf("%s: %s\n", __func__, addr.ToStringAddr());
}
-#endif
}
void CConnman::SetNetworkActive(bool active)
@@ -3249,8 +3244,10 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
i2p_sam, &interruptNet);
}
- for (const auto& strDest : connOptions.vSeedNodes) {
- AddAddrFetch(strDest);
+ // Randomize the order in which we query the seed nodes, to avoid connecting to the same one on every restart (and thereby signalling that we have restarted).
+ std::vector<std::string> seed_nodes = connOptions.vSeedNodes;
+ if (!seed_nodes.empty()) {
+ std::shuffle(seed_nodes.begin(), seed_nodes.end(), FastRandomContext{});
}
if (m_use_addrman_outgoing) {
@@ -3311,7 +3308,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
if (connOptions.m_use_addrman_outgoing || !connOptions.m_specified_outgoing.empty()) {
threadOpenConnections = std::thread(
&util::TraceThread, "opencon",
- [this, connect = connOptions.m_specified_outgoing] { ThreadOpenConnections(connect); });
+ [this, connect = connOptions.m_specified_outgoing, seed_nodes = std::move(seed_nodes)] { ThreadOpenConnections(connect, seed_nodes); });
}
// Process messages
@@ -3714,7 +3711,7 @@ uint64_t CConnman::GetTotalBytesSent() const
ServiceFlags CConnman::GetLocalServices() const
{
- return nLocalServices;
+ return m_local_services;
}
static std::unique_ptr<Transport> MakeTransport(NodeId id, bool use_v2transport, bool inbound) noexcept
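The -seednode handling above changes from queuing every seed node immediately to shuffling them and feeding one into addrfetch every 10 seconds while fewer than SEED_OUTBOUND_CONNECTION_THRESHOLD full outbound connections exist. A condensed standalone sketch of that loop (illustrative names, a simulated peer count, and a shorter interval so it runs quickly):

    // Standalone sketch of the staged seed-node fetching logic above.
    #include <algorithm>
    #include <chrono>
    #include <iostream>
    #include <random>
    #include <string>
    #include <thread>
    #include <vector>

    int main()
    {
        std::vector<std::string> seeds{"seed1.example", "seed2.example", "seed3.example"};
        // Randomized order, so we don't hit the same seed node first on every restart.
        std::shuffle(seeds.begin(), seeds.end(), std::mt19937{std::random_device{}()});

        constexpr int threshold = 2;                        // SEED_OUTBOUND_CONNECTION_THRESHOLD
        constexpr auto next_seed = std::chrono::seconds{1}; // 10s in the diff (ADD_NEXT_SEEDNODE)
        auto timer = std::chrono::steady_clock::now();
        int outbound = 0;                                   // stand-in for the outbound peer count

        while (!seeds.empty() && outbound < threshold) {
            if (std::chrono::steady_clock::now() > timer + next_seed) {
                timer = std::chrono::steady_clock::now();
                std::cout << "addrfetch <- " << seeds.back() << '\n';
                seeds.pop_back();                           // mirrors SpanPopBack() in the diff
                ++outbound;                                 // pretend the fetch produced a peer
            }
            std::this_thread::sleep_for(std::chrono::milliseconds{50});
        }
    }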
diff --git a/src/net.h b/src/net.h
index beec58c389..6e8b91b5f9 100644
--- a/src/net.h
+++ b/src/net.h
@@ -148,7 +148,7 @@ enum
LOCAL_NONE, // unknown
LOCAL_IF, // address a local interface listens on
LOCAL_BIND, // address explicit bound to
- LOCAL_MAPPED, // address reported by UPnP or NAT-PMP
+ LOCAL_MAPPED, // address reported by UPnP or PCP
LOCAL_MANUAL, // address explicitly specified (-externalip=)
LOCAL_MAX
@@ -1035,7 +1035,7 @@ public:
struct Options
{
- ServiceFlags nLocalServices = NODE_NONE;
+ ServiceFlags m_local_services = NODE_NONE;
int m_max_automatic_connections = 0;
CClientUIInterface* uiInterface = nullptr;
NetEventsInterface* m_msgproc = nullptr;
@@ -1065,7 +1065,7 @@ public:
{
AssertLockNotHeld(m_total_bytes_sent_mutex);
- nLocalServices = connOptions.nLocalServices;
+ m_local_services = connOptions.m_local_services;
m_max_automatic_connections = connOptions.m_max_automatic_connections;
m_max_outbound_full_relay = std::min(MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, m_max_automatic_connections);
m_max_outbound_block_relay = std::min(MAX_BLOCK_RELAY_ONLY_CONNECTIONS, m_max_automatic_connections - m_max_outbound_full_relay);
@@ -1221,6 +1221,11 @@ public:
//! that peer during `net_processing.cpp:PushNodeVersion()`.
ServiceFlags GetLocalServices() const;
+ //! Updates the local services that this node advertises to other peers
+ //! during the connection handshake.
+ void AddLocalServices(ServiceFlags services) { m_local_services = ServiceFlags(m_local_services | services); }
+ void RemoveLocalServices(ServiceFlags services) { m_local_services = ServiceFlags(m_local_services & ~services); }
+
uint64_t GetMaxOutboundTarget() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
std::chrono::seconds GetMaxOutboundTimeframe() const;
@@ -1273,7 +1278,7 @@ private:
void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex);
void AddAddrFetch(const std::string& strDest) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex);
void ProcessAddrFetch() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_unused_i2p_sessions_mutex);
- void ThreadOpenConnections(std::vector<std::string> connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex);
+ void ThreadOpenConnections(std::vector<std::string> connect, Span<const std::string> seed_nodes) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex);
void ThreadMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
void ThreadI2PAcceptIncoming();
void AcceptConnection(const ListenSocket& hListenSocket);
@@ -1460,11 +1465,12 @@ private:
* This data is replicated in each Peer instance we create.
*
* This data is not marked const, but after being set it should not
- * change.
+ * change, unless AssumeUTXO is started, in which case the advertised
+ * services will be limited until the background chain sync finishes.
*
* \sa Peer::our_services
*/
- ServiceFlags nLocalServices;
+ std::atomic<ServiceFlags> m_local_services;
std::unique_ptr<CSemaphore> semOutbound;
std::unique_ptr<CSemaphore> semAddnode;
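m_local_services becomes a std::atomic so that AddLocalServices()/RemoveLocalServices() can change the advertised bits while the node is running (for example during an AssumeUTXO background sync, as the comment above notes). A standalone sketch of the add/remove-bits pattern on an atomic mask, with made-up service flag values:

    // Sketch of atomically adding and removing advertised service bits,
    // mirroring AddLocalServices()/RemoveLocalServices() above (illustrative flags).
    #include <atomic>
    #include <cstdint>
    #include <iostream>

    enum ServiceBits : uint64_t {
        SVC_NONE    = 0,
        SVC_NETWORK = 1 << 0,
        SVC_EXTRA   = 1 << 3,
    };

    std::atomic<uint64_t> g_local_services{SVC_NONE};

    void AddServices(uint64_t bits)    { g_local_services |= bits; }
    void RemoveServices(uint64_t bits) { g_local_services &= ~bits; }

    int main()
    {
        AddServices(SVC_NETWORK | SVC_EXTRA);
        RemoveServices(SVC_EXTRA); // e.g. drop a bit while a background sync is running
        std::cout << g_local_services.load() << '\n'; // prints 1 (only SVC_NETWORK left)
    }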
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index fe17910741..be16884011 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -113,9 +113,6 @@ static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Maximum timeout for stalling block download. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
-/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
- * less than this number, we reached its tip. Changing this value is a protocol upgrade. */
-static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
* when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
@@ -224,6 +221,9 @@ struct Peer {
/** Services this peer offered to us. */
std::atomic<ServiceFlags> m_their_services{NODE_NONE};
+ //! Whether this peer is an inbound connection
+ const bool m_is_inbound;
+
/** Protects misbehavior data members */
Mutex m_misbehavior_mutex;
/** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */
@@ -394,9 +394,10 @@ struct Peer {
* timestamp the peer sent in the version message. */
std::atomic<std::chrono::seconds> m_time_offset{0s};
- explicit Peer(NodeId id, ServiceFlags our_services)
+ explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound)
: m_id{id}
, m_our_services{our_services}
+ , m_is_inbound{is_inbound}
{}
private:
@@ -476,11 +477,6 @@ struct CNodeState {
//! Time of last new block announcement
int64_t m_last_block_announcement{0};
-
- //! Whether this peer is an inbound connection
- const bool m_is_inbound;
-
- CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
class PeerManagerImpl final : public PeerManager
@@ -519,6 +515,7 @@ public:
std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+ std::vector<TxOrphanage::OrphanTxBase> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
@@ -1015,7 +1012,7 @@ private:
bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Have we requested this block from an outbound peer */
- bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
/** Remove this block from our tracked requested blocks. Called if:
* - the block has been received from a peer
@@ -1099,7 +1096,7 @@ private:
* lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
* removing the first element if necessary.
*/
- void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
/** Stack of nodes which we have set to announce using compact blocks */
std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
@@ -1302,8 +1299,8 @@ bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
{
for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
auto [nodeid, block_it] = range.first->second;
- CNodeState& nodestate = *Assert(State(nodeid));
- if (!nodestate.m_is_inbound) return true;
+ PeerRef peer{GetPeerRef(nodeid)};
+ if (peer && !peer->m_is_inbound) return true;
}
return false;
@@ -1392,6 +1389,7 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
if (m_opts.ignore_incoming_txs) return;
CNodeState* nodestate = State(nodeid);
+ PeerRef peer{GetPeerRef(nodeid)};
if (!nodestate || !nodestate->m_provides_cmpctblocks) {
// Don't request compact blocks if the peer has not signalled support
return;
@@ -1404,15 +1402,15 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
return;
}
- CNodeState *state = State(*it);
- if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers;
+ PeerRef peer_ref{GetPeerRef(*it)};
+ if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers;
}
- if (nodestate->m_is_inbound) {
+ if (peer && peer->m_is_inbound) {
// If we're adding an inbound HB peer, make sure we're not removing
// our last outbound HB peer in the process.
if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
- CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front());
- if (remove_node != nullptr && !remove_node->m_is_inbound) {
+ PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())};
+ if (remove_peer && !remove_peer->m_is_inbound) {
// Put the HB outbound peer in the second slot, so that it
// doesn't get removed.
std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
@@ -1720,7 +1718,7 @@ void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_service
NodeId nodeid = node.GetId();
{
LOCK(cs_main); // For m_node_states
- m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn()));
+ m_node_states.try_emplace(m_node_states.end(), nodeid);
}
{
LOCK(m_tx_download_mutex);
@@ -1731,7 +1729,7 @@ void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_service
our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
}
- PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
+ PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn());
{
LOCK(m_peer_mutex);
m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
@@ -1920,6 +1918,12 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
return true;
}
+std::vector<TxOrphanage::OrphanTxBase> PeerManagerImpl::GetOrphanTransactions()
+{
+ LOCK(m_tx_download_mutex);
+ return m_orphanage.GetOrphanTransactions();
+}
+
PeerManagerInfo PeerManagerImpl::GetInfo() const
{
return PeerManagerInfo{
@@ -1968,15 +1972,9 @@ void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
break;
case BlockValidationResult::BLOCK_CACHED_INVALID:
{
- LOCK(cs_main);
- CNodeState *node_state = State(nodeid);
- if (node_state == nullptr) {
- break;
- }
-
// Discourage outbound (but not inbound) peers if on an invalid chain.
// Exempt HB compact block peers. Manual connections are always protected from discouragement.
- if (!via_compact_block && !node_state->m_is_inbound) {
+ if (peer && !via_compact_block && !peer->m_is_inbound) {
if (peer) Misbehaving(*peer, message);
return;
}
@@ -2786,7 +2784,7 @@ bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>&
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
if (peer.m_headers_sync) {
- auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
+ auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result);
// If it is a valid continuation, we should treat the existing getheaders request as responded to.
if (result.success) peer.m_last_getheaders_timestamp = {};
if (result.request_more) {
@@ -2880,7 +2878,7 @@ bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlo
// Only try to sync with this peer if their headers message was full;
// otherwise they don't have more headers after this so no point in
// trying to sync their too-little-work chain.
- if (headers.size() == MAX_HEADERS_RESULTS) {
+ if (headers.size() == m_opts.max_headers_result) {
// Note: we could advance to the last header in this set that is
// known to us, rather than starting at the first header (which we
// may already have); however this is unlikely to matter much since
@@ -3192,7 +3190,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
assert(pindexLast);
// Consider fetching more headers if we are not using our headers-sync mechanism.
- if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
+ if (nCount == m_opts.max_headers_result && !have_headers_sync) {
// Headers message had its maximum size; the peer may have more headers.
if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
@@ -3200,7 +3198,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
}
}
- UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+ UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result);
// Consider immediately downloading blocks.
HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
@@ -4518,7 +4516,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
std::vector<CBlock> vHeaders;
- int nLimit = MAX_HEADERS_RESULTS;
+ int nLimit = m_opts.max_headers_result;
LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
{
@@ -4753,7 +4751,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
}
return;
- } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({cmpctblock.header}) < GetAntiDoSWorkThreshold()) {
+ } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({{cmpctblock.header}}) < GetAntiDoSWorkThreshold()) {
// If we get a low-work header in a compact block, we can ignore it.
LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
return;
@@ -4766,7 +4764,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
const CBlockIndex *pindex = nullptr;
BlockValidationState state;
- if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, /*min_pow_checked=*/true, state, &pindex)) {
+ if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) {
if (state.IsInvalid()) {
MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
return;
@@ -5002,7 +5000,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
- if (nCount > MAX_HEADERS_RESULTS) {
+ if (nCount > m_opts.max_headers_result) {
Misbehaving(*peer, strprintf("headers message size = %u", nCount));
return;
}
@@ -5070,7 +5068,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
// Check claimed work on this block against our anti-dos thresholds.
- if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({pblock->GetBlockHeader()}) >= GetAntiDoSWorkThreshold()) {
+ if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({{pblock->GetBlockHeader()}}) >= GetAntiDoSWorkThreshold()) {
min_pow_checked = true;
}
}
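The call sites above replace the MAX_HEADERS_RESULTS constant with m_opts.max_headers_result, but the underlying pattern is unchanged: a batch of exactly the maximum size means the peer may have more headers, while a shorter batch means we have reached its tip. A standalone sketch of that pagination logic over a toy data source:

    // Standalone sketch of the "full batch implies more to fetch" pattern above.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    std::vector<int> FetchBatch(size_t start, size_t max_results, size_t tip)
    {
        std::vector<int> out;
        for (size_t i = start; i < tip && out.size() < max_results; ++i) out.push_back(int(i));
        return out;
    }

    int main()
    {
        const size_t max_results = 2000; // configurable, like max_headers_result
        const size_t tip = 5300;
        size_t have = 0;
        for (;;) {
            const auto batch = FetchBatch(have, max_results, tip);
            have += batch.size();
            // A short batch means there is nothing more after this one.
            if (batch.size() < max_results) break;
        }
        std::cout << "synced " << have << " headers\n"; // prints 5300
    }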
diff --git a/src/net_processing.h b/src/net_processing.h
index a413db98e8..0d2dc59c5a 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -7,6 +7,7 @@
#define BITCOIN_NET_PROCESSING_H
#include <net.h>
+#include <txorphanage.h>
#include <validationinterface.h>
#include <chrono>
@@ -31,6 +32,9 @@ static const bool DEFAULT_PEERBLOOMFILTERS = false;
static const bool DEFAULT_PEERBLOCKFILTERS = false;
/** Maximum number of outstanding CMPCTBLOCK requests for the same block. */
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK = 3;
+/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
+ * less than this number, we reached its tip. Changing this value is a protocol upgrade. */
+static const unsigned int MAX_HEADERS_RESULTS = 2000;
struct CNodeStateStats {
int nSyncHeight = -1;
@@ -71,6 +75,9 @@ public:
//! Whether or not the internal RNG behaves deterministically (this is
//! a test-only option).
bool deterministic_rng{false};
+ //! Number of headers sent in one getheaders message result (this is
+ //! a test-only option).
+ uint32_t max_headers_result{MAX_HEADERS_RESULTS};
};
static std::unique_ptr<PeerManager> make(CConnman& connman, AddrMan& addrman,
@@ -93,6 +100,8 @@ public:
/** Get statistics from node state */
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const = 0;
+ virtual std::vector<TxOrphanage::OrphanTxBase> GetOrphanTransactions() = 0;
+
/** Get peer manager info. */
virtual PeerManagerInfo GetInfo() const = 0;
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 1a96443d4a..eaca5a16c1 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <netbase.h>
@@ -230,7 +230,7 @@ CService LookupNumeric(const std::string& name, uint16_t portDefault, DNSLookupF
bool IsUnixSocketPath(const std::string& name)
{
#ifdef HAVE_SOCKADDR_UN
- if (name.find(ADDR_PREFIX_UNIX) != 0) return false;
+ if (!name.starts_with(ADDR_PREFIX_UNIX)) return false;
// Split off "unix:" prefix
std::string str{name.substr(ADDR_PREFIX_UNIX.length())};
@@ -557,7 +557,8 @@ std::unique_ptr<Sock> CreateSockOS(int domain, int type, int protocol)
std::function<std::unique_ptr<Sock>(int, int, int)> CreateSock = CreateSockOS;
template<typename... Args>
-static void LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args) {
+static void LogConnectFailure(bool manual_connection, util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
+{
std::string error_message = tfm::format(fmt, args...);
if (manual_connection) {
LogPrintf("%s\n", error_message);
diff --git a/src/netbase.h b/src/netbase.h
index 8ef6c28996..bf4d7ececc 100644
--- a/src/netbase.h
+++ b/src/netbase.h
@@ -134,6 +134,13 @@ public:
return Contains(addr.GetNetwork());
}
+ [[nodiscard]] std::unordered_set<Network> All() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+ {
+ AssertLockNotHeld(m_mutex);
+ LOCK(m_mutex);
+ return m_reachable;
+ }
+
private:
mutable Mutex m_mutex;
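The new All() accessor returns a copy of the reachable-network set taken while holding the mutex, so callers can iterate the result without the lock. A standalone sketch of that return-a-copy-under-lock pattern, using int in place of Network:

    // Sketch of the "copy guarded state out under the lock" pattern behind All() above.
    #include <iostream>
    #include <mutex>
    #include <unordered_set>

    class ReachableSet
    {
        mutable std::mutex m_mutex;
        std::unordered_set<int> m_items; // stands in for the set of reachable networks
    public:
        void Add(int net) { std::lock_guard lock{m_mutex}; m_items.insert(net); }
        // Copy out under the lock; callers then iterate without holding m_mutex.
        std::unordered_set<int> All() const { std::lock_guard lock{m_mutex}; return m_items; }
    };

    int main()
    {
        ReachableSet nets;
        nets.Add(1);
        nets.Add(2);
        for (int n : nets.All()) std::cout << n << ' ';
        std::cout << '\n';
    }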
diff --git a/src/node/abort.cpp b/src/node/abort.cpp
index 8a17c41fd2..c15bf047c8 100644
--- a/src/node/abort.cpp
+++ b/src/node/abort.cpp
@@ -15,12 +15,12 @@
namespace node {
-void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message, node::Warnings* warnings)
+void AbortNode(const std::function<bool()>& shutdown_request, std::atomic<int>& exit_status, const bilingual_str& message, node::Warnings* warnings)
{
if (warnings) warnings->Set(Warning::FATAL_INTERNAL_ERROR, message);
InitError(_("A fatal internal error occurred, see debug.log for details: ") + message);
exit_status.store(EXIT_FAILURE);
- if (shutdown && !(*shutdown)()) {
+ if (shutdown_request && !shutdown_request()) {
LogError("Failed to send shutdown signal\n");
};
}
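AbortNode() now takes a std::function<bool()> shutdown-request callback rather than a pointer to util::SignalInterrupt, decoupling it from that concrete type. A standalone sketch of the same shape, with an illustrative function name:

    // Sketch of passing a shutdown-request callback instead of a concrete interrupt
    // object, mirroring the AbortNode() signature change above (hypothetical name).
    #include <functional>
    #include <iostream>
    #include <string>

    void AbortLike(const std::function<bool()>& shutdown_request, const std::string& message)
    {
        std::cerr << "fatal: " << message << '\n';
        if (shutdown_request && !shutdown_request()) {
            std::cerr << "Failed to send shutdown signal\n";
        }
    }

    int main()
    {
        bool requested = false;
        AbortLike([&] { requested = true; return true; }, "something broke");
        std::cout << std::boolalpha << requested << '\n'; // true
    }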
diff --git a/src/node/abort.h b/src/node/abort.h
index c881af4634..c8514628bc 100644
--- a/src/node/abort.h
+++ b/src/node/abort.h
@@ -6,16 +6,13 @@
#define BITCOIN_NODE_ABORT_H
#include <atomic>
+#include <functional>
struct bilingual_str;
-namespace util {
-class SignalInterrupt;
-} // namespace util
-
namespace node {
class Warnings;
-void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message, node::Warnings* warnings);
+void AbortNode(const std::function<bool()>& shutdown_request, std::atomic<int>& exit_status, const bilingual_str& message, node::Warnings* warnings);
} // namespace node
#endif // BITCOIN_NODE_ABORT_H
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 702b2c9ade..07878a5602 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -683,11 +683,7 @@ bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos
fileout << GetParams().MessageStart() << nSize;
// Write undo data
- long fileOutPos = ftell(fileout.Get());
- if (fileOutPos < 0) {
- LogError("%s: ftell failed\n", __func__);
- return false;
- }
+ long fileOutPos = fileout.tell();
pos.nPos = (unsigned int)fileOutPos;
fileout << blockundo;
@@ -981,11 +977,7 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
fileout << GetParams().MessageStart() << nSize;
// Write block
- long fileOutPos = ftell(fileout.Get());
- if (fileOutPos < 0) {
- LogError("%s: ftell failed\n", __func__);
- return false;
- }
+ long fileOutPos = fileout.tell();
pos.nPos = (unsigned int)fileOutPos;
fileout << TX_WITH_WITNESS(block);
@@ -1210,7 +1202,7 @@ public:
}
};
-void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFiles)
+void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
{
ImportingNow imp{chainman.m_blockman.m_importing};
@@ -1245,7 +1237,7 @@ void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFile
}
// -loadblock=
- for (const fs::path& path : vImportFiles) {
+ for (const fs::path& path : import_paths) {
AutoFile file{fsbridge::fopen(path, "rb")};
if (!file.IsNull()) {
LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 821bbf5109..03bc5f4600 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -29,6 +29,7 @@
#include <memory>
#include <optional>
#include <set>
+#include <span>
#include <string>
#include <unordered_map>
#include <utility>
@@ -429,7 +430,7 @@ public:
void CleanupBlockRevFiles() const;
};
-void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFiles);
+void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths);
} // namespace node
#endif // BITCOIN_NODE_BLOCKSTORAGE_H
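ImportBlocks() now takes std::span<const fs::path>, so callers can pass a vector, a built-in array, or any other contiguous range without copying into a specific container type. A standalone C++20 sketch of a span-taking signature like it (illustrative function name):

    // C++20 sketch of the span-based parameter adopted for ImportBlocks() above.
    #include <filesystem>
    #include <iostream>
    #include <span>
    #include <vector>

    namespace fs = std::filesystem;

    void ImportPaths(std::span<const fs::path> import_paths)
    {
        for (const fs::path& path : import_paths) {
            std::cout << "importing " << path.string() << '\n';
        }
    }

    int main()
    {
        std::vector<fs::path> from_vector{"blk00000.dat", "blk00001.dat"};
        const fs::path single[]{"bootstrap.dat"};
        ImportPaths(from_vector); // vectors convert to spans implicitly
        ImportPaths(single);      // so do built-in arrays
    }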
diff --git a/src/node/caches.cpp b/src/node/caches.cpp
index 7403f7ddea..dc4d98f592 100644
--- a/src/node/caches.cpp
+++ b/src/node/caches.cpp
@@ -13,7 +13,6 @@ CacheSizes CalculateCacheSizes(const ArgsManager& args, size_t n_indexes)
{
int64_t nTotalCache = (args.GetIntArg("-dbcache", nDefaultDbCache) << 20);
nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
- nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); // total cache cannot be greater than nMaxDbcache
CacheSizes sizes;
sizes.block_tree_db = std::min(nTotalCache / 8, nMaxBlockDBCache << 20);
nTotalCache -= sizes.block_tree_db;
diff --git a/src/node/context.h b/src/node/context.h
index a664fad80b..debc122120 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -9,6 +9,7 @@
#include <cstdlib>
#include <functional>
#include <memory>
+#include <thread>
#include <vector>
class ArgsManager;
@@ -58,8 +59,10 @@ struct NodeContext {
std::unique_ptr<ECC_Context> ecc_context;
//! Init interface for initializing current process and connecting to other processes.
interfaces::Init* init{nullptr};
+ //! Function to request a shutdown.
+ std::function<bool()> shutdown_request;
//! Interrupt object used to track whether node shutdown was requested.
- util::SignalInterrupt* shutdown{nullptr};
+ util::SignalInterrupt* shutdown_signal{nullptr};
std::unique_ptr<AddrMan> addrman;
std::unique_ptr<CConnman> connman;
std::unique_ptr<CTxMemPool> mempool;
@@ -86,6 +89,7 @@ struct NodeContext {
std::atomic<int> exit_status{EXIT_SUCCESS};
//! Manages all the node warnings
std::unique_ptr<node::Warnings> warnings;
+ std::thread background_init_thread;
//! Declare default constructor and destructor that are not inline, so code
//! instantiating the NodeContext struct doesn't need to #include class
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 54b986c926..0010c104a8 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -8,6 +8,7 @@
#include <chain.h>
#include <chainparams.h>
#include <common/args.h>
+#include <consensus/merkle.h>
#include <consensus/validation.h>
#include <deploymentstatus.h>
#include <external_signer.h>
@@ -17,6 +18,7 @@
#include <interfaces/handler.h>
#include <interfaces/mining.h>
#include <interfaces/node.h>
+#include <interfaces/types.h>
#include <interfaces/wallet.h>
#include <kernel/chain.h>
#include <kernel/context.h>
@@ -33,6 +35,7 @@
#include <node/interface_ui.h>
#include <node/mini_miner.h>
#include <node/miner.h>
+#include <node/kernel_notifications.h>
#include <node/transaction.h>
#include <node/types.h>
#include <node/warnings.h>
@@ -58,7 +61,7 @@
#include <validation.h>
#include <validationinterface.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <any>
#include <memory>
@@ -67,6 +70,8 @@
#include <boost/signals2/signal.hpp>
+using interfaces::BlockRef;
+using interfaces::BlockTemplate;
using interfaces::BlockTip;
using interfaces::Chain;
using interfaces::FoundBlock;
@@ -100,7 +105,7 @@ public:
void initParameterInteraction() override { InitParameterInteraction(args()); }
bilingual_str getWarnings() override { return Join(Assert(m_context->warnings)->GetMessages(), Untranslated("<hr />")); }
int getExitStatus() override { return Assert(m_context)->exit_status.load(); }
- uint32_t getLogCategories() override { return LogInstance().GetCategoryMask(); }
+ BCLog::CategoryMask getLogCategories() override { return LogInstance().GetCategoryMask(); }
bool baseInitialize() override
{
if (!AppInitBasicSetup(args(), Assert(context())->exit_status)) return false;
@@ -130,9 +135,11 @@ public:
}
void startShutdown() override
{
- if (!(*Assert(Assert(m_context)->shutdown))()) {
+ NodeContext& ctx{*Assert(m_context)};
+ if (!(Assert(ctx.shutdown_request))()) {
LogError("Failed to send shutdown signal\n");
}
+
// Stop RPC for clean shutdown if any of waitfor* commands is executed.
if (args().GetBoolArg("-server", false)) {
InterruptRPC();
@@ -180,7 +187,7 @@ public:
});
args().WriteSettingsFile();
}
- void mapPort(bool use_upnp, bool use_natpmp) override { StartMapPort(use_upnp, use_natpmp); }
+ void mapPort(bool use_upnp, bool use_pcp) override { StartMapPort(use_upnp, use_pcp); }
bool getProxy(Network net, Proxy& proxy_info) override { return GetProxy(net, proxy_info); }
size_t getNodeCount(ConnectionDirection flags) override
{
@@ -819,29 +826,29 @@ public:
{
std::optional<interfaces::SettingsAction> action;
args().LockSettings([&](common::Settings& settings) {
- auto* ptr_value = common::FindKey(settings.rw_settings, name);
- // Create value if it doesn't exist
- auto& value = ptr_value ? *ptr_value : settings.rw_settings[name];
- action = update_settings_func(value);
+ if (auto* value = common::FindKey(settings.rw_settings, name)) {
+ action = update_settings_func(*value);
+ if (value->isNull()) settings.rw_settings.erase(name);
+ } else {
+ UniValue new_value;
+ action = update_settings_func(new_value);
+ if (!new_value.isNull()) settings.rw_settings[name] = std::move(new_value);
+ }
});
if (!action) return false;
// Now dump value to disk if requested
- return *action == interfaces::SettingsAction::SKIP_WRITE || args().WriteSettingsFile();
+ return *action != interfaces::SettingsAction::WRITE || args().WriteSettingsFile();
}
- bool overwriteRwSetting(const std::string& name, common::SettingsValue& value, bool write) override
+ bool overwriteRwSetting(const std::string& name, common::SettingsValue value, interfaces::SettingsAction action) override
{
- if (value.isNull()) return deleteRwSettings(name, write);
return updateRwSetting(name, [&](common::SettingsValue& settings) {
settings = std::move(value);
- return write ? interfaces::SettingsAction::WRITE : interfaces::SettingsAction::SKIP_WRITE;
+ return action;
});
}
- bool deleteRwSettings(const std::string& name, bool write) override
+ bool deleteRwSettings(const std::string& name, interfaces::SettingsAction action) override
{
- args().LockSettings([&](common::Settings& settings) {
- settings.rw_settings.erase(name);
- });
- return !write || args().WriteSettingsFile();
+ return overwriteRwSetting(name, {}, action);
}
void requestMempoolTransactions(Notifications& notifications) override
{
@@ -863,6 +870,82 @@ public:
NodeContext& m_node;
};
+class BlockTemplateImpl : public BlockTemplate
+{
+public:
+ explicit BlockTemplateImpl(std::unique_ptr<CBlockTemplate> block_template, NodeContext& node) : m_block_template(std::move(block_template)), m_node(node)
+ {
+ assert(m_block_template);
+ }
+
+ CBlockHeader getBlockHeader() override
+ {
+ return m_block_template->block;
+ }
+
+ CBlock getBlock() override
+ {
+ return m_block_template->block;
+ }
+
+ std::vector<CAmount> getTxFees() override
+ {
+ return m_block_template->vTxFees;
+ }
+
+ std::vector<int64_t> getTxSigops() override
+ {
+ return m_block_template->vTxSigOpsCost;
+ }
+
+ CTransactionRef getCoinbaseTx() override
+ {
+ return m_block_template->block.vtx[0];
+ }
+
+ std::vector<unsigned char> getCoinbaseCommitment() override
+ {
+ return m_block_template->vchCoinbaseCommitment;
+ }
+
+ int getWitnessCommitmentIndex() override
+ {
+ return GetWitnessCommitmentIndex(m_block_template->block);
+ }
+
+ std::vector<uint256> getCoinbaseMerklePath() override
+ {
+ return BlockMerkleBranch(m_block_template->block);
+ }
+
+ bool submitSolution(uint32_t version, uint32_t timestamp, uint32_t nonce, CMutableTransaction coinbase) override
+ {
+ CBlock block{m_block_template->block};
+
+ auto cb = MakeTransactionRef(std::move(coinbase));
+
+ if (block.vtx.size() == 0) {
+ block.vtx.push_back(cb);
+ } else {
+ block.vtx[0] = cb;
+ }
+
+ block.nVersion = version;
+ block.nTime = timestamp;
+ block.nNonce = nonce;
+
+ block.hashMerkleRoot = BlockMerkleRoot(block);
+
+ auto block_ptr = std::make_shared<const CBlock>(block);
+ return chainman().ProcessNewBlock(block_ptr, /*force_processing=*/true, /*min_pow_checked=*/true, /*new_block=*/nullptr);
+ }
+
+ const std::unique_ptr<CBlockTemplate> m_block_template;
+
+ ChainstateManager& chainman() { return *Assert(m_node.chainman); }
+ NodeContext& m_node;
+};
+
class MinerImpl : public Mining
{
public:
@@ -878,12 +961,26 @@ public:
return chainman().IsInitialBlockDownload();
}
- std::optional<uint256> getTipHash() override
+ std::optional<BlockRef> getTip() override
{
LOCK(::cs_main);
CBlockIndex* tip{chainman().ActiveChain().Tip()};
if (!tip) return {};
- return tip->GetBlockHash();
+ return BlockRef{tip->GetBlockHash(), tip->nHeight};
+ }
+
+ BlockRef waitTipChanged(uint256 current_tip, MillisecondsDouble timeout) override
+ {
+ if (timeout > std::chrono::years{100}) timeout = std::chrono::years{100}; // Upper bound to avoid UB in std::chrono
+ {
+ WAIT_LOCK(notifications().m_tip_block_mutex, lock);
+ notifications().m_tip_block_cv.wait_for(lock, timeout, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) {
+ return (notifications().m_tip_block != current_tip && notifications().m_tip_block != uint256::ZERO) || chainman().m_interrupt;
+ });
+ }
+ // Must release m_tip_block_mutex before locking cs_main, to avoid deadlocks.
+ LOCK(::cs_main);
+ return BlockRef{chainman().ActiveChain().Tip()->GetBlockHash(), chainman().ActiveChain().Tip()->nHeight};
}
bool processNewBlock(const std::shared_ptr<const CBlock>& block, bool* new_block) override
@@ -909,15 +1006,16 @@ public:
return TestBlockValidity(state, chainman().GetParams(), chainman().ActiveChainstate(), block, tip, /*fCheckPOW=*/false, check_merkle_root);
}
- std::unique_ptr<CBlockTemplate> createNewBlock(const CScript& script_pub_key, const BlockCreateOptions& options) override
+ std::unique_ptr<BlockTemplate> createNewBlock(const CScript& script_pub_key, const BlockCreateOptions& options) override
{
BlockAssembler::Options assemble_options{options};
ApplyArgsManOptions(*Assert(m_node.args), assemble_options);
- return BlockAssembler{chainman().ActiveChainstate(), context()->mempool.get(), assemble_options}.CreateNewBlock(script_pub_key);
+ return std::make_unique<BlockTemplateImpl>(BlockAssembler{chainman().ActiveChainstate(), context()->mempool.get(), assemble_options}.CreateNewBlock(script_pub_key), m_node);
}
NodeContext* context() override { return &m_node; }
ChainstateManager& chainman() { return *Assert(m_node.chainman); }
+ KernelNotifications& notifications() { return *Assert(m_node.notifications); }
NodeContext& m_node;
};
} // namespace
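A hedged sketch of how a consumer might drive the extended Mining interface shown in this file; the default-constructed BlockCreateOptions and the external nonce search are assumptions for illustration only.

    // Illustrative only: build a template, let an external worker grind the nonce,
    // then hand the solution back through submitSolution().
    bool MineOneBlock(interfaces::Mining& mining, const CScript& script_pub_key)
    {
        std::unique_ptr<interfaces::BlockTemplate> tmpl{mining.createNewBlock(script_pub_key, {})};
        CBlockHeader header{tmpl->getBlockHeader()};
        // ... an external worker adjusts header.nTime / header.nNonce until the target is met ...
        CMutableTransaction coinbase{*tmpl->getCoinbaseTx()};
        return tmpl->submitSolution(header.nVersion, header.nTime, header.nNonce, std::move(coinbase));
    }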
diff --git a/src/node/kernel_notifications.cpp b/src/node/kernel_notifications.cpp
index 9894052a3a..a09803165c 100644
--- a/src/node/kernel_notifications.cpp
+++ b/src/node/kernel_notifications.cpp
@@ -4,7 +4,7 @@
#include <node/kernel_notifications.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chain.h>
#include <common/args.h>
@@ -50,9 +50,15 @@ namespace node {
kernel::InterruptResult KernelNotifications::blockTip(SynchronizationState state, CBlockIndex& index)
{
+ {
+ LOCK(m_tip_block_mutex);
+ m_tip_block = index.GetBlockHash();
+ m_tip_block_cv.notify_all();
+ }
+
uiInterface.NotifyBlockTip(state, &index);
if (m_stop_at_height && index.nHeight >= m_stop_at_height) {
- if (!m_shutdown()) {
+ if (!m_shutdown_request()) {
LogError("Failed to send shutdown signal after reaching stop height\n");
}
return kernel::Interrupted{};
@@ -84,12 +90,12 @@ void KernelNotifications::warningUnset(kernel::Warning id)
void KernelNotifications::flushError(const bilingual_str& message)
{
- AbortNode(&m_shutdown, m_exit_status, message, &m_warnings);
+ AbortNode(m_shutdown_request, m_exit_status, message, &m_warnings);
}
void KernelNotifications::fatalError(const bilingual_str& message)
{
- node::AbortNode(m_shutdown_on_fatal_error ? &m_shutdown : nullptr,
+ node::AbortNode(m_shutdown_on_fatal_error ? m_shutdown_request : nullptr,
m_exit_status, message, &m_warnings);
}
diff --git a/src/node/kernel_notifications.h b/src/node/kernel_notifications.h
index e37f4d4e1e..296b9c426d 100644
--- a/src/node/kernel_notifications.h
+++ b/src/node/kernel_notifications.h
@@ -7,8 +7,13 @@
#include <kernel/notifications_interface.h>
+#include <sync.h>
+#include <threadsafety.h>
+#include <uint256.h>
+
#include <atomic>
#include <cstdint>
+#include <functional>
class ArgsManager;
class CBlockIndex;
@@ -19,10 +24,6 @@ namespace kernel {
enum class Warning;
} // namespace kernel
-namespace util {
-class SignalInterrupt;
-} // namespace util
-
namespace node {
class Warnings;
@@ -31,10 +32,10 @@ static constexpr int DEFAULT_STOPATHEIGHT{0};
class KernelNotifications : public kernel::Notifications
{
public:
- KernelNotifications(util::SignalInterrupt& shutdown, std::atomic<int>& exit_status, node::Warnings& warnings)
- : m_shutdown(shutdown), m_exit_status{exit_status}, m_warnings{warnings} {}
+ KernelNotifications(const std::function<bool()>& shutdown_request, std::atomic<int>& exit_status, node::Warnings& warnings)
+ : m_shutdown_request(shutdown_request), m_exit_status{exit_status}, m_warnings{warnings} {}
- [[nodiscard]] kernel::InterruptResult blockTip(SynchronizationState state, CBlockIndex& index) override;
+ [[nodiscard]] kernel::InterruptResult blockTip(SynchronizationState state, CBlockIndex& index) override EXCLUSIVE_LOCKS_REQUIRED(!m_tip_block_mutex);
void headerTip(SynchronizationState state, int64_t height, int64_t timestamp, bool presync) override;
@@ -52,8 +53,16 @@ public:
int m_stop_at_height{DEFAULT_STOPATHEIGHT};
//! Useful for tests, can be set to false to avoid shutdown on fatal error.
bool m_shutdown_on_fatal_error{true};
+
+ Mutex m_tip_block_mutex;
+ std::condition_variable m_tip_block_cv GUARDED_BY(m_tip_block_mutex);
+ //! The block for which the last blockTip notification was received.
+ //! The initial ZERO means that no block has been connected yet, which may
+ //! be true even long after startup, until shutdown.
+ uint256 m_tip_block GUARDED_BY(m_tip_block_mutex){uint256::ZERO};
+
private:
- util::SignalInterrupt& m_shutdown;
+ const std::function<bool()>& m_shutdown_request;
std::atomic<int>& m_exit_status;
node::Warnings& m_warnings;
};
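A simplified restatement of the notify/wait pairing introduced here, using plain std:: primitives instead of Bitcoin Core's annotated Mutex (names and the omitted timeout are simplifications): blockTip() publishes the new hash and wakes waiters, while waitTipChanged() sleeps until the published hash differs from the caller's view.

    #include <condition_variable>
    #include <mutex>

    std::mutex tip_mutex;
    std::condition_variable tip_cv;
    uint256 tip_block{uint256::ZERO};                    // assumes uint256.h is included

    void OnBlockTip(const uint256& new_tip)              // producer side (cf. blockTip)
    {
        std::lock_guard lock{tip_mutex};
        tip_block = new_tip;
        tip_cv.notify_all();                             // notify while holding the lock, as above
    }

    uint256 WaitTipChanged(const uint256& current_tip)   // consumer side (cf. waitTipChanged)
    {
        std::unique_lock lock{tip_mutex};
        tip_cv.wait(lock, [&] { return tip_block != current_tip && tip_block != uint256::ZERO; });
        return tip_block;
    }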
diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp
index a265c2e12d..ff7de8c64a 100644
--- a/src/node/mempool_persist.cpp
+++ b/src/node/mempool_persist.cpp
@@ -199,8 +199,8 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock
LogInfo("Writing %d unbroadcast transactions to file.\n", unbroadcast_txids.size());
file << unbroadcast_txids;
- if (!skip_file_commit && !FileCommit(file.Get()))
- throw std::runtime_error("FileCommit failed");
+ if (!skip_file_commit && !file.Commit())
+ throw std::runtime_error("Commit failed");
file.fclose();
if (!RenameOver(dump_path + ".new", dump_path)) {
throw std::runtime_error("Rename failed");
diff --git a/src/node/miner.cpp b/src/node/miner.cpp
index 97f6ac346a..181ae2ef05 100644
--- a/src/node/miner.cpp
+++ b/src/node/miner.cpp
@@ -113,10 +113,6 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
resetBlock();
pblocktemplate.reset(new CBlockTemplate());
-
- if (!pblocktemplate.get()) {
- return nullptr;
- }
CBlock* const pblock = &pblocktemplate->block; // pointer for convenience
// Add dummy coinbase tx as first transaction
diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp
index 976421e455..ca5491bdc2 100644
--- a/src/node/utxo_snapshot.cpp
+++ b/src/node/utxo_snapshot.cpp
@@ -73,10 +73,10 @@ std::optional<uint256> ReadSnapshotBaseBlockhash(fs::path chaindir)
}
afile >> base_blockhash;
- if (std::fgetc(afile.Get()) != EOF) {
+ int64_t position = afile.tell();
+ afile.seek(0, SEEK_END);
+ if (position != afile.tell()) {
LogPrintf("[snapshot] warning: unexpected trailing data in %s\n", read_from_str);
- } else if (std::ferror(afile.Get())) {
- LogPrintf("[snapshot] warning: i/o error reading %s\n", read_from_str);
}
return base_blockhash;
}
diff --git a/src/node/warnings.cpp b/src/node/warnings.cpp
index 87389e472b..255d8dba6e 100644
--- a/src/node/warnings.cpp
+++ b/src/node/warnings.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <node/warnings.h>
diff --git a/src/pow.cpp b/src/pow.cpp
index 50de8946be..6c8e7e5d98 100644
--- a/src/pow.cpp
+++ b/src/pow.cpp
@@ -134,8 +134,19 @@ bool PermittedDifficultyTransition(const Consensus::Params& params, int64_t heig
return true;
}
+// Bypasses the actual proof of work check during fuzz testing with a simplified validation checking whether
+// the most significant bit of the last byte of the hash is set.
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params& params)
{
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ return (hash.data()[31] & 0x80) == 0;
+#else
+ return CheckProofOfWorkImpl(hash, nBits, params);
+#endif
+}
+
+bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params& params)
+{
bool fNegative;
bool fOverflow;
arith_uint256 bnTarget;
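As a standalone illustration of the fuzz-mode shortcut above (not project code): with the byte-31 test, roughly half of uniformly random hashes count as valid work, so fuzz targets can produce acceptable blocks without grinding real proof of work.

    #include <array>
    #include <cstdint>

    // Mirrors the simplified check: valid iff the most significant bit of the
    // last byte (the top bit of the 256-bit value) is clear.
    bool FuzzPowPasses(const std::array<uint8_t, 32>& hash)
    {
        return (hash[31] & 0x80) == 0;
    }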
diff --git a/src/pow.h b/src/pow.h
index ec03f318a4..2b28ade273 100644
--- a/src/pow.h
+++ b/src/pow.h
@@ -19,6 +19,7 @@ unsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nF
/** Check whether a block hash satisfies the proof-of-work requirement specified by nBits */
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params&);
+bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params&);
/**
* Return false if the proof-of-work requirement specified by new_nbits at a
diff --git a/src/prevector.h b/src/prevector.h
index 0c47137910..d14e5f64e9 100644
--- a/src/prevector.h
+++ b/src/prevector.h
@@ -363,7 +363,8 @@ public:
change_capacity(new_size + (new_size >> 1));
}
T* ptr = item_ptr(p);
- memmove(ptr + 1, ptr, (size() - p) * sizeof(T));
+ T* dst = ptr + 1;
+ memmove(dst, ptr, (size() - p) * sizeof(T));
_size++;
new(static_cast<void*>(ptr)) T(value);
return iterator(ptr);
@@ -376,7 +377,8 @@ public:
change_capacity(new_size + (new_size >> 1));
}
T* ptr = item_ptr(p);
- memmove(ptr + count, ptr, (size() - p) * sizeof(T));
+ T* dst = ptr + count;
+ memmove(dst, ptr, (size() - p) * sizeof(T));
_size += count;
fill(item_ptr(p), count, value);
}
@@ -390,7 +392,8 @@ public:
change_capacity(new_size + (new_size >> 1));
}
T* ptr = item_ptr(p);
- memmove(ptr + count, ptr, (size() - p) * sizeof(T));
+ T* dst = ptr + count;
+ memmove(dst, ptr, (size() - p) * sizeof(T));
_size += count;
fill(ptr, first, last);
}
diff --git a/src/qt/CMakeLists.txt b/src/qt/CMakeLists.txt
index dc62d0f57e..7ec2b74cc8 100644
--- a/src/qt/CMakeLists.txt
+++ b/src/qt/CMakeLists.txt
@@ -13,9 +13,6 @@ endif()
get_target_property(qt_lib_type Qt5::Core TYPE)
-# TODO: After the transition from Autotools to CMake,
-# all `Q_IMPORT_PLUGIN` macros can be deleted from the
-# qt/bitcoin.cpp and qt/test/test_main.cpp source files.
function(import_plugins target)
if(qt_lib_type STREQUAL "STATIC_LIBRARY")
set(plugins Qt5::QMinimalIntegrationPlugin)
@@ -49,7 +46,7 @@ file(GLOB ts_files RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} locale/*.ts)
set_source_files_properties(${ts_files} PROPERTIES OUTPUT_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/locale)
qt5_add_translation(qm_files ${ts_files})
-configure_file(bitcoin_locale.qrc bitcoin_locale.qrc COPYONLY)
+configure_file(bitcoin_locale.qrc bitcoin_locale.qrc USE_SOURCE_PERMISSIONS COPYONLY)
# The bitcoinqt sources have to include headers in
# order to parse them to collect translatable strings.
@@ -136,7 +133,6 @@ target_link_libraries(bitcoinqt
bitcoin_cli
leveldb
Boost::headers
- $<TARGET_NAME_IF_EXISTS:NATPMP::NATPMP>
$<TARGET_NAME_IF_EXISTS:MiniUPnPc::MiniUPnPc>
$<TARGET_NAME_IF_EXISTS:PkgConfig::libqrencode>
$<$<PLATFORM_ID:Darwin>:-framework\ AppKit>
diff --git a/src/qt/README.md b/src/qt/README.md
index 1c6f963ccf..3ecdd0888e 100644
--- a/src/qt/README.md
+++ b/src/qt/README.md
@@ -99,7 +99,7 @@ sudo apt-get install qtcreator
#### Setup Qt Creator
1. Make sure you've installed all dependencies specified in your system's build instructions
-2. Follow the compile instructions for your system, run `./configure` with the `--enable-debug` flag
+2. Follow the compile instructions for your system, adding the `-DCMAKE_BUILD_TYPE=Debug` build flag
3. Start Qt Creator. At the start page, do: `New` -> `Import Project` -> `Import Existing Project`
4. Enter `bitcoin-qt` as the Project Name and enter the absolute path to `src/qt` as Location
5. Check over the file selection, you may need to select the `forms` directory (necessary if you intend to edit *.ui files)
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index d51558908a..c43cb77866 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/bitcoin.h>
@@ -60,21 +60,6 @@
#include <QTranslator>
#include <QWindow>
-#if defined(QT_STATIC)
-#include <QtPlugin>
-#if defined(QT_QPA_PLATFORM_XCB)
-Q_IMPORT_PLUGIN(QXcbIntegrationPlugin);
-#elif defined(QT_QPA_PLATFORM_WINDOWS)
-Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin);
-Q_IMPORT_PLUGIN(QWindowsVistaStylePlugin);
-#elif defined(QT_QPA_PLATFORM_COCOA)
-Q_IMPORT_PLUGIN(QCocoaIntegrationPlugin);
-Q_IMPORT_PLUGIN(QMacStylePlugin);
-#elif defined(QT_QPA_PLATFORM_ANDROID)
-Q_IMPORT_PLUGIN(QAndroidPlatformIntegrationPlugin)
-#endif
-#endif
-
// Declare meta types used for QMetaObject::invokeMethod
Q_DECLARE_METATYPE(bool*)
Q_DECLARE_METATYPE(CAmount)
@@ -540,7 +525,7 @@ int GuiMain(int argc, char* argv[])
/// 2. Parse command-line options. We do this after qt in order to show an error if there are problems parsing these
// Command-line options take precedence:
- SetupServerArgs(gArgs);
+ SetupServerArgs(gArgs, init->canListenIpc());
SetupUIArgs(gArgs);
std::string error;
if (!gArgs.ParseParameters(argc, argv, error)) {
diff --git a/src/qt/bitcoin.h b/src/qt/bitcoin.h
index 1423a8bbc6..52b117eed7 100644
--- a/src/qt/bitcoin.h
+++ b/src/qt/bitcoin.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_QT_BITCOIN_H
#define BITCOIN_QT_BITCOIN_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <interfaces/node.h>
#include <qt/initexecutor.h>
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 6d66c7473b..6d7e183e98 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/bitcoingui.h>
diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h
index 73adbda5a5..32fb7488fb 100644
--- a/src/qt/bitcoingui.h
+++ b/src/qt/bitcoingui.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_QT_BITCOINGUI_H
#define BITCOIN_QT_BITCOINGUI_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/bitcoinunits.h>
#include <qt/clientmodel.h>
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index 5c70c2695c..fb81dee8da 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/clientmodel.h>
diff --git a/src/qt/createwalletdialog.cpp b/src/qt/createwalletdialog.cpp
index 3e8d1461e5..2908043d28 100644
--- a/src/qt/createwalletdialog.cpp
+++ b/src/qt/createwalletdialog.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <interfaces/node.h>
#include <qt/createwalletdialog.h>
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index 99fb238772..2056b2cccd 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -105,7 +105,7 @@
<item>
<widget class="QLabel" name="databaseCacheLabel">
<property name="toolTip">
- <string extracomment="Tooltip text for Options window setting that sets the size of the database cache. Explains the corresponding effects of increasing/decreasing this value.">Maximum database cache size. A larger cache can contribute to faster sync, after which the benefit is less pronounced for most use cases. Lowering the cache size will reduce memory usage. Unused mempool memory is shared for this cache.</string>
+ <string extracomment="Tooltip text for Options window setting that sets the size of the database cache. Explains the corresponding effects of increasing/decreasing this value.">Maximum database cache size. Make sure you have enough RAM. A larger cache can contribute to faster sync, after which the benefit is less pronounced for most use cases. Lowering the cache size will reduce memory usage. Unused mempool memory is shared for this cache.</string>
</property>
<property name="text">
<string>Size of &amp;database cache</string>
@@ -328,10 +328,10 @@
<item>
<widget class="QCheckBox" name="mapPortNatpmp">
<property name="toolTip">
- <string>Automatically open the Bitcoin client port on the router. This only works when your router supports NAT-PMP and it is enabled. The external port could be random.</string>
+ <string>Automatically open the Bitcoin client port on the router. This only works when your router supports PCP or NAT-PMP and it is enabled. The external port could be random.</string>
</property>
<property name="text">
- <string>Map port using NA&amp;T-PMP</string>
+ <string>Map port using PCP or NA&amp;T-PMP</string>
</property>
</widget>
</item>
diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp
index 26b42deb64..bc770b71aa 100644
--- a/src/qt/intro.cpp
+++ b/src/qt/intro.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chainparams.h>
#include <qt/intro.h>
diff --git a/src/qt/locale/bitcoin_am.ts b/src/qt/locale/bitcoin_am.ts
index 2fcf7a1e63..c02049b984 100644
--- a/src/qt/locale/bitcoin_am.ts
+++ b/src/qt/locale/bitcoin_am.ts
@@ -176,6 +176,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">ቦርሳዎ ምስጢር ተደርጓል</translation>
</message>
<message>
+ <source>Back</source>
+ <translation type="unfinished">ተመለስ</translation>
+ </message>
+ <message>
<source>Wallet to be encrypted</source>
<translation type="unfinished">ለመመስጠር የተዘጋጀ ዋሌት</translation>
</message>
@@ -254,6 +258,18 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">ስህተት፥ %1</translation>
</message>
<message>
+ <source>Embedded "%1"</source>
+ <translation type="unfinished">የተከተተ "%1"</translation>
+ </message>
+ <message>
+ <source>Default system font "%1"</source>
+ <translation type="unfinished">ነባሪ የስርዓት ቅርጸ-ቁምፊ "%1</translation>
+ </message>
+ <message>
+ <source>Custom…</source>
+ <translation type="unfinished">ብጁ…</translation>
+ </message>
+ <message>
<source>Amount</source>
<translation type="unfinished">መጠን</translation>
</message>
@@ -363,6 +379,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">&amp;ተቀበል</translation>
</message>
<message>
+ <source>&amp;Change Passphrase…</source>
+ <translation type="unfinished">&amp;የይለፍ ቃል ቀይር…</translation>
+ </message>
+ <message>
+ <source>Sign messages with your Bitcoin addresses to prove you own them</source>
+ <translation type="unfinished">በእርሶ የተያዙ መሆኑን ለማረጋገጥ በBitcoin አድራሻዎችዎ መልዕክቶችን ይፈርሙ</translation>
+ </message>
+ <message>
<source>&amp;File</source>
<translation type="unfinished">&amp;ፋይል</translation>
</message>
@@ -406,6 +430,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">ዋሌት ዝጋ</translation>
</message>
<message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">ዋሌትዎን ያዛውሩ</translation>
+ </message>
+ <message>
+ <source>Migrate a wallet</source>
+ <translation type="unfinished">ዋሌትዎን ያዛውሩ</translation>
+ </message>
+ <message>
<source>Wallet Name</source>
<extracomment>Label of the input field where the name of the wallet is entered.</extracomment>
<translation type="unfinished">ዋሌት ስም</translation>
@@ -423,6 +455,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</translation>
</message>
<message>
+ <source>Error creating wallet</source>
+ <translation type="unfinished">ዋሌትዎን ለፍጠር ተሳስተዋል </translation>
+ </message>
+ <message>
+ <source>Cannot create new wallet, the software was compiled without sqlite support (required for descriptor wallets)</source>
+ <translation type="unfinished">አዲስ ዋሌት መፍጠር አልተቻለም፣ ሶፍትዌሩ የተቀናበረው ያለ ስኩላይት ድጋፍ ነው (ለገላጭ ዋሌቶች ያስፈልጋል)</translation>
+ </message>
+ <message>
<source>Error: %1</source>
<translation type="unfinished">ስህተት፥ %1</translation>
</message>
@@ -485,6 +525,49 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>MigrateWalletActivity</name>
+ <message>
+ <source>Migrate wallet</source>
+ <translation type="unfinished">ዋሌት ያዛውሩ</translation>
+ </message>
+ <message>
+ <source>Migrating the wallet will convert this wallet to one or more descriptor wallets. A new wallet backup will need to be made.
+If this wallet contains any watchonly scripts, a new wallet will be created which contains those watchonly scripts.
+If this wallet contains any solvable but not watched scripts, a different and new wallet will be created which contains those scripts.
+
+The migration process will create a backup of the wallet before migrating. This backup file will be named &lt;wallet name&gt;-&lt;timestamp&gt;.legacy.bak and can be found in the directory for this wallet. In the event of an incorrect migration, the backup can be restored with the "Restore Wallet" functionality.</source>
+ <translation type="unfinished">ዋሌትን ማዛወር ይህንን ዋሌት አንድ ወይም ከዚያ በላይ ወደሆነ ገላጭ ዋሌቶች ይቀይረዋል።አዲስ ዋሌት ማዘጋጀት ያስፈልጋል ።ይህ ዋሌት ምንም ዓይነት የመመልከት ብቻ ስክሪፕቶችን የያዘ ከሆነ፣እነዚያን የመመልከት ብቻ ስክሪፕቶችን የያዘ አዲስ ዋሌት ይፈጠራል።ይህ ዋሌት ሊፈቱ የሚችሉ ነገር ግን የመመልከት ብቻ ያልሆኑ ስክሪፕቶችን የያዘ ከሆነ ፣ እነዚህን የያዘ አዲስ እና ልዩ የሆነ ዋሌት ይፈጠራል ።የማዛወር ሂደቱ ማዘዋወር ከመፈጸሙ በፊት የነዚህን ዋሌቶች መጠባበቂያ ቅጂ ይይዛል።ይህ መጠባበቂያ ቅጂ 1-2 legacy.bak ተብሎ ተሰይሞ በዋሌቱ ማውጫ ውስጥ ይገኛል።የተሳሳተ ዝውውር በሚከሰትበት ጊዜየመጠባበቂያ ቅጂው በ ዋሌት መመለሻ መተግበሪያ ውስጥ ይከማቻል።</translation>
+ </message>
+ <message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">ዋሌትዎን ያዛውሩ</translation>
+ </message>
+ <message>
+ <source>Migrating Wallet &lt;b&gt;%1&lt;/b&gt;…</source>
+ <translation type="unfinished">ዋሌት ማዘዋወር &lt;b&gt;%1&lt;/b&gt;…</translation>
+ </message>
+ <message>
+ <source>The wallet '%1' was migrated successfully.</source>
+ <translation type="unfinished">ዋሌት '%1' በትክክል ተዛውሯል </translation>
+ </message>
+ <message>
+ <source>Watchonly scripts have been migrated to a new wallet named '%1'.</source>
+ <translation type="unfinished">የመመልከት ብቻ ስክሪፕቶች'%1'.ወደ ተሰኘው ዋሌት ተዛውረዋል </translation>
+ </message>
+ <message>
+ <source>Solvable but not watched scripts have been migrated to a new wallet named '%1'.</source>
+ <translation type="unfinished">ሊፈቱ የሚችሉ ነገር ግን የማይታዩ ስክሪፕቶች ወደ አዲስ ዋሌት ተዛውረዋል '%1'።</translation>
+ </message>
+ <message>
+ <source>Migration failed</source>
+ <translation type="unfinished">ዝውውሩ አልተሳካም </translation>
+ </message>
+ <message>
+ <source>Migration Successful</source>
+ <translation type="unfinished">ዝውውር ተሳክቷል </translation>
+ </message>
+</context>
+<context>
<name>OpenWalletActivity</name>
<message>
<source>Open Wallet</source>
@@ -502,6 +585,14 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<context>
<name>CreateWalletDialog</name>
<message>
+ <source>You are one step away from creating your new wallet!</source>
+ <translation type="unfinished">አዲሱን ዋሌትዎን ለመፍጠር አንድ እርምጃ ይቀርዎታል</translation>
+ </message>
+ <message>
+ <source>Please provide a name and, if desired, enable any advanced options</source>
+ <translation type="unfinished">እባክዎ ስም ያስገቡ እና ከተፈለገ ማንኛውንም የላቁ አማራጮችን ያንቁ</translation>
+ </message>
+ <message>
<source>Wallet Name</source>
<translation type="unfinished">ዋሌት ስም</translation>
</message>
@@ -590,6 +681,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<context>
<name>OptionsDialog</name>
<message>
+ <source>Font in the Overview tab: </source>
+ <translation type="unfinished">በአጠቃላይ እይታ ትር ውስጥ ያለ ፊደል</translation>
+ </message>
+ <message>
<source>Error</source>
<translation type="unfinished">ስህተት</translation>
</message>
@@ -602,6 +697,13 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>PSBTOperationsDialog</name>
+ <message>
+ <source>Sends %1 to %2</source>
+ <translation type="unfinished">%1 ወደ %2 ይልካል</translation>
+ </message>
+ </context>
+<context>
<name>PeerTableModel</name>
<message>
<source>Address</source>
@@ -610,6 +712,56 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>RPCConsole</name>
+ <message>
+ <source>Local Addresses</source>
+ <translation type="unfinished">የአካባቢ አድራሻዎች</translation>
+ </message>
+ <message>
+ <source>Network addresses that your Bitcoin node is currently using to communicate with other nodes.</source>
+ <translation type="unfinished">የእርስዎ Bitcoin ኖድ ከሌሎች ኖዶች ጋር ለመገናኘት በአሁኑ ጊዜ እየተጠቀመበት ያለው የአውታረ መረብ አድራሻ።</translation>
+ </message>
+ <message>
+ <source>Hide Peers Detail</source>
+ <translation type="unfinished">የአቻዎችን ዝርዝር ደብቅ</translation>
+ </message>
+ <message>
+ <source>The transport layer version: %1</source>
+ <translation type="unfinished">የማጓጓዣ ንብርብር ስሪት፡ %1</translation>
+ </message>
+ <message>
+ <source>Transport</source>
+ <translation type="unfinished">መጓጓዣ</translation>
+ </message>
+ <message>
+ <source>Session ID</source>
+ <translation type="unfinished">የክፍለ ጊዜ መለያ</translation>
+ </message>
+ <message>
+ <source>The BIP324 session ID string in hex.</source>
+ <translation type="unfinished">የBIP324 ክፍለ ጊዜ መለያ ሕብረቁምፊ በሄክስ።</translation>
+ </message>
+ <message>
+ <source>detecting: peer could be v1 or v2</source>
+ <extracomment>Explanatory text for "detecting" transport type.</extracomment>
+ <translation type="unfinished">ማወቅ፡ እኩያ v1 ወይም v2 ሊሆን ይችላል።</translation>
+ </message>
+ <message>
+ <source>v1: unencrypted, plaintext transport protocol</source>
+ <extracomment>Explanatory text for v1 transport type.</extracomment>
+ <translation type="unfinished">v1፡ ያልተመሰጠረ፣ ግልጽ የጽሑፍ ትራንስፖርት ፕሮቶኮል</translation>
+ </message>
+ <message>
+ <source>v2: BIP324 encrypted transport protocol</source>
+ <extracomment>Explanatory text for v2 transport type.</extracomment>
+ <translation type="unfinished">v2፡ BIP324 የተመሰጠረ የትራንስፖርት ፕሮቶኮል</translation>
+ </message>
+ <message>
+ <source>Node window - [%1]</source>
+ <translation type="unfinished">የኖድ መስኮት - [%1]</translation>
+ </message>
+ </context>
+<context>
<name>ReceiveRequestDialog</name>
<message>
<source>Amount:</source>
@@ -661,6 +813,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<source>Copy fee</source>
<translation type="unfinished">ክፍያው ቅዳ</translation>
</message>
+ <message>
+ <source>%1 from wallet '%2'</source>
+ <translation type="unfinished">%1 ከዋሌት %2'</translation>
+ </message>
<message numerus="yes">
<source>Estimated to begin confirmation within %n block(s).</source>
<translation type="unfinished">
@@ -674,6 +830,17 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>SignVerifyMessageDialog</name>
+ <message>
+ <source>You can sign messages/agreements with your legacy (P2PKH) addresses to prove you can receive bitcoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
+ <translation type="unfinished">ወደ እነርሱ የተላኩ ቢትኮይን መቀበል እንደሚችሉ ለማረጋገጥ ከርስዎ (P2PKH) አድራሻዎች ጋር መልዕክቶችን/ስምምነቶችን መፈረም ይችላሉ። የማስገር ጥቃቶች እርስዎን ማንነትዎን በእነሱ ላይ እንዲፈርሙ ሊያታልሉዎት ስለሚችሉ ግልጽ ያልሆነ ወይም በዘፈቀደ ላለመፈረም ይጠንቀቁ። የተስማሙባቸውን ሙሉ ዝርዝር መግለጫዎች ብቻ ይፈርሙ።</translation>
+ </message>
+ <message>
+ <source>The entered address does not refer to a legacy (P2PKH) key. Message signing for SegWit and other non-P2PKH address types is not supported in this version of %1. Please check the address and try again.</source>
+ <translation type="unfinished">የገባው አድራሻ የቅርስ (P2PKH) ቁልፍን አያመለክትም። ለሴግዊት እና ሌሎች P2PKH ላልሆኑ የአድራሻ አይነቶች የመልዕክት መፈረም በዚህ የ%1 ስሪት ውስጥ አይደገፍም። እባክዎ አድራሻውን ያረጋግጡ እና እንደገና ይሞክሩ።</translation>
+ </message>
+ </context>
+<context>
<name>TransactionDesc</name>
<message>
<source>Date</source>
@@ -687,6 +854,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</translation>
</message>
<message>
+ <source>%1 (Certificate was not verified)</source>
+ <translation type="unfinished">%1 (ማረጋገጫው አልተረጋገጠም)</translation>
+ </message>
+ <message>
<source>Amount</source>
<translation type="unfinished">መጠን</translation>
</message>
@@ -742,6 +913,17 @@ Signing is only possible with addresses of the type 'legacy'.</source>
</message>
</context>
<context>
+ <name>WalletModel</name>
+ <message>
+ <source>Fee-bump PSBT copied to clipboard</source>
+ <translation type="unfinished">ከክፍያ-ነፃ PSBT ወደ ቅንጥብ ሰሌዳ ተቀድቷል።</translation>
+ </message>
+ <message>
+ <source>Signer error</source>
+ <translation type="unfinished">የፈራሚ ስህተት</translation>
+ </message>
+ </context>
+<context>
<name>WalletView</name>
<message>
<source>&amp;Export</source>
diff --git a/src/qt/locale/bitcoin_bn.ts b/src/qt/locale/bitcoin_bn.ts
index d22624ad85..bfb104a7a3 100644
--- a/src/qt/locale/bitcoin_bn.ts
+++ b/src/qt/locale/bitcoin_bn.ts
@@ -276,6 +276,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<translation type="unfinished">আংশিক স্বাক্ষরিত বিটকয়েন লেনদেন লোড করুন</translation>
</message>
<message>
+ <source>Load PSBT from &amp;clipboard…</source>
+ <translation type="unfinished">&amp;ক্লিপবোর্ড থেকে আংশিক স্বাক্ষরিত বিটকয়েন লেনদেন আনুন</translation>
+ </message>
+ <message>
<source>Load Partially Signed Bitcoin Transaction from clipboard</source>
<translation type="unfinished">ক্লিপবোর্ড থেকে আংশিক স্বাক্ষরিত বিটকয়েন লেনদেন লোড করুন</translation>
</message>
diff --git a/src/qt/locale/bitcoin_de.ts b/src/qt/locale/bitcoin_de.ts
index 474ce2a088..807a1c64ee 100644
--- a/src/qt/locale/bitcoin_de.ts
+++ b/src/qt/locale/bitcoin_de.ts
@@ -615,11 +615,15 @@ Das Signieren ist nur mit Adressen vom Typ 'Legacy' möglich.</translation>
<source>Processing blocks on disk…</source>
<translation type="unfinished">Verarbeite Blöcke auf Datenträger...</translation>
</message>
+ <message>
+ <source>Connecting to peers…</source>
+ <translation type="unfinished">Verbindung zu Peers wird hergestellt…</translation>
+ </message>
<message numerus="yes">
<source>Processed %n block(s) of transaction history.</source>
<translation type="unfinished">
- <numerusform>Processed %n block(s) of transaction history.</numerusform>
- <numerusform>Processed %n block(s) of transaction history.</numerusform>
+ <numerusform>%n Block der Transaktionshistorie prozessiert.</numerusform>
+ <numerusform>%n Block/Blöcke der Transaktionshistorie prozessiert.</numerusform>
</translation>
</message>
<message>
@@ -710,7 +714,7 @@ Das Signieren ist nur mit Adressen vom Typ 'Legacy' möglich.</translation>
<message>
<source>Show Peers tab</source>
<extracomment>A context menu item. The "Peers tab" is an element of the "Node window".</extracomment>
- <translation type="unfinished">Gegenstellen Reiter anzeigen</translation>
+ <translation type="unfinished">Reiter mit Peers anzeigen</translation>
</message>
<message>
<source>Disable network activity</source>
@@ -1420,7 +1424,7 @@ Während des Migrationsprozesses wird vor der Migration ein Backup der Wallet er
</message>
<message>
<source>%1 is currently syncing. It will download headers and blocks from peers and validate them until reaching the tip of the block chain.</source>
- <translation type="unfinished">%1 synchronisiert gerade. Es lädt Header und Blöcke von Gegenstellen und validiert sie bis zum Erreichen der Spitze der Blockchain.</translation>
+ <translation type="unfinished">%1 synchronisiert gerade. Es lädt Header und Blöcke von Peers und validiert sie bis zum Erreichen der Spitze der Blockchain.</translation>
</message>
<message>
<source>Unknown. Syncing Headers (%1, %2%)…</source>
@@ -1639,7 +1643,7 @@ Während des Migrationsprozesses wird vor der Migration ein Backup der Wallet er
</message>
<message>
<source>Used for reaching peers via:</source>
- <translation type="unfinished">Benutzt um Gegenstellen zu erreichen über:</translation>
+ <translation type="unfinished">Benutzt um Peers zu erreichen über:</translation>
</message>
<message>
<source>&amp;Window</source>
@@ -1703,7 +1707,7 @@ Während des Migrationsprozesses wird vor der Migration ein Backup der Wallet er
</message>
<message>
<source>Use separate SOCKS&amp;5 proxy to reach peers via Tor onion services:</source>
- <translation type="unfinished">Nutze separaten SOCKS&amp;5-Proxy um Gegenstellen über Tor-Onion-Dienste zu erreichen:</translation>
+ <translation type="unfinished">Nutze separaten SOCKS&amp;5-Proxy um Peers über Tor-Onion-Dienste zu erreichen:</translation>
</message>
<message>
<source>&amp;Cancel</source>
@@ -2042,11 +2046,6 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">User-Agent</translation>
</message>
<message>
- <source>Peer</source>
- <extracomment>Title of Peers Table column which contains a unique number used to identify a connection.</extracomment>
- <translation type="unfinished">Gegenstelle</translation>
- </message>
- <message>
<source>Age</source>
<extracomment>Title of Peers Table column which indicates the duration (length of time) since the peer connection started.</extracomment>
<translation type="unfinished">Alter</translation>
@@ -2171,6 +2170,14 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Anzahl der Verbindungen</translation>
</message>
<message>
+ <source>Local Addresses</source>
+ <translation type="unfinished">Lokale Adressen</translation>
+ </message>
+ <message>
+ <source>Network addresses that your Bitcoin node is currently using to communicate with other nodes.</source>
+ <translation type="unfinished">Netzwerk-Adressen, die dein Bitcoin-Node aktuell verwendet, um mit anderen Nodes zu kommunizieren.</translation>
+ </message>
+ <message>
<source>Block chain</source>
<translation type="unfinished">Blockchain</translation>
</message>
@@ -2203,16 +2210,20 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Gesendet</translation>
</message>
<message>
- <source>&amp;Peers</source>
- <translation type="unfinished">&amp;Gegenstellen</translation>
- </message>
- <message>
<source>Banned peers</source>
- <translation type="unfinished">Gesperrte Gegenstellen</translation>
+ <translation type="unfinished">Gesperrte Peers</translation>
</message>
<message>
<source>Select a peer to view detailed information.</source>
- <translation type="unfinished">Gegenstelle auswählen, um detaillierte Informationen zu erhalten.</translation>
+ <translation type="unfinished">Peers auswählen, um detaillierte Informationen zu erhalten.</translation>
+ </message>
+ <message>
+ <source>Hide Peers Detail</source>
+ <translation type="unfinished">Reiter mit Peers verstecken</translation>
+ </message>
+ <message>
+ <source>Ctrl+X</source>
+ <translation type="unfinished">Strg+X</translation>
</message>
<message>
<source>The transport layer version: %1</source>
@@ -2220,7 +2231,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>Whether we relay transactions to this peer.</source>
- <translation type="unfinished">Ob wir Adressen an diese Gegenstelle weiterleiten.</translation>
+ <translation type="unfinished">Ob wir Adressen an diesen Peer weiterleiten.</translation>
</message>
<message>
<source>Transaction Relay</source>
@@ -2244,7 +2255,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>The mapped Autonomous System used for diversifying peer selection.</source>
- <translation type="unfinished">Das zugeordnete autonome System zur Diversifizierung der Gegenstellen-Auswahl.</translation>
+ <translation type="unfinished">Das zugeordnete autonome System zur Diversifizierung der Peer-Auswahl.</translation>
</message>
<message>
<source>Mapped AS</source>
@@ -2253,7 +2264,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>Whether we relay addresses to this peer.</source>
<extracomment>Tooltip text for the Address Relay field in the peer details area, which displays whether we relay addresses to this peer (Yes/No).</extracomment>
- <translation type="unfinished">Ob wir Adressen an diese Gegenstelle weiterleiten.</translation>
+ <translation type="unfinished">Ob wir Adressen an diesen Peer weiterleiten.</translation>
</message>
<message>
<source>Address Relay</source>
@@ -2263,12 +2274,12 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>The total number of addresses received from this peer that were processed (excludes addresses that were dropped due to rate-limiting).</source>
<extracomment>Tooltip text for the Addresses Processed field in the peer details area, which displays the total number of addresses received from this peer that were processed (excludes addresses that were dropped due to rate-limiting).</extracomment>
- <translation type="unfinished">Die Gesamtzahl der von dieser Gegenstelle empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
+ <translation type="unfinished">Die Gesamtzahl der von diesem Peer empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
</message>
<message>
<source>The total number of addresses received from this peer that were dropped (not processed) due to rate-limiting.</source>
<extracomment>Tooltip text for the Addresses Rate-Limited field in the peer details area, which displays the total number of addresses received from this peer that were dropped (not processed) due to rate-limiting.</extracomment>
- <translation type="unfinished">Die Gesamtzahl der von dieser Gegenstelle empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
+ <translation type="unfinished">Die Gesamtzahl der von diesem Peer empfangenen Adressen, die aufgrund von Ratenbegrenzung verworfen (nicht verarbeitet) wurden.</translation>
</message>
<message>
<source>Addresses Processed</source>
@@ -2306,7 +2317,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>The direction and type of peer connection: %1</source>
- <translation type="unfinished">Die Richtung und der Typ der Gegenstellen-Verbindung: %1</translation>
+ <translation type="unfinished">Die Richtung und der Typ der Peer-Verbindung: %1</translation>
</message>
<message>
<source>Direction/Type</source>
@@ -2318,7 +2329,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>The network protocol this peer is connected through: IPv4, IPv6, Onion, I2P, or CJDNS.</source>
- <translation type="unfinished">Das Netzwerkprotokoll, über das diese Gegenstelle verbunden ist, ist: IPv4, IPv6, Onion, I2P oder CJDNS.</translation>
+ <translation type="unfinished">Das Netzwerkprotokoll, über das dieser Peer verbunden ist, ist: IPv4, IPv6, Onion, I2P oder CJDNS.</translation>
</message>
<message>
<source>Services</source>
@@ -2338,7 +2349,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>Elapsed time since a novel block passing initial validity checks was received from this peer.</source>
- <translation type="unfinished">Abgelaufene Zeit seitdem ein neuer Block mit erfolgreichen initialen Gültigkeitsprüfungen von dieser Gegenstelle empfangen wurde.</translation>
+ <translation type="unfinished">Verstrichene Zeit, seit ein neuer Block, der initiale Validierungsprüfungen bestanden hat, von diesem Peer empfangen wurde.</translation>
</message>
<message>
<source>Last Block</source>
@@ -2347,7 +2358,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>Elapsed time since a novel transaction accepted into our mempool was received from this peer.</source>
<extracomment>Tooltip text for the Last Transaction field in the peer details area.</extracomment>
- <translation type="unfinished">Abgelaufene Zeit seit eine neue Transaktion, die in unseren Speicherpool hineingelassen wurde, von dieser Gegenstelle empfangen wurde.</translation>
+ <translation type="unfinished">Verstrichene Zeit, seit eine neue Transaktion, die in unseren Mempool aufgenommen wurde, von diesem Peer empfangen wurde.</translation>
</message>
<message>
<source>Last Send</source>
@@ -2416,7 +2427,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>Inbound: initiated by peer</source>
<extracomment>Explanatory text for an inbound peer connection.</extracomment>
- <translation type="unfinished">Eingehend: wurde von Gegenstelle initiiert</translation>
+ <translation type="unfinished">Eingehend: wurde vom Peer initiiert</translation>
</message>
<message>
<source>Outbound Full Relay: default</source>
@@ -2446,7 +2457,7 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<message>
<source>detecting: peer could be v1 or v2</source>
<extracomment>Explanatory text for "detecting" transport type.</extracomment>
- <translation type="unfinished">Erkennen: Peer könnte v1 oder v2 sein</translation>
+ <translation type="unfinished">Erkenne: Peer könnte v1 oder v2 sein</translation>
</message>
<message>
<source>v1: unencrypted, plaintext transport protocol</source>
@@ -2460,11 +2471,11 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
</message>
<message>
<source>we selected the peer for high bandwidth relay</source>
- <translation type="unfinished">Wir haben die Gegenstelle zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
+ <translation type="unfinished">Wir haben den Peer zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
</message>
<message>
<source>the peer selected us for high bandwidth relay</source>
- <translation type="unfinished">Die Gegenstelle hat uns zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
+ <translation type="unfinished">Der Peer hat uns zum Weiterleiten mit hoher Bandbreite ausgewählt</translation>
</message>
<message>
<source>no high bandwidth relay selected</source>
@@ -2584,7 +2595,7 @@ Für weitere Informationen über diese Konsole, tippe %6.
</message>
<message>
<source>(peer: %1)</source>
- <translation type="unfinished">(Gegenstelle: %1)</translation>
+ <translation type="unfinished">(Peer: %1)</translation>
</message>
<message>
<source>via %1</source>
@@ -3996,7 +4007,7 @@ Gehen Sie zu Datei &gt; Wallet Öffnen, um eine Wallet zu laden.
</message>
<message>
<source>%s request to listen on port %u. This port is considered "bad" and thus it is unlikely that any peer will connect to it. See doc/p2p-bad-ports.md for details and a full list.</source>
- <translation type="unfinished">%s Aufforderung, auf Port %u zu lauschen. Dieser Port wird als "schlecht" eingeschätzt und es ist daher unwahrscheinlich, dass sich Bitcoin Core Gegenstellen mit ihm verbinden. Siehe doc/p2p-bad-ports.md für Details und eine vollständige Liste.</translation>
+ <translation type="unfinished">%s Aufforderung, auf Port %u zu lauschen. Dieser Port wird als "schlecht" eingeschätzt und es ist daher unwahrscheinlich, dass sich Peers mit ihm verbinden. Siehe doc/p2p-bad-ports.md für Details und eine vollständige Liste.</translation>
</message>
<message>
<source>Cannot downgrade wallet from version %i to version %i. Wallet version unchanged.</source>
@@ -4161,7 +4172,7 @@ Bitte nutzen Sie entweder "bdb" oder "sqlite".</translation>
</message>
<message>
<source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source>
- <translation type="unfinished">Warnung: Wir scheinen nicht vollständig mit unseren Gegenstellen übereinzustimmen! Sie oder die anderen Knoten müssen unter Umständen Ihre Client-Software aktualisieren.</translation>
+ <translation type="unfinished">Warnung: Wir scheinen nicht vollständig mit unseren Peers übereinzustimmen! Sie oder die anderen Knoten müssen unter Umständen Ihre Client-Software aktualisieren.</translation>
</message>
<message>
<source>Witness data for blocks after height %d requires validation. Please restart with -reindex.</source>
diff --git a/src/qt/locale/bitcoin_de_CH.ts b/src/qt/locale/bitcoin_de_CH.ts
index a9a15f08ff..ffbc761ac1 100644
--- a/src/qt/locale/bitcoin_de_CH.ts
+++ b/src/qt/locale/bitcoin_de_CH.ts
@@ -2227,6 +2227,14 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Anzahl der Verbindungen</translation>
</message>
<message>
+ <source>Local Addresses</source>
+ <translation type="unfinished">Lokale Adressen</translation>
+ </message>
+ <message>
+ <source>Network addresses that your Bitcoin node is currently using to communicate with other nodes.</source>
+ <translation type="unfinished">Netzwerk-Adressen, die dein Bitcoin-Node aktuell verwendet, um mit anderen Nodes zu kommunizieren.</translation>
+ </message>
+ <message>
<source>Block chain</source>
<translation type="unfinished">Blockchain</translation>
</message>
@@ -2271,6 +2279,14 @@ Wenn Sie diese Fehlermeldung erhalten, sollten Sie den Händler bitten, einen BI
<translation type="unfinished">Gegenstelle auswählen, um detaillierte Informationen zu erhalten.</translation>
</message>
<message>
+ <source>Hide Peers Detail</source>
+ <translation type="unfinished">Gegenstellen Reiter verstecken</translation>
+ </message>
+ <message>
+ <source>Ctrl+X</source>
+ <translation type="unfinished">Strg+X</translation>
+ </message>
+ <message>
<source>The transport layer version: %1</source>
<translation type="unfinished">Die Transportschicht-Version: %1</translation>
</message>
diff --git a/src/qt/locale/bitcoin_gl_ES.ts b/src/qt/locale/bitcoin_gl_ES.ts
index 77f9f3278c..47951fa6f2 100644
--- a/src/qt/locale/bitcoin_gl_ES.ts
+++ b/src/qt/locale/bitcoin_gl_ES.ts
@@ -2,10 +2,6 @@
<context>
<name>AddressBookPage</name>
<message>
- <source>Right-click to edit address or label</source>
- <translation type="unfinished">cd vcpkg/buildtrees/libvpx/srccd *./configuresed -i 's/CFLAGS+=-I/CFLAGS+=-fPIC -I/g' Makefilesed -i 's/CXXFLAGS+=-I/CXXFLAGS+=-fPIC -I/g' Makefilemakecp libvpx.a $HOME/vcpkg/installed/x64-linux/lib/cd</translation>
- </message>
- <message>
<source>Create a new address</source>
<translation type="unfinished">Crea un novo enderezo</translation>
</message>
@@ -94,6 +90,14 @@ Firmar é posible unicamente con enderezos de tipo 'legacy'.</translation>
<translation type="unfinished">Houbo un erro tentando gardar a lista de enderezos en %1. Por favor proba de novo.</translation>
</message>
<message>
+ <source>Sending addresses - %1</source>
+ <translation type="unfinished">Enviando enderezos - %1</translation>
+ </message>
+ <message>
+ <source>Receiving addresses - %1</source>
+ <translation type="unfinished">Recibindo enderezos - %1</translation>
+ </message>
+ <message>
<source>Exporting Failed</source>
<translation type="unfinished">Exportación Fallida</translation>
</message>
diff --git a/src/qt/locale/bitcoin_ru.ts b/src/qt/locale/bitcoin_ru.ts
index 204b66774e..7a54b45768 100644
--- a/src/qt/locale/bitcoin_ru.ts
+++ b/src/qt/locale/bitcoin_ru.ts
@@ -310,6 +310,10 @@ Signing is only possible with addresses of the type 'legacy'.</source>
<source>unknown</source>
<translation type="unfinished">неизвестно</translation>
</message>
+ <message>
+ <source>Default system font "%1"</source>
+ <translation type="unfinished">Системный шрифт по умолчанию "%1"</translation>
+ </message>
<message numerus="yes">
<source>%n second(s)</source>
<translation type="unfinished">
@@ -1526,6 +1530,10 @@ The migration process will create a backup of the wallet before migrating. This
<translation type="unfinished">Сворачивать вместо выхода из приложения при закрытии окна. Если данный параметр включён, приложение закроется только после нажатия "Выход" в меню.</translation>
</message>
<message>
+ <source>Font in the Overview tab: </source>
+ <translation type="unfinished">Шрифт на вкладке «Обзор»:</translation>
+ </message>
+ <message>
<source>Options set in this dialog are overridden by the command line:</source>
<translation type="unfinished">Параметры командной строки, которые переопределили параметры из этого окна:</translation>
</message>
@@ -1951,6 +1959,17 @@ The migration process will create a backup of the wallet before migrating. This
</message>
</context>
<context>
+ <name>SignVerifyMessageDialog</name>
+ <message>
+ <source>You can sign messages/agreements with your legacy (P2PKH) addresses to prove you can receive bitcoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
+ <translation type="unfinished">Вы можете подписывать сообщения/соглашения своими устаревшими (P2PKH) адресами, чтобы доказать, что вы можете получать биткоины на них. Будьте осторожны и не подписывайте непонятные или случайные сообщения, так как мошенники могут таким образом пытаться присвоить вашу личность. Подписывайте только такие сообщения, с которыми вы согласны вплоть до мелочей.</translation>
+ </message>
+ <message>
+ <source>The entered address does not refer to a legacy (P2PKH) key. Message signing for SegWit and other non-P2PKH address types is not supported in this version of %1. Please check the address and try again.</source>
+ <translation type="unfinished">Введенный адрес не относится к устаревшему (P2PKH) ключу. Подписывание сообщений для SegWit и других не--P2PKH типов адресов не поддерживается в этой версии %1. Пожалуйста, проверьте адрес и попробуйте ещё раз.</translation>
+ </message>
+ </context>
+<context>
<name>TransactionDesc</name>
<message numerus="yes">
<source>matures in %n more block(s)</source>
@@ -1961,6 +1980,10 @@ The migration process will create a backup of the wallet before migrating. This
</translation>
</message>
<message>
+ <source>%1 (Certificate was not verified)</source>
+ <translation type="unfinished">%1 (Сертификат не был подтверждён)</translation>
+ </message>
+ <message>
<source>Amount</source>
<translation type="unfinished">Сумма</translation>
</message>
@@ -2634,5 +2657,21 @@ Unable to restore backup of wallet.</source>
<source>Error: Unable to read all records in the database</source>
<translation type="unfinished">Ошибка: не удалось прочитать все записи из базе данных</translation>
</message>
+ <message>
+ <source>Failed to disconnect block.</source>
+ <translation type="unfinished">Не удалось отключить блок</translation>
+ </message>
+ <message>
+ <source>Failed to read block.</source>
+ <translation type="unfinished">Не удалось прочитать блок</translation>
+ </message>
+ <message>
+ <source>Failed to write block.</source>
+ <translation type="unfinished">Не удалось записать блок</translation>
+ </message>
+ <message>
+ <source>Wallet file creation failed: %s</source>
+ <translation type="unfinished">Не удалось создать кошелёк 1%s</translation>
+ </message>
</context>
</TS>
\ No newline at end of file
diff --git a/src/qt/locale/bitcoin_sw.ts b/src/qt/locale/bitcoin_sw.ts
index 199a0552b5..0497444d5e 100644
--- a/src/qt/locale/bitcoin_sw.ts
+++ b/src/qt/locale/bitcoin_sw.ts
@@ -325,7 +325,11 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<numerusform />
</translation>
</message>
- </context>
+ <message>
+ <source>default wallet</source>
+ <translation type="unfinished">mkoba chaguo-msingi</translation>
+ </message>
+</context>
<context>
<name>BitcoinGUI</name>
<message>
@@ -414,10 +418,19 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<translation type="unfinished">&amp;Chaguo...</translation>
</message>
<message>
+ <source>&amp;Encrypt Wallet…</source>
+ <translation type="unfinished">&amp;Simba Mkoba...</translation>
+ </message>
+ <message>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished">Funga funguo za siri zinazomiliki mkoba wako.</translation>
</message>
<message>
+ <source>&amp;Backup Wallet…</source>
+ <translation type="unfinished">&amp;Hifadhi Mkoba...
+</translation>
+ </message>
+ <message>
<source>&amp;Change Passphrase…</source>
<translation type="unfinished">&amp;Badilisha Nenosiri...</translation>
</message>
@@ -430,10 +443,18 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<translation type="unfinished">Saini ujumbe na anwani zako za Bitcoin ili kuthibitisha umiliki wao.</translation>
</message>
<message>
+ <source>&amp;Verify message…</source>
+ <translation type="unfinished">&amp;Thibitisha ujumbe...</translation>
+ </message>
+ <message>
<source>Verify messages to ensure they were signed with specified Bitcoin addresses</source>
<translation type="unfinished">Hakikisha ujumbe umethibitishwa kuwa ulisainiwa na anwani za Bitcoin zilizotajwa</translation>
</message>
<message>
+ <source>&amp;Load PSBT from file…</source>
+ <translation type="unfinished">&amp;Pakia PSBT kutoka faili...</translation>
+ </message>
+ <message>
<source>Open &amp;URI…</source>
<translation type="unfinished">Fungua &amp;URI ...</translation>
</message>
@@ -509,6 +530,14 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</translation>
</message>
<message>
+ <source>%1 behind</source>
+ <translation type="unfinished">%1 nyuma</translation>
+ </message>
+ <message>
+ <source>Catching up…</source>
+ <translation type="unfinished">Inakamata...</translation>
+ </message>
+ <message>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished">Shughuli baada ya hii bado hazitaonekana.</translation>
</message>
@@ -525,22 +554,135 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<translation type="unfinished">Habari</translation>
</message>
<message>
+ <source>Up to date</source>
+ <translation type="unfinished">Imesasishwa</translation>
+ </message>
+ <message>
+ <source>Load Partially Signed Bitcoin Transaction</source>
+ <translation type="unfinished">Pakia Muamala wa Bitcoin Uliosainiwa Kiasi</translation>
+ </message>
+ <message>
+ <source>Load PSBT from &amp;clipboard…</source>
+ <translation type="unfinished">Pakia PSBT kutoka &amp;clipboard...</translation>
+ </message>
+ <message>
+ <source>Load Partially Signed Bitcoin Transaction from clipboard</source>
+ <translation type="unfinished">Pakia Muamala wa Bitcoin Uliosainiwa Kiasi kutoka kwenye ubao wa kunakili</translation>
+ </message>
+ <message>
+ <source>Node window</source>
+ <translation type="unfinished">Dirisha la nodi</translation>
+ </message>
+ <message>
+ <source>Open node debugging and diagnostic console</source>
+ <translation type="unfinished">Fungua utatuzi wa nodi na koni ya uchunguzi</translation>
+ </message>
+ <message>
+ <source>&amp;Sending addresses</source>
+ <translation type="unfinished">&amp;Anwani za kutuma</translation>
+ </message>
+ <message>
+ <source>&amp;Receiving addresses</source>
+ <translation type="unfinished">&amp;Inapokea anwani</translation>
+ </message>
+ <message>
+ <source>Open a bitcoin: URI</source>
+ <translation type="unfinished">Fungua bitcoin: URI</translation>
+ </message>
+ <message>
<source>Open Wallet</source>
<translation type="unfinished">Fungua Pochi</translation>
</message>
<message>
+ <source>Open a wallet</source>
+ <translation type="unfinished">Fungua pochi</translation>
+ </message>
+ <message>
<source>Close wallet</source>
<translation type="unfinished">Funga pochi</translation>
</message>
<message>
+ <source>Restore Wallet…</source>
+ <extracomment>Name of the menu item that restores wallet from a backup file.</extracomment>
+ <translation type="unfinished">Rejesha Pochi...</translation>
+ </message>
+ <message>
+ <source>Restore a wallet from a backup file</source>
+ <extracomment>Status tip for Restore Wallet menu item</extracomment>
+ <translation type="unfinished">Rejesha mkoba kutoka kwa faili ya chelezo</translation>
+ </message>
+ <message>
+ <source>Close all wallets</source>
+ <translation type="unfinished">Funga pochi zote</translation>
+ </message>
+ <message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">Hamisha Pochi</translation>
+ </message>
+ <message>
+ <source>Migrate a wallet</source>
+ <translation type="unfinished">Hamisha mkoba</translation>
+ </message>
+ <message>
+ <source>Show the %1 help message to get a list with possible Bitcoin command-line options</source>
+ <translation type="unfinished">Onyesha %1 ujumbe wa usaidizi ili kupata orodha na chaguo zinazowezekana za mstari wa amri za Bitcoin</translation>
+ </message>
+ <message>
+ <source>&amp;Mask values</source>
+ <translation type="unfinished">&amp;Funga maadili</translation>
+ </message>
+ <message>
+ <source>Mask the values in the Overview tab</source>
+ <translation type="unfinished">Ficha maadili kwenye kichupo cha Muhtasari</translation>
+ </message>
+ <message>
+ <source>No wallets available</source>
+ <translation type="unfinished">Hakuna pochi zinazopatikana</translation>
+ </message>
+ <message>
+ <source>Wallet Data</source>
+ <extracomment>Name of the wallet data file format.</extracomment>
+ <translation type="unfinished">Data ya Pochi</translation>
+ </message>
+ <message>
+ <source>Load Wallet Backup</source>
+ <extracomment>The title for Restore Wallet File Windows</extracomment>
+ <translation type="unfinished">Pakia Hifadhi Nakala ya Wallet</translation>
+ </message>
+ <message>
+ <source>Restore Wallet</source>
+ <extracomment>Title of pop-up window shown when the user is attempting to restore a wallet.</extracomment>
+ <translation type="unfinished">Rejesha Pochi</translation>
+ </message>
+ <message>
<source>Wallet Name</source>
<extracomment>Label of the input field where the name of the wallet is entered.</extracomment>
<translation type="unfinished">Jina la Wallet</translation>
</message>
<message>
+ <source>&amp;Window</source>
+ <translation type="unfinished">&amp;Dirisha</translation>
+ </message>
+ <message>
+ <source>Zoom</source>
+ <translation type="unfinished">Kuza</translation>
+ </message>
+ <message>
+ <source>Main Window</source>
+ <translation type="unfinished">Dirisha Kuu</translation>
+ </message>
+ <message>
+ <source>%1 client</source>
+ <translation type="unfinished">%1 mteja</translation>
+ </message>
+ <message>
<source>&amp;Hide</source>
<translation type="unfinished">&amp;Ficha</translation>
</message>
+ <message>
+ <source>S&amp;how</source>
+ <translation type="unfinished">Jinsi &amp; jinsi</translation>
+ </message>
<message numerus="yes">
<source>%n active connection(s) to Bitcoin network.</source>
<extracomment>A substring of the tooltip.</extracomment>
@@ -550,15 +692,99 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</translation>
</message>
<message>
+ <source>Click for more actions.</source>
+ <extracomment>A substring of the tooltip. "More actions" are available via the context menu.</extracomment>
+ <translation type="unfinished">Bofya kwa vitendo zaidi.</translation>
+ </message>
+ <message>
+ <source>Show Peers tab</source>
+ <extracomment>A context menu item. The "Peers tab" is an element of the "Node window".</extracomment>
+ <translation type="unfinished">Onyesha kichupo cha Marika</translation>
+ </message>
+ <message>
+ <source>Disable network activity</source>
+ <extracomment>A context menu item.</extracomment>
+ <translation type="unfinished">Zima shughuli za mtandao</translation>
+ </message>
+ <message>
+ <source>Enable network activity</source>
+ <extracomment>A context menu item. The network activity was disabled previously.</extracomment>
+ <translation type="unfinished">Washa shughuli za mtandao</translation>
+ </message>
+ <message>
+ <source>Pre-syncing Headers (%1%)…</source>
+ <translation type="unfinished">Kusawazisha Vichwa vya awali (%1%)...</translation>
+ </message>
+ <message>
+ <source>Error creating wallet</source>
+ <translation type="unfinished">Hitilafu unapounda pochi</translation>
+ </message>
+ <message>
+ <source>Cannot create new wallet, the software was compiled without sqlite support (required for descriptor wallets)</source>
+ <translation type="unfinished">Haiwezi kuunda pochi mpya, programu iliundwa bila usaidizi wa sqlite (inahitajika kwa pochi za maelezo)</translation>
+ </message>
+ <message>
<source>Error: %1</source>
<translation type="unfinished">Kosa: %1</translation>
</message>
<message>
+ <source>Warning: %1</source>
+ <translation type="unfinished">Onyo: %1</translation>
+ </message>
+ <message>
+ <source>Date: %1
+</source>
+ <translation type="unfinished">Tarehe: %1</translation>
+ </message>
+ <message>
+ <source>Amount: %1
+</source>
+ <translation type="unfinished">Kiasi: %1
+</translation>
+ </message>
+ <message>
+ <source>Wallet: %1
+</source>
+ <translation type="unfinished">Pochi: %1
+</translation>
+ </message>
+ <message>
+ <source>Type: %1
+</source>
+ <translation type="unfinished">Aina: %1</translation>
+ </message>
+ <message>
<source>Label: %1
</source>
<translation type="unfinished">Chapa: %1
</translation>
</message>
+ <message>
+ <source>Address: %1
+</source>
+ <translation type="unfinished">Anwani: %1
+</translation>
+ </message>
+ <message>
+ <source>Sent transaction</source>
+ <translation type="unfinished">Umetuma muamala</translation>
+ </message>
+ <message>
+ <source>Incoming transaction</source>
+ <translation type="unfinished">Muamala unaoingia</translation>
+ </message>
+ <message>
+ <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source>
+ <translation type="unfinished">Uzalishaji wa ufunguo wa HD ni &lt;b&gt;kuwezeshwa &lt;/b&gt;</translation>
+ </message>
+ <message>
+ <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source>
+ <translation type="unfinished">Uzalishaji wa ufunguo wa HD ni &lt;b&gt;kutowezeshwa&lt;/b&gt;</translation>
+ </message>
+ <message>
+ <source>Private key &lt;b&gt;disabled&lt;/b&gt;</source>
+ <translation type="unfinished">Ufunguo wa kibinafsi &lt;b&gt; umezimwa &lt;/b&gt;</translation>
+ </message>
</context>
<context>
<name>CoinControlDialog</name>
@@ -592,6 +818,13 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</message>
</context>
<context>
+ <name>MigrateWalletActivity</name>
+ <message>
+ <source>Migrate Wallet</source>
+ <translation type="unfinished">Hamisha Pochi</translation>
+ </message>
+ </context>
+<context>
<name>OpenWalletActivity</name>
<message>
<source>Open Wallet</source>
@@ -602,6 +835,11 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<context>
<name>RestoreWalletActivity</name>
<message>
+ <source>Restore Wallet</source>
+ <extracomment>Title of progress window which is displayed when wallets are being restored.</extracomment>
+ <translation type="unfinished">Rejesha Pochi</translation>
+ </message>
+ <message>
<source>Restore wallet warning</source>
<extracomment>Title of message box which is displayed when the wallet is restored with some warning.</extracomment>
<translation type="unfinished">Rejesha onyo la pochi</translation>
@@ -617,6 +855,10 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<source>Closing the wallet for too long can result in having to resync the entire chain if pruning is enabled.</source>
<translation type="unfinished">Kufunga pochi kwa muda mrefu sana kunaweza kusababisha kusawazisha tena mnyororo mzima ikiwa upogoaji umewezeshwa.</translation>
</message>
+ <message>
+ <source>Close all wallets</source>
+ <translation type="unfinished">Funga pochi zote</translation>
+ </message>
</context>
<context>
<name>CreateWalletDialog</name>
@@ -730,6 +972,10 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<context>
<name>OptionsDialog</name>
<message>
+ <source>&amp;Window</source>
+ <translation type="unfinished">&amp;Dirisha</translation>
+ </message>
+ <message>
<source>Error</source>
<translation type="unfinished">Onyo</translation>
</message>
@@ -750,6 +996,13 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
</message>
</context>
<context>
+ <name>RPCConsole</name>
+ <message>
+ <source>Node window</source>
+ <translation type="unfinished">Dirisha la nodi</translation>
+ </message>
+ </context>
+<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>&amp;Label:</source>
@@ -925,6 +1178,11 @@ Kutia sahihi kunawezekana tu kwa anwani za aina ya 'urithi'.</translation>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished">Toa data katika kichupo cha sasa hadi kwenye faili</translation>
</message>
+ <message>
+ <source>Wallet Data</source>
+ <extracomment>Name of the wallet data file format.</extracomment>
+ <translation type="unfinished">Data ya Pochi</translation>
+ </message>
</context>
<context>
<name>bitcoin-core</name>
diff --git a/src/qt/locale/bitcoin_th.ts b/src/qt/locale/bitcoin_th.ts
index 3706a7bd98..464e39cbac 100644
--- a/src/qt/locale/bitcoin_th.ts
+++ b/src/qt/locale/bitcoin_th.ts
@@ -1,5 +1,12 @@
<TS version="2.1" language="th">
<context>
+ <name>AskPassphraseDialog</name>
+ <message>
+ <source>Back</source>
+ <translation type="unfinished">ย้อนกลับ</translation>
+ </message>
+ </context>
+<context>
<name>QObject</name>
<message>
<source>%1 didn't yet exit safely…</source>
diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp
index 7580f6b47a..522ecfd801 100644
--- a/src/qt/modaloverlay.cpp
+++ b/src/qt/modaloverlay.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/modaloverlay.h>
#include <qt/forms/ui_modaloverlay.h>
diff --git a/src/qt/notificator.cpp b/src/qt/notificator.cpp
index 85bdeee49a..af97bb2143 100644
--- a/src/qt/notificator.cpp
+++ b/src/qt/notificator.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/notificator.h>
diff --git a/src/qt/notificator.h b/src/qt/notificator.h
index 8808716aa4..932cfc4651 100644
--- a/src/qt/notificator.h
+++ b/src/qt/notificator.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_QT_NOTIFICATOR_H
#define BITCOIN_QT_NOTIFICATOR_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <QIcon>
#include <QObject>
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index 4db2d6016c..b70769ed24 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/optionsdialog.h>
#include <qt/forms/ui_optionsdialog.h>
@@ -95,8 +95,7 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet)
ui->verticalLayout->setStretchFactor(ui->tabWidget, 1);
/* Main elements init */
- ui->databaseCache->setMinimum(nMinDbCache);
- ui->databaseCache->setMaximum(nMaxDbCache);
+ ui->databaseCache->setRange(nMinDbCache, std::numeric_limits<int>::max());
ui->threadsScriptVerif->setMinimum(-GetNumCores());
ui->threadsScriptVerif->setMaximum(MAX_SCRIPTCHECK_THREADS);
ui->pruneWarning->setVisible(false);
@@ -109,9 +108,6 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet)
#ifndef USE_UPNP
ui->mapPortUpnp->setEnabled(false);
#endif
-#ifndef USE_NATPMP
- ui->mapPortNatpmp->setEnabled(false);
-#endif
ui->proxyIp->setEnabled(false);
ui->proxyPort->setEnabled(false);
@@ -170,8 +166,15 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet)
/** check if the locale name consists of 2 parts (language_country) */
if(langStr.contains("_"))
{
- /** display language strings as "native language - native country (locale name)", e.g. "Deutsch - Deutschland (de)" */
- ui->lang->addItem(locale.nativeLanguageName() + QString(" - ") + locale.nativeCountryName() + QString(" (") + langStr + QString(")"), QVariant(langStr));
+ /** display language strings as "native language - native country/territory (locale name)", e.g. "Deutsch - Deutschland (de)" */
+ ui->lang->addItem(locale.nativeLanguageName() + QString(" - ") +
+#if (QT_VERSION >= QT_VERSION_CHECK(6, 2, 0))
+ locale.nativeTerritoryName() +
+#else
+ locale.nativeCountryName() +
+#endif
+ QString(" (") + langStr + QString(")"), QVariant(langStr));
+
}
else
{
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index 0c21c6748d..5bca5c5320 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/optionsmodel.h>
@@ -320,10 +320,15 @@ static ProxySetting ParseProxyString(const QString& proxy)
if (proxy.isEmpty()) {
return default_val;
}
- // contains IP at index 0 and port at index 1
- QStringList ip_port = GUIUtil::SplitSkipEmptyParts(proxy, ":");
- if (ip_port.size() == 2) {
- return {true, ip_port.at(0), ip_port.at(1)};
+ uint16_t port{0};
+ std::string hostname;
+ if (SplitHostPort(proxy.toStdString(), port, hostname) && port != 0) {
+ // Valid and port within the valid range
+ // Check if the hostname contains a colon, indicating an IPv6 address
+ if (hostname.find(':') != std::string::npos) {
+ hostname = "[" + hostname + "]"; // Wrap IPv6 address in brackets
+ }
+ return {true, QString::fromStdString(hostname), QString::number(port)};
} else { // Invalid: return default
return default_val;
}
@@ -414,11 +419,7 @@ QVariant OptionsModel::getOption(OptionID option, const std::string& suffix) con
return false;
#endif // USE_UPNP
case MapPortNatpmp:
-#ifdef USE_NATPMP
return SettingToBool(setting(), DEFAULT_NATPMP);
-#else
- return false;
-#endif // USE_NATPMP
case MinimizeOnClose:
return fMinimizeOnClose;
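
The ParseProxyString change above replaces the naive single-colon split with SplitHostPort and re-wraps IPv6 literals in brackets before handing the host back to the GUI. For illustration, a minimal standalone C++ sketch of just that re-bracketing step (not part of this patch; FormatProxyHost is a hypothetical helper):

    #include <iostream>
    #include <string>

    // Mirrors the bracket-wrapping done in the patched ParseProxyString: an IPv6
    // literal contains ':', so wrap it in brackets to keep "host:port" unambiguous
    // when host and port are joined again.
    std::string FormatProxyHost(std::string host)
    {
        if (host.find(':') != std::string::npos) host = "[" + host + "]";
        return host;
    }

    int main()
    {
        std::cout << FormatProxyHost("127.0.0.1") << ":9050\n"; // 127.0.0.1:9050
        std::cout << FormatProxyHost("::1") << ":9050\n";       // [::1]:9050
    }
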
diff --git a/src/qt/qrimagewidget.cpp b/src/qt/qrimagewidget.cpp
index f6e712a047..e912dafa60 100644
--- a/src/qt/qrimagewidget.cpp
+++ b/src/qt/qrimagewidget.cpp
@@ -15,7 +15,7 @@
#include <QMouseEvent>
#include <QPainter>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#ifdef USE_QRCODE
#include <qrencode.h>
diff --git a/src/qt/receiverequestdialog.cpp b/src/qt/receiverequestdialog.cpp
index b4322ddc0f..a5ee6583e0 100644
--- a/src/qt/receiverequestdialog.cpp
+++ b/src/qt/receiverequestdialog.cpp
@@ -14,7 +14,7 @@
#include <QDialog>
#include <QString>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
ReceiveRequestDialog::ReceiveRequestDialog(QWidget* parent)
: QDialog(parent, GUIUtil::dialog_flags),
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index ae3f9aa686..018c22a4a8 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/rpcconsole.h>
#include <qt/forms/ui_debugwindow.h>
@@ -16,7 +16,9 @@
#include <qt/guiutil.h>
#include <qt/peertablesortproxy.h>
#include <qt/platformstyle.h>
+#ifdef ENABLE_WALLET
#include <qt/walletmodel.h>
+#endif // ENABLE_WALLET
#include <rpc/client.h>
#include <rpc/server.h>
#include <util/strencodings.h>
diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h
index 4747e611d0..894ecb1fdf 100644
--- a/src/qt/rpcconsole.h
+++ b/src/qt/rpcconsole.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_QT_RPCCONSOLE_H
#define BITCOIN_QT_RPCCONSOLE_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/clientmodel.h>
#include <qt/guiutil.h>
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 03173ec80e..4019ca5e9d 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/sendcoinsdialog.h>
#include <qt/forms/ui_sendcoinsdialog.h>
diff --git a/src/qt/signverifymessagedialog.cpp b/src/qt/signverifymessagedialog.cpp
index 012186ee4d..0b1d3c6c3a 100644
--- a/src/qt/signverifymessagedialog.cpp
+++ b/src/qt/signverifymessagedialog.cpp
@@ -11,7 +11,7 @@
#include <qt/walletmodel.h>
#include <common/signmessage.h> // For MessageSign(), MessageVerify()
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <key_io.h>
#include <wallet/wallet.h>
diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp
index ffd6689910..e194b5fd32 100644
--- a/src/qt/splashscreen.cpp
+++ b/src/qt/splashscreen.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/splashscreen.h>
diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp
index f7d66f316e..3d5cb4a863 100644
--- a/src/qt/test/addressbooktests.cpp
+++ b/src/qt/test/addressbooktests.cpp
@@ -222,8 +222,8 @@ void AddressBookTests::addressBookTests()
// framework when it tries to look up unimplemented cocoa functions,
// and fails to handle returned nulls
// (https://bugreports.qt.io/browse/QTBUG-49686).
- QWARN("Skipping AddressBookTests on mac build with 'minimal' platform set due to Qt bugs. To run AppTests, invoke "
- "with 'QT_QPA_PLATFORM=cocoa test_bitcoin-qt' on mac, or else use a linux or windows build.");
+ qWarning() << "Skipping AddressBookTests on mac build with 'minimal' platform set due to Qt bugs. To run AppTests, invoke "
+ "with 'QT_QPA_PLATFORM=cocoa test_bitcoin-qt' on mac, or else use a linux or windows build.";
return;
}
#endif
diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp
index 10abcb00eb..73e04b55c8 100644
--- a/src/qt/test/apptests.cpp
+++ b/src/qt/test/apptests.cpp
@@ -60,8 +60,8 @@ void AppTests::appTests()
// framework when it tries to look up unimplemented cocoa functions,
// and fails to handle returned nulls
// (https://bugreports.qt.io/browse/QTBUG-49686).
- QWARN("Skipping AppTests on mac build with 'minimal' platform set due to Qt bugs. To run AppTests, invoke "
- "with 'QT_QPA_PLATFORM=cocoa test_bitcoin-qt' on mac, or else use a linux or windows build.");
+ qWarning() << "Skipping AppTests on mac build with 'minimal' platform set due to Qt bugs. To run AppTests, invoke "
+ "with 'QT_QPA_PLATFORM=cocoa test_bitcoin-qt' on mac, or else use a linux or windows build.";
return;
}
#endif
diff --git a/src/qt/test/optiontests.cpp b/src/qt/test/optiontests.cpp
index 0f82f65f3e..4f3eb778c5 100644
--- a/src/qt/test/optiontests.cpp
+++ b/src/qt/test/optiontests.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <common/args.h>
#include <init.h>
diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp
index 72e8055425..0797d31a71 100644
--- a/src/qt/test/rpcnestedtests.cpp
+++ b/src/qt/test/rpcnestedtests.cpp
@@ -127,6 +127,11 @@ void RPCNestedTests::rpcNestedTests()
RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest( abc , cba )");
QVERIFY(result == "[\"abc\",\"cba\"]");
+// Handle deprecated macro, can be removed once minimum Qt is at least 6.3.0.
+#if (QT_VERSION >= QT_VERSION_CHECK(6, 3, 0))
+#undef QVERIFY_EXCEPTION_THROWN
+#define QVERIFY_EXCEPTION_THROWN(expression, exceptiontype) QVERIFY_THROWS_EXCEPTION(exceptiontype, expression)
+#endif
QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo() .\n"), std::runtime_error); //invalid syntax
QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo() getblockchaininfo()"), std::runtime_error); //invalid syntax
RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo("); //tolerate non closing brackets if we have no arguments
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index c150606cfb..172c06f4ea 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <interfaces/init.h>
#include <interfaces/node.h>
@@ -28,22 +28,6 @@
#include <functional>
-#if defined(QT_STATIC)
-#include <QtPlugin>
-#if defined(QT_QPA_PLATFORM_MINIMAL)
-Q_IMPORT_PLUGIN(QMinimalIntegrationPlugin);
-#endif
-#if defined(QT_QPA_PLATFORM_XCB)
-Q_IMPORT_PLUGIN(QXcbIntegrationPlugin);
-#elif defined(QT_QPA_PLATFORM_WINDOWS)
-Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin);
-#elif defined(QT_QPA_PLATFORM_COCOA)
-Q_IMPORT_PLUGIN(QCocoaIntegrationPlugin);
-#elif defined(QT_QPA_PLATFORM_ANDROID)
-Q_IMPORT_PLUGIN(QAndroidPlatformIntegrationPlugin)
-#endif
-#endif
-
const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS{};
@@ -74,7 +58,7 @@ int main(int argc, char* argv[])
gArgs.ForceSetArg("-natpmp", "0");
std::string error;
- if (!gArgs.ReadConfigFiles(error, true)) QWARN(error.c_str());
+ if (!gArgs.ReadConfigFiles(error, true)) qWarning() << error.c_str();
// Prefer the "minimal" platform for the test instead of the normal default
// platform ("xcb", "windows", or "cocoa") so tests can't unintentionally
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index 6a573d284c..98dfe12f08 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -475,8 +475,8 @@ void WalletTests::walletTests()
// framework when it tries to look up unimplemented cocoa functions,
// and fails to handle returned nulls
// (https://bugreports.qt.io/browse/QTBUG-49686).
- QWARN("Skipping WalletTests on mac build with 'minimal' platform set due to Qt bugs. To run AppTests, invoke "
- "with 'QT_QPA_PLATFORM=cocoa test_bitcoin-qt' on mac, or else use a linux or windows build.");
+ qWarning() << "Skipping WalletTests on mac build with 'minimal' platform set due to Qt bugs. To run AppTests, invoke "
+ "with 'QT_QPA_PLATFORM=cocoa test_bitcoin-qt' on mac, or else use a linux or windows build.";
return;
}
#endif
diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp
index f261c6409d..29b7f5c401 100644
--- a/src/qt/utilitydialog.cpp
+++ b/src/qt/utilitydialog.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <qt/utilitydialog.h>
diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp
index 512ea8a1dc..dd093e984a 100644
--- a/src/qt/walletcontroller.cpp
+++ b/src/qt/walletcontroller.cpp
@@ -79,6 +79,14 @@ std::map<std::string, std::pair<bool, std::string>> WalletController::listWallet
return wallets;
}
+void WalletController::removeWallet(WalletModel* wallet_model)
+{
+ // Once the wallet is successfully removed from the node, the model will emit the 'WalletModel::unload' signal.
+ // This signal is already connected and will complete the removal of the view from the GUI.
+ // Look at 'WalletController::getOrCreateWallet' for the signal connection.
+ wallet_model->wallet().remove();
+}
+
void WalletController::closeWallet(WalletModel* wallet_model, QWidget* parent)
{
QMessageBox box(parent);
@@ -89,10 +97,7 @@ void WalletController::closeWallet(WalletModel* wallet_model, QWidget* parent)
box.setDefaultButton(QMessageBox::Yes);
if (box.exec() != QMessageBox::Yes) return;
- // First remove wallet from node.
- wallet_model->wallet().remove();
- // Now release the model.
- removeAndDeleteWallet(wallet_model);
+ removeWallet(wallet_model);
}
void WalletController::closeAllWallets(QWidget* parent)
@@ -105,11 +110,8 @@ void WalletController::closeAllWallets(QWidget* parent)
QMutexLocker locker(&m_mutex);
for (WalletModel* wallet_model : m_wallets) {
- wallet_model->wallet().remove();
- Q_EMIT walletRemoved(wallet_model);
- delete wallet_model;
+ removeWallet(wallet_model);
}
- m_wallets.clear();
}
WalletModel* WalletController::getOrCreateWallet(std::unique_ptr<interfaces::Wallet> wallet)
diff --git a/src/qt/walletcontroller.h b/src/qt/walletcontroller.h
index 7902c3b235..4d2ba43539 100644
--- a/src/qt/walletcontroller.h
+++ b/src/qt/walletcontroller.h
@@ -85,6 +85,9 @@ private:
friend class WalletControllerActivity;
friend class MigrateWalletActivity;
+
+ //! Starts the wallet closure procedure
+ void removeWallet(WalletModel* wallet_model);
};
class WalletControllerActivity : public QObject
diff --git a/src/qt/winshutdownmonitor.cpp b/src/qt/winshutdownmonitor.cpp
index 0b5278c192..9ccd7028da 100644
--- a/src/qt/winshutdownmonitor.cpp
+++ b/src/qt/winshutdownmonitor.cpp
@@ -12,7 +12,11 @@
// If we don't want a message to be processed by Qt, return true and set result to
// the value that the window procedure should return. Otherwise return false.
+#if (QT_VERSION >= QT_VERSION_CHECK(6, 0, 0))
+bool WinShutdownMonitor::nativeEventFilter(const QByteArray &eventType, void *pMessage, qintptr *pnResult)
+#else
bool WinShutdownMonitor::nativeEventFilter(const QByteArray &eventType, void *pMessage, long *pnResult)
+#endif
{
Q_UNUSED(eventType);
diff --git a/src/qt/winshutdownmonitor.h b/src/qt/winshutdownmonitor.h
index 060d8546e3..a8b626065d 100644
--- a/src/qt/winshutdownmonitor.h
+++ b/src/qt/winshutdownmonitor.h
@@ -20,7 +20,11 @@ public:
WinShutdownMonitor(std::function<void()> shutdown_fn) : m_shutdown_fn{std::move(shutdown_fn)} {}
/** Implements QAbstractNativeEventFilter interface for processing Windows messages */
+#if (QT_VERSION >= QT_VERSION_CHECK(6, 0, 0))
+ bool nativeEventFilter(const QByteArray &eventType, void *pMessage, qintptr *pnResult) override;
+#else
bool nativeEventFilter(const QByteArray &eventType, void *pMessage, long *pnResult) override;
+#endif
/** Register the reason for blocking shutdown on Windows to allow clean client exit */
static void registerShutdownBlockReason(const QString& strReason, const HWND& mainWinId);
diff --git a/src/random.cpp b/src/random.cpp
index c89ee9f38b..163112585a 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <random.h>
diff --git a/src/randomenv.cpp b/src/randomenv.cpp
index 4754b597c5..dee48481c5 100644
--- a/src/randomenv.cpp
+++ b/src/randomenv.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <randomenv.h>
diff --git a/src/rest.cpp b/src/rest.cpp
index c42bc8e40c..ca26c699b5 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <rest.h>
@@ -309,8 +309,11 @@ static bool rest_block(const std::any& context,
if (!pblockindex) {
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
}
- if (chainman.m_blockman.IsBlockPruned(*pblockindex)) {
- return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
+ if (!(pblockindex->nStatus & BLOCK_HAVE_DATA)) {
+ if (chainman.m_blockman.IsBlockPruned(*pblockindex)) {
+ return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
+ }
+ return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (not fully downloaded)");
}
pos = pblockindex->GetBlockPos();
}
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 633ce9f2bc..360f24ec55 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -22,6 +22,7 @@
#include <hash.h>
#include <index/blockfilterindex.h>
#include <index/coinstatsindex.h>
+#include <interfaces/mining.h>
#include <kernel/coinstats.h>
#include <logging/timer.h>
#include <net.h>
@@ -56,24 +57,32 @@
#include <condition_variable>
#include <memory>
#include <mutex>
+#include <optional>
using kernel::CCoinsStats;
using kernel::CoinStatsHashType;
+using interfaces::Mining;
using node::BlockManager;
using node::NodeContext;
using node::SnapshotMetadata;
using util::MakeUnorderedList;
-struct CUpdatedBlock
-{
- uint256 hash;
- int height;
-};
+std::tuple<std::unique_ptr<CCoinsViewCursor>, CCoinsStats, const CBlockIndex*>
+PrepareUTXOSnapshot(
+ Chainstate& chainstate,
+ const std::function<void()>& interruption_point = {})
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
-static GlobalMutex cs_blockchange;
-static std::condition_variable cond_blockchange;
-static CUpdatedBlock latestblock GUARDED_BY(cs_blockchange);
+UniValue WriteUTXOSnapshot(
+ Chainstate& chainstate,
+ CCoinsViewCursor* pcursor,
+ CCoinsStats* maybe_stats,
+ const CBlockIndex* tip,
+ AutoFile& afile,
+ const fs::path& path,
+ const fs::path& temppath,
+ const std::function<void()>& interruption_point = {});
/* Calculate the difficulty for a given block index.
*/
@@ -184,8 +193,10 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn
case TxVerbosity::SHOW_DETAILS_AND_PREVOUT:
CBlockUndo blockUndo;
const bool is_not_pruned{WITH_LOCK(::cs_main, return !blockman.IsBlockPruned(blockindex))};
- const bool have_undo{is_not_pruned && blockman.UndoReadFromDisk(blockUndo, blockindex)};
-
+ bool have_undo{is_not_pruned && WITH_LOCK(::cs_main, return blockindex.nStatus & BLOCK_HAVE_UNDO)};
+ if (have_undo && !blockman.UndoReadFromDisk(blockUndo, blockindex)) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.");
+ }
for (size_t i = 0; i < block.vtx.size(); ++i) {
const CTransactionRef& tx = block.vtx.at(i);
// coinbase transaction (i.e. i == 0) doesn't have undo data
@@ -243,21 +254,12 @@ static RPCHelpMan getbestblockhash()
};
}
-void RPCNotifyBlockChange(const CBlockIndex* pindex)
-{
- if(pindex) {
- LOCK(cs_blockchange);
- latestblock.hash = pindex->GetBlockHash();
- latestblock.height = pindex->nHeight;
- }
- cond_blockchange.notify_all();
-}
-
static RPCHelpMan waitfornewblock()
{
return RPCHelpMan{"waitfornewblock",
- "\nWaits for a specific new block and returns useful info about it.\n"
- "\nReturns the current block on timeout or exit.\n",
+ "\nWaits for any new block and returns useful info about it.\n"
+ "\nReturns the current block on timeout or exit.\n"
+ "\nMake sure to use no RPC timeout (bitcoin-cli -rpcclienttimeout=0)",
{
{"timeout", RPCArg::Type::NUM, RPCArg::Default{0}, "Time in milliseconds to wait for a response. 0 indicates no timeout."},
},
@@ -276,17 +278,16 @@ static RPCHelpMan waitfornewblock()
int timeout = 0;
if (!request.params[0].isNull())
timeout = request.params[0].getInt<int>();
+ if (timeout < 0) throw JSONRPCError(RPC_MISC_ERROR, "Negative timeout");
- CUpdatedBlock block;
- {
- WAIT_LOCK(cs_blockchange, lock);
- block = latestblock;
- if(timeout)
- cond_blockchange.wait_for(lock, std::chrono::milliseconds(timeout), [&block]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); });
- else
- cond_blockchange.wait(lock, [&block]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); });
- block = latestblock;
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ Mining& miner = EnsureMining(node);
+
+ auto block{CHECK_NONFATAL(miner.getTip()).value()};
+ if (IsRPCRunning()) {
+ block = timeout ? miner.waitTipChanged(block.hash, std::chrono::milliseconds(timeout)) : miner.waitTipChanged(block.hash);
}
+
UniValue ret(UniValue::VOBJ);
ret.pushKV("hash", block.hash.GetHex());
ret.pushKV("height", block.height);
@@ -299,7 +300,8 @@ static RPCHelpMan waitforblock()
{
return RPCHelpMan{"waitforblock",
"\nWaits for a specific new block and returns useful info about it.\n"
- "\nReturns the current block on timeout or exit.\n",
+ "\nReturns the current block on timeout or exit.\n"
+ "\nMake sure to use no RPC timeout (bitcoin-cli -rpcclienttimeout=0)",
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "Block hash to wait for."},
{"timeout", RPCArg::Type::NUM, RPCArg::Default{0}, "Time in milliseconds to wait for a response. 0 indicates no timeout."},
@@ -322,15 +324,22 @@ static RPCHelpMan waitforblock()
if (!request.params[1].isNull())
timeout = request.params[1].getInt<int>();
+ if (timeout < 0) throw JSONRPCError(RPC_MISC_ERROR, "Negative timeout");
- CUpdatedBlock block;
- {
- WAIT_LOCK(cs_blockchange, lock);
- if(timeout)
- cond_blockchange.wait_for(lock, std::chrono::milliseconds(timeout), [&hash]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {return latestblock.hash == hash || !IsRPCRunning();});
- else
- cond_blockchange.wait(lock, [&hash]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {return latestblock.hash == hash || !IsRPCRunning(); });
- block = latestblock;
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ Mining& miner = EnsureMining(node);
+
+ auto block{CHECK_NONFATAL(miner.getTip()).value()};
+ const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout};
+ while (IsRPCRunning() && block.hash != hash) {
+ if (timeout) {
+ auto now{std::chrono::steady_clock::now()};
+ if (now >= deadline) break;
+ const MillisecondsDouble remaining{deadline - now};
+ block = miner.waitTipChanged(block.hash, remaining);
+ } else {
+ block = miner.waitTipChanged(block.hash);
+ }
}
UniValue ret(UniValue::VOBJ);
@@ -346,7 +355,8 @@ static RPCHelpMan waitforblockheight()
return RPCHelpMan{"waitforblockheight",
"\nWaits for (at least) block height and returns the height and hash\n"
"of the current tip.\n"
- "\nReturns the current block on timeout or exit.\n",
+ "\nReturns the current block on timeout or exit.\n"
+ "\nMake sure to use no RPC timeout (bitcoin-cli -rpcclienttimeout=0)",
{
{"height", RPCArg::Type::NUM, RPCArg::Optional::NO, "Block height to wait for."},
{"timeout", RPCArg::Type::NUM, RPCArg::Default{0}, "Time in milliseconds to wait for a response. 0 indicates no timeout."},
@@ -369,16 +379,25 @@ static RPCHelpMan waitforblockheight()
if (!request.params[1].isNull())
timeout = request.params[1].getInt<int>();
+ if (timeout < 0) throw JSONRPCError(RPC_MISC_ERROR, "Negative timeout");
- CUpdatedBlock block;
- {
- WAIT_LOCK(cs_blockchange, lock);
- if(timeout)
- cond_blockchange.wait_for(lock, std::chrono::milliseconds(timeout), [&height]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {return latestblock.height >= height || !IsRPCRunning();});
- else
- cond_blockchange.wait(lock, [&height]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {return latestblock.height >= height || !IsRPCRunning(); });
- block = latestblock;
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ Mining& miner = EnsureMining(node);
+
+ auto block{CHECK_NONFATAL(miner.getTip()).value()};
+ const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout};
+
+ while (IsRPCRunning() && block.height < height) {
+ if (timeout) {
+ auto now{std::chrono::steady_clock::now()};
+ if (now >= deadline) break;
+ const MillisecondsDouble remaining{deadline - now};
+ block = miner.waitTipChanged(block.hash, remaining);
+ } else {
+ block = miner.waitTipChanged(block.hash);
+ }
}
+
UniValue ret(UniValue::VOBJ);
ret.pushKV("hash", block.hash.GetHex());
ret.pushKV("height", block.height);
@@ -580,20 +599,32 @@ static RPCHelpMan getblockheader()
};
}
+void CheckBlockDataAvailability(BlockManager& blockman, const CBlockIndex& blockindex, bool check_for_undo)
+{
+ AssertLockHeld(cs_main);
+ uint32_t flag = check_for_undo ? BLOCK_HAVE_UNDO : BLOCK_HAVE_DATA;
+ if (!(blockindex.nStatus & flag)) {
+ if (blockman.IsBlockPruned(blockindex)) {
+ throw JSONRPCError(RPC_MISC_ERROR, strprintf("%s not available (pruned data)", check_for_undo ? "Undo data" : "Block"));
+ }
+ if (check_for_undo) {
+ throw JSONRPCError(RPC_MISC_ERROR, "Undo data not available");
+ }
+ throw JSONRPCError(RPC_MISC_ERROR, "Block not available (not fully downloaded)");
+ }
+}
+
static CBlock GetBlockChecked(BlockManager& blockman, const CBlockIndex& blockindex)
{
CBlock block;
{
LOCK(cs_main);
- if (blockman.IsBlockPruned(blockindex)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
- }
+ CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/false);
}
if (!blockman.ReadBlockFromDisk(block, blockindex)) {
- // Block not found on disk. This could be because we have the block
- // header in our index but not yet have the block or did not accept the
- // block. Or if the block was pruned right after we released the lock above.
+ // Block not found on disk. This shouldn't normally happen unless the block was
+ // pruned right after we released the lock above.
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk");
}
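
The new CheckBlockDataAvailability helper above keys its error off a status bitmask (BLOCK_HAVE_DATA vs. BLOCK_HAVE_UNDO) plus the pruning state, so callers get a message that distinguishes pruned data from data that was never fully downloaded. A self-contained sketch of that decision logic (illustration only; the flag values and exception type are stand-ins, not the real chain constants):

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>

    // Stand-ins for BLOCK_HAVE_DATA / BLOCK_HAVE_UNDO.
    enum : uint32_t { HAVE_DATA = 1u << 0, HAVE_UNDO = 1u << 1 };

    void CheckAvailability(uint32_t status, bool pruned, bool check_for_undo)
    {
        const uint32_t flag = check_for_undo ? HAVE_UNDO : HAVE_DATA;
        if (status & flag) return; // requested data is present on disk
        if (pruned) {
            throw std::runtime_error(check_for_undo ? "Undo data not available (pruned data)"
                                                    : "Block not available (pruned data)");
        }
        throw std::runtime_error(check_for_undo ? "Undo data not available"
                                                : "Block not available (not fully downloaded)");
    }

    int main()
    {
        try {
            CheckAvailability(HAVE_DATA, /*pruned=*/false, /*check_for_undo=*/true);
        } catch (const std::exception& e) {
            std::cout << e.what() << '\n'; // prints "Undo data not available"
        }
    }
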
@@ -606,16 +637,13 @@ static std::vector<uint8_t> GetRawBlockChecked(BlockManager& blockman, const CBl
FlatFilePos pos{};
{
LOCK(cs_main);
- if (blockman.IsBlockPruned(blockindex)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
- }
+ CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/false);
pos = blockindex.GetBlockPos();
}
if (!blockman.ReadRawBlockFromDisk(data, pos)) {
- // Block not found on disk. This could be because we have the block
- // header in our index but not yet have the block or did not accept the
- // block. Or if the block was pruned right after we released the lock above.
+ // Block not found on disk. This shouldn't normally happen unless the block was
+ // pruned right after we released the lock above.
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk");
}
@@ -631,9 +659,7 @@ static CBlockUndo GetUndoChecked(BlockManager& blockman, const CBlockIndex& bloc
{
LOCK(cs_main);
- if (blockman.IsBlockPruned(blockindex)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Undo data not available (pruned data)");
- }
+ CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/true);
}
if (!blockman.UndoReadFromDisk(blockUndo, blockindex)) {
@@ -740,14 +766,7 @@ static RPCHelpMan getblock()
{
uint256 hash(ParseHashV(request.params[0], "blockhash"));
- int verbosity = 1;
- if (!request.params[1].isNull()) {
- if (request.params[1].isBool()) {
- verbosity = request.params[1].get_bool() ? 1 : 0;
- } else {
- verbosity = request.params[1].getInt<int>();
- }
- }
+ int verbosity{ParseVerbosity(request.params[1], /*default_verbosity=*/1)};
const CBlockIndex* pblockindex;
const CBlockIndex* tip;
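
The removed inline block above accepted either a boolean or an integer for getblock's verbosity; the new ParseVerbosity call is assumed to centralize the same mapping (true/false become 1/0, numbers pass through, an omitted value falls back to the default). A standalone sketch of that mapping (illustration only; Param and ParseVerbosityValue are hypothetical names, not the real RPC types):

    #include <iostream>
    #include <optional>
    #include <variant>

    // Hypothetical stand-in for a JSON parameter that may be absent, boolean, or integer.
    using Param = std::optional<std::variant<bool, int>>;

    int ParseVerbosityValue(const Param& p, int default_verbosity)
    {
        if (!p) return default_verbosity;             // parameter omitted
        if (const bool* b = std::get_if<bool>(&*p)) { // true -> 1, false -> 0
            return *b ? 1 : 0;
        }
        return std::get<int>(*p);                     // numeric verbosity as-is
    }

    int main()
    {
        std::cout << ParseVerbosityValue(std::nullopt, 1) << '\n'; // 1
        std::cout << ParseVerbosityValue(Param{true}, 1) << '\n';  // 1
        std::cout << ParseVerbosityValue(Param{false}, 1) << '\n'; // 0
        std::cout << ParseVerbosityValue(Param{3}, 1) << '\n';     // 3
    }
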
@@ -1577,6 +1596,27 @@ static RPCHelpMan preciousblock()
};
}
+void InvalidateBlock(ChainstateManager& chainman, const uint256 block_hash) {
+ BlockValidationState state;
+ CBlockIndex* pblockindex;
+ {
+ LOCK(chainman.GetMutex());
+ pblockindex = chainman.m_blockman.LookupBlockIndex(block_hash);
+ if (!pblockindex) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
+ }
+ }
+ chainman.ActiveChainstate().InvalidateBlock(state, pblockindex);
+
+ if (state.IsValid()) {
+ chainman.ActiveChainstate().ActivateBestChain(state);
+ }
+
+ if (!state.IsValid()) {
+ throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
+ }
+}
+
static RPCHelpMan invalidateblock()
{
return RPCHelpMan{"invalidateblock",
@@ -1591,31 +1631,33 @@ static RPCHelpMan invalidateblock()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
+ ChainstateManager& chainman = EnsureAnyChainman(request.context);
uint256 hash(ParseHashV(request.params[0], "blockhash"));
- BlockValidationState state;
- ChainstateManager& chainman = EnsureAnyChainman(request.context);
- CBlockIndex* pblockindex;
+ InvalidateBlock(chainman, hash);
+
+ return UniValue::VNULL;
+},
+ };
+}
+
+void ReconsiderBlock(ChainstateManager& chainman, uint256 block_hash) {
{
- LOCK(cs_main);
- pblockindex = chainman.m_blockman.LookupBlockIndex(hash);
+ LOCK(chainman.GetMutex());
+ CBlockIndex* pblockindex = chainman.m_blockman.LookupBlockIndex(block_hash);
if (!pblockindex) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
}
- }
- chainman.ActiveChainstate().InvalidateBlock(state, pblockindex);
- if (state.IsValid()) {
- chainman.ActiveChainstate().ActivateBestChain(state);
+ chainman.ActiveChainstate().ResetBlockFailureFlags(pblockindex);
}
+ BlockValidationState state;
+ chainman.ActiveChainstate().ActivateBestChain(state);
+
if (!state.IsValid()) {
throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
}
-
- return UniValue::VNULL;
-},
- };
}
static RPCHelpMan reconsiderblock()
@@ -1636,22 +1678,7 @@ static RPCHelpMan reconsiderblock()
ChainstateManager& chainman = EnsureAnyChainman(request.context);
uint256 hash(ParseHashV(request.params[0], "blockhash"));
- {
- LOCK(cs_main);
- CBlockIndex* pblockindex = chainman.m_blockman.LookupBlockIndex(hash);
- if (!pblockindex) {
- throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
- }
-
- chainman.ActiveChainstate().ResetBlockFailureFlags(pblockindex);
- }
-
- BlockValidationState state;
- chainman.ActiveChainstate().ActivateBestChain(state);
-
- if (!state.IsValid()) {
- throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
- }
+ ReconsiderBlock(chainman, hash);
return UniValue::VNULL;
},
@@ -2641,6 +2668,42 @@ static RPCHelpMan getblockfilter()
}
/**
+ * RAII class that disables the network in its constructor and enables it in its
+ * destructor.
+ */
+class NetworkDisable
+{
+ CConnman& m_connman;
+public:
+ NetworkDisable(CConnman& connman) : m_connman(connman) {
+ m_connman.SetNetworkActive(false);
+ if (m_connman.GetNetworkActive()) {
+ throw JSONRPCError(RPC_MISC_ERROR, "Network activity could not be suspended.");
+ }
+ };
+ ~NetworkDisable() {
+ m_connman.SetNetworkActive(true);
+ };
+};
+
+/**
+ * RAII class that temporarily rolls back the local chain in its constructor
+ * and rolls it forward again in its destructor.
+ */
+class TemporaryRollback
+{
+ ChainstateManager& m_chainman;
+ const CBlockIndex& m_invalidate_index;
+public:
+ TemporaryRollback(ChainstateManager& chainman, const CBlockIndex& index) : m_chainman(chainman), m_invalidate_index(index) {
+ InvalidateBlock(m_chainman, m_invalidate_index.GetBlockHash());
+ };
+ ~TemporaryRollback() {
+ ReconsiderBlock(m_chainman, m_invalidate_index.GetBlockHash());
+ };
+};
+
+/**
* Serialize the UTXO set to a file for loading elsewhere.
*
* @see SnapshotMetadata
@@ -2649,9 +2712,20 @@ static RPCHelpMan dumptxoutset()
{
return RPCHelpMan{
"dumptxoutset",
- "Write the serialized UTXO set to a file.",
+ "Write the serialized UTXO set to a file. This can be used in loadtxoutset afterwards if this snapshot height is supported in the chainparams as well.\n\n"
+ "Unless the the \"latest\" type is requested, the node will roll back to the requested height and network activity will be suspended during this process. "
+ "Because of this it is discouraged to interact with the node in any other way during the execution of this call to avoid inconsistent results and race conditions, particularly RPCs that interact with blockstorage.\n\n"
+ "This call may take several minutes. Make sure to use no RPC timeout (bitcoin-cli -rpcclienttimeout=0)",
{
{"path", RPCArg::Type::STR, RPCArg::Optional::NO, "Path to the output file. If relative, will be prefixed by datadir."},
+ {"type", RPCArg::Type::STR, RPCArg::Default(""), "The type of snapshot to create. Can be \"latest\" to create a snapshot of the current UTXO set or \"rollback\" to temporarily roll back the state of the node to a historical block before creating the snapshot of a historical UTXO set. This parameter can be omitted if a separate \"rollback\" named parameter is specified indicating the height or hash of a specific historical block. If \"rollback\" is specified and separate \"rollback\" named parameter is not specified, this will roll back to the latest valid snapshot block that can currently be loaded with loadtxoutset."},
+ {"options", RPCArg::Type::OBJ_NAMED_PARAMS, RPCArg::Optional::OMITTED, "",
+ {
+ {"rollback", RPCArg::Type::NUM, RPCArg::Optional::OMITTED,
+ "Height or hash of the block to roll back to before creating the snapshot. Note: The further this number is from the tip, the longer this process will take. Consider setting a higher -rpcclienttimeout value in this case.",
+ RPCArgOptions{.skip_type_check = true, .type_str = {"", "string or numeric"}}},
+ },
+ },
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -2665,10 +2739,33 @@ static RPCHelpMan dumptxoutset()
}
},
RPCExamples{
- HelpExampleCli("dumptxoutset", "utxo.dat")
+ HelpExampleCli("-rpcclienttimeout=0 dumptxoutset", "utxo.dat latest") +
+ HelpExampleCli("-rpcclienttimeout=0 dumptxoutset", "utxo.dat rollback") +
+ HelpExampleCli("-rpcclienttimeout=0 -named dumptxoutset", R"(utxo.dat rollback=853456)")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ const CBlockIndex* tip{WITH_LOCK(::cs_main, return node.chainman->ActiveChain().Tip())};
+ const CBlockIndex* target_index{nullptr};
+ const std::string snapshot_type{self.Arg<std::string>("type")};
+ const UniValue options{request.params[2].isNull() ? UniValue::VOBJ : request.params[2]};
+ if (options.exists("rollback")) {
+ if (!snapshot_type.empty() && snapshot_type != "rollback") {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid snapshot type \"%s\" specified with rollback option", snapshot_type));
+ }
+ target_index = ParseHashOrHeight(options["rollback"], *node.chainman);
+ } else if (snapshot_type == "rollback") {
+ auto snapshot_heights = node.chainman->GetParams().GetAvailableSnapshotHeights();
+ CHECK_NONFATAL(snapshot_heights.size() > 0);
+ auto max_height = std::max_element(snapshot_heights.begin(), snapshot_heights.end());
+ target_index = ParseHashOrHeight(*max_height, *node.chainman);
+ } else if (snapshot_type == "latest") {
+ target_index = tip;
+ } else {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid snapshot type \"%s\" specified. Please specify \"rollback\" or \"latest\"", snapshot_type));
+ }
+
const ArgsManager& args{EnsureAnyArgsman(request.context)};
const fs::path path = fsbridge::AbsPathJoin(args.GetDataDirNet(), fs::u8path(request.params[0].get_str()));
// Write to a temporary path and then move into `path` on completion
@@ -2690,9 +2787,68 @@ static RPCHelpMan dumptxoutset()
"Couldn't open file " + temppath.utf8string() + " for writing.");
}
- NodeContext& node = EnsureAnyNodeContext(request.context);
- UniValue result = CreateUTXOSnapshot(
- node, node.chainman->ActiveChainstate(), afile, path, temppath);
+ CConnman& connman = EnsureConnman(node);
+ const CBlockIndex* invalidate_index{nullptr};
+ std::optional<NetworkDisable> disable_network;
+ std::optional<TemporaryRollback> temporary_rollback;
+
+ // If the user wants to dump the txoutset of the current tip, we don't have
+ // to roll back at all
+ if (target_index != tip) {
+ // If the node is running in pruned mode we ensure all necessary block
+ // data is available before starting to roll back.
+ if (node.chainman->m_blockman.IsPruneMode()) {
+ LOCK(node.chainman->GetMutex());
+ const CBlockIndex* current_tip{node.chainman->ActiveChain().Tip()};
+ const CBlockIndex* first_block{node.chainman->m_blockman.GetFirstBlock(*current_tip, /*status_mask=*/BLOCK_HAVE_MASK)};
+ if (first_block->nHeight > target_index->nHeight) {
+ throw JSONRPCError(RPC_MISC_ERROR, "Could not roll back to requested height since necessary block data is already pruned.");
+ }
+ }
+
+ // Suspend network activity for the duration of the process when we are
+ // rolling back the chain to get a utxo set from a past height. We do
+ // this so we don't punish peers that send us data that seems wrong in
+ // this temporary state. For example, a normal new block would be
+ // classified as a block connecting an invalid block.
+ // Skip this if the network is already disabled, because the guard would
+ // otherwise re-enable network activity at the end of the process, which
+ // may not be what the user wants.
+ if (connman.GetNetworkActive()) {
+ disable_network.emplace(connman);
+ }
+
+ invalidate_index = WITH_LOCK(::cs_main, return node.chainman->ActiveChain().Next(target_index));
+ temporary_rollback.emplace(*node.chainman, *invalidate_index);
+ }
+
+ Chainstate* chainstate;
+ std::unique_ptr<CCoinsViewCursor> cursor;
+ CCoinsStats stats;
+ {
+ // Lock the chainstate before calling PrepareUTXOSnapshot, to be able
+ // to get a UTXO database cursor while the chain is pointing at the
+ // target block. After that, release the lock while calling
+ // WriteUTXOSnapshot. The cursor will remain valid and be used by
+ // WriteUTXOSnapshot to write a consistent snapshot even if the
+ // chainstate changes.
+ LOCK(node.chainman->GetMutex());
+ chainstate = &node.chainman->ActiveChainstate();
+ // In case there is any issue with a block being read from disk we need
+ // to stop here, otherwise the dump could still be created for the wrong
+ // height.
+ // The new tip could also not be the target block if we have a stale
+ // sister block of invalidate_index. This block (or a descendant) would
+ // be activated as the new tip and we would not get to target_index.
+ if (target_index != chainstate->m_chain.Tip()) {
+ LogWarning("dumptxoutset failed to roll back to requested height, reverting to tip.\n");
+ throw JSONRPCError(RPC_MISC_ERROR, "Could not roll back to requested height.");
+ } else {
+ std::tie(cursor, stats, tip) = PrepareUTXOSnapshot(*chainstate, node.rpc_interruption_point);
+ }
+ }
+
+ UniValue result = WriteUTXOSnapshot(*chainstate, cursor.get(), &stats, tip, afile, path, temppath, node.rpc_interruption_point);
fs::rename(temppath, path);
result.pushKV("path", path.utf8string());
@@ -2701,12 +2857,10 @@ static RPCHelpMan dumptxoutset()
};
}
-UniValue CreateUTXOSnapshot(
- NodeContext& node,
+std::tuple<std::unique_ptr<CCoinsViewCursor>, CCoinsStats, const CBlockIndex*>
+PrepareUTXOSnapshot(
Chainstate& chainstate,
- AutoFile& afile,
- const fs::path& path,
- const fs::path& temppath)
+ const std::function<void()>& interruption_point)
{
std::unique_ptr<CCoinsViewCursor> pcursor;
std::optional<CCoinsStats> maybe_stats;
@@ -2716,7 +2870,7 @@ UniValue CreateUTXOSnapshot(
// We need to lock cs_main to ensure that the coinsdb isn't written to
// between (i) flushing coins cache to disk (coinsdb), (ii) getting stats
// based upon the coinsdb, and (iii) constructing a cursor to the
- // coinsdb for use below this block.
+ // coinsdb for use in WriteUTXOSnapshot.
//
// Cursors returned by leveldb iterate over snapshots, so the contents
// of the pcursor will not be affected by simultaneous writes during
@@ -2725,11 +2879,11 @@ UniValue CreateUTXOSnapshot(
// See discussion here:
// https://github.com/bitcoin/bitcoin/pull/15606#discussion_r274479369
//
- LOCK(::cs_main);
+ AssertLockHeld(::cs_main);
chainstate.ForceFlushStateToDisk();
- maybe_stats = GetUTXOStats(&chainstate.CoinsDB(), chainstate.m_blockman, CoinStatsHashType::HASH_SERIALIZED, node.rpc_interruption_point);
+ maybe_stats = GetUTXOStats(&chainstate.CoinsDB(), chainstate.m_blockman, CoinStatsHashType::HASH_SERIALIZED, interruption_point);
if (!maybe_stats) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set");
}
@@ -2738,6 +2892,19 @@ UniValue CreateUTXOSnapshot(
tip = CHECK_NONFATAL(chainstate.m_blockman.LookupBlockIndex(maybe_stats->hashBlock));
}
+ return {std::move(pcursor), *CHECK_NONFATAL(maybe_stats), tip};
+}
+
+UniValue WriteUTXOSnapshot(
+ Chainstate& chainstate,
+ CCoinsViewCursor* pcursor,
+ CCoinsStats* maybe_stats,
+ const CBlockIndex* tip,
+ AutoFile& afile,
+ const fs::path& path,
+ const fs::path& temppath,
+ const std::function<void()>& interruption_point)
+{
LOG_TIME_SECONDS(strprintf("writing UTXO snapshot at height %s (%s) to file %s (via %s)",
tip->nHeight, tip->GetBlockHash().ToString(),
fs::PathToString(path), fs::PathToString(temppath)));
@@ -2773,7 +2940,7 @@ UniValue CreateUTXOSnapshot(
pcursor->GetKey(key);
last_hash = key.hash;
while (pcursor->Valid()) {
- if (iter % 5000 == 0) node.rpc_interruption_point();
+ if (iter % 5000 == 0) interruption_point();
++iter;
if (pcursor->GetKey(key) && pcursor->GetValue(coin)) {
if (key.hash != last_hash) {
@@ -2804,6 +2971,17 @@ UniValue CreateUTXOSnapshot(
return result;
}
+UniValue CreateUTXOSnapshot(
+ node::NodeContext& node,
+ Chainstate& chainstate,
+ AutoFile& afile,
+ const fs::path& path,
+ const fs::path& tmppath)
+{
+ auto [cursor, stats, tip]{WITH_LOCK(::cs_main, return PrepareUTXOSnapshot(chainstate, node.rpc_interruption_point))};
+ return WriteUTXOSnapshot(chainstate, cursor.get(), &stats, tip, afile, path, tmppath, node.rpc_interruption_point);
+}
+
static RPCHelpMan loadtxoutset()
{
return RPCHelpMan{
@@ -2838,7 +3016,7 @@ static RPCHelpMan loadtxoutset()
}
},
RPCExamples{
- HelpExampleCli("loadtxoutset", "utxo.dat")
+ HelpExampleCli("-rpcclienttimeout=0 loadtxoutset", "utxo.dat")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
@@ -2866,6 +3044,13 @@ static RPCHelpMan loadtxoutset()
throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("Unable to load UTXO snapshot: %s. (%s)", util::ErrorString(activation_result).original, path.utf8string()));
}
+ // Because we can't provide historical blocks during tip or background sync,
+ // update local services to reflect we are a limited peer until we are fully synced.
+ node.connman->RemoveLocalServices(NODE_NETWORK);
+ // Setting the limited state is usually redundant because the node can always
+ // provide the last 288 blocks, but it doesn't hurt to set it.
+ node.connman->AddLocalServices(NODE_NETWORK_LIMITED);
+
CBlockIndex& snapshot_index{*CHECK_NONFATAL(*activation_result)};
UniValue result(UniValue::VOBJ);
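
The dumptxoutset rework above splits snapshot creation into PrepareUTXOSnapshot, which flushes the coins cache and opens a database cursor while cs_main is held, and WriteUTXOSnapshot, which streams the UTXO set to disk after the lock is released (the leveldb cursor keeps iterating over a stable snapshot). A minimal, self-contained sketch of that prepare-under-lock / write-outside-lock pattern, with a std::mutex and a copied vector standing in for cs_main and the cursor:

    // Sketch only: a std::mutex and a copied vector stand in for cs_main and
    // the leveldb snapshot cursor; the real code hands a cursor from
    // PrepareUTXOSnapshot to WriteUTXOSnapshot instead of copying the set.
    #include <fstream>
    #include <mutex>
    #include <vector>

    struct Coin { unsigned height; long long value; };

    std::mutex g_state_mutex;      // stand-in for cs_main
    std::vector<Coin> g_utxo_set;  // stand-in for the coins database

    // Phase 1: hold the lock only long enough to capture a consistent view.
    std::vector<Coin> PrepareSnapshot()
    {
        std::lock_guard<std::mutex> lock(g_state_mutex);
        return g_utxo_set;
    }

    // Phase 2: slow file I/O runs without blocking writers to the live set.
    void WriteSnapshot(const std::vector<Coin>& snapshot, const char* path)
    {
        std::ofstream out(path, std::ios::binary);
        for (const Coin& c : snapshot) {
            out.write(reinterpret_cast<const char*>(&c), sizeof(c));
        }
    }

    int main()
    {
        const auto snapshot = PrepareSnapshot();  // brief critical section
        WriteSnapshot(snapshot, "utxo.dat");      // long-running, lock-free
    }
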
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index f6a7fe236c..89b9921d55 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -35,9 +35,6 @@ static constexpr int NUM_GETBLOCKSTATS_PERCENTILES = 5;
*/
double GetDifficulty(const CBlockIndex& blockindex);
-/** Callback for when block tip changed. */
-void RPCNotifyBlockChange(const CBlockIndex*);
-
/** Block description to JSON */
UniValue blockToJSON(node::BlockManager& blockman, const CBlock& block, const CBlockIndex& tip, const CBlockIndex& blockindex, TxVerbosity verbosity) LOCKS_EXCLUDED(cs_main);
@@ -48,7 +45,7 @@ UniValue blockheaderToJSON(const CBlockIndex& tip, const CBlockIndex& blockindex
void CalculatePercentilesByWeight(CAmount result[NUM_GETBLOCKSTATS_PERCENTILES], std::vector<std::pair<CAmount, int64_t>>& scores, int64_t total_weight);
/**
- * Helper to create UTXO snapshots given a chainstate and a file handle.
+ * Test-only helper to create UTXO snapshots given a chainstate and a file handle.
* @return a UniValue map containing metadata about the snapshot.
*/
UniValue CreateUTXOSnapshot(
@@ -60,5 +57,6 @@ UniValue CreateUTXOSnapshot(
//! Return height of highest block that has been pruned, or std::nullopt if no blocks have been pruned
std::optional<int> GetPruneHeight(const node::BlockManager& blockman, const CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+void CheckBlockDataAvailability(node::BlockManager& blockman, const CBlockIndex& blockindex, bool check_for_undo) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
#endif // BITCOIN_RPC_BLOCKCHAIN_H
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index b866fa484b..601e4fa7bf 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -187,6 +187,8 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "gettxoutproof", 0, "txids" },
{ "gettxoutsetinfo", 1, "hash_or_height" },
{ "gettxoutsetinfo", 2, "use_index"},
+ { "dumptxoutset", 2, "options" },
+ { "dumptxoutset", 2, "rollback" },
{ "lockunspent", 0, "unlock" },
{ "lockunspent", 1, "transactions" },
{ "lockunspent", 2, "persistent" },
@@ -252,6 +254,8 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "keypoolrefill", 0, "newsize" },
{ "getrawmempool", 0, "verbose" },
{ "getrawmempool", 1, "mempool_sequence" },
+ { "getorphantxs", 0, "verbosity" },
+ { "getorphantxs", 0, "verbose" },
{ "estimatesmartfee", 0, "conf_target" },
{ "estimaterawfee", 0, "conf_target" },
{ "estimaterawfee", 1, "threshold" },
diff --git a/src/rpc/external_signer.cpp b/src/rpc/external_signer.cpp
index 3ad7a940e0..44de5443fa 100644
--- a/src/rpc/external_signer.cpp
+++ b/src/rpc/external_signer.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <common/args.h>
#include <common/system.h>
diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp
index d61898260b..27a00c5d91 100644
--- a/src/rpc/mempool.cpp
+++ b/src/rpc/mempool.cpp
@@ -8,8 +8,10 @@
#include <node/mempool_persist.h>
#include <chainparams.h>
+#include <consensus/validation.h>
#include <core_io.h>
#include <kernel/mempool_entry.h>
+#include <net_processing.h>
#include <node/mempool_persist_args.h>
#include <node/types.h>
#include <policy/rbf.h>
@@ -24,6 +26,7 @@
#include <util/moneystr.h>
#include <util/strencodings.h>
#include <util/time.h>
+#include <util/vector.h>
#include <utility>
@@ -812,6 +815,104 @@ static RPCHelpMan savemempool()
};
}
+static std::vector<RPCResult> OrphanDescription()
+{
+ return {
+ RPCResult{RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"},
+ RPCResult{RPCResult::Type::STR_HEX, "wtxid", "The transaction witness hash in hex"},
+ RPCResult{RPCResult::Type::NUM, "bytes", "The serialized transaction size in bytes"},
+ RPCResult{RPCResult::Type::NUM, "vsize", "The virtual transaction size as defined in BIP 141. This is different from actual serialized size for witness transactions as witness data is discounted."},
+ RPCResult{RPCResult::Type::NUM, "weight", "The transaction weight as defined in BIP 141."},
+ RPCResult{RPCResult::Type::NUM_TIME, "expiration", "The orphan expiration time expressed in " + UNIX_EPOCH_TIME},
+ RPCResult{RPCResult::Type::ARR, "from", "",
+ {
+ RPCResult{RPCResult::Type::NUM, "peer_id", "Peer ID"},
+ }},
+ };
+}
+
+static UniValue OrphanToJSON(const TxOrphanage::OrphanTxBase& orphan)
+{
+ UniValue o(UniValue::VOBJ);
+ o.pushKV("txid", orphan.tx->GetHash().ToString());
+ o.pushKV("wtxid", orphan.tx->GetWitnessHash().ToString());
+ o.pushKV("bytes", orphan.tx->GetTotalSize());
+ o.pushKV("vsize", GetVirtualTransactionSize(*orphan.tx));
+ o.pushKV("weight", GetTransactionWeight(*orphan.tx));
+ o.pushKV("expiration", int64_t{TicksSinceEpoch<std::chrono::seconds>(orphan.nTimeExpire)});
+ UniValue from(UniValue::VARR);
+ from.push_back(orphan.fromPeer); // only one fromPeer for now
+ o.pushKV("from", from);
+ return o;
+}
+
+static RPCHelpMan getorphantxs()
+{
+ return RPCHelpMan{"getorphantxs",
+ "\nShows transactions in the tx orphanage.\n"
+ "\nEXPERIMENTAL warning: this call may be changed in future releases.\n",
+ {
+ {"verbosity|verbose", RPCArg::Type::NUM, RPCArg::Default{0}, "0 for an array of txids (may contain duplicates), 1 for an array of objects with tx details, and 2 for details from (1) and tx hex",
+ RPCArgOptions{.skip_type_check = true}},
+ },
+ {
+ RPCResult{"for verbose = 0",
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"},
+ }},
+ RPCResult{"for verbose = 1",
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "", OrphanDescription()},
+ }},
+ RPCResult{"for verbose = 2",
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ Cat<std::vector<RPCResult>>(
+ OrphanDescription(),
+ {{RPCResult::Type::STR_HEX, "hex", "The serialized, hex-encoded transaction data"}}
+ )
+ },
+ }},
+ },
+ RPCExamples{
+ HelpExampleCli("getorphantxs", "2")
+ + HelpExampleRpc("getorphantxs", "2")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+ {
+ const NodeContext& node = EnsureAnyNodeContext(request.context);
+ PeerManager& peerman = EnsurePeerman(node);
+ std::vector<TxOrphanage::OrphanTxBase> orphanage = peerman.GetOrphanTransactions();
+
+ int verbosity{ParseVerbosity(request.params[0], /*default_verbosity=*/0)};
+
+ UniValue ret(UniValue::VARR);
+
+ if (verbosity <= 0) {
+ for (auto const& orphan : orphanage) {
+ ret.push_back(orphan.tx->GetHash().ToString());
+ }
+ } else if (verbosity == 1) {
+ for (auto const& orphan : orphanage) {
+ ret.push_back(OrphanToJSON(orphan));
+ }
+ } else {
+ // >= 2
+ for (auto const& orphan : orphanage) {
+ UniValue o{OrphanToJSON(orphan)};
+ o.pushKV("hex", EncodeHexTx(*orphan.tx));
+ ret.push_back(o);
+ }
+ }
+
+ return ret;
+ },
+ };
+}
+
static RPCHelpMan submitpackage()
{
return RPCHelpMan{"submitpackage",
@@ -1027,6 +1128,7 @@ void RegisterMempoolRPCCommands(CRPCTable& t)
{"blockchain", &getrawmempool},
{"blockchain", &importmempool},
{"blockchain", &savemempool},
+ {"hidden", &getorphantxs},
{"rawtransactions", &submitpackage},
};
for (const auto& c : commands) {
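
The getorphantxs handler above keys its output entirely off one verbosity integer: 0 returns bare txids, 1 returns detail objects, and 2 adds the raw transaction hex. A short stand-alone sketch of that dispatch shape (the Orphan struct and printing are simplified stand-ins, not the real TxOrphanage::OrphanTxBase or UniValue output):

    #include <iostream>
    #include <string>
    #include <vector>

    struct Orphan { std::string txid; std::string hex; };  // stand-in type

    void PrintOrphans(const std::vector<Orphan>& orphans, int verbosity)
    {
        for (const Orphan& o : orphans) {
            if (verbosity <= 0) {
                std::cout << o.txid << '\n';                // txids only
            } else if (verbosity == 1) {
                std::cout << "{txid: " << o.txid << "}\n";  // detail object
            } else {                                        // >= 2: details plus hex
                std::cout << "{txid: " << o.txid << ", hex: " << o.hex << "}\n";
            }
        }
    }

    int main()
    {
        PrintOrphans({{"deadbeef", "0200..."}}, /*verbosity=*/2);
    }
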
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index bfa7dad4a1..44605cbc89 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chain.h>
#include <chainparams.h>
@@ -45,9 +45,9 @@
#include <memory>
#include <stdint.h>
-using node::BlockAssembler;
-using node::CBlockTemplate;
+using interfaces::BlockTemplate;
using interfaces::Mining;
+using node::BlockAssembler;
using node::NodeContext;
using node::RegenerateCommitments;
using node::UpdateTime;
@@ -130,7 +130,7 @@ static RPCHelpMan getnetworkhashps()
};
}
-static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
+static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock&& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
{
block_out.reset();
block.hashMerkleRoot = BlockMerkleRoot(block);
@@ -146,7 +146,7 @@ static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock& bl
return true;
}
- block_out = std::make_shared<const CBlock>(block);
+ block_out = std::make_shared<const CBlock>(std::move(block));
if (!process_new_block) return true;
@@ -161,12 +161,11 @@ static UniValue generateBlocks(ChainstateManager& chainman, Mining& miner, const
{
UniValue blockHashes(UniValue::VARR);
while (nGenerate > 0 && !chainman.m_interrupt) {
- std::unique_ptr<CBlockTemplate> pblocktemplate(miner.createNewBlock(coinbase_script));
- if (!pblocktemplate.get())
- throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
+ std::unique_ptr<BlockTemplate> block_template(miner.createNewBlock(coinbase_script));
+ CHECK_NONFATAL(block_template);
std::shared_ptr<const CBlock> block_out;
- if (!GenerateBlock(chainman, miner, pblocktemplate->block, nMaxTries, block_out, /*process_new_block=*/true)) {
+ if (!GenerateBlock(chainman, miner, block_template->getBlock(), nMaxTries, block_out, /*process_new_block=*/true)) {
break;
}
@@ -371,11 +370,10 @@ static RPCHelpMan generateblock()
ChainstateManager& chainman = EnsureChainman(node);
{
- std::unique_ptr<CBlockTemplate> blocktemplate{miner.createNewBlock(coinbase_script, {.use_mempool = false})};
- if (!blocktemplate) {
- throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
- }
- block = blocktemplate->block;
+ std::unique_ptr<BlockTemplate> block_template{miner.createNewBlock(coinbase_script, {.use_mempool = false})};
+ CHECK_NONFATAL(block_template);
+
+ block = block_template->getBlock();
}
CHECK_NONFATAL(block.vtx.size() == 1);
@@ -394,7 +392,7 @@ static RPCHelpMan generateblock()
std::shared_ptr<const CBlock> block_out;
uint64_t max_tries{DEFAULT_MAX_TRIES};
- if (!GenerateBlock(chainman, miner, block, max_tries, block_out, process_new_block) || !block_out) {
+ if (!GenerateBlock(chainman, miner, std::move(block), max_tries, block_out, process_new_block) || !block_out) {
throw JSONRPCError(RPC_MISC_ERROR, "Failed to make block.");
}
@@ -663,7 +661,7 @@ static RPCHelpMan getblocktemplate()
ChainstateManager& chainman = EnsureChainman(node);
Mining& miner = EnsureMining(node);
LOCK(cs_main);
- uint256 tip{CHECK_NONFATAL(miner.getTipHash()).value()};
+ uint256 tip{CHECK_NONFATAL(miner.getTip()).value().hash};
std::string strMode = "template";
UniValue lpval = NullUniValue;
@@ -740,7 +738,6 @@ static RPCHelpMan getblocktemplate()
{
// Wait to respond until either the best block changes, OR a minute has passed and there are more transactions
uint256 hashWatchedChain;
- std::chrono::steady_clock::time_point checktxtime;
unsigned int nTransactionsUpdatedLastLP;
if (lpval.isStr())
@@ -761,24 +758,19 @@ static RPCHelpMan getblocktemplate()
// Release lock while waiting
LEAVE_CRITICAL_SECTION(cs_main);
{
- checktxtime = std::chrono::steady_clock::now() + std::chrono::minutes(1);
-
- WAIT_LOCK(g_best_block_mutex, lock);
- while (g_best_block == hashWatchedChain && IsRPCRunning())
- {
- if (g_best_block_cv.wait_until(lock, checktxtime) == std::cv_status::timeout)
- {
- // Timeout: Check transactions for update
- // without holding the mempool lock to avoid deadlocks
- if (miner.getTransactionsUpdated() != nTransactionsUpdatedLastLP)
- break;
- checktxtime += std::chrono::seconds(10);
- }
+ MillisecondsDouble checktxtime{std::chrono::minutes(1)};
+ while (tip == hashWatchedChain && IsRPCRunning()) {
+ tip = miner.waitTipChanged(hashWatchedChain, checktxtime).hash;
+ // Timeout: Check transactions for update
+ // without holding the mempool lock to avoid deadlocks
+ if (miner.getTransactionsUpdated() != nTransactionsUpdatedLastLP)
+ break;
+ checktxtime = std::chrono::seconds(10);
}
}
ENTER_CRITICAL_SECTION(cs_main);
- tip = CHECK_NONFATAL(miner.getTipHash()).value();
+ tip = CHECK_NONFATAL(miner.getTip()).value().hash;
if (!IsRPCRunning())
throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down");
@@ -800,7 +792,7 @@ static RPCHelpMan getblocktemplate()
// Update block
static CBlockIndex* pindexPrev;
static int64_t time_start;
- static std::unique_ptr<CBlockTemplate> pblocktemplate;
+ static std::unique_ptr<BlockTemplate> block_template;
if (!pindexPrev || pindexPrev->GetBlockHash() != tip ||
(miner.getTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - time_start > 5))
{
@@ -814,20 +806,19 @@ static RPCHelpMan getblocktemplate()
// Create new block
CScript scriptDummy = CScript() << OP_TRUE;
- pblocktemplate = miner.createNewBlock(scriptDummy);
- if (!pblocktemplate) {
- throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
- }
+ block_template = miner.createNewBlock(scriptDummy);
+ CHECK_NONFATAL(block_template);
+
// Need to update only after we know createNewBlock succeeded
pindexPrev = pindexPrevNew;
}
CHECK_NONFATAL(pindexPrev);
- CBlock* pblock = &pblocktemplate->block; // pointer for convenience
+ CBlock block{block_template->getBlock()};
// Update nTime
- UpdateTime(pblock, consensusParams, pindexPrev);
- pblock->nNonce = 0;
+ UpdateTime(&block, consensusParams, pindexPrev);
+ block.nNonce = 0;
// NOTE: If at some point we support pre-segwit miners post-segwit-activation, this needs to take segwit support into consideration
const bool fPreSegWit = !DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_SEGWIT);
@@ -836,8 +827,11 @@ static RPCHelpMan getblocktemplate()
UniValue transactions(UniValue::VARR);
std::map<uint256, int64_t> setTxIndex;
+ std::vector<CAmount> tx_fees{block_template->getTxFees()};
+ std::vector<CAmount> tx_sigops{block_template->getTxSigops()};
+
int i = 0;
- for (const auto& it : pblock->vtx) {
+ for (const auto& it : block.vtx) {
const CTransaction& tx = *it;
uint256 txHash = tx.GetHash();
setTxIndex[txHash] = i++;
@@ -860,8 +854,8 @@ static RPCHelpMan getblocktemplate()
entry.pushKV("depends", std::move(deps));
int index_in_template = i - 1;
- entry.pushKV("fee", pblocktemplate->vTxFees[index_in_template]);
- int64_t nTxSigOps = pblocktemplate->vTxSigOpsCost[index_in_template];
+ entry.pushKV("fee", tx_fees.at(index_in_template));
+ int64_t nTxSigOps{tx_sigops.at(index_in_template)};
if (fPreSegWit) {
CHECK_NONFATAL(nTxSigOps % WITNESS_SCALE_FACTOR == 0);
nTxSigOps /= WITNESS_SCALE_FACTOR;
@@ -874,7 +868,7 @@ static RPCHelpMan getblocktemplate()
UniValue aux(UniValue::VOBJ);
- arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits);
+ arith_uint256 hashTarget = arith_uint256().SetCompact(block.nBits);
UniValue aMutable(UniValue::VARR);
aMutable.push_back("time");
@@ -904,7 +898,7 @@ static RPCHelpMan getblocktemplate()
break;
case ThresholdState::LOCKED_IN:
// Ensure bit is set in block version
- pblock->nVersion |= chainman.m_versionbitscache.Mask(consensusParams, pos);
+ block.nVersion |= chainman.m_versionbitscache.Mask(consensusParams, pos);
[[fallthrough]];
case ThresholdState::STARTED:
{
@@ -913,7 +907,7 @@ static RPCHelpMan getblocktemplate()
if (setClientRules.find(vbinfo.name) == setClientRules.end()) {
if (!vbinfo.gbt_force) {
// If the client doesn't support this, don't indicate it in the [default] version
- pblock->nVersion &= ~chainman.m_versionbitscache.Mask(consensusParams, pos);
+ block.nVersion &= ~chainman.m_versionbitscache.Mask(consensusParams, pos);
}
}
break;
@@ -933,15 +927,15 @@ static RPCHelpMan getblocktemplate()
}
}
}
- result.pushKV("version", pblock->nVersion);
+ result.pushKV("version", block.nVersion);
result.pushKV("rules", std::move(aRules));
result.pushKV("vbavailable", std::move(vbavailable));
result.pushKV("vbrequired", int(0));
- result.pushKV("previousblockhash", pblock->hashPrevBlock.GetHex());
+ result.pushKV("previousblockhash", block.hashPrevBlock.GetHex());
result.pushKV("transactions", std::move(transactions));
result.pushKV("coinbaseaux", std::move(aux));
- result.pushKV("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue);
+ result.pushKV("coinbasevalue", (int64_t)block.vtx[0]->vout[0].nValue);
result.pushKV("longpollid", tip.GetHex() + ToString(nTransactionsUpdatedLast));
result.pushKV("target", hashTarget.GetHex());
result.pushKV("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1);
@@ -960,16 +954,16 @@ static RPCHelpMan getblocktemplate()
if (!fPreSegWit) {
result.pushKV("weightlimit", (int64_t)MAX_BLOCK_WEIGHT);
}
- result.pushKV("curtime", pblock->GetBlockTime());
- result.pushKV("bits", strprintf("%08x", pblock->nBits));
+ result.pushKV("curtime", block.GetBlockTime());
+ result.pushKV("bits", strprintf("%08x", block.nBits));
result.pushKV("height", (int64_t)(pindexPrev->nHeight+1));
if (consensusParams.signet_blocks) {
result.pushKV("signet_challenge", HexStr(consensusParams.signet_challenge));
}
- if (!pblocktemplate->vchCoinbaseCommitment.empty()) {
- result.pushKV("default_witness_commitment", HexStr(pblocktemplate->vchCoinbaseCommitment));
+ if (!block_template->getCoinbaseCommitment().empty()) {
+ result.pushKV("default_witness_commitment", HexStr(block_template->getCoinbaseCommitment()));
}
return result;
@@ -1096,7 +1090,7 @@ static RPCHelpMan submitheader()
}
BlockValidationState state;
- chainman.ProcessNewBlockHeaders({h}, /*min_pow_checked=*/true, state);
+ chainman.ProcessNewBlockHeaders({{h}}, /*min_pow_checked=*/true, state);
if (state.IsValid()) return UniValue::VNULL;
if (state.IsError()) {
throw JSONRPCError(RPC_VERIFY_ERROR, state.ToString());
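
In the mining changes above, GenerateBlock now takes the block by rvalue reference, so the final std::make_shared<const CBlock>(std::move(block)) can transfer the block's buffers instead of copying them. A minimal sketch of that move-into-shared_ptr pattern with a stand-in Block type:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Block { std::vector<unsigned char> data; };  // stand-in for CBlock

    // Taking the block by rvalue reference lets the callee move it into the
    // shared_ptr instead of copying a potentially large object.
    std::shared_ptr<const Block> Finalize(Block&& block)
    {
        return std::make_shared<const Block>(std::move(block));
    }

    int main()
    {
        Block b;
        b.data.resize(1'000'000);
        auto out = Finalize(std::move(b));  // b's buffer is transferred, not copied
        (void)out;
    }
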
diff --git a/src/rpc/node.cpp b/src/rpc/node.cpp
index 54e2c8e226..5e36273cf4 100644
--- a/src/rpc/node.cpp
+++ b/src/rpc/node.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chainparams.h>
#include <httpserver.h>
@@ -244,15 +244,15 @@ static RPCHelpMan logging()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- uint32_t original_log_categories = LogInstance().GetCategoryMask();
+ BCLog::CategoryMask original_log_categories = LogInstance().GetCategoryMask();
if (request.params[0].isArray()) {
EnableOrDisableLogCategories(request.params[0], true);
}
if (request.params[1].isArray()) {
EnableOrDisableLogCategories(request.params[1], false);
}
- uint32_t updated_log_categories = LogInstance().GetCategoryMask();
- uint32_t changed_log_categories = original_log_categories ^ updated_log_categories;
+ BCLog::CategoryMask updated_log_categories = LogInstance().GetCategoryMask();
+ BCLog::CategoryMask changed_log_categories = original_log_categories ^ updated_log_categories;
// Update libevent logging if BCLog::LIBEVENT has changed.
if (changed_log_categories & BCLog::LIBEVENT) {
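
In the logging RPC above, the set of categories that actually changed is the bitwise XOR of the masks before and after the update: any bit that flipped in either direction is set in the result, which is then tested against BCLog::LIBEVENT. A tiny worked example (the mask width and category bit values are hypothetical stand-ins):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        using CategoryMask = uint64_t;        // stand-in for BCLog::CategoryMask
        constexpr CategoryMask NET{1u << 0};  // hypothetical category bits
        constexpr CategoryMask RPC{1u << 1};
        constexpr CategoryMask LIBEVENT{1u << 2};

        const CategoryMask original = NET | LIBEVENT;      // before the update
        const CategoryMask updated  = NET | RPC;           // LIBEVENT off, RPC on
        const CategoryMask changed  = original ^ updated;  // bits that flipped

        assert(changed == (RPC | LIBEVENT));
        if (changed & LIBEVENT) {
            // the real handler re-syncs libevent logging here
        }
    }
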
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 21bc0e52f1..65e6e40b0d 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -338,15 +338,7 @@ static RPCHelpMan getrawtransaction()
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "The genesis block coinbase is not considered an ordinary transaction and cannot be retrieved");
}
- // Accept either a bool (true) or a num (>=0) to indicate verbosity.
- int verbosity{0};
- if (!request.params[1].isNull()) {
- if (request.params[1].isBool()) {
- verbosity = request.params[1].get_bool();
- } else {
- verbosity = request.params[1].getInt<int>();
- }
- }
+ int verbosity{ParseVerbosity(request.params[1], /*default_verbosity=*/0)};
if (!request.params[2].isNull()) {
LOCK(cs_main);
@@ -405,11 +397,16 @@ static RPCHelpMan getrawtransaction()
CBlockUndo blockUndo;
CBlock block;
- if (tx->IsCoinBase() || !blockindex || WITH_LOCK(::cs_main, return chainman.m_blockman.IsBlockPruned(*blockindex)) ||
- !(chainman.m_blockman.UndoReadFromDisk(blockUndo, *blockindex) && chainman.m_blockman.ReadBlockFromDisk(block, *blockindex))) {
+ if (tx->IsCoinBase() || !blockindex || WITH_LOCK(::cs_main, return !(blockindex->nStatus & BLOCK_HAVE_MASK))) {
TxToJSON(*tx, hash_block, result, chainman.ActiveChainstate());
return result;
}
+ if (!chainman.m_blockman.UndoReadFromDisk(blockUndo, *blockindex)) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.");
+ }
+ if (!chainman.m_blockman.ReadBlockFromDisk(block, *blockindex)) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Block data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.");
+ }
CTxUndo* undoTX {nullptr};
auto it = std::find_if(block.vtx.begin(), block.vtx.end(), [tx](CTransactionRef t){ return *t == *tx; });
diff --git a/src/rpc/register.h b/src/rpc/register.h
index 65fd29ff08..17ed6c142c 100644
--- a/src/rpc/register.h
+++ b/src/rpc/register.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_RPC_REGISTER_H
#define BITCOIN_RPC_REGISTER_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
/** These are in one header file to avoid creating tons of single-function
* headers for everything under src/rpc/ */
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 2c07a2ff08..01f2dc0c0e 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <rpc/server.h>
@@ -11,6 +11,7 @@
#include <common/system.h>
#include <logging.h>
#include <node/context.h>
+#include <node/kernel_notifications.h>
#include <rpc/server_util.h>
#include <rpc/util.h>
#include <sync.h>
@@ -18,8 +19,7 @@
#include <util/strencodings.h>
#include <util/string.h>
#include <util/time.h>
-
-#include <boost/signals2/signal.hpp>
+#include <validation.h>
#include <cassert>
#include <chrono>
@@ -69,22 +69,6 @@ struct RPCCommandExecution
}
};
-static struct CRPCSignals
-{
- boost::signals2::signal<void ()> Started;
- boost::signals2::signal<void ()> Stopped;
-} g_rpcSignals;
-
-void RPCServer::OnStarted(std::function<void ()> slot)
-{
- g_rpcSignals.Started.connect(slot);
-}
-
-void RPCServer::OnStopped(std::function<void ()> slot)
-{
- g_rpcSignals.Stopped.connect(slot);
-}
-
std::string CRPCTable::help(const std::string& strCommand, const JSONRPCRequest& helpreq) const
{
std::string strRet;
@@ -185,7 +169,7 @@ static RPCHelpMan stop()
{
// Event loop will exit after current HTTP requests have been handled, so
// this reply will get back to the client.
- CHECK_NONFATAL((*CHECK_NONFATAL(EnsureAnyNodeContext(jsonRequest.context).shutdown))());
+ CHECK_NONFATAL((CHECK_NONFATAL(EnsureAnyNodeContext(jsonRequest.context).shutdown_request))());
if (jsonRequest.params[0].isNum()) {
UninterruptibleSleep(std::chrono::milliseconds{jsonRequest.params[0].getInt<int>()});
}
@@ -297,7 +281,6 @@ void StartRPC()
{
LogDebug(BCLog::RPC, "Starting RPC\n");
g_rpc_running = true;
- g_rpcSignals.Started();
}
void InterruptRPC()
@@ -316,11 +299,11 @@ void StopRPC()
static std::once_flag g_rpc_stop_flag;
// This function could be called twice if the GUI has been started with -server=1.
assert(!g_rpc_running);
- std::call_once(g_rpc_stop_flag, []() {
+ std::call_once(g_rpc_stop_flag, [&]() {
LogDebug(BCLog::RPC, "Stopping RPC\n");
WITH_LOCK(g_deadline_timers_mutex, deadlineTimers.clear());
DeleteAuthCookie();
- g_rpcSignals.Stopped();
+ LogDebug(BCLog::RPC, "RPC stopped.\n");
});
}
diff --git a/src/rpc/server.h b/src/rpc/server.h
index 56e8a63088..5a22279a58 100644
--- a/src/rpc/server.h
+++ b/src/rpc/server.h
@@ -18,12 +18,6 @@
class CRPCCommand;
-namespace RPCServer
-{
- void OnStarted(std::function<void ()> slot);
- void OnStopped(std::function<void ()> slot);
-}
-
/** Query whether RPC is running */
bool IsRPCRunning();
diff --git a/src/rpc/txoutproof.cpp b/src/rpc/txoutproof.cpp
index 7958deb677..40294fda06 100644
--- a/src/rpc/txoutproof.cpp
+++ b/src/rpc/txoutproof.cpp
@@ -10,6 +10,7 @@
#include <merkleblock.h>
#include <node/blockstorage.h>
#include <primitives/transaction.h>
+#include <rpc/blockchain.h>
#include <rpc/server.h>
#include <rpc/server_util.h>
#include <rpc/util.h>
@@ -96,6 +97,10 @@ static RPCHelpMan gettxoutproof()
}
}
+ {
+ LOCK(cs_main);
+ CheckBlockDataAvailability(chainman.m_blockman, *pblockindex, /*check_for_undo=*/false);
+ }
CBlock block;
if (!chainman.m_blockman.ReadBlockFromDisk(block, *pblockindex)) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk");
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index dbbf1506d4..d71d7d737b 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <clientversion.h>
#include <common/args.h>
@@ -19,6 +19,7 @@
#include <script/signingprovider.h>
#include <script/solver.h>
#include <tinyformat.h>
+#include <uint256.h>
#include <univalue.h>
#include <util/check.h>
#include <util/result.h>
@@ -80,6 +81,18 @@ void RPCTypeCheckObj(const UniValue& o,
}
}
+int ParseVerbosity(const UniValue& arg, int default_verbosity)
+{
+ if (!arg.isNull()) {
+ if (arg.isBool()) {
+ return arg.get_bool(); // true = 1
+ } else {
+ return arg.getInt<int>();
+ }
+ }
+ return default_verbosity;
+}
+
CAmount AmountFromValue(const UniValue& value, int decimals)
{
if (!value.isNum() && !value.isStr())
@@ -102,11 +115,11 @@ CFeeRate ParseFeeRate(const UniValue& json)
uint256 ParseHashV(const UniValue& v, std::string_view name)
{
const std::string& strHex(v.get_str());
- if (64 != strHex.length())
- throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s must be of length %d (not %d, for '%s')", name, 64, strHex.length(), strHex));
- if (!IsHex(strHex)) // Note: IsHex("") is false
- throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s must be hexadecimal string (not '%s')", name, strHex));
- return uint256S(strHex);
+ if (auto rv{uint256::FromHex(strHex)}) return *rv;
+ if (auto expected_len{uint256::size() * 2}; strHex.length() != expected_len) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s must be of length %d (not %d, for '%s')", name, expected_len, strHex.length(), strHex));
+ }
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s must be hexadecimal string (not '%s')", name, strHex));
}
uint256 ParseHashO(const UniValue& o, std::string_view strKey)
{
diff --git a/src/rpc/util.h b/src/rpc/util.h
index 23024376e0..b8e6759768 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -101,6 +101,15 @@ std::vector<unsigned char> ParseHexV(const UniValue& v, std::string_view name);
std::vector<unsigned char> ParseHexO(const UniValue& o, std::string_view strKey);
/**
+ * Parses verbosity from provided UniValue.
+ *
+ * @param[in] arg The verbosity argument as a bool (true) or int (0, 1, 2,...)
+ * @param[in] default_verbosity The value to return if verbosity argument is null
+ * @returns An integer describing the verbosity level (e.g. 0, 1, 2, etc.)
+ */
+int ParseVerbosity(const UniValue& arg, int default_verbosity);
+
+/**
* Validate and return a CAmount from a UniValue number or string.
*
* @param[in] value UniValue number or string to parse.
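
ParseVerbosity, added above, collapses the per-RPC bool-or-int handling into one helper: a missing argument returns the default, a bool maps to 0 or 1, and an integer passes through unchanged. A self-contained sketch of the same mapping, with std::optional/std::variant standing in for the UniValue argument:

    #include <cassert>
    #include <optional>
    #include <variant>

    // Stand-in for the UniValue argument: absent, bool, or int.
    using Verbosity = std::optional<std::variant<bool, int>>;

    int ParseVerbosity(const Verbosity& arg, int default_verbosity)
    {
        if (!arg) return default_verbosity;                               // argument omitted
        if (const bool* b = std::get_if<bool>(&*arg)) return *b ? 1 : 0;  // true = 1
        return std::get<int>(*arg);                                       // 0, 1, 2, ...
    }

    int main()
    {
        assert(ParseVerbosity(std::nullopt, 0) == 0);     // default applies
        assert(ParseVerbosity(Verbosity{true}, 0) == 1);  // bool true -> 1
        assert(ParseVerbosity(Verbosity{2}, 0) == 2);     // ints pass through
    }
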
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 9d0e9b5e3c..dcdddb88e9 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1303,7 +1303,7 @@ public:
// Serialize the nSequence
if (nInput != nIn && (fHashSingle || fHashNone))
// let the others update at will
- ::Serialize(s, int{0});
+ ::Serialize(s, int32_t{0});
else
::Serialize(s, txTo.vin[nInput].nSequence);
}
@@ -1565,7 +1565,7 @@ bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, cons
}
template <class T>
-uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache)
+uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache)
{
assert(nIn < txTo.vin.size());
diff --git a/src/script/script.h b/src/script/script.h
index e3119cbe05..f457984980 100644
--- a/src/script/script.h
+++ b/src/script/script.h
@@ -17,6 +17,7 @@
#include <cstdint>
#include <cstring>
#include <limits>
+#include <span>
#include <stdexcept>
#include <string>
#include <type_traits>
@@ -412,6 +413,32 @@ bool GetScriptOp(CScriptBase::const_iterator& pc, CScriptBase::const_iterator en
/** Serialized script, used inside transaction inputs and outputs */
class CScript : public CScriptBase
{
+private:
+ inline void AppendDataSize(const uint32_t size)
+ {
+ if (size < OP_PUSHDATA1) {
+ insert(end(), static_cast<value_type>(size));
+ } else if (size <= 0xff) {
+ insert(end(), OP_PUSHDATA1);
+ insert(end(), static_cast<value_type>(size));
+ } else if (size <= 0xffff) {
+ insert(end(), OP_PUSHDATA2);
+ value_type data[2];
+ WriteLE16(data, size);
+ insert(end(), std::cbegin(data), std::cend(data));
+ } else {
+ insert(end(), OP_PUSHDATA4);
+ value_type data[4];
+ WriteLE32(data, size);
+ insert(end(), std::cbegin(data), std::cend(data));
+ }
+ }
+
+ void AppendData(std::span<const value_type> data)
+ {
+ insert(end(), data.begin(), data.end());
+ }
+
protected:
CScript& push_int64(int64_t n)
{
@@ -463,35 +490,19 @@ public:
return *this;
}
- CScript& operator<<(const std::vector<unsigned char>& b) LIFETIMEBOUND
+ CScript& operator<<(std::span<const std::byte> b) LIFETIMEBOUND
{
- if (b.size() < OP_PUSHDATA1)
- {
- insert(end(), (unsigned char)b.size());
- }
- else if (b.size() <= 0xff)
- {
- insert(end(), OP_PUSHDATA1);
- insert(end(), (unsigned char)b.size());
- }
- else if (b.size() <= 0xffff)
- {
- insert(end(), OP_PUSHDATA2);
- uint8_t _data[2];
- WriteLE16(_data, b.size());
- insert(end(), _data, _data + sizeof(_data));
- }
- else
- {
- insert(end(), OP_PUSHDATA4);
- uint8_t _data[4];
- WriteLE32(_data, b.size());
- insert(end(), _data, _data + sizeof(_data));
- }
- insert(end(), b.begin(), b.end());
+ AppendDataSize(b.size());
+ AppendData({reinterpret_cast<const value_type*>(b.data()), b.size()});
return *this;
}
+ // For compatibility reasons. In new code, prefer using std::byte instead of uint8_t.
+ CScript& operator<<(std::span<const value_type> b) LIFETIMEBOUND
+ {
+ return *this << std::as_bytes(b);
+ }
+
bool GetOp(const_iterator& pc, opcodetype& opcodeRet, std::vector<unsigned char>& vchRet) const
{
return GetScriptOp(pc, end(), opcodeRet, &vchRet);
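
The new AppendDataSize helper above centralizes CScript's push-size encoding: lengths below OP_PUSHDATA1 (0x4c) are encoded as a single opcode byte, lengths up to 0xff use OP_PUSHDATA1 plus one length byte, lengths up to 0xffff use OP_PUSHDATA2 plus a little-endian uint16, and anything larger uses OP_PUSHDATA4. A self-contained sketch of that encoding over a plain byte vector:

    #include <cstdint>
    #include <vector>

    // Opcode values as defined in script.h.
    constexpr uint8_t OP_PUSHDATA1 = 0x4c;  // 76
    constexpr uint8_t OP_PUSHDATA2 = 0x4d;
    constexpr uint8_t OP_PUSHDATA4 = 0x4e;

    // Append the size prefix that precedes a data push, mirroring AppendDataSize.
    void AppendPushSize(std::vector<uint8_t>& script, uint32_t size)
    {
        if (size < OP_PUSHDATA1) {
            script.push_back(static_cast<uint8_t>(size));         // 0..75: opcode is the length
        } else if (size <= 0xff) {
            script.push_back(OP_PUSHDATA1);
            script.push_back(static_cast<uint8_t>(size));         // one length byte
        } else if (size <= 0xffff) {
            script.push_back(OP_PUSHDATA2);
            script.push_back(static_cast<uint8_t>(size & 0xff));  // little-endian uint16
            script.push_back(static_cast<uint8_t>(size >> 8));
        } else {
            script.push_back(OP_PUSHDATA4);
            for (int i = 0; i < 4; ++i) {                         // little-endian uint32
                script.push_back(static_cast<uint8_t>(size >> (8 * i)));
            }
        }
    }

    int main()
    {
        std::vector<uint8_t> script;
        AppendPushSize(script, 20);   // e.g. a HASH160 push: single byte 0x14
        AppendPushSize(script, 520);  // OP_PUSHDATA2 0x08 0x02
    }
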
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 9568348bf6..42db251359 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -694,27 +694,6 @@ void SignatureData::MergeSignatureData(SignatureData sigdata)
signatures.insert(std::make_move_iterator(sigdata.signatures.begin()), std::make_move_iterator(sigdata.signatures.end()));
}
-bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data)
-{
- assert(nIn < txTo.vin.size());
-
- MutableTransactionSignatureCreator creator(txTo, nIn, amount, nHashType);
-
- bool ret = ProduceSignature(provider, creator, fromPubKey, sig_data);
- UpdateInput(txTo.vin.at(nIn), sig_data);
- return ret;
-}
-
-bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType, SignatureData& sig_data)
-{
- assert(nIn < txTo.vin.size());
- const CTxIn& txin = txTo.vin[nIn];
- assert(txin.prevout.n < txFrom.vout.size());
- const CTxOut& txout = txFrom.vout[txin.prevout.n];
-
- return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType, sig_data);
-}
-
namespace {
/** Dummy signature checker which accepts all signatures. */
class DummySignatureChecker final : public BaseSignatureChecker
diff --git a/src/script/sign.h b/src/script/sign.h
index 4edd5bf326..fe2c470bc6 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -97,25 +97,6 @@ struct SignatureData {
/** Produce a script signature using a generic signature creator. */
bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreator& creator, const CScript& scriptPubKey, SignatureData& sigdata);
-/**
- * Produce a satisfying script (scriptSig or witness).
- *
- * @param provider Utility containing the information necessary to solve a script.
- * @param fromPubKey The script to produce a satisfaction for.
- * @param txTo The spending transaction.
- * @param nIn The index of the input in `txTo` referring the output being spent.
- * @param amount The value of the output being spent.
- * @param nHashType Signature hash type.
- * @param sig_data Additional data provided to solve a script. Filled with the resulting satisfying
- * script and whether the satisfaction is complete.
- *
- * @return True if the produced script is entirely satisfying `fromPubKey`.
- **/
-bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo,
- unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data);
-bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo,
- unsigned int nIn, int nHashType, SignatureData& sig_data);
-
/** Extract signature data from a transaction input, and insert it. */
SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nIn, const CTxOut& txout);
void UpdateInput(CTxIn& input, const SignatureData& data);
diff --git a/src/secp256k1/.github/workflows/ci.yml b/src/secp256k1/.github/workflows/ci.yml
index e238f3b7a1..0fc104d29b 100644
--- a/src/secp256k1/.github/workflows/ci.yml
+++ b/src/secp256k1/.github/workflows/ci.yml
@@ -632,7 +632,7 @@ jobs:
- name: Install Homebrew packages
run: |
- brew install automake libtool gcc
+ brew install --quiet automake libtool gcc
ln -s $(brew --prefix gcc)/bin/gcc-?? /usr/local/bin/gcc
- name: Install and cache Valgrind
@@ -691,7 +691,7 @@ jobs:
- name: Install Homebrew packages
run: |
- brew install automake libtool gcc
+ brew install --quiet automake libtool gcc
ln -s $(brew --prefix gcc)/bin/gcc-?? /usr/local/bin/gcc
- name: CI script
diff --git a/src/secp256k1/CHANGELOG.md b/src/secp256k1/CHANGELOG.md
index 0868e75480..fb82940627 100644
--- a/src/secp256k1/CHANGELOG.md
+++ b/src/secp256k1/CHANGELOG.md
@@ -5,6 +5,8 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [Unreleased]
+
## [0.5.1] - 2024-08-01
#### Added
@@ -141,6 +143,7 @@ This version was in fact never released.
The number was given by the build system since the introduction of autotools in Jan 2014 (ea0fe5a5bf0c04f9cc955b2966b614f5f378c6f6).
Therefore, this version number does not uniquely identify a set of source files.
+[unreleased]: https://github.com/bitcoin-core/secp256k1/compare/v0.5.1...HEAD
[0.5.1]: https://github.com/bitcoin-core/secp256k1/compare/v0.5.0...v0.5.1
[0.5.0]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.1...v0.5.0
[0.4.1]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.0...v0.4.1
diff --git a/src/secp256k1/CMakeLists.txt b/src/secp256k1/CMakeLists.txt
index 7a87686056..d5a7a2de39 100644
--- a/src/secp256k1/CMakeLists.txt
+++ b/src/secp256k1/CMakeLists.txt
@@ -4,7 +4,7 @@ project(libsecp256k1
# The package (a.k.a. release) version is based on semantic versioning 2.0.0 of
# the API. All changes in experimental modules are treated as
# backwards-compatible and therefore at most increase the minor version.
- VERSION 0.5.1
+ VERSION 0.5.2
DESCRIPTION "Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1."
HOMEPAGE_URL "https://github.com/bitcoin-core/secp256k1"
LANGUAGES C
@@ -27,7 +27,7 @@ endif()
# All changes in experimental modules are treated as if they don't affect the
# interface and therefore only increase the revision.
set(${PROJECT_NAME}_LIB_VERSION_CURRENT 4)
-set(${PROJECT_NAME}_LIB_VERSION_REVISION 1)
+set(${PROJECT_NAME}_LIB_VERSION_REVISION 2)
set(${PROJECT_NAME}_LIB_VERSION_AGE 2)
set(CMAKE_C_STANDARD 90)
@@ -276,6 +276,14 @@ if(SECP256K1_APPEND_CFLAGS)
string(APPEND CMAKE_C_COMPILE_OBJECT " ${SECP256K1_APPEND_CFLAGS}")
endif()
+set(SECP256K1_APPEND_LDFLAGS "" CACHE STRING "Linker flags that are appended to the command line after all other flags added by the build system. This variable is intended for debugging and special builds.")
+if(SECP256K1_APPEND_LDFLAGS)
+ # Appending to this low-level rule variable is the only way to
+ # guarantee that the flags appear at the end of the command line.
+ string(APPEND CMAKE_C_CREATE_SHARED_LIBRARY " ${SECP256K1_APPEND_LDFLAGS}")
+ string(APPEND CMAKE_C_LINK_EXECUTABLE " ${SECP256K1_APPEND_LDFLAGS}")
+endif()
+
add_subdirectory(src)
if(SECP256K1_BUILD_EXAMPLES)
add_subdirectory(examples)
@@ -355,6 +363,9 @@ endif()
if(SECP256K1_APPEND_CFLAGS)
message("SECP256K1_APPEND_CFLAGS ............... ${SECP256K1_APPEND_CFLAGS}")
endif()
+if(SECP256K1_APPEND_LDFLAGS)
+ message("SECP256K1_APPEND_LDFLAGS .............. ${SECP256K1_APPEND_LDFLAGS}")
+endif()
message("")
if(print_msan_notice)
message(
diff --git a/src/secp256k1/ci/linux-debian.Dockerfile b/src/secp256k1/ci/linux-debian.Dockerfile
index 5ce715b41b..241bfa9719 100644
--- a/src/secp256k1/ci/linux-debian.Dockerfile
+++ b/src/secp256k1/ci/linux-debian.Dockerfile
@@ -40,7 +40,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
apt-get clean && rm -rf /var/lib/apt/lists/*
# Build and install gcc snapshot
-ARG GCC_SNAPSHOT_MAJOR=14
+ARG GCC_SNAPSHOT_MAJOR=15
RUN apt-get update && apt-get install --no-install-recommends -y wget libgmp-dev libmpfr-dev libmpc-dev flex && \
mkdir gcc && cd gcc && \
wget --progress=dot:giga --https-only --recursive --accept '*.tar.xz' --level 1 --no-directories "https://gcc.gnu.org/pub/gcc/snapshots/LATEST-${GCC_SNAPSHOT_MAJOR}" && \
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
index 6c4c11ddcd..6841543f59 100644
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -5,8 +5,8 @@ AC_PREREQ([2.60])
# backwards-compatible and therefore at most increase the minor version.
define(_PKG_VERSION_MAJOR, 0)
define(_PKG_VERSION_MINOR, 5)
-define(_PKG_VERSION_PATCH, 1)
-define(_PKG_VERSION_IS_RELEASE, true)
+define(_PKG_VERSION_PATCH, 2)
+define(_PKG_VERSION_IS_RELEASE, false)
# The library version is based on libtool versioning of the ABI. The set of
# rules for updating the version can be found here:
@@ -14,7 +14,7 @@ define(_PKG_VERSION_IS_RELEASE, true)
# All changes in experimental modules are treated as if they don't affect the
# interface and therefore only increase the revision.
define(_LIB_VERSION_CURRENT, 4)
-define(_LIB_VERSION_REVISION, 1)
+define(_LIB_VERSION_REVISION, 2)
define(_LIB_VERSION_AGE, 2)
AC_INIT([libsecp256k1],m4_join([.], _PKG_VERSION_MAJOR, _PKG_VERSION_MINOR, _PKG_VERSION_PATCH)m4_if(_PKG_VERSION_IS_RELEASE, [true], [], [-dev]),[https://github.com/bitcoin-core/secp256k1/issues],[libsecp256k1],[https://github.com/bitcoin-core/secp256k1])
diff --git a/src/secp256k1/examples/schnorr.c b/src/secp256k1/examples/schnorr.c
index b0409b986b..8d5d14bdaf 100644
--- a/src/secp256k1/examples/schnorr.c
+++ b/src/secp256k1/examples/schnorr.c
@@ -18,9 +18,9 @@
#include "examples_util.h"
int main(void) {
- unsigned char msg[12] = "Hello World!";
+ unsigned char msg[] = {'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', '!'};
unsigned char msg_hash[32];
- unsigned char tag[17] = "my_fancy_protocol";
+ unsigned char tag[] = {'m', 'y', '_', 'f', 'a', 'n', 'c', 'y', '_', 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l'};
unsigned char seckey[32];
unsigned char randomize[32];
unsigned char auxiliary_rand[32];
diff --git a/src/secp256k1/include/secp256k1_ellswift.h b/src/secp256k1/include/secp256k1_ellswift.h
index ae37287f82..0d1293e94f 100644
--- a/src/secp256k1/include/secp256k1_ellswift.h
+++ b/src/secp256k1/include/secp256k1_ellswift.h
@@ -35,7 +35,7 @@ extern "C" {
*
* If the Y coordinate is relevant, it is given the same parity as t.
*
- * Changes w.r.t. the the paper:
+ * Changes w.r.t. the paper:
* - The u=0, t=0, and u^3+t^2+7=0 conditions result in decoding to the point
* at infinity in the paper. Here they are remapped to finite points.
* - The paper uses an additional encoding bit for the parity of y. Here the
diff --git a/src/secp256k1/src/modules/ellswift/tests_impl.h b/src/secp256k1/src/modules/ellswift/tests_impl.h
index ed5658f34c..3c314c9b50 100644
--- a/src/secp256k1/src/modules/ellswift/tests_impl.h
+++ b/src/secp256k1/src/modules/ellswift/tests_impl.h
@@ -406,9 +406,9 @@ void run_ellswift_tests(void) {
/* Test hash initializers. */
{
secp256k1_sha256 sha, sha_optimized;
- static const unsigned char encode_tag[25] = "secp256k1_ellswift_encode";
- static const unsigned char create_tag[25] = "secp256k1_ellswift_create";
- static const unsigned char bip324_tag[26] = "bip324_ellswift_xonly_ecdh";
+ static const unsigned char encode_tag[] = {'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', '_', 'e', 'l', 'l', 's', 'w', 'i', 'f', 't', '_', 'e', 'n', 'c', 'o', 'd', 'e'};
+ static const unsigned char create_tag[] = {'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', '_', 'e', 'l', 'l', 's', 'w', 'i', 'f', 't', '_', 'c', 'r', 'e', 'a', 't', 'e'};
+ static const unsigned char bip324_tag[] = {'b', 'i', 'p', '3', '2', '4', '_', 'e', 'l', 'l', 's', 'w', 'i', 'f', 't', '_', 'x', 'o', 'n', 'l', 'y', '_', 'e', 'c', 'd', 'h'};
/* Check that hash initialized by
* secp256k1_ellswift_sha256_init_encode has the expected
diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h
index 26727e4651..57f7eadd3c 100644
--- a/src/secp256k1/src/modules/schnorrsig/main_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h
@@ -45,7 +45,7 @@ static void secp256k1_nonce_function_bip340_sha256_tagged_aux(secp256k1_sha256 *
/* algo argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340
* by using the correct tagged hash function. */
-static const unsigned char bip340_algo[13] = "BIP0340/nonce";
+static const unsigned char bip340_algo[] = {'B', 'I', 'P', '0', '3', '4', '0', '/', 'n', 'o', 'n', 'c', 'e'};
static const unsigned char schnorrsig_extraparams_magic[4] = SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC;
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
index aa4fc38270..2d716a01f8 100644
--- a/src/secp256k1/src/modules/schnorrsig/tests_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
@@ -21,9 +21,9 @@ static void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, s
}
static void run_nonce_function_bip340_tests(void) {
- unsigned char tag[13] = "BIP0340/nonce";
- unsigned char aux_tag[11] = "BIP0340/aux";
- unsigned char algo[13] = "BIP0340/nonce";
+ unsigned char tag[] = {'B', 'I', 'P', '0', '3', '4', '0', '/', 'n', 'o', 'n', 'c', 'e'};
+ unsigned char aux_tag[] = {'B', 'I', 'P', '0', '3', '4', '0', '/', 'a', 'u', 'x'};
+ unsigned char algo[] = {'B', 'I', 'P', '0', '3', '4', '0', '/', 'n', 'o', 'n', 'c', 'e'};
size_t algolen = sizeof(algo);
secp256k1_sha256 sha;
secp256k1_sha256 sha_optimized;
@@ -158,7 +158,7 @@ static void test_schnorrsig_api(void) {
/* Checks that hash initialized by secp256k1_schnorrsig_sha256_tagged has the
* expected state. */
static void test_schnorrsig_sha256_tagged(void) {
- unsigned char tag[17] = "BIP0340/challenge";
+ unsigned char tag[] = {'B', 'I', 'P', '0', '3', '4', '0', '/', 'c', 'h', 'a', 'l', 'l', 'e', 'n', 'g', 'e'};
secp256k1_sha256 sha;
secp256k1_sha256 sha_optimized;
@@ -806,7 +806,7 @@ static void test_schnorrsig_sign(void) {
unsigned char sk[32];
secp256k1_xonly_pubkey pk;
secp256k1_keypair keypair;
- const unsigned char msg[32] = "this is a msg for a schnorrsig..";
+ const unsigned char msg[] = {'t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 'm', 's', 'g', ' ', 'f', 'o', 'r', ' ', 'a', ' ', 's', 'c', 'h', 'n', 'o', 'r', 'r', 's', 'i', 'g', '.', '.'};
unsigned char sig[64];
unsigned char sig2[64];
unsigned char zeros64[64] = { 0 };
diff --git a/src/secp256k1/src/testrand_impl.h b/src/secp256k1/src/testrand_impl.h
index 07564f7f3f..b84f5730a9 100644
--- a/src/secp256k1/src/testrand_impl.h
+++ b/src/secp256k1/src/testrand_impl.h
@@ -18,7 +18,7 @@
static uint64_t secp256k1_test_state[4];
SECP256K1_INLINE static void testrand_seed(const unsigned char *seed16) {
- static const unsigned char PREFIX[19] = "secp256k1 test init";
+ static const unsigned char PREFIX[] = {'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', ' ', 't', 'e', 's', 't', ' ', 'i', 'n', 'i', 't'};
unsigned char out32[32];
secp256k1_sha256 hash;
int i;
diff --git a/src/streams.cpp b/src/streams.cpp
index cdd36a86fe..baa5ad7abe 100644
--- a/src/streams.cpp
+++ b/src/streams.cpp
@@ -4,21 +4,29 @@
#include <span.h>
#include <streams.h>
+#include <util/fs_helpers.h>
#include <array>
+AutoFile::AutoFile(std::FILE* file, std::vector<std::byte> data_xor)
+ : m_file{file}, m_xor{std::move(data_xor)}
+{
+ if (!IsNull()) {
+ auto pos{std::ftell(m_file)};
+ if (pos >= 0) m_position = pos;
+ }
+}
+
std::size_t AutoFile::detail_fread(Span<std::byte> dst)
{
if (!m_file) throw std::ios_base::failure("AutoFile::read: file handle is nullptr");
- if (m_xor.empty()) {
- return std::fread(dst.data(), 1, dst.size(), m_file);
- } else {
- const auto init_pos{std::ftell(m_file)};
- if (init_pos < 0) throw std::ios_base::failure("AutoFile::read: ftell failed");
- std::size_t ret{std::fread(dst.data(), 1, dst.size(), m_file)};
- util::Xor(dst.subspan(0, ret), m_xor, init_pos);
- return ret;
+ size_t ret = std::fread(dst.data(), 1, dst.size(), m_file);
+ if (!m_xor.empty()) {
+ if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::read: position unknown");
+ util::Xor(dst.subspan(0, ret), m_xor, *m_position);
}
+ if (m_position.has_value()) *m_position += ret;
+ return ret;
}
void AutoFile::seek(int64_t offset, int origin)
@@ -29,18 +37,23 @@ void AutoFile::seek(int64_t offset, int origin)
if (std::fseek(m_file, offset, origin) != 0) {
throw std::ios_base::failure(feof() ? "AutoFile::seek: end of file" : "AutoFile::seek: fseek failed");
}
+ if (origin == SEEK_SET) {
+ m_position = offset;
+ } else if (origin == SEEK_CUR && m_position.has_value()) {
+ *m_position += offset;
+ } else {
+ int64_t r{std::ftell(m_file)};
+ if (r < 0) {
+ throw std::ios_base::failure("AutoFile::seek: ftell failed");
+ }
+ m_position = r;
+ }
}
int64_t AutoFile::tell()
{
- if (IsNull()) {
- throw std::ios_base::failure("AutoFile::tell: file handle is nullptr");
- }
- int64_t r{std::ftell(m_file)};
- if (r < 0) {
- throw std::ios_base::failure("AutoFile::tell: ftell failed");
- }
- return r;
+ if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::tell: position unknown");
+ return *m_position;
}
void AutoFile::read(Span<std::byte> dst)
@@ -60,6 +73,7 @@ void AutoFile::ignore(size_t nSize)
throw std::ios_base::failure(feof() ? "AutoFile::ignore: end of file" : "AutoFile::ignore: fread failed");
}
nSize -= nNow;
+ if (m_position.has_value()) *m_position += nNow;
}
}
@@ -70,19 +84,29 @@ void AutoFile::write(Span<const std::byte> src)
if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) {
throw std::ios_base::failure("AutoFile::write: write failed");
}
+ if (m_position.has_value()) *m_position += src.size();
} else {
- auto current_pos{std::ftell(m_file)};
- if (current_pos < 0) throw std::ios_base::failure("AutoFile::write: ftell failed");
+ if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::write: position unknown");
std::array<std::byte, 4096> buf;
while (src.size() > 0) {
auto buf_now{Span{buf}.first(std::min<size_t>(src.size(), buf.size()))};
std::copy(src.begin(), src.begin() + buf_now.size(), buf_now.begin());
- util::Xor(buf_now, m_xor, current_pos);
+ util::Xor(buf_now, m_xor, *m_position);
if (std::fwrite(buf_now.data(), 1, buf_now.size(), m_file) != buf_now.size()) {
throw std::ios_base::failure{"XorFile::write: failed"};
}
src = src.subspan(buf_now.size());
- current_pos += buf_now.size();
+ *m_position += buf_now.size();
}
}
}
+
+bool AutoFile::Commit()
+{
+ return ::FileCommit(m_file);
+}
+
+bool AutoFile::Truncate(unsigned size)
+{
+ return ::TruncateFile(m_file, size);
+}
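
With AutoFile tracking m_position itself, the XOR obfuscation no longer needs an ftell before every read or write: the key is applied byte-wise, rotated by the absolute file offset, and the position is advanced after each operation. A stand-alone sketch of that keystream application (a simplified stand-in for util::Xor, not the AutoFile class itself):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // XOR `data`, assumed to start at absolute file offset `key_offset`, with the
    // repeating key -- the role util::Xor plays when called with m_position.
    void XorWithOffset(std::vector<uint8_t>& data, const std::vector<uint8_t>& key, uint64_t key_offset)
    {
        if (key.empty()) return;
        for (size_t i = 0; i < data.size(); ++i) {
            data[i] ^= key[(key_offset + i) % key.size()];
        }
    }

    int main()
    {
        const std::vector<uint8_t> key{0xde, 0xad, 0xbe, 0xef};
        std::vector<uint8_t> chunk1{1, 2, 3};
        std::vector<uint8_t> chunk2{4, 5};

        uint64_t position = 0;                 // the role of AutoFile::m_position
        XorWithOffset(chunk1, key, position);
        position += chunk1.size();             // advance after every read/write
        XorWithOffset(chunk2, key, position);  // keeps the keystream aligned
    }
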
diff --git a/src/streams.h b/src/streams.h
index c2a9dea287..e9f3562c6c 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -390,9 +390,10 @@ class AutoFile
protected:
std::FILE* m_file;
std::vector<std::byte> m_xor;
+ std::optional<int64_t> m_position;
public:
- explicit AutoFile(std::FILE* file, std::vector<std::byte> data_xor={}) : m_file{file}, m_xor{std::move(data_xor)} {}
+ explicit AutoFile(std::FILE* file, std::vector<std::byte> data_xor={});
~AutoFile() { fclose(); }
@@ -419,12 +420,6 @@ public:
return ret;
}
- /** Get wrapped FILE* without transfer of ownership.
- * @note Ownership of the FILE* will remain with this class. Use this only if the scope of the
- * AutoFile outlives use of the passed pointer.
- */
- std::FILE* Get() const { return m_file; }
-
/** Return true if the wrapped FILE* is nullptr, false otherwise.
*/
bool IsNull() const { return m_file == nullptr; }
@@ -435,9 +430,18 @@ public:
/** Implementation detail, only used internally. */
std::size_t detail_fread(Span<std::byte> dst);
+ /** Wrapper around fseek(). Will throw if seeking is not possible. */
void seek(int64_t offset, int origin);
+
+ /** Find position within the file. Will throw if unknown. */
int64_t tell();
+ /** Wrapper around FileCommit(). */
+ bool Commit();
+
+ /** Wrapper around TruncateFile(). */
+ bool Truncate(unsigned size);
+
//
// Stream subset
//
diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt
index a666a76f8f..c23fbae92f 100644
--- a/src/test/CMakeLists.txt
+++ b/src/test/CMakeLists.txt
@@ -12,7 +12,7 @@ generate_header_from_json(data/script_tests.json)
generate_header_from_json(data/sighash.json)
generate_header_from_json(data/tx_invalid.json)
generate_header_from_json(data/tx_valid.json)
-generate_header_from_raw(data/asmap.raw)
+generate_header_from_raw(data/asmap.raw test::data)
# Do not use generator expressions in test sources because the
# SOURCES property is processed to gather test suite macros.
@@ -132,6 +132,7 @@ add_executable(test_bitcoin
txvalidation_tests.cpp
txvalidationcache_tests.cpp
uint256_tests.cpp
+ util_string_tests.cpp
util_tests.cpp
util_threadnames_tests.cpp
validation_block_tests.cpp
@@ -159,14 +160,6 @@ if(ENABLE_WALLET)
endif()
if(WITH_MULTIPROCESS)
- add_library(bitcoin_ipc_test STATIC EXCLUDE_FROM_ALL
- ipc_test.cpp
- )
-
- target_capnp_sources(bitcoin_ipc_test ${PROJECT_SOURCE_DIR}
- ipc_test.capnp
- )
-
target_link_libraries(bitcoin_ipc_test
PRIVATE
core_interface
@@ -177,7 +170,7 @@ if(WITH_MULTIPROCESS)
PRIVATE
ipc_tests.cpp
)
- target_link_libraries(test_bitcoin bitcoin_ipc_test)
+ target_link_libraries(test_bitcoin bitcoin_ipc_test bitcoin_ipc)
endif()
function(add_boost_test source_file)
@@ -196,10 +189,10 @@ function(add_boost_test source_file)
)
if(test_suite_name)
add_test(NAME ${test_suite_name}
- COMMAND test_bitcoin --run_test=${test_suite_name} --catch_system_error=no
+ COMMAND test_bitcoin --run_test=${test_suite_name} --catch_system_error=no --log_level=test_suite -- DEBUG_LOG_OUT
)
set_property(TEST ${test_suite_name} PROPERTY
- SKIP_REGULAR_EXPRESSION "no test cases matching filter" "Skipping"
+ SKIP_REGULAR_EXPRESSION "no test cases matching filter"
)
endif()
endfunction()
diff --git a/src/test/README.md b/src/test/README.md
index a6e89aac80..7e0f245ee8 100644
--- a/src/test/README.md
+++ b/src/test/README.md
@@ -10,14 +10,19 @@ The build system is set up to compile an executable called `test_bitcoin`
that runs all of the unit tests. The main source file for the test library is found in
`util/setup_common.cpp`.
+The examples in this document assume the build directory is named
+`build`. You'll need to adapt them if you named it differently.
+
### Compiling/running unit tests
Unit tests will be automatically compiled if dependencies were met
during the generation of the Bitcoin Core build system
and tests weren't explicitly disabled.
-Assuming the build directory is named `build`, the unit tests can be run
-with `ctest --test-dir build`, which includes unit tests from subtrees.
+The unit tests can be run with `ctest --test-dir build`, which includes unit
+tests from subtrees.
+
+Run `test_bitcoin --list_content` for the full list of tests.
To run the unit tests manually, launch `build/src/test/test_bitcoin`. To recompile
after a test file was modified, run `cmake --build build` and then run the test again. If you
@@ -35,35 +40,46 @@ the `src/qt/test/test_main.cpp` file.
### Running individual tests
-`test_bitcoin` accepts the command line arguments from the boost framework.
-For example, to run just the `getarg_tests` suite of tests:
+The `test_bitcoin` runner accepts command line arguments from the Boost
+framework. To see the list of arguments that may be passed, run:
+
+```
+test_bitcoin --help
+```
+
+For example, to run only the tests in the `getarg_tests` file, with full logging:
```bash
build/src/test/test_bitcoin --log_level=all --run_test=getarg_tests
```
-`log_level` controls the verbosity of the test framework, which logs when a
-test case is entered, for example.
+or
-`test_bitcoin` also accepts some of the command line arguments accepted by
-`bitcoind`. Use `--` to separate these sets of arguments:
+```bash
+build/src/test/test_bitcoin -l all -t getarg_tests
+```
+
+or to run only the doubledash test in `getarg_tests`:
```bash
-build/src/test/test_bitcoin --log_level=all --run_test=getarg_tests -- -printtoconsole=1
+build/src/test/test_bitcoin --run_test=getarg_tests/doubledash
```
-The `-printtoconsole=1` after the two dashes sends debug logging, which
-normally goes only to `debug.log` within the data directory, also to the
-standard terminal output.
+The `--log_level=` (or `-l`) argument controls the verbosity of the test output.
-... or to run just the doubledash test:
+The `test_bitcoin` runner also accepts some of the command line arguments accepted by
+`bitcoind`. Use `--` to separate these sets of arguments:
```bash
-build/src/test/test_bitcoin --run_test=getarg_tests/doubledash
+build/src/test/test_bitcoin --log_level=all --run_test=getarg_tests -- -printtoconsole=1
```
-`test_bitcoin` creates a temporary working (data) directory with a randomly
-generated pathname within `test_common_Bitcoin Core/`, which in turn is within
+The `-printtoconsole=1` after the two dashes sends debug logging, which
+normally goes only to `debug.log` within the data directory, to the
+standard terminal output as well.
+
+Running `test_bitcoin` creates a temporary working (data) directory with a randomly
+generated pathname within `test_common bitcoin/`, which in turn is within
the system's temporary directory (see
[`temp_directory_path`](https://en.cppreference.com/w/cpp/filesystem/temp_directory_path)).
This data directory looks like a simplified form of the standard `bitcoind` data
@@ -73,7 +89,7 @@ have a `debug.log` file, for example.
The location of the temporary data directory can be specified with the
`-testdatadir` option. This can make debugging easier. The directory
path used is the argument path appended with
-`/test_common_Bitcoin Core/<test-name>/datadir`.
+`/test_common bitcoin/<test-name>/datadir`.
The directory path is created if necessary.
Specifying this argument also causes the data directory
not to be removed after the last test. This is useful for looking at
@@ -83,11 +99,11 @@ so no leftover state is used.)
```bash
$ build/src/test/test_bitcoin --run_test=getarg_tests/doubledash -- -testdatadir=/somewhere/mydatadir
-Test directory (will not be deleted): "/somewhere/mydatadir/test_common_Bitcoin Core/getarg_tests/doubledash/datadir"
+Test directory (will not be deleted): "/somewhere/mydatadir/test_common bitcoin/getarg_tests/doubledash/datadir"
Running 1 test case...
*** No errors detected
-$ ls -l '/somewhere/mydatadir/test_common_Bitcoin Core/getarg_tests/doubledash/datadir'
+$ ls -l '/somewhere/mydatadir/test_common bitcoin/getarg_tests/doubledash/datadir'
total 8
drwxrwxr-x 2 admin admin 4096 Nov 27 22:45 blocks
-rw-rw-r-- 1 admin admin 1003 Nov 27 22:45 debug.log
@@ -97,8 +113,6 @@ If you run an entire test suite, such as `--run_test=getarg_tests`, or all the t
(by not specifying `--run_test`), a separate directory
will be created for each individual test.
-Run `test_bitcoin --help` for the full list of tests.
-
### Adding test cases
To add a new unit test file to our test suite, you need
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index e5d25637bd..c4f58ebecf 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -47,11 +47,12 @@ static CService ResolveService(const std::string& ip, uint16_t port = 0)
}
-static std::vector<bool> FromBytes(const unsigned char* source, int vector_size)
+static std::vector<bool> FromBytes(std::span<const std::byte> source)
{
+ int vector_size(source.size() * 8);
std::vector<bool> result(vector_size);
for (int byte_i = 0; byte_i < vector_size / 8; ++byte_i) {
- unsigned char cur_byte = source[byte_i];
+ uint8_t cur_byte{std::to_integer<uint8_t>(source[byte_i])};
for (int bit_i = 0; bit_i < 8; ++bit_i) {
result[byte_i * 8 + bit_i] = (cur_byte >> bit_i) & 1;
}
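
The rewritten `FromBytes` above derives the bit count from the span length and unpacks each byte least-significant bit first. Below is a standalone sketch of that unpacking under the same LSB-first convention; the function name is illustrative only, not the test helper itself.

```cpp
#include <cassert>
#include <cstddef>
#include <span>
#include <vector>

// Expand every input byte into eight bools, least-significant bit first.
std::vector<bool> UnpackBitsLsbFirst(std::span<const std::byte> bytes)
{
    std::vector<bool> bits;
    bits.reserve(bytes.size() * 8);
    for (std::byte b : bytes) {
        for (int bit = 0; bit < 8; ++bit) {
            bits.push_back((std::to_integer<unsigned>(b) >> bit) & 1);
        }
    }
    return bits;
}

int main()
{
    const std::byte input[]{std::byte{0x01}};
    const auto bits{UnpackBitsLsbFirst(input)};
    assert(bits.size() == 8 && bits[0] && !bits[1]); // 0x01 -> 1,0,0,0,0,0,0,0
}
```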
@@ -195,21 +196,21 @@ BOOST_AUTO_TEST_CASE(addrman_select)
BOOST_AUTO_TEST_CASE(addrman_select_by_network)
{
auto addrman = std::make_unique<AddrMan>(EMPTY_NETGROUPMAN, DETERMINISTIC, GetCheckRatio(m_node));
- BOOST_CHECK(!addrman->Select(/*new_only=*/true, NET_IPV4).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_IPV4).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/true, {NET_IPV4}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_IPV4}).first.IsValid());
// add ipv4 address to the new table
CNetAddr source = ResolveIP("252.2.2.2");
CService addr1 = ResolveService("250.1.1.1", 8333);
BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK(addrman->Select(/*new_only=*/true, NET_IPV4).first == addr1);
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_IPV4).first == addr1);
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_IPV6).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_ONION).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_I2P).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_CJDNS).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/true, NET_CJDNS).first.IsValid());
+ BOOST_CHECK(addrman->Select(/*new_only=*/true, {NET_IPV4}).first == addr1);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_IPV4}).first == addr1);
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_IPV6}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_ONION}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_I2P}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_CJDNS}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/true, {NET_CJDNS}).first.IsValid());
BOOST_CHECK(addrman->Select(/*new_only=*/false).first == addr1);
// add I2P address to the new table
@@ -217,25 +218,29 @@ BOOST_AUTO_TEST_CASE(addrman_select_by_network)
i2p_addr.SetSpecial("udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.i2p");
BOOST_CHECK(addrman->Add({i2p_addr}, source));
- BOOST_CHECK(addrman->Select(/*new_only=*/true, NET_I2P).first == i2p_addr);
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_I2P).first == i2p_addr);
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_IPV4).first == addr1);
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_IPV6).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_ONION).first.IsValid());
- BOOST_CHECK(!addrman->Select(/*new_only=*/false, NET_CJDNS).first.IsValid());
+ BOOST_CHECK(addrman->Select(/*new_only=*/true, {NET_I2P}).first == i2p_addr);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_I2P}).first == i2p_addr);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_IPV4}).first == addr1);
+ std::unordered_set<Network> nets_with_entries = {NET_IPV4, NET_I2P};
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, nets_with_entries).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_IPV6}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_ONION}).first.IsValid());
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, {NET_CJDNS}).first.IsValid());
+ std::unordered_set<Network> nets_without_entries = {NET_IPV6, NET_ONION, NET_CJDNS};
+ BOOST_CHECK(!addrman->Select(/*new_only=*/false, nets_without_entries).first.IsValid());
// bump I2P address to tried table
BOOST_CHECK(addrman->Good(i2p_addr));
- BOOST_CHECK(!addrman->Select(/*new_only=*/true, NET_I2P).first.IsValid());
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_I2P).first == i2p_addr);
+ BOOST_CHECK(!addrman->Select(/*new_only=*/true, {NET_I2P}).first.IsValid());
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_I2P}).first == i2p_addr);
// add another I2P address to the new table
CAddress i2p_addr2;
i2p_addr2.SetSpecial("c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p");
BOOST_CHECK(addrman->Add({i2p_addr2}, source));
- BOOST_CHECK(addrman->Select(/*new_only=*/true, NET_I2P).first == i2p_addr2);
+ BOOST_CHECK(addrman->Select(/*new_only=*/true, {NET_I2P}).first == i2p_addr2);
// ensure that both new and tried table are selected from
bool new_selected{false};
@@ -243,7 +248,7 @@ BOOST_AUTO_TEST_CASE(addrman_select_by_network)
int counter = 256;
while (--counter > 0 && (!new_selected || !tried_selected)) {
- const CAddress selected{addrman->Select(/*new_only=*/false, NET_I2P).first};
+ const CAddress selected{addrman->Select(/*new_only=*/false, {NET_I2P}).first};
BOOST_REQUIRE(selected == i2p_addr || selected == i2p_addr2);
if (selected == i2p_addr) {
tried_selected = true;
@@ -276,7 +281,7 @@ BOOST_AUTO_TEST_CASE(addrman_select_special)
// since the only ipv4 address is on the new table, ensure that the new
// table gets selected even if new_only is false. if the table was being
// selected at random, this test will sporadically fail
- BOOST_CHECK(addrman->Select(/*new_only=*/false, NET_IPV4).first == addr1);
+ BOOST_CHECK(addrman->Select(/*new_only=*/false, {NET_IPV4}).first == addr1);
}
BOOST_AUTO_TEST_CASE(addrman_new_collisions)
@@ -576,7 +581,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
// 101.8.0.0/16 AS8
BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
{
- std::vector<bool> asmap = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
+ std::vector<bool> asmap = FromBytes(test::data::asmap);
NetGroupManager ngm_asmap{asmap};
CAddress addr1 = CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE);
@@ -630,7 +635,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
{
- std::vector<bool> asmap = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
+ std::vector<bool> asmap = FromBytes(test::data::asmap);
NetGroupManager ngm_asmap{asmap};
CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
@@ -708,7 +713,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
BOOST_AUTO_TEST_CASE(addrman_serialization)
{
- std::vector<bool> asmap1 = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
+ std::vector<bool> asmap1 = FromBytes(test::data::asmap);
NetGroupManager netgroupman{asmap1};
const auto ratio = GetCheckRatio(m_node);
diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp
index f178499299..37a39adb9c 100644
--- a/src/test/arith_uint256_tests.cpp
+++ b/src/test/arith_uint256_tests.cpp
@@ -23,9 +23,6 @@ static inline arith_uint256 arith_uint256V(const std::vector<unsigned char>& vch
{
return UintToArith256(uint256(vch));
}
-// Takes a number written in hex (with most significant digits first).
-static inline arith_uint256 arith_uint256S(std::string_view str) { return UintToArith256(uint256S(str)); }
-
const unsigned char R1Array[] =
"\x9c\x52\x4a\xdb\xcf\x56\x11\x12\x2b\x29\x12\x5e\x5d\x35\xd2\xd2"
"\x22\x81\xaa\xb5\x33\xf0\x08\x32\xd5\x56\xb1\xf9\xea\xe5\x1d\x7d";
@@ -39,8 +36,6 @@ const unsigned char R2Array[] =
"\x13\x30\x47\xa3\x19\x2d\xda\x71\x49\x13\x72\xf0\xb4\xca\x81\xd7";
const arith_uint256 R2L = arith_uint256V(std::vector<unsigned char>(R2Array,R2Array+32));
-const char R1LplusR2L[] = "549FB09FEA236A1EA3E31D4D58F1B1369288D204211CA751527CFC175767850C";
-
const unsigned char ZeroArray[] =
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
@@ -97,27 +92,25 @@ BOOST_AUTO_TEST_CASE( basics ) // constructors, equality, inequality
}
BOOST_CHECK(ZeroL == (OneL << 256));
- // String Constructor and Copy Constructor
- BOOST_CHECK(arith_uint256S("0x" + R1L.ToString()) == R1L);
- BOOST_CHECK(arith_uint256S("0x" + R2L.ToString()) == R2L);
- BOOST_CHECK(arith_uint256S("0x" + ZeroL.ToString()) == ZeroL);
- BOOST_CHECK(arith_uint256S("0x" + OneL.ToString()) == OneL);
- BOOST_CHECK(arith_uint256S("0x" + MaxL.ToString()) == MaxL);
- BOOST_CHECK(arith_uint256S(R1L.ToString()) == R1L);
- BOOST_CHECK(arith_uint256S(" 0x" + R1L.ToString() + " ") == R1L);
- BOOST_CHECK(arith_uint256S("") == ZeroL);
- BOOST_CHECK(arith_uint256S("1") == OneL);
- BOOST_CHECK(R1L == arith_uint256S(R1ArrayHex));
+ // Construct from hex string
+ BOOST_CHECK_EQUAL(UintToArith256(uint256::FromHex(R1L.ToString()).value()), R1L);
+ BOOST_CHECK_EQUAL(UintToArith256(uint256::FromHex(R2L.ToString()).value()), R2L);
+ BOOST_CHECK_EQUAL(UintToArith256(uint256::FromHex(ZeroL.ToString()).value()), ZeroL);
+ BOOST_CHECK_EQUAL(UintToArith256(uint256::FromHex(OneL.ToString()).value()), OneL);
+ BOOST_CHECK_EQUAL(UintToArith256(uint256::FromHex(MaxL.ToString()).value()), MaxL);
+ BOOST_CHECK_EQUAL(UintToArith256(uint256::FromHex(R1ArrayHex).value()), R1L);
+
+ // Copy constructor
BOOST_CHECK(arith_uint256(R1L) == R1L);
BOOST_CHECK((arith_uint256(R1L^R2L)^R2L) == R1L);
BOOST_CHECK(arith_uint256(ZeroL) == ZeroL);
BOOST_CHECK(arith_uint256(OneL) == OneL);
// uint64_t constructor
- BOOST_CHECK((R1L & arith_uint256S("0xffffffffffffffff")) == arith_uint256(R1LLow64));
- BOOST_CHECK(ZeroL == arith_uint256(0));
- BOOST_CHECK(OneL == arith_uint256(1));
- BOOST_CHECK(arith_uint256S("0xffffffffffffffff") == arith_uint256(0xffffffffffffffffULL));
+ BOOST_CHECK_EQUAL(R1L & arith_uint256{0xffffffffffffffff}, arith_uint256{R1LLow64});
+ BOOST_CHECK_EQUAL(ZeroL, arith_uint256{0});
+ BOOST_CHECK_EQUAL(OneL, arith_uint256{1});
+ BOOST_CHECK_EQUAL(arith_uint256{0xffffffffffffffff}, arith_uint256{0xffffffffffffffffULL});
// Assignment (from base_uint)
arith_uint256 tmpL = ~ZeroL; BOOST_CHECK(tmpL == ~ZeroL);
@@ -284,15 +277,12 @@ BOOST_AUTO_TEST_CASE( comparison ) // <= >= < >
BOOST_CHECK_LT(ZeroL,
OneL);
- // Verify hex number representation has the most significant digits first.
- BOOST_CHECK_LT(arith_uint256S("0000000000000000000000000000000000000000000000000000000000000001"),
- arith_uint256S("1000000000000000000000000000000000000000000000000000000000000000"));
}
BOOST_AUTO_TEST_CASE( plusMinus )
{
arith_uint256 TmpL = 0;
- BOOST_CHECK(R1L + R2L == arith_uint256S(R1LplusR2L));
+ BOOST_CHECK_EQUAL(R1L + R2L, UintToArith256(uint256{"549fb09fea236a1ea3e31d4d58f1b1369288d204211ca751527cfc175767850c"}));
TmpL += R1L;
BOOST_CHECK(TmpL == R1L);
TmpL += R2L;
@@ -356,8 +346,8 @@ BOOST_AUTO_TEST_CASE( multiply )
BOOST_AUTO_TEST_CASE( divide )
{
- arith_uint256 D1L{arith_uint256S("AD7133AC1977FA2B7")};
- arith_uint256 D2L{arith_uint256S("ECD751716")};
+ arith_uint256 D1L{UintToArith256(uint256{"00000000000000000000000000000000000000000000000ad7133ac1977fa2b7"})};
+ arith_uint256 D2L{UintToArith256(uint256{"0000000000000000000000000000000000000000000000000000000ecd751716"})};
BOOST_CHECK((R1L / D1L).ToString() == "00000000000000000b8ac01106981635d9ed112290f8895545a7654dde28fb3a");
BOOST_CHECK((R1L / D2L).ToString() == "000000000873ce8efec5b67150bad3aa8c5fcb70e947586153bf2cec7c37c57a");
BOOST_CHECK(R1L / OneL == R1L);
@@ -571,4 +561,51 @@ BOOST_AUTO_TEST_CASE( getmaxcoverage ) // some more tests just to get 100% cover
CHECKBITWISEOPERATOR(R1,~R2,&)
}
+BOOST_AUTO_TEST_CASE(conversion)
+{
+ for (const arith_uint256& arith : {ZeroL, OneL, R1L, R2L}) {
+ const auto u256{uint256::FromHex(arith.GetHex()).value()};
+ BOOST_CHECK_EQUAL(UintToArith256(ArithToUint256(arith)), arith);
+ BOOST_CHECK_EQUAL(UintToArith256(u256), arith);
+ BOOST_CHECK_EQUAL(u256, ArithToUint256(arith));
+ BOOST_CHECK_EQUAL(ArithToUint256(arith).GetHex(), UintToArith256(u256).GetHex());
+ }
+
+ for (uint8_t num : {0, 1, 0xff}) {
+ BOOST_CHECK_EQUAL(UintToArith256(uint256{num}), arith_uint256{num});
+ BOOST_CHECK_EQUAL(uint256{num}, ArithToUint256(arith_uint256{num}));
+ BOOST_CHECK_EQUAL(UintToArith256(uint256{num}), num);
+ }
+}
+
+BOOST_AUTO_TEST_CASE(operator_with_self)
+{
+ /* Clang 16 and earlier detects v -= v and v /= v as self-assignments
+ to 0 and 1 respectively.
+ See: https://github.com/llvm/llvm-project/issues/42469
+ and the fix in commit c5302325b2a62d77cf13dd16cd5c19141862fed0 .
+
+ This makes some sense for arithmetic classes, but could be considered a bug
+ elsewhere. Disable the warning here so that the code can be tested, but the
+ warning should remain on as there will likely always be a better way to
+ express this.
+ */
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wself-assign-overloaded"
+#endif
+ arith_uint256 v{2};
+ v *= v;
+ BOOST_CHECK_EQUAL(v, arith_uint256{4});
+ v /= v;
+ BOOST_CHECK_EQUAL(v, arith_uint256{1});
+ v += v;
+ BOOST_CHECK_EQUAL(v, arith_uint256{2});
+ v -= v;
+ BOOST_CHECK_EQUAL(v, arith_uint256{0});
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp
index 067a32d6a4..48ae874fcd 100644
--- a/src/test/blockfilter_index_tests.cpp
+++ b/src/test/blockfilter_index_tests.cpp
@@ -103,7 +103,7 @@ bool BuildChainTestingSetup::BuildChain(const CBlockIndex* pindex,
CBlockHeader header = block->GetBlockHeader();
BlockValidationState state;
- if (!Assert(m_node.chainman)->ProcessNewBlockHeaders({header}, true, state, &pindex)) {
+ if (!Assert(m_node.chainman)->ProcessNewBlockHeaders({{header}}, true, state, &pindex)) {
return false;
}
}
@@ -144,7 +144,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup)
BOOST_REQUIRE(filter_index.StartBackgroundSync());
// Allow filter index to catch up with the block index.
- IndexWaitSynced(filter_index, *Assert(m_node.shutdown));
+ IndexWaitSynced(filter_index, *Assert(m_node.shutdown_signal));
// Check that filter index has all blocks that were in the chain before it started.
{
diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp
index 121f00bd25..c2b95dd861 100644
--- a/src/test/blockmanager_tests.cpp
+++ b/src/test/blockmanager_tests.cpp
@@ -28,13 +28,13 @@ BOOST_FIXTURE_TEST_SUITE(blockmanager_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos)
{
const auto params {CreateChainParams(ArgsManager{}, ChainType::MAIN)};
- KernelNotifications notifications{*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings)};
+ KernelNotifications notifications{Assert(m_node.shutdown_request), m_node.exit_status, *Assert(m_node.warnings)};
const BlockManager::Options blockman_opts{
.chainparams = *params,
.blocks_dir = m_args.GetBlocksDirPath(),
.notifications = notifications,
};
- BlockManager blockman{*Assert(m_node.shutdown), blockman_opts};
+ BlockManager blockman{*Assert(m_node.shutdown_signal), blockman_opts};
// simulate adding a genesis block normally
BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0).nPos, BLOCK_SERIALIZATION_HEADER_SIZE);
// simulate what happens during reindex
@@ -135,13 +135,13 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup)
BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file)
{
- KernelNotifications notifications{*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings)};
+ KernelNotifications notifications{Assert(m_node.shutdown_request), m_node.exit_status, *Assert(m_node.warnings)};
node::BlockManager::Options blockman_opts{
.chainparams = Params(),
.blocks_dir = m_args.GetBlocksDirPath(),
.notifications = notifications,
};
- BlockManager blockman{*Assert(m_node.shutdown), blockman_opts};
+ BlockManager blockman{*Assert(m_node.shutdown_signal), blockman_opts};
// Test blocks with no transactions, not even a coinbase
CBlock block1;
diff --git a/src/test/cluster_linearize_tests.cpp b/src/test/cluster_linearize_tests.cpp
index d15e783ea1..265ccdc805 100644
--- a/src/test/cluster_linearize_tests.cpp
+++ b/src/test/cluster_linearize_tests.cpp
@@ -18,13 +18,24 @@ using namespace cluster_linearize;
namespace {
+/** Special magic value that indicates to TestDepGraphSerialization that a cluster entry represents
+ * a hole. */
+constexpr std::pair<FeeFrac, TestBitSet> HOLE{FeeFrac{0, 0x3FFFFF}, {}};
+
template<typename SetType>
-void TestDepGraphSerialization(const Cluster<SetType>& cluster, const std::string& hexenc)
+void TestDepGraphSerialization(const std::vector<std::pair<FeeFrac, SetType>>& cluster, const std::string& hexenc)
{
- DepGraph depgraph(cluster);
-
- // Run normal sanity and correspondence checks, which includes a round-trip test.
- VerifyDepGraphFromCluster(cluster, depgraph);
+ // Construct DepGraph from cluster argument.
+ DepGraph<SetType> depgraph;
+ SetType holes;
+ for (ClusterIndex i = 0; i < cluster.size(); ++i) {
+ depgraph.AddTransaction(cluster[i].first);
+ if (cluster[i] == HOLE) holes.Set(i);
+ }
+ for (ClusterIndex i = 0; i < cluster.size(); ++i) {
+ depgraph.AddDependencies(cluster[i].second, i);
+ }
+ depgraph.RemoveTransactions(holes);
// There may be multiple serializations of the same graph, but DepGraphFormatter's serializer
// only produces one of those. Verify that hexenc matches that canonical serialization.
@@ -133,6 +144,34 @@ BOOST_AUTO_TEST_CASE(depgraph_ser_tests)
skip insertion C): D,A,B,E,C */
"00" /* end of graph */
);
+
+ // Transactions: A(1,2), B(3,1), C(2,1), D(1,3), E(1,1). Deps: C->A, D->A, D->B, E->D.
+ // In order: [_, D, _, _, A, _, B, _, _, _, E, _, _, C] (_ being holes). Internally serialized
+ // in order A,B,C,D,E.
+ TestDepGraphSerialization<TestBitSet>(
+ {HOLE, {{1, 3}, {4, 6}}, HOLE, HOLE, {{1, 2}, {}}, HOLE, {{3, 1}, {}}, HOLE, HOLE, HOLE, {{1, 1}, {1}}, HOLE, HOLE, {{2, 1}, {4}}},
+ "02" /* A size */
+ "02" /* A fee */
+ "03" /* A insertion position (3 holes): _, _, _, A */
+ "01" /* B size */
+ "06" /* B fee */
+ "06" /* B insertion position (skip B->A dependency, skip 4 inserts, add 1 hole): _, _, _, A, _, B */
+ "01" /* C size */
+ "04" /* C fee */
+ "01" /* C->A dependency (skip C->B dependency) */
+ "0b" /* C insertion position (skip 6 inserts, add 5 holes): _, _, _, A, _, B, _, _, _, _, _, C */
+ "03" /* D size */
+ "02" /* D fee */
+ "01" /* D->B dependency (skip D->C dependency) */
+ "00" /* D->A dependency (no skips) */
+ "0b" /* D insertion position (skip 11 inserts): _, D, _, _, A, _, B, _, _, _, _, _, C */
+ "01" /* E size */
+ "02" /* E fee */
+ "00" /* E->D dependency (no skips) */
+ "04" /* E insertion position (skip E->C dependency, E->B and E->A are implied, skip 3
+ inserts): _, D, _, _, A, _, B, _, _, _, E, _, _, C */
+ "00" /* end of graph */
+ );
}
BOOST_AUTO_TEST_SUITE_END()
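
The `HOLE` bookkeeping above adds a placeholder transaction and then calls `RemoveTransactions`, so the surviving entries keep their original positions. As a loose analogy only, with no claim about DepGraph internals, the effect is like clearing slots in an optional-based array instead of erasing from a vector:

```cpp
#include <array>
#include <cassert>
#include <optional>

int main()
{
    // Positions are stable: clearing slot 1 leaves slots 0 and 2 untouched,
    // unlike std::vector::erase, which would shift later elements down.
    std::array<std::optional<int>, 3> slots{10, 20, 30};
    slots[1].reset();
    assert(slots[0].value() == 10 && !slots[1].has_value() && slots[2].value() == 30);
}
```

Serialization then has to say which positions are empty, which is what the hole counts folded into the insertion-position bytes in the commented hex dump above account for.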
diff --git a/src/test/coinstatsindex_tests.cpp b/src/test/coinstatsindex_tests.cpp
index 08814c1499..e09aad05e9 100644
--- a/src/test/coinstatsindex_tests.cpp
+++ b/src/test/coinstatsindex_tests.cpp
@@ -35,7 +35,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_initial_sync, TestChain100Setup)
BOOST_REQUIRE(coin_stats_index.StartBackgroundSync());
- IndexWaitSynced(coin_stats_index, *Assert(m_node.shutdown));
+ IndexWaitSynced(coin_stats_index, *Assert(m_node.shutdown_signal));
// Check that CoinStatsIndex works for genesis block.
const CBlockIndex* genesis_block_index;
@@ -86,7 +86,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_unclean_shutdown, TestChain100Setup)
CoinStatsIndex index{interfaces::MakeChain(m_node), 1 << 20};
BOOST_REQUIRE(index.Init());
BOOST_REQUIRE(index.StartBackgroundSync());
- IndexWaitSynced(index, *Assert(m_node.shutdown));
+ IndexWaitSynced(index, *Assert(m_node.shutdown_signal));
std::shared_ptr<const CBlock> new_block;
CBlockIndex* new_block_index = nullptr;
{
diff --git a/src/test/feefrac_tests.cpp b/src/test/feefrac_tests.cpp
index 5af3c3d7ed..41c9c0a633 100644
--- a/src/test/feefrac_tests.cpp
+++ b/src/test/feefrac_tests.cpp
@@ -15,7 +15,7 @@ BOOST_AUTO_TEST_CASE(feefrac_operators)
FeeFrac sum{1500, 400};
FeeFrac diff{500, -200};
FeeFrac empty{0, 0};
- FeeFrac zero_fee{0, 1}; // zero-fee allowed
+ [[maybe_unused]] FeeFrac zero_fee{0, 1}; // zero-fee allowed
BOOST_CHECK(empty == FeeFrac{}); // same as no-args
diff --git a/src/test/fuzz/CMakeLists.txt b/src/test/fuzz/CMakeLists.txt
index 165add2e5a..1c7b0d5c25 100644
--- a/src/test/fuzz/CMakeLists.txt
+++ b/src/test/fuzz/CMakeLists.txt
@@ -72,6 +72,7 @@ add_executable(fuzz
netbase_dns_lookup.cpp
node_eviction.cpp
p2p_handshake.cpp
+ p2p_headers_presync.cpp
p2p_transport_serialization.cpp
package_eval.cpp
parse_hd_keypath.cpp
diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp
index dbec2bc858..bcc3dd3e14 100644
--- a/src/test/fuzz/addrman.cpp
+++ b/src/test/fuzz/addrman.cpp
@@ -186,7 +186,7 @@ public:
return false;
}
- auto IdsReferToSameAddress = [&](int id, int other_id) EXCLUSIVE_LOCKS_REQUIRED(m_impl->cs, other.m_impl->cs) {
+ auto IdsReferToSameAddress = [&](nid_type id, nid_type other_id) EXCLUSIVE_LOCKS_REQUIRED(m_impl->cs, other.m_impl->cs) {
if (id == -1 && other_id == -1) {
return true;
}
@@ -250,19 +250,30 @@ FUZZ_TARGET(addrman, .init = initialize_addrman)
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) {
addresses.push_back(ConsumeAddress(fuzzed_data_provider));
}
- addr_man.Add(addresses, ConsumeNetAddr(fuzzed_data_provider), std::chrono::seconds{ConsumeTime(fuzzed_data_provider, 0, 100000000)});
+ auto net_addr = ConsumeNetAddr(fuzzed_data_provider);
+ auto time_penalty = std::chrono::seconds{ConsumeTime(fuzzed_data_provider, 0, 100000000)};
+ addr_man.Add(addresses, net_addr, time_penalty);
},
[&] {
- addr_man.Good(ConsumeService(fuzzed_data_provider), NodeSeconds{std::chrono::seconds{ConsumeTime(fuzzed_data_provider)}});
+ auto addr = ConsumeService(fuzzed_data_provider);
+ auto time = NodeSeconds{std::chrono::seconds{ConsumeTime(fuzzed_data_provider)}};
+ addr_man.Good(addr, time);
},
[&] {
- addr_man.Attempt(ConsumeService(fuzzed_data_provider), fuzzed_data_provider.ConsumeBool(), NodeSeconds{std::chrono::seconds{ConsumeTime(fuzzed_data_provider)}});
+ auto addr = ConsumeService(fuzzed_data_provider);
+ auto count_failure = fuzzed_data_provider.ConsumeBool();
+ auto time = NodeSeconds{std::chrono::seconds{ConsumeTime(fuzzed_data_provider)}};
+ addr_man.Attempt(addr, count_failure, time);
},
[&] {
- addr_man.Connected(ConsumeService(fuzzed_data_provider), NodeSeconds{std::chrono::seconds{ConsumeTime(fuzzed_data_provider)}});
+ auto addr = ConsumeService(fuzzed_data_provider);
+ auto time = NodeSeconds{std::chrono::seconds{ConsumeTime(fuzzed_data_provider)}};
+ addr_man.Connected(addr, time);
},
[&] {
- addr_man.SetServices(ConsumeService(fuzzed_data_provider), ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS));
+ auto addr = ConsumeService(fuzzed_data_provider);
+ auto n_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
+ addr_man.SetServices(addr, n_services);
});
}
const AddrMan& const_addr_man{addr_man};
@@ -270,12 +281,19 @@ FUZZ_TARGET(addrman, .init = initialize_addrman)
if (fuzzed_data_provider.ConsumeBool()) {
network = fuzzed_data_provider.PickValueInArray(ALL_NETWORKS);
}
- (void)const_addr_man.GetAddr(
- /*max_addresses=*/fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096),
- /*max_pct=*/fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096),
- network,
- /*filtered=*/fuzzed_data_provider.ConsumeBool());
- (void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool(), network);
+ auto max_addresses = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096);
+ auto max_pct = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096);
+ auto filtered = fuzzed_data_provider.ConsumeBool();
+ (void)const_addr_man.GetAddr(max_addresses, max_pct, network, filtered);
+
+ std::unordered_set<Network> nets;
+ for (const auto& net : ALL_NETWORKS) {
+ if (fuzzed_data_provider.ConsumeBool()) {
+ nets.insert(net);
+ }
+ }
+ (void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool(), nets);
+
std::optional<bool> in_new;
if (fuzzed_data_provider.ConsumeBool()) {
in_new = fuzzed_data_provider.ConsumeBool();
diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp
index 45316b6b21..81761c7bf9 100644
--- a/src/test/fuzz/autofile.cpp
+++ b/src/test/fuzz/autofile.cpp
@@ -56,7 +56,6 @@ FUZZ_TARGET(autofile)
WriteToStream(fuzzed_data_provider, auto_file);
});
}
- (void)auto_file.Get();
(void)auto_file.IsNull();
if (fuzzed_data_provider.ConsumeBool()) {
FILE* f = auto_file.release();
diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp
index b26151f63c..4165cc6b2c 100644
--- a/src/test/fuzz/banman.cpp
+++ b/src/test/fuzz/banman.cpp
@@ -78,7 +78,9 @@ FUZZ_TARGET(banman, .init = initialize_banman)
contains_invalid = true;
}
}
- ban_man.Ban(net_addr, ConsumeBanTimeOffset(fuzzed_data_provider), fuzzed_data_provider.ConsumeBool());
+ auto ban_time_offset = ConsumeBanTimeOffset(fuzzed_data_provider);
+ auto since_unix_epoch = fuzzed_data_provider.ConsumeBool();
+ ban_man.Ban(net_addr, ban_time_offset, since_unix_epoch);
},
[&] {
CSubNet subnet{ConsumeSubNet(fuzzed_data_provider)};
@@ -86,7 +88,9 @@ FUZZ_TARGET(banman, .init = initialize_banman)
if (!subnet.IsValid()) {
contains_invalid = true;
}
- ban_man.Ban(subnet, ConsumeBanTimeOffset(fuzzed_data_provider), fuzzed_data_provider.ConsumeBool());
+ auto ban_time_offset = ConsumeBanTimeOffset(fuzzed_data_provider);
+ auto since_unix_epoch = fuzzed_data_provider.ConsumeBool();
+ ban_man.Ban(subnet, ban_time_offset, since_unix_epoch);
},
[&] {
ban_man.ClearBanned();
diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp
index e30c19b265..a6a042a25c 100644
--- a/src/test/fuzz/buffered_file.cpp
+++ b/src/test/fuzz/buffered_file.cpp
@@ -25,7 +25,9 @@ FUZZ_TARGET(buffered_file)
ConsumeRandomLengthByteVector<std::byte>(fuzzed_data_provider),
};
try {
- opt_buffered_file.emplace(fuzzed_file, fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096), fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096));
+ auto n_buf_size = fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096);
+ auto n_rewind_in = fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096);
+ opt_buffered_file.emplace(fuzzed_file, n_buf_size, n_rewind_in);
} catch (const std::ios_base::failure&) {
}
if (opt_buffered_file && !fuzzed_file.IsNull()) {
diff --git a/src/test/fuzz/cluster_linearize.cpp b/src/test/fuzz/cluster_linearize.cpp
index 2dfdfbb41d..5b3770636a 100644
--- a/src/test/fuzz/cluster_linearize.cpp
+++ b/src/test/fuzz/cluster_linearize.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <cluster_linearize.h>
+#include <random.h>
#include <serialize.h>
#include <streams.h>
#include <test/fuzz/fuzz.h>
@@ -36,7 +37,7 @@ class SimpleCandidateFinder
public:
/** Construct a SimpleCandidateFinder for a given graph. */
SimpleCandidateFinder(const DepGraph<SetType>& depgraph LIFETIMEBOUND) noexcept :
- m_depgraph(depgraph), m_todo{SetType::Fill(depgraph.TxCount())} {}
+ m_depgraph(depgraph), m_todo{depgraph.Positions()} {}
/** Remove a set of transactions from the set of to-be-linearized ones. */
void MarkDone(SetType select) noexcept { m_todo -= select; }
@@ -106,7 +107,7 @@ class ExhaustiveCandidateFinder
public:
/** Construct an ExhaustiveCandidateFinder for a given graph. */
ExhaustiveCandidateFinder(const DepGraph<SetType>& depgraph LIFETIMEBOUND) noexcept :
- m_depgraph(depgraph), m_todo{SetType::Fill(depgraph.TxCount())} {}
+ m_depgraph(depgraph), m_todo{depgraph.Positions()} {}
/** Remove a set of transactions from the set of to-be-linearized ones. */
void MarkDone(SetType select) noexcept { m_todo -= select; }
@@ -152,7 +153,7 @@ std::pair<std::vector<ClusterIndex>, bool> SimpleLinearize(const DepGraph<SetTyp
{
std::vector<ClusterIndex> linearization;
SimpleCandidateFinder finder(depgraph);
- SetType todo = SetType::Fill(depgraph.TxCount());
+ SetType todo = depgraph.Positions();
bool optimal = true;
while (todo.Any()) {
auto [candidate, iterations_done] = finder.FindCandidateSet(max_iterations);
@@ -165,6 +166,23 @@ std::pair<std::vector<ClusterIndex>, bool> SimpleLinearize(const DepGraph<SetTyp
return {std::move(linearization), optimal};
}
+/** Stitch connected components together in a DepGraph, guaranteeing its corresponding cluster is connected. */
+template<typename BS>
+void MakeConnected(DepGraph<BS>& depgraph)
+{
+ auto todo = depgraph.Positions();
+ auto comp = depgraph.FindConnectedComponent(todo);
+ Assume(depgraph.IsConnected(comp));
+ todo -= comp;
+ while (todo.Any()) {
+ auto nextcomp = depgraph.FindConnectedComponent(todo);
+ Assume(depgraph.IsConnected(nextcomp));
+ depgraph.AddDependencies(BS::Singleton(comp.Last()), nextcomp.First());
+ todo -= nextcomp;
+ comp = nextcomp;
+ }
+}
+
/** Given a dependency graph, and a todo set, read a topological subset of todo from reader. */
template<typename SetType>
SetType ReadTopologicalSet(const DepGraph<SetType>& depgraph, const SetType& todo, SpanReader& reader)
@@ -188,7 +206,7 @@ template<typename BS>
std::vector<ClusterIndex> ReadLinearization(const DepGraph<BS>& depgraph, SpanReader& reader)
{
std::vector<ClusterIndex> linearization;
- TestBitSet todo = TestBitSet::Fill(depgraph.TxCount());
+ TestBitSet todo = depgraph.Positions();
// In every iteration one topologically-valid transaction is appended to linearization.
while (todo.Any()) {
// Compute the set of transactions with no not-yet-included ancestors.
@@ -223,59 +241,157 @@ std::vector<ClusterIndex> ReadLinearization(const DepGraph<BS>& depgraph, SpanRe
} // namespace
-FUZZ_TARGET(clusterlin_add_dependency)
-{
- // Verify that computing a DepGraph from a cluster, or building it step by step using AddDependency
- // have the same effect.
-
- // Construct a cluster of a certain length, with no dependencies.
- FuzzedDataProvider provider(buffer.data(), buffer.size());
- auto num_tx = provider.ConsumeIntegralInRange<ClusterIndex>(2, 32);
- Cluster<TestBitSet> cluster(num_tx, std::pair{FeeFrac{0, 1}, TestBitSet{}});
- // Construct the corresponding DepGraph object (also no dependencies).
- DepGraph depgraph(cluster);
- SanityCheck(depgraph);
- // Read (parent, child) pairs, and add them to the cluster and depgraph.
- LIMITED_WHILE(provider.remaining_bytes() > 0, TestBitSet::Size() * TestBitSet::Size()) {
- auto parent = provider.ConsumeIntegralInRange<ClusterIndex>(0, num_tx - 1);
- auto child = provider.ConsumeIntegralInRange<ClusterIndex>(0, num_tx - 2);
- child += (child >= parent);
- cluster[child].second.Set(parent);
- depgraph.AddDependency(parent, child);
- assert(depgraph.Ancestors(child)[parent]);
- assert(depgraph.Descendants(parent)[child]);
- }
- // Sanity check the result.
- SanityCheck(depgraph);
- // Verify that the resulting DepGraph matches one recomputed from the cluster.
- assert(DepGraph(cluster) == depgraph);
-}
-
-FUZZ_TARGET(clusterlin_cluster_serialization)
+FUZZ_TARGET(clusterlin_depgraph_sim)
{
- // Verify that any graph of transactions has its ancestry correctly computed by DepGraph, and
- // if it is a DAG, that it can be serialized as a DepGraph in a way that roundtrips. This
- // guarantees that any acyclic cluster has a corresponding DepGraph serialization.
+ // Simulation test to verify the full behavior of DepGraph.
FuzzedDataProvider provider(buffer.data(), buffer.size());
- // Construct a cluster in a naive way (using a FuzzedDataProvider-based serialization).
- Cluster<TestBitSet> cluster;
- auto num_tx = provider.ConsumeIntegralInRange<ClusterIndex>(1, 32);
- cluster.resize(num_tx);
- for (ClusterIndex i = 0; i < num_tx; ++i) {
- cluster[i].first.size = provider.ConsumeIntegralInRange<int32_t>(1, 0x3fffff);
- cluster[i].first.fee = provider.ConsumeIntegralInRange<int64_t>(-0x8000000000000, 0x7ffffffffffff);
- for (ClusterIndex j = 0; j < num_tx; ++j) {
- if (i == j) continue;
- if (provider.ConsumeBool()) cluster[i].second.Set(j);
+ /** Real DepGraph being tested. */
+ DepGraph<TestBitSet> real;
+ /** Simulated DepGraph (sim[i] is std::nullopt if position i does not exist; otherwise,
+ * sim[i]->first is its individual feerate, and sim[i]->second is its set of ancestors). */
+ std::array<std::optional<std::pair<FeeFrac, TestBitSet>>, TestBitSet::Size()> sim;
+ /** The number of non-nullopt positions in sim. */
+ ClusterIndex num_tx_sim{0};
+
+ /** Read a valid index of a transaction from the provider. */
+ auto idx_fn = [&]() {
+ auto offset = provider.ConsumeIntegralInRange<ClusterIndex>(0, num_tx_sim - 1);
+ for (ClusterIndex i = 0; i < sim.size(); ++i) {
+ if (!sim[i].has_value()) continue;
+ if (offset == 0) return i;
+ --offset;
+ }
+ assert(false);
+ return ClusterIndex(-1);
+ };
+
+ /** Read a valid subset of the transactions from the provider. */
+ auto subset_fn = [&]() {
+ auto range = (uint64_t{1} << num_tx_sim) - 1;
+ const auto mask = provider.ConsumeIntegralInRange<uint64_t>(0, range);
+ auto mask_shifted = mask;
+ TestBitSet subset;
+ for (ClusterIndex i = 0; i < sim.size(); ++i) {
+ if (!sim[i].has_value()) continue;
+ if (mask_shifted & 1) {
+ subset.Set(i);
+ }
+ mask_shifted >>= 1;
+ }
+ assert(mask_shifted == 0);
+ return subset;
+ };
+
+ /** Read any set of transactions from the provider (including unused positions). */
+ auto set_fn = [&]() {
+ auto range = (uint64_t{1} << sim.size()) - 1;
+ const auto mask = provider.ConsumeIntegralInRange<uint64_t>(0, range);
+ TestBitSet set;
+ for (ClusterIndex i = 0; i < sim.size(); ++i) {
+ if ((mask >> i) & 1) {
+ set.Set(i);
+ }
+ }
+ return set;
+ };
+
+ /** Propagate ancestor information in sim. */
+ auto anc_update_fn = [&]() {
+ while (true) {
+ bool updates{false};
+ for (ClusterIndex chl = 0; chl < sim.size(); ++chl) {
+ if (!sim[chl].has_value()) continue;
+ for (auto par : sim[chl]->second) {
+ if (!sim[chl]->second.IsSupersetOf(sim[par]->second)) {
+ sim[chl]->second |= sim[par]->second;
+ updates = true;
+ }
+ }
+ }
+ if (!updates) break;
+ }
+ };
+
+ /** Compare the state of transaction i in the simulation with the real one. */
+ auto check_fn = [&](ClusterIndex i) {
+ // Compare used positions.
+ assert(real.Positions()[i] == sim[i].has_value());
+ if (sim[i].has_value()) {
+ // Compare feerate.
+ assert(real.FeeRate(i) == sim[i]->first);
+ // Compare ancestors (note that SanityCheck verifies correspondence between ancestors
+ // and descendants, so we can restrict ourselves to ancestors here).
+ assert(real.Ancestors(i) == sim[i]->second);
}
+ };
+
+ LIMITED_WHILE(provider.remaining_bytes() > 0, 1000) {
+ uint8_t command = provider.ConsumeIntegral<uint8_t>();
+ if (num_tx_sim == 0 || ((command % 3) <= 0 && num_tx_sim < TestBitSet::Size())) {
+ // AddTransaction.
+ auto fee = provider.ConsumeIntegralInRange<int64_t>(-0x8000000000000, 0x7ffffffffffff);
+ auto size = provider.ConsumeIntegralInRange<int32_t>(1, 0x3fffff);
+ FeeFrac feerate{fee, size};
+ // Apply to DepGraph.
+ auto idx = real.AddTransaction(feerate);
+ // Verify that the returned index is correct.
+ assert(!sim[idx].has_value());
+ for (ClusterIndex i = 0; i < TestBitSet::Size(); ++i) {
+ if (!sim[i].has_value()) {
+ assert(idx == i);
+ break;
+ }
+ }
+ // Update sim.
+ sim[idx] = {feerate, TestBitSet::Singleton(idx)};
+ ++num_tx_sim;
+ continue;
+ }
+ if ((command % 3) <= 1 && num_tx_sim > 0) {
+ // AddDependencies.
+ ClusterIndex child = idx_fn();
+ auto parents = subset_fn();
+ // Apply to DepGraph.
+ real.AddDependencies(parents, child);
+ // Apply to sim.
+ sim[child]->second |= parents;
+ continue;
+ }
+ if (num_tx_sim > 0) {
+ // Remove transactions.
+ auto del = set_fn();
+ // Propagate all ancestry information before deleting anything in the simulation (as
+ // intermediary transactions may be deleted which impact connectivity).
+ anc_update_fn();
+ // Compare the state of the transactions being deleted.
+ for (auto i : del) check_fn(i);
+ // Apply to DepGraph.
+ real.RemoveTransactions(del);
+ // Apply to sim.
+ for (ClusterIndex i = 0; i < sim.size(); ++i) {
+ if (sim[i].has_value()) {
+ if (del[i]) {
+ --num_tx_sim;
+ sim[i] = std::nullopt;
+ } else {
+ sim[i]->second -= del;
+ }
+ }
+ }
+ continue;
+ }
+ // This should be unreachable (one of the 3 above actions should always be possible).
+ assert(false);
}
- // Construct dependency graph, and verify it matches the cluster (which includes a round-trip
- // check for the serialization).
- DepGraph depgraph(cluster);
- VerifyDepGraphFromCluster(cluster, depgraph);
+ // Compare the real obtained depgraph against the simulation.
+ anc_update_fn();
+ for (ClusterIndex i = 0; i < sim.size(); ++i) check_fn(i);
+ assert(real.TxCount() == num_tx_sim);
+ // Sanity check the result (which includes round-tripping serialization, if applicable).
+ SanityCheck(real);
}
FUZZ_TARGET(clusterlin_depgraph_serialization)
@@ -305,7 +421,7 @@ FUZZ_TARGET(clusterlin_components)
reader >> Using<DepGraphFormatter>(depgraph);
} catch (const std::ios_base::failure&) {}
- TestBitSet todo = TestBitSet::Fill(depgraph.TxCount());
+ TestBitSet todo = depgraph.Positions();
while (todo.Any()) {
// Find a connected component inside todo.
auto component = depgraph.FindConnectedComponent(todo);
@@ -316,7 +432,7 @@ FUZZ_TARGET(clusterlin_components)
// If todo is the entire graph, and the entire graph is connected, then the component must
// be the entire graph.
- if (todo == TestBitSet::Fill(depgraph.TxCount())) {
+ if (todo == depgraph.Positions()) {
assert((component == todo) == depgraph.IsConnected());
}
@@ -353,7 +469,7 @@ FUZZ_TARGET(clusterlin_components)
reader >> VARINT(subset_bits);
} catch (const std::ios_base::failure&) {}
TestBitSet subset;
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
+ for (ClusterIndex i : depgraph.Positions()) {
if (todo[i]) {
if (subset_bits & 1) subset.Set(i);
subset_bits >>= 1;
@@ -369,6 +485,20 @@ FUZZ_TARGET(clusterlin_components)
assert(depgraph.FindConnectedComponent(todo).None());
}
+FUZZ_TARGET(clusterlin_make_connected)
+{
+ // Verify that MakeConnected makes graphs connected.
+
+ SpanReader reader(buffer);
+ DepGraph<TestBitSet> depgraph;
+ try {
+ reader >> Using<DepGraphFormatter>(depgraph);
+ } catch (const std::ios_base::failure&) {}
+ MakeConnected(depgraph);
+ SanityCheck(depgraph);
+ assert(depgraph.IsConnected());
+}
+
FUZZ_TARGET(clusterlin_chunking)
{
// Verify the correctness of the ChunkLinearization function.
@@ -392,13 +522,13 @@ FUZZ_TARGET(clusterlin_chunking)
}
// Naively recompute the chunks (each is the highest-feerate prefix of what remains).
- auto todo = TestBitSet::Fill(depgraph.TxCount());
+ auto todo = depgraph.Positions();
for (const auto& chunk_feerate : chunking) {
assert(todo.Any());
SetInfo<TestBitSet> accumulator, best;
for (ClusterIndex idx : linearization) {
if (todo[idx]) {
- accumulator |= SetInfo(depgraph, idx);
+ accumulator.Set(depgraph, idx);
if (best.feerate.IsEmpty() || accumulator.feerate >> best.feerate) {
best = accumulator;
}
@@ -423,10 +553,11 @@ FUZZ_TARGET(clusterlin_ancestor_finder)
} catch (const std::ios_base::failure&) {}
AncestorCandidateFinder anc_finder(depgraph);
- auto todo = TestBitSet::Fill(depgraph.TxCount());
+ auto todo = depgraph.Positions();
while (todo.Any()) {
// Call the ancestor finder's FindCandidateSet for what remains of the graph.
assert(!anc_finder.AllDone());
+ assert(todo.Count() == anc_finder.NumRemaining());
auto best_anc = anc_finder.FindCandidateSet();
// Sanity check the result.
assert(best_anc.transactions.Any());
@@ -458,6 +589,7 @@ FUZZ_TARGET(clusterlin_ancestor_finder)
anc_finder.MarkDone(del_set);
}
assert(anc_finder.AllDone());
+ assert(anc_finder.NumRemaining() == 0);
}
static constexpr auto MAX_SIMPLE_ITERATIONS = 300000;
@@ -468,13 +600,17 @@ FUZZ_TARGET(clusterlin_search_finder)
// and comparing with the results from SimpleCandidateFinder, ExhaustiveCandidateFinder, and
// AncestorCandidateFinder.
- // Retrieve an RNG seed and a depgraph from the fuzz input.
+ // Retrieve an RNG seed, a depgraph, and whether to make it connected, from the fuzz input.
SpanReader reader(buffer);
DepGraph<TestBitSet> depgraph;
uint64_t rng_seed{0};
+ uint8_t make_connected{1};
try {
- reader >> Using<DepGraphFormatter>(depgraph) >> rng_seed;
+ reader >> Using<DepGraphFormatter>(depgraph) >> rng_seed >> make_connected;
} catch (const std::ios_base::failure&) {}
+ // The most complicated graphs are connected ones (other ones just split up). Optionally force
+ // the graph to be connected.
+ if (make_connected) MakeConnected(depgraph);
// Instantiate ALL the candidate finders.
SearchCandidateFinder src_finder(depgraph, rng_seed);
@@ -482,12 +618,13 @@ FUZZ_TARGET(clusterlin_search_finder)
ExhaustiveCandidateFinder exh_finder(depgraph);
AncestorCandidateFinder anc_finder(depgraph);
- auto todo = TestBitSet::Fill(depgraph.TxCount());
+ auto todo = depgraph.Positions();
while (todo.Any()) {
assert(!src_finder.AllDone());
assert(!smp_finder.AllDone());
assert(!exh_finder.AllDone());
assert(!anc_finder.AllDone());
+ assert(anc_finder.NumRemaining() == todo.Count());
// For each iteration, read an iteration count limit from the fuzz input.
uint64_t max_iterations = 1;
@@ -513,9 +650,17 @@ FUZZ_TARGET(clusterlin_search_finder)
assert(found.transactions.IsSupersetOf(depgraph.Ancestors(i) & todo));
}
- // At most 2^N-1 iterations can be required: the number of non-empty subsets a graph with N
- // transactions has.
- assert(iterations_done <= ((uint64_t{1} << todo.Count()) - 1));
+ // At most 2^(N-1) iterations can be required: the maximum number of non-empty topological
+ // subsets a (connected) cluster with N transactions can have. Even when the cluster is no
+ // longer connected after removing certain transactions, this holds, because the connected
+ // components are searched separately.
+ assert(iterations_done <= (uint64_t{1} << (todo.Count() - 1)));
+ // Additionally, test that no more than sqrt(2^N)+1 iterations are required. This is just
+ // an empirical bound that seems to hold, without proof. Still, add a test for it so we
+ // can learn about counterexamples if they exist.
+ if (iterations_done >= 1 && todo.Count() <= 63) {
+ Assume((iterations_done - 1) * (iterations_done - 1) <= uint64_t{1} << todo.Count());
+ }
// Perform quality checks only if SearchCandidateFinder claims an optimal result.
if (iterations_done < max_iterations) {
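
The squared comparison added above is the empirical `sqrt(2^N)+1` bound with both sides squared, which keeps the check in exact integer arithmetic instead of a floating-point `sqrt`. A minimal standalone restatement of that equivalence follows; the function and names are illustrative only, and assume `iters - 1` stays well below 2^32 so the square fits in 64 bits.

```cpp
#include <cassert>
#include <cstdint>

// For iters >= 1 and n <= 63: iters <= sqrt(2^n) + 1  <=>  (iters - 1)^2 <= 2^n.
bool WithinEmpiricalBound(uint64_t iters, unsigned n)
{
    assert(iters >= 1 && n <= 63);
    return (iters - 1) * (iters - 1) <= (uint64_t{1} << n);
}

int main()
{
    // n = 10: sqrt(1024) = 32, so 33 iterations pass the check and 34 do not.
    assert(WithinEmpiricalBound(33, 10));
    assert(!WithinEmpiricalBound(34, 10));
}
```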
@@ -562,6 +707,7 @@ FUZZ_TARGET(clusterlin_search_finder)
assert(smp_finder.AllDone());
assert(exh_finder.AllDone());
assert(anc_finder.AllDone());
+ assert(anc_finder.NumRemaining() == 0);
}
FUZZ_TARGET(clusterlin_linearization_chunking)
@@ -576,7 +722,7 @@ FUZZ_TARGET(clusterlin_linearization_chunking)
} catch (const std::ios_base::failure&) {}
// Retrieve a topologically-valid subset of depgraph.
- auto todo = TestBitSet::Fill(depgraph.TxCount());
+ auto todo = depgraph.Positions();
auto subset = SetInfo(depgraph, ReadTopologicalSet(depgraph, todo, reader));
// Retrieve a valid linearization for depgraph.
@@ -621,7 +767,7 @@ FUZZ_TARGET(clusterlin_linearization_chunking)
SetInfo<TestBitSet> accumulator, best;
for (auto j : linearization) {
if (todo[j] && !combined[j]) {
- accumulator |= SetInfo(depgraph, j);
+ accumulator.Set(depgraph, j);
if (best.feerate.IsEmpty() || accumulator.feerate > best.feerate) {
best = accumulator;
}
@@ -685,14 +831,19 @@ FUZZ_TARGET(clusterlin_linearize)
{
// Verify the behavior of Linearize().
- // Retrieve an RNG seed, an iteration count, and a depgraph from the fuzz input.
+ // Retrieve an RNG seed, an iteration count, a depgraph, and whether to make it connected from
+ // the fuzz input.
SpanReader reader(buffer);
DepGraph<TestBitSet> depgraph;
uint64_t rng_seed{0};
uint64_t iter_count{0};
+ uint8_t make_connected{1};
try {
- reader >> VARINT(iter_count) >> Using<DepGraphFormatter>(depgraph) >> rng_seed;
+ reader >> VARINT(iter_count) >> Using<DepGraphFormatter>(depgraph) >> rng_seed >> make_connected;
} catch (const std::ios_base::failure&) {}
+ // The most complicated graphs are connected ones (other ones just split up). Optionally force
+ // the graph to be connected.
+ if (make_connected) MakeConnected(depgraph);
// Optionally construct an old linearization for it.
std::vector<ClusterIndex> old_linearization;
@@ -721,12 +872,24 @@ FUZZ_TARGET(clusterlin_linearize)
}
// If the iteration count is sufficiently high, an optimal linearization must be found.
- // Each linearization step can use up to 2^k iterations, with steps k=1..n. That sum is
- // 2 * (2^n - 1)
+ // Each linearization step can use up to 2^(k-1) iterations, with steps k=1..n. That sum is
+ // 2^n - 1.
const uint64_t n = depgraph.TxCount();
- if (n <= 18 && iter_count > 2U * ((uint64_t{1} << n) - 1U)) {
+ if (n <= 19 && iter_count > (uint64_t{1} << n)) {
assert(optimal);
}
+ // Additionally, if the assumption of sqrt(2^k)+1 iterations per step holds, plus ceil(k/4)
+ // start-up cost per step, plus ceil(n^2/64) start-up cost overall, we can compute the upper
+ // bound for a whole linearization (summing for k=1..n) using the Python expression
+ // [sum((k+3)//4 + int(math.sqrt(2**k)) + 1 for k in range(1, n + 1)) + (n**2 + 63) // 64 for n in range(0, 35)]:
+ static constexpr uint64_t MAX_OPTIMAL_ITERS[] = {
+ 0, 4, 8, 12, 18, 26, 37, 51, 70, 97, 133, 182, 251, 346, 480, 666, 927, 1296, 1815, 2545,
+ 3576, 5031, 7087, 9991, 14094, 19895, 28096, 39690, 56083, 79263, 112041, 158391, 223936,
+ 316629, 447712
+ };
+ if (n < std::size(MAX_OPTIMAL_ITERS) && iter_count >= MAX_OPTIMAL_ITERS[n]) {
+ Assume(optimal);
+ }
// If Linearize claims optimal result, run quality tests.
if (optimal) {
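
As a quick cross-check of the `MAX_OPTIMAL_ITERS` table above, the Python expression quoted in the comment can be transcribed to C++ as follows. This is a throwaway sketch, not part of the patch, and it assumes the same truncated floating-point square root; running it prints the same 35 values, starting `0, 4, 8, 12, 18`.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    for (uint64_t n = 0; n < 35; ++n) {
        uint64_t total = (n * n + 63) / 64; // ceil(n^2/64) overall start-up cost
        for (uint64_t k = 1; k <= n; ++k) {
            // ceil(k/4) start-up cost per step, plus sqrt(2^k) + 1 iterations
            total += (k + 3) / 4
                   + static_cast<uint64_t>(std::sqrt(static_cast<double>(uint64_t{1} << k)))
                   + 1;
        }
        std::printf("%llu%s", static_cast<unsigned long long>(total), n + 1 < 35 ? ", " : "\n");
    }
}
```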
@@ -742,8 +905,8 @@ FUZZ_TARGET(clusterlin_linearize)
// Only for very small clusters, test every topologically-valid permutation.
if (depgraph.TxCount() <= 7) {
- std::vector<ClusterIndex> perm_linearization(depgraph.TxCount());
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) perm_linearization[i] = i;
+ std::vector<ClusterIndex> perm_linearization;
+ for (ClusterIndex i : depgraph.Positions()) perm_linearization.push_back(i);
// Iterate over all valid permutations.
do {
// Determine whether perm_linearization is topological.
@@ -827,30 +990,30 @@ FUZZ_TARGET(clusterlin_postlinearize_tree)
// Now construct a new graph, copying the nodes, but leaving only the first parent (even
// direction) or the first child (odd direction).
DepGraph<TestBitSet> depgraph_tree;
- for (ClusterIndex i = 0; i < depgraph_gen.TxCount(); ++i) {
- depgraph_tree.AddTransaction(depgraph_gen.FeeRate(i));
+ for (ClusterIndex i = 0; i < depgraph_gen.PositionRange(); ++i) {
+ if (depgraph_gen.Positions()[i]) {
+ depgraph_tree.AddTransaction(depgraph_gen.FeeRate(i));
+ } else {
+ // For holes, add a dummy transaction which is deleted below, so that non-hole
+ // transactions retain their position.
+ depgraph_tree.AddTransaction(FeeFrac{});
+ }
}
+ depgraph_tree.RemoveTransactions(TestBitSet::Fill(depgraph_gen.PositionRange()) - depgraph_gen.Positions());
+
if (direction & 1) {
for (ClusterIndex i = 0; i < depgraph_gen.TxCount(); ++i) {
- auto children = depgraph_gen.Descendants(i) - TestBitSet::Singleton(i);
- // Remove descendants that are children of other descendants.
- for (auto j : children) {
- if (!children[j]) continue;
- children -= depgraph_gen.Descendants(j);
- children.Set(j);
+ auto children = depgraph_gen.GetReducedChildren(i);
+ if (children.Any()) {
+ depgraph_tree.AddDependencies(TestBitSet::Singleton(i), children.First());
}
- if (children.Any()) depgraph_tree.AddDependency(i, children.First());
}
} else {
for (ClusterIndex i = 0; i < depgraph_gen.TxCount(); ++i) {
- auto parents = depgraph_gen.Ancestors(i) - TestBitSet::Singleton(i);
- // Remove ancestors that are parents of other ancestors.
- for (auto j : parents) {
- if (!parents[j]) continue;
- parents -= depgraph_gen.Ancestors(j);
- parents.Set(j);
+ auto parents = depgraph_gen.GetReducedParents(i);
+ if (parents.Any()) {
+ depgraph_tree.AddDependencies(TestBitSet::Singleton(parents.First()), i);
}
- if (parents.Any()) depgraph_tree.AddDependency(parents.First(), i);
}
}
diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp
index 8f5f6a6071..beefc9d82e 100644
--- a/src/test/fuzz/connman.cpp
+++ b/src/test/fuzz/connman.cpp
@@ -91,17 +91,15 @@ FUZZ_TARGET(connman, .init = initialize_connman)
(void)connman.ForNode(fuzzed_data_provider.ConsumeIntegral<NodeId>(), [&](auto) { return fuzzed_data_provider.ConsumeBool(); });
},
[&] {
- (void)connman.GetAddresses(
- /*max_addresses=*/fuzzed_data_provider.ConsumeIntegral<size_t>(),
- /*max_pct=*/fuzzed_data_provider.ConsumeIntegral<size_t>(),
- /*network=*/std::nullopt,
- /*filtered=*/fuzzed_data_provider.ConsumeBool());
+ auto max_addresses = fuzzed_data_provider.ConsumeIntegral<size_t>();
+ auto max_pct = fuzzed_data_provider.ConsumeIntegral<size_t>();
+ auto filtered = fuzzed_data_provider.ConsumeBool();
+ (void)connman.GetAddresses(max_addresses, max_pct, /*network=*/std::nullopt, filtered);
},
[&] {
- (void)connman.GetAddresses(
- /*requestor=*/random_node,
- /*max_addresses=*/fuzzed_data_provider.ConsumeIntegral<size_t>(),
- /*max_pct=*/fuzzed_data_provider.ConsumeIntegral<size_t>());
+ auto max_addresses = fuzzed_data_provider.ConsumeIntegral<size_t>();
+ auto max_pct = fuzzed_data_provider.ConsumeIntegral<size_t>();
+ (void)connman.GetAddresses(/*requestor=*/random_node, max_addresses, max_pct);
},
[&] {
(void)connman.GetDeterministicRandomizer(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp
index ca8c1cd033..aa478277e3 100644
--- a/src/test/fuzz/crypto.cpp
+++ b/src/test/fuzz/crypto.cpp
@@ -22,7 +22,9 @@ FUZZ_TARGET(crypto)
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
std::vector<uint8_t> data = ConsumeRandomLengthByteVector(fuzzed_data_provider);
if (data.empty()) {
- data.resize(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(1, 4096), fuzzed_data_provider.ConsumeIntegral<uint8_t>());
+ auto new_size = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(1, 4096);
+ auto x = fuzzed_data_provider.ConsumeIntegral<uint8_t>();
+ data.resize(new_size, x);
}
CHash160 hash160;
@@ -44,7 +46,9 @@ FUZZ_TARGET(crypto)
if (fuzzed_data_provider.ConsumeBool()) {
data = ConsumeRandomLengthByteVector(fuzzed_data_provider);
if (data.empty()) {
- data.resize(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(1, 4096), fuzzed_data_provider.ConsumeIntegral<uint8_t>());
+ auto new_size = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(1, 4096);
+ auto x = fuzzed_data_provider.ConsumeIntegral<uint8_t>();
+ data.resize(new_size, x);
}
}
diff --git a/src/test/fuzz/crypto_chacha20.cpp b/src/test/fuzz/crypto_chacha20.cpp
index d115a2b7e1..fe47f18923 100644
--- a/src/test/fuzz/crypto_chacha20.cpp
+++ b/src/test/fuzz/crypto_chacha20.cpp
@@ -28,11 +28,10 @@ FUZZ_TARGET(crypto_chacha20)
chacha20.SetKey(key);
},
[&] {
- chacha20.Seek(
- {
- fuzzed_data_provider.ConsumeIntegral<uint32_t>(),
- fuzzed_data_provider.ConsumeIntegral<uint64_t>()
- }, fuzzed_data_provider.ConsumeIntegral<uint32_t>());
+ ChaCha20::Nonce96 nonce{
+ fuzzed_data_provider.ConsumeIntegral<uint32_t>(),
+ fuzzed_data_provider.ConsumeIntegral<uint64_t>()};
+ chacha20.Seek(nonce, fuzzed_data_provider.ConsumeIntegral<uint32_t>());
},
[&] {
std::vector<uint8_t> output(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
diff --git a/src/test/fuzz/crypto_chacha20poly1305.cpp b/src/test/fuzz/crypto_chacha20poly1305.cpp
index 5e62e6f3df..0700ba7fb6 100644
--- a/src/test/fuzz/crypto_chacha20poly1305.cpp
+++ b/src/test/fuzz/crypto_chacha20poly1305.cpp
@@ -39,7 +39,7 @@ FUZZ_TARGET(crypto_aeadchacha20poly1305)
// data).
InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>());
- LIMITED_WHILE(provider.ConsumeBool(), 10000)
+ LIMITED_WHILE(provider.ConsumeBool(), 100)
{
// Mode:
// - Bit 0: whether to use single-plain Encrypt/Decrypt; otherwise use a split at prefix.
diff --git a/src/test/fuzz/crypto_common.cpp b/src/test/fuzz/crypto_common.cpp
index 8e07dfedb9..5a76d4e1a9 100644
--- a/src/test/fuzz/crypto_common.cpp
+++ b/src/test/fuzz/crypto_common.cpp
@@ -35,6 +35,10 @@ FUZZ_TARGET(crypto_common)
WriteLE64(writele64_arr.data(), random_u64);
assert(ReadLE64(writele64_arr.data()) == random_u64);
+ std::array<uint8_t, 2> writebe16_arr;
+ WriteBE16(writebe16_arr.data(), random_u16);
+ assert(ReadBE16(writebe16_arr.data()) == random_u16);
+
std::array<uint8_t, 4> writebe32_arr;
WriteBE32(writebe32_arr.data(), random_u32);
assert(ReadBE32(writebe32_arr.data()) == random_u32);
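
The new WriteBE16/ReadBE16 assertion mirrors the existing 32- and 64-bit round trips. A hand-rolled sketch of the property being checked (illustrative re-implementations, not the helpers under test):

// Sketch of the round-trip property asserted above: big-endian helpers store the
// most significant byte first, so write-then-read is the identity for every uint16_t.
#include <array>
#include <cassert>
#include <cstdint>

static void WriteBE16Sketch(uint8_t* p, uint16_t v)
{
    p[0] = v >> 8;   // most significant byte first
    p[1] = v & 0xff;
}

static uint16_t ReadBE16Sketch(const uint8_t* p)
{
    return uint16_t(uint16_t(p[0]) << 8 | p[1]);
}

int main()
{
    std::array<uint8_t, 2> buf;
    WriteBE16Sketch(buf.data(), 0x1234);
    assert(buf[0] == 0x12 && buf[1] == 0x34);
    for (uint32_t v = 0; v <= 0xffff; ++v) {
        WriteBE16Sketch(buf.data(), uint16_t(v));
        assert(ReadBE16Sketch(buf.data()) == uint16_t(v));
    }
}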
diff --git a/src/test/fuzz/cuckoocache.cpp b/src/test/fuzz/cuckoocache.cpp
index 50a71ee03f..f8a5bde3e6 100644
--- a/src/test/fuzz/cuckoocache.cpp
+++ b/src/test/fuzz/cuckoocache.cpp
@@ -41,7 +41,9 @@ FUZZ_TARGET(cuckoocache)
if (fuzzed_data_provider.ConsumeBool()) {
cuckoo_cache.insert(fuzzed_data_provider.ConsumeBool());
} else {
- cuckoo_cache.contains(fuzzed_data_provider.ConsumeBool(), fuzzed_data_provider.ConsumeBool());
+ auto e = fuzzed_data_provider.ConsumeBool();
+ auto erase = fuzzed_data_provider.ConsumeBool();
+ cuckoo_cache.contains(e, erase);
}
}
fuzzed_data_provider_ptr = nullptr;
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index fdad0a287a..bba2dd8e3a 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -49,7 +49,7 @@ static std::vector<const char*> g_args;
static void SetArgs(int argc, char** argv) {
for (int i = 1; i < argc; ++i) {
// Only take into account arguments that start with `--`. The others are for the fuzz engine:
- // `fuzz -runs=1 fuzz_seed_corpus/address_deserialize_v2 --checkaddrman=5`
+ // `fuzz -runs=1 fuzz_corpora/address_deserialize_v2 --checkaddrman=5`
if (strlen(argv[i]) > 2 && argv[i][0] == '-' && argv[i][1] == '-') {
g_args.push_back(argv[i]);
}
diff --git a/src/test/fuzz/hex.cpp b/src/test/fuzz/hex.cpp
index 964e30cc7e..3dcf1ed3d5 100644
--- a/src/test/fuzz/hex.cpp
+++ b/src/test/fuzz/hex.cpp
@@ -35,9 +35,14 @@ FUZZ_TARGET(hex)
assert(uint256::FromUserHex(random_hex_string));
}
if (const auto result{uint256::FromUserHex(random_hex_string)}) {
- assert(uint256::FromHex(result->ToString()));
+ const auto result_string{result->ToString()}; // ToString() returns a fixed-length string without "0x" prefix
+ assert(result_string.length() == 64);
+ assert(IsHex(result_string));
+ assert(TryParseHex(result_string));
+ assert(Txid::FromHex(result_string));
+ assert(Wtxid::FromHex(result_string));
+ assert(uint256::FromHex(result_string));
}
- (void)uint256S(random_hex_string);
try {
(void)HexToPubKey(random_hex_string);
} catch (const UniValue&) {
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 02c6796d11..b9e3154106 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -69,7 +69,7 @@ FUZZ_TARGET(integer, .init = initialize_integer)
const bool b = fuzzed_data_provider.ConsumeBool();
const Consensus::Params& consensus_params = Params().GetConsensus();
- (void)CheckProofOfWork(u256, u32, consensus_params);
+ (void)CheckProofOfWorkImpl(u256, u32, consensus_params);
if (u64 <= MAX_MONEY) {
const uint64_t compressed_money_amount = CompressAmount(u64);
assert(u64 == DecompressAmount(compressed_money_amount));
@@ -140,7 +140,7 @@ FUZZ_TARGET(integer, .init = initialize_integer)
const arith_uint256 au256 = UintToArith256(u256);
assert(ArithToUint256(au256) == u256);
- assert(uint256S(au256.GetHex()) == u256);
+ assert(uint256::FromHex(au256.GetHex()).value() == u256);
(void)au256.bits();
(void)au256.GetCompact(/* fNegative= */ false);
(void)au256.GetCompact(/* fNegative= */ true);
diff --git a/src/test/fuzz/message.cpp b/src/test/fuzz/message.cpp
index 6763206f72..99bbad6591 100644
--- a/src/test/fuzz/message.cpp
+++ b/src/test/fuzz/message.cpp
@@ -39,7 +39,9 @@ FUZZ_TARGET(message, .init = initialize_message)
}
{
(void)MessageHash(random_message);
- (void)MessageVerify(fuzzed_data_provider.ConsumeRandomLengthString(1024), fuzzed_data_provider.ConsumeRandomLengthString(1024), random_message);
+ auto address = fuzzed_data_provider.ConsumeRandomLengthString(1024);
+ auto signature = fuzzed_data_provider.ConsumeRandomLengthString(1024);
+ (void)MessageVerify(address, signature, random_message);
(void)SigningResultString(fuzzed_data_provider.PickValueInArray({SigningResult::OK, SigningResult::PRIVATE_KEY_NOT_AVAILABLE, SigningResult::SIGNING_FAILED}));
}
}
diff --git a/src/test/fuzz/p2p_headers_presync.cpp b/src/test/fuzz/p2p_headers_presync.cpp
new file mode 100644
index 0000000000..2670aa8ee4
--- /dev/null
+++ b/src/test/fuzz/p2p_headers_presync.cpp
@@ -0,0 +1,216 @@
+#include <blockencodings.h>
+#include <net.h>
+#include <net_processing.h>
+#include <netmessagemaker.h>
+#include <node/peerman_args.h>
+#include <pow.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <test/util/net.h>
+#include <test/util/script.h>
+#include <test/util/setup_common.h>
+#include <uint256.h>
+#include <validation.h>
+
+namespace {
+constexpr uint32_t FUZZ_MAX_HEADERS_RESULTS{16};
+
+class HeadersSyncSetup : public TestingSetup
+{
+ std::vector<CNode*> m_connections;
+
+public:
+ HeadersSyncSetup(const ChainType chain_type, TestOpts opts) : TestingSetup(chain_type, opts)
+ {
+ PeerManager::Options peerman_opts;
+ node::ApplyArgsManOptions(*m_node.args, peerman_opts);
+ peerman_opts.max_headers_result = FUZZ_MAX_HEADERS_RESULTS;
+ m_node.peerman = PeerManager::make(*m_node.connman, *m_node.addrman,
+ m_node.banman.get(), *m_node.chainman,
+ *m_node.mempool, *m_node.warnings, peerman_opts);
+
+ CConnman::Options options;
+ options.m_msgproc = m_node.peerman.get();
+ m_node.connman->Init(options);
+ }
+
+ void ResetAndInitialize() EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
+ void SendMessage(FuzzedDataProvider& fuzzed_data_provider, CSerializedNetMsg&& msg)
+ EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
+};
+
+void HeadersSyncSetup::ResetAndInitialize()
+{
+ m_connections.clear();
+ auto& connman = static_cast<ConnmanTestMsg&>(*m_node.connman);
+ connman.StopNodes();
+
+ NodeId id{0};
+ std::vector<ConnectionType> conn_types = {
+ ConnectionType::OUTBOUND_FULL_RELAY,
+ ConnectionType::BLOCK_RELAY,
+ ConnectionType::INBOUND
+ };
+
+ for (auto conn_type : conn_types) {
+ CAddress addr{};
+ m_connections.push_back(new CNode(id++, nullptr, addr, 0, 0, addr, "", conn_type, false));
+ CNode& p2p_node = *m_connections.back();
+
+ connman.Handshake(
+ /*node=*/p2p_node,
+ /*successfully_connected=*/true,
+ /*remote_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS),
+ /*local_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS),
+ /*version=*/PROTOCOL_VERSION,
+ /*relay_txs=*/true);
+
+ connman.AddTestNode(p2p_node);
+ }
+}
+
+void HeadersSyncSetup::SendMessage(FuzzedDataProvider& fuzzed_data_provider, CSerializedNetMsg&& msg)
+{
+ auto& connman = static_cast<ConnmanTestMsg&>(*m_node.connman);
+ CNode& connection = *PickValue(fuzzed_data_provider, m_connections);
+
+ connman.FlushSendBuffer(connection);
+ (void)connman.ReceiveMsgFrom(connection, std::move(msg));
+ connection.fPauseSend = false;
+ try {
+ connman.ProcessMessagesOnce(connection);
+ } catch (const std::ios_base::failure&) {
+ }
+ m_node.peerman->SendMessages(&connection);
+}
+
+CBlockHeader ConsumeHeader(FuzzedDataProvider& fuzzed_data_provider, const uint256& prev_hash, uint32_t prev_nbits)
+{
+ CBlockHeader header;
+ header.nNonce = 0;
+ // Either use the previous difficulty or let the fuzzer choose
+ header.nBits = fuzzed_data_provider.ConsumeBool() ?
+ prev_nbits :
+ fuzzed_data_provider.ConsumeIntegralInRange<uint32_t>(0x17058EBE, 0x1D00FFFF);
+ header.nTime = ConsumeTime(fuzzed_data_provider);
+ header.hashPrevBlock = prev_hash;
+ header.nVersion = fuzzed_data_provider.ConsumeIntegral<int32_t>();
+ return header;
+}
+
+CBlock ConsumeBlock(FuzzedDataProvider& fuzzed_data_provider, const uint256& prev_hash, uint32_t prev_nbits)
+{
+ auto header = ConsumeHeader(fuzzed_data_provider, prev_hash, prev_nbits);
+ // In order to reach the headers acceptance logic, the block is
+ // constructed in a way that will pass the mutation checks.
+ CBlock block{header};
+ CMutableTransaction tx;
+ tx.vin.resize(1);
+ tx.vout.resize(1);
+ tx.vout[0].nValue = 0;
+ tx.vin[0].scriptSig.resize(2);
+ block.vtx.push_back(MakeTransactionRef(tx));
+ block.hashMerkleRoot = block.vtx[0]->GetHash();
+ return block;
+}
+
+void FinalizeHeader(CBlockHeader& header, const ChainstateManager& chainman)
+{
+ while (!CheckProofOfWork(header.GetHash(), header.nBits, chainman.GetParams().GetConsensus())) {
+ ++(header.nNonce);
+ }
+}
+
+// Global setup works for this test as state modification (specifically in the
+// block index) would indicate a bug.
+HeadersSyncSetup* g_testing_setup;
+
+void initialize()
+{
+ static auto setup = MakeNoLogFileContext<HeadersSyncSetup>(ChainType::MAIN, {.extra_args = {"-checkpoints=0"}});
+ g_testing_setup = setup.get();
+}
+} // namespace
+
+FUZZ_TARGET(p2p_headers_presync, .init = initialize)
+{
+ ChainstateManager& chainman = *g_testing_setup->m_node.chainman;
+
+ LOCK(NetEventsInterface::g_msgproc_mutex);
+
+ g_testing_setup->ResetAndInitialize();
+
+ FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
+
+ CBlockHeader base{chainman.GetParams().GenesisBlock()};
+ SetMockTime(base.nTime);
+
+ // The chain is just a single block, so this is equal to 1
+ size_t original_index_size{WITH_LOCK(cs_main, return chainman.m_blockman.m_block_index.size())};
+ arith_uint256 total_work{WITH_LOCK(cs_main, return chainman.m_best_header->nChainWork)};
+
+ std::vector<CBlockHeader> all_headers;
+
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100)
+ {
+ auto finalized_block = [&]() {
+ CBlock block = ConsumeBlock(fuzzed_data_provider, base.GetHash(), base.nBits);
+ FinalizeHeader(block, chainman);
+ return block;
+ };
+
+ // Send low-work headers, compact blocks, and blocks
+ CallOneOf(
+ fuzzed_data_provider,
+ [&]() NO_THREAD_SAFETY_ANALYSIS {
+ // Send FUZZ_MAX_HEADERS_RESULTS headers
+ std::vector<CBlock> headers;
+ headers.resize(FUZZ_MAX_HEADERS_RESULTS);
+ for (CBlock& header : headers) {
+ header = ConsumeHeader(fuzzed_data_provider, base.GetHash(), base.nBits);
+ FinalizeHeader(header, chainman);
+ base = header;
+ }
+
+ all_headers.insert(all_headers.end(), headers.begin(), headers.end());
+
+ auto headers_msg = NetMsg::Make(NetMsgType::HEADERS, TX_WITH_WITNESS(headers));
+ g_testing_setup->SendMessage(fuzzed_data_provider, std::move(headers_msg));
+ },
+ [&]() NO_THREAD_SAFETY_ANALYSIS {
+ // Send a compact block
+ auto block = finalized_block();
+ CBlockHeaderAndShortTxIDs cmpct_block{block, fuzzed_data_provider.ConsumeIntegral<uint64_t>()};
+
+ all_headers.push_back(block);
+
+ auto headers_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, TX_WITH_WITNESS(cmpct_block));
+ g_testing_setup->SendMessage(fuzzed_data_provider, std::move(headers_msg));
+ },
+ [&]() NO_THREAD_SAFETY_ANALYSIS {
+ // Send a block
+ auto block = finalized_block();
+
+ all_headers.push_back(block);
+
+ auto headers_msg = NetMsg::Make(NetMsgType::BLOCK, TX_WITH_WITNESS(block));
+ g_testing_setup->SendMessage(fuzzed_data_provider, std::move(headers_msg));
+ });
+ }
+
+ // This is a conservative overestimate, as base is only moved forward when sending headers. In theory,
+ // the longest chain generated by this test is 1600 (FUZZ_MAX_HEADERS_RESULTS * 100) headers. In that case,
+ // this variable will accurately reflect the chain's total work.
+ total_work += CalculateClaimedHeadersWork(all_headers);
+
+ // This test should never create a chain with more work than MinimumChainWork.
+ assert(total_work < chainman.MinimumChainWork());
+
+ // The headers/blocks sent in this test should never be stored, as the chains don't have the work required
+ // to meet the anti-DoS work threshold. So, if at any point the block index grew in size, then there's a bug
+ // in the headers pre-sync logic.
+ assert(WITH_LOCK(cs_main, return chainman.m_blockman.m_block_index.size()) == original_index_size);
+
+ g_testing_setup->m_node.validation_signals->SyncWithValidationInterfaceQueue();
+}
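
ConsumeHeader() above draws nBits from 0x17058EBE up to 0x1D00FFFF, i.e. from a much harder target up to the easiest target mainnet allows (the compact encoding of powLimit). A rough, self-contained sketch of how a compact nBits value maps to a target and to expected work per header; this is a double-precision approximation, not the arith_uint256 consensus code:

// Approximate compact-target decoding: target = mantissa * 256^(exponent - 3),
// and expected work per header is roughly 2^256 / (target + 1).
#include <cmath>
#include <cstdint>
#include <cstdio>

static double CompactTargetApprox(uint32_t nbits)
{
    const int exponent = nbits >> 24;
    const uint32_t mantissa = nbits & 0x007fffff;
    return std::ldexp(double(mantissa), 8 * (exponent - 3));
}

int main()
{
    for (uint32_t nbits : {0x1D00FFFFu, 0x17058EBEu}) {
        const double target = CompactTargetApprox(nbits);
        const double work = std::ldexp(1.0, 256) / (target + 1.0);
        std::printf("nBits=0x%08X ~log2(target)=%.1f ~log2(work per header)=%.1f\n",
                    (unsigned)nbits, std::log2(target), std::log2(work));
    }
}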
diff --git a/src/test/fuzz/policy_estimator.cpp b/src/test/fuzz/policy_estimator.cpp
index a4e1947b9f..2942740395 100644
--- a/src/test/fuzz/policy_estimator.cpp
+++ b/src/test/fuzz/policy_estimator.cpp
@@ -85,9 +85,18 @@ FUZZ_TARGET(policy_estimator, .init = initialize_policy_estimator)
});
(void)block_policy_estimator.estimateFee(fuzzed_data_provider.ConsumeIntegral<int>());
EstimationResult result;
- (void)block_policy_estimator.estimateRawFee(fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeFloatingPoint<double>(), fuzzed_data_provider.PickValueInArray(ALL_FEE_ESTIMATE_HORIZONS), fuzzed_data_provider.ConsumeBool() ? &result : nullptr);
+ auto conf_target = fuzzed_data_provider.ConsumeIntegral<int>();
+ auto success_threshold = fuzzed_data_provider.ConsumeFloatingPoint<double>();
+ auto horizon = fuzzed_data_provider.PickValueInArray(ALL_FEE_ESTIMATE_HORIZONS);
+ auto* result_ptr = fuzzed_data_provider.ConsumeBool() ? &result : nullptr;
+ (void)block_policy_estimator.estimateRawFee(conf_target, success_threshold, horizon, result_ptr);
+
FeeCalculation fee_calculation;
- (void)block_policy_estimator.estimateSmartFee(fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeBool() ? &fee_calculation : nullptr, fuzzed_data_provider.ConsumeBool());
+ conf_target = fuzzed_data_provider.ConsumeIntegral<int>();
+ auto* fee_calc_ptr = fuzzed_data_provider.ConsumeBool() ? &fee_calculation : nullptr;
+ auto conservative = fuzzed_data_provider.ConsumeBool();
+ (void)block_policy_estimator.estimateSmartFee(conf_target, fee_calc_ptr, conservative);
+
(void)block_policy_estimator.HighestTargetTracked(fuzzed_data_provider.PickValueInArray(ALL_FEE_ESTIMATE_HORIZONS));
}
{
diff --git a/src/test/fuzz/pow.cpp b/src/test/fuzz/pow.cpp
index 05cdb740e4..dba999ce4f 100644
--- a/src/test/fuzz/pow.cpp
+++ b/src/test/fuzz/pow.cpp
@@ -80,7 +80,7 @@ FUZZ_TARGET(pow, .init = initialize_pow)
{
const std::optional<uint256> hash = ConsumeDeserializable<uint256>(fuzzed_data_provider);
if (hash) {
- (void)CheckProofOfWork(*hash, fuzzed_data_provider.ConsumeIntegral<unsigned int>(), consensus_params);
+ (void)CheckProofOfWorkImpl(*hash, fuzzed_data_provider.ConsumeIntegral<unsigned int>(), consensus_params);
}
}
}
diff --git a/src/test/fuzz/prevector.cpp b/src/test/fuzz/prevector.cpp
index aeceb38a58..fffa099391 100644
--- a/src/test/fuzz/prevector.cpp
+++ b/src/test/fuzz/prevector.cpp
@@ -210,15 +210,20 @@ FUZZ_TARGET(prevector)
LIMITED_WHILE(prov.remaining_bytes(), 3000)
{
switch (prov.ConsumeIntegralInRange<int>(0, 13 + 3 * (test.size() > 0))) {
- case 0:
- test.insert(prov.ConsumeIntegralInRange<size_t>(0, test.size()), prov.ConsumeIntegral<int>());
- break;
+ case 0: {
+ auto position = prov.ConsumeIntegralInRange<size_t>(0, test.size());
+ auto value = prov.ConsumeIntegral<int>();
+ test.insert(position, value);
+ } break;
case 1:
test.resize(std::max(0, std::min(30, (int)test.size() + prov.ConsumeIntegralInRange<int>(0, 4) - 2)));
break;
- case 2:
- test.insert(prov.ConsumeIntegralInRange<size_t>(0, test.size()), 1 + prov.ConsumeBool(), prov.ConsumeIntegral<int>());
- break;
+ case 2: {
+ auto position = prov.ConsumeIntegralInRange<size_t>(0, test.size());
+ auto count = 1 + prov.ConsumeBool();
+ auto value = prov.ConsumeIntegral<int>();
+ test.insert(position, count, value);
+ } break;
case 3: {
int del = prov.ConsumeIntegralInRange<int>(0, test.size());
int beg = prov.ConsumeIntegralInRange<int>(0, test.size() - del);
@@ -255,9 +260,11 @@ FUZZ_TARGET(prevector)
case 9:
test.clear();
break;
- case 10:
- test.assign(prov.ConsumeIntegralInRange<size_t>(0, 32767), prov.ConsumeIntegral<int>());
- break;
+ case 10: {
+ auto n = prov.ConsumeIntegralInRange<size_t>(0, 32767);
+ auto value = prov.ConsumeIntegral<int>();
+ test.assign(n, value);
+ } break;
case 11:
test.swap();
break;
@@ -267,9 +274,11 @@ FUZZ_TARGET(prevector)
case 13:
test.move();
break;
- case 14:
- test.update(prov.ConsumeIntegralInRange<size_t>(0, test.size() - 1), prov.ConsumeIntegral<int>());
- break;
+ case 14: {
+ auto pos = prov.ConsumeIntegralInRange<size_t>(0, test.size() - 1);
+ auto value = prov.ConsumeIntegral<int>();
+ test.update(pos, value);
+ } break;
case 15:
test.erase(prov.ConsumeIntegralInRange<size_t>(0, test.size() - 1));
break;
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index 9122617e46..4db37ab7b7 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -143,6 +143,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"getnetworkhashps",
"getnetworkinfo",
"getnodeaddresses",
+ "getorphantxs",
"getpeerinfo",
"getprioritisedtransactions",
"getrawaddrman",
diff --git a/src/test/fuzz/script_format.cpp b/src/test/fuzz/script_format.cpp
index 10150dcd7f..e26c42ae38 100644
--- a/src/test/fuzz/script_format.cpp
+++ b/src/test/fuzz/script_format.cpp
@@ -30,5 +30,7 @@ FUZZ_TARGET(script_format, .init = initialize_script_format)
(void)ScriptToAsmStr(script, /*fAttemptSighashDecode=*/fuzzed_data_provider.ConsumeBool());
UniValue o1(UniValue::VOBJ);
- ScriptToUniv(script, /*out=*/o1, /*include_hex=*/fuzzed_data_provider.ConsumeBool(), /*include_address=*/fuzzed_data_provider.ConsumeBool());
+ auto include_hex = fuzzed_data_provider.ConsumeBool();
+ auto include_address = fuzzed_data_provider.ConsumeBool();
+ ScriptToUniv(script, /*out=*/o1, include_hex, include_address);
}
diff --git a/src/test/fuzz/script_interpreter.cpp b/src/test/fuzz/script_interpreter.cpp
index 5e76443abe..9e3ad02b2e 100644
--- a/src/test/fuzz/script_interpreter.cpp
+++ b/src/test/fuzz/script_interpreter.cpp
@@ -25,12 +25,18 @@ FUZZ_TARGET(script_interpreter)
const CTransaction tx_to{*mtx};
const unsigned int in = fuzzed_data_provider.ConsumeIntegral<unsigned int>();
if (in < tx_to.vin.size()) {
- (void)SignatureHash(script_code, tx_to, in, fuzzed_data_provider.ConsumeIntegral<int>(), ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.PickValueInArray({SigVersion::BASE, SigVersion::WITNESS_V0}), nullptr);
+ auto n_hash_type = fuzzed_data_provider.ConsumeIntegral<int>();
+ auto amount = ConsumeMoney(fuzzed_data_provider);
+ auto sigversion = fuzzed_data_provider.PickValueInArray({SigVersion::BASE, SigVersion::WITNESS_V0});
+ (void)SignatureHash(script_code, tx_to, in, n_hash_type, amount, sigversion, nullptr);
const std::optional<CMutableTransaction> mtx_precomputed = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider, TX_WITH_WITNESS);
if (mtx_precomputed) {
const CTransaction tx_precomputed{*mtx_precomputed};
const PrecomputedTransactionData precomputed_transaction_data{tx_precomputed};
- (void)SignatureHash(script_code, tx_to, in, fuzzed_data_provider.ConsumeIntegral<int>(), ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.PickValueInArray({SigVersion::BASE, SigVersion::WITNESS_V0}), &precomputed_transaction_data);
+ n_hash_type = fuzzed_data_provider.ConsumeIntegral<int>();
+ amount = ConsumeMoney(fuzzed_data_provider);
+ sigversion = fuzzed_data_provider.PickValueInArray({SigVersion::BASE, SigVersion::WITNESS_V0});
+ (void)SignatureHash(script_code, tx_to, in, n_hash_type, amount, sigversion, &precomputed_transaction_data);
}
}
}
diff --git a/src/test/fuzz/script_sign.cpp b/src/test/fuzz/script_sign.cpp
index 4695bc611b..9fa5e0b7d8 100644
--- a/src/test/fuzz/script_sign.cpp
+++ b/src/test/fuzz/script_sign.cpp
@@ -13,6 +13,7 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <test/util/transaction_utils.h>
#include <util/chaintype.h>
#include <util/translation.h>
@@ -111,7 +112,10 @@ FUZZ_TARGET(script_sign, .init = initialize_script_sign)
}
if (n_in < script_tx_to.vin.size()) {
SignatureData empty;
- (void)SignSignature(provider, ConsumeScript(fuzzed_data_provider), script_tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>(), empty);
+ auto from_pub_key = ConsumeScript(fuzzed_data_provider);
+ auto amount = ConsumeMoney(fuzzed_data_provider);
+ auto n_hash_type = fuzzed_data_provider.ConsumeIntegral<int>();
+ (void)SignSignature(provider, from_pub_key, script_tx_to, n_in, amount, n_hash_type, empty);
MutableTransactionSignatureCreator signature_creator{tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>()};
std::vector<unsigned char> vch_sig;
CKeyID address;
@@ -122,7 +126,9 @@ FUZZ_TARGET(script_sign, .init = initialize_script_sign)
} else {
address = CKeyID{ConsumeUInt160(fuzzed_data_provider)};
}
- (void)signature_creator.CreateSig(provider, vch_sig, address, ConsumeScript(fuzzed_data_provider), fuzzed_data_provider.PickValueInArray({SigVersion::BASE, SigVersion::WITNESS_V0}));
+ auto script_code = ConsumeScript(fuzzed_data_provider);
+ auto sigversion = fuzzed_data_provider.PickValueInArray({SigVersion::BASE, SigVersion::WITNESS_V0});
+ (void)signature_creator.CreateSig(provider, vch_sig, address, script_code, sigversion);
}
std::map<COutPoint, Coin> coins{ConsumeCoins(fuzzed_data_provider)};
std::map<int, bilingual_str> input_errors;
diff --git a/src/test/fuzz/socks5.cpp b/src/test/fuzz/socks5.cpp
index af81fcb593..17d1787586 100644
--- a/src/test/fuzz/socks5.cpp
+++ b/src/test/fuzz/socks5.cpp
@@ -41,8 +41,8 @@ FUZZ_TARGET(socks5, .init = initialize_socks5)
FuzzedSock fuzzed_sock = ConsumeSock(fuzzed_data_provider);
// This Socks5(...) fuzzing harness would have caught CVE-2017-18350 within
// a few seconds of fuzzing.
- (void)Socks5(fuzzed_data_provider.ConsumeRandomLengthString(512),
- fuzzed_data_provider.ConsumeIntegral<uint16_t>(),
- fuzzed_data_provider.ConsumeBool() ? &proxy_credentials : nullptr,
- fuzzed_sock);
+ auto str_dest = fuzzed_data_provider.ConsumeRandomLengthString(512);
+ auto port = fuzzed_data_provider.ConsumeIntegral<uint16_t>();
+ auto* auth = fuzzed_data_provider.ConsumeBool() ? &proxy_credentials : nullptr;
+ (void)Socks5(str_dest, port, auth, fuzzed_sock);
}
diff --git a/src/test/fuzz/system.cpp b/src/test/fuzz/system.cpp
index 73ae89b52a..2ab5b7ed39 100644
--- a/src/test/fuzz/system.cpp
+++ b/src/test/fuzz/system.cpp
@@ -44,23 +44,31 @@ FUZZ_TARGET(system, .init = initialize_system)
args_manager.SelectConfigNetwork(fuzzed_data_provider.ConsumeRandomLengthString(16));
},
[&] {
- args_manager.SoftSetArg(fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeRandomLengthString(16));
+ auto str_arg = fuzzed_data_provider.ConsumeRandomLengthString(16);
+ auto str_value = fuzzed_data_provider.ConsumeRandomLengthString(16);
+ args_manager.SoftSetArg(str_arg, str_value);
},
[&] {
- args_manager.ForceSetArg(fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeRandomLengthString(16));
+ auto str_arg = fuzzed_data_provider.ConsumeRandomLengthString(16);
+ auto str_value = fuzzed_data_provider.ConsumeRandomLengthString(16);
+ args_manager.ForceSetArg(str_arg, str_value);
},
[&] {
- args_manager.SoftSetBoolArg(fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeBool());
+ auto str_arg = fuzzed_data_provider.ConsumeRandomLengthString(16);
+ auto f_value = fuzzed_data_provider.ConsumeBool();
+ args_manager.SoftSetBoolArg(str_arg, f_value);
},
[&] {
- const OptionsCategory options_category = fuzzed_data_provider.PickValueInArray<OptionsCategory>({OptionsCategory::OPTIONS, OptionsCategory::CONNECTION, OptionsCategory::WALLET, OptionsCategory::WALLET_DEBUG_TEST, OptionsCategory::ZMQ, OptionsCategory::DEBUG_TEST, OptionsCategory::CHAINPARAMS, OptionsCategory::NODE_RELAY, OptionsCategory::BLOCK_CREATION, OptionsCategory::RPC, OptionsCategory::GUI, OptionsCategory::COMMANDS, OptionsCategory::REGISTER_COMMANDS, OptionsCategory::HIDDEN});
+ const OptionsCategory options_category = fuzzed_data_provider.PickValueInArray<OptionsCategory>({OptionsCategory::OPTIONS, OptionsCategory::CONNECTION, OptionsCategory::WALLET, OptionsCategory::WALLET_DEBUG_TEST, OptionsCategory::ZMQ, OptionsCategory::DEBUG_TEST, OptionsCategory::CHAINPARAMS, OptionsCategory::NODE_RELAY, OptionsCategory::BLOCK_CREATION, OptionsCategory::RPC, OptionsCategory::GUI, OptionsCategory::COMMANDS, OptionsCategory::REGISTER_COMMANDS, OptionsCategory::CLI_COMMANDS, OptionsCategory::IPC, OptionsCategory::HIDDEN});
// Avoid hitting:
// common/args.cpp:563: void ArgsManager::AddArg(const std::string &, const std::string &, unsigned int, const OptionsCategory &): Assertion `ret.second' failed.
const std::string argument_name = GetArgumentName(fuzzed_data_provider.ConsumeRandomLengthString(16));
if (args_manager.GetArgFlags(argument_name) != std::nullopt) {
return;
}
- args_manager.AddArg(argument_name, fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeIntegral<unsigned int>() & ~ArgsManager::COMMAND, options_category);
+ auto help = fuzzed_data_provider.ConsumeRandomLengthString(16);
+ auto flags = fuzzed_data_provider.ConsumeIntegral<unsigned int>() & ~ArgsManager::COMMAND;
+ args_manager.AddArg(argument_name, help, flags, options_category);
},
[&] {
// Avoid hitting:
diff --git a/src/test/fuzz/util/net.cpp b/src/test/fuzz/util/net.cpp
index ca0fd65cae..b02c4edbad 100644
--- a/src/test/fuzz/util/net.cpp
+++ b/src/test/fuzz/util/net.cpp
@@ -414,10 +414,10 @@ bool FuzzedSock::IsConnected(std::string& errmsg) const
void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman, CNode& node) noexcept
{
- connman.Handshake(node,
- /*successfully_connected=*/fuzzed_data_provider.ConsumeBool(),
- /*remote_services=*/ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS),
- /*local_services=*/ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS),
- /*version=*/fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(MIN_PEER_PROTO_VERSION, std::numeric_limits<int32_t>::max()),
- /*relay_txs=*/fuzzed_data_provider.ConsumeBool());
+ auto successfully_connected = fuzzed_data_provider.ConsumeBool();
+ auto remote_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
+ auto local_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
+ auto version = fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(MIN_PEER_PROTO_VERSION, std::numeric_limits<int32_t>::max());
+ auto relay_txs = fuzzed_data_provider.ConsumeBool();
+ connman.Handshake(node, successfully_connected, remote_services, local_services, version, relay_txs);
}
diff --git a/src/test/fuzz/utxo_snapshot.cpp b/src/test/fuzz/utxo_snapshot.cpp
index 21c305e222..1241bba8be 100644
--- a/src/test/fuzz/utxo_snapshot.cpp
+++ b/src/test/fuzz/utxo_snapshot.cpp
@@ -58,7 +58,7 @@ void initialize_chain()
auto& chainman{*setup->m_node.chainman};
for (const auto& block : chain) {
BlockValidationState dummy;
- bool processed{chainman.ProcessNewBlockHeaders({*block}, true, dummy)};
+ bool processed{chainman.ProcessNewBlockHeaders({{block->GetBlockHeader()}}, true, dummy)};
Assert(processed);
const auto* index{WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block->GetHash()))};
Assert(index);
@@ -137,7 +137,7 @@ void utxo_snapshot_fuzz(FuzzBufferType buffer)
if constexpr (!INVALID) {
for (const auto& block : *g_chain) {
BlockValidationState dummy;
- bool processed{chainman.ProcessNewBlockHeaders({*block}, true, dummy)};
+ bool processed{chainman.ProcessNewBlockHeaders({{block->GetBlockHeader()}}, true, dummy)};
Assert(processed);
const auto* index{WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block->GetHash()))};
Assert(index);
diff --git a/src/test/ipc_test.capnp b/src/test/ipc_test.capnp
index 55a3dc2683..46cd08b94a 100644
--- a/src/test/ipc_test.capnp
+++ b/src/test/ipc_test.capnp
@@ -9,10 +9,15 @@ $Cxx.namespace("gen");
using Proxy = import "/mp/proxy.capnp";
$Proxy.include("test/ipc_test.h");
-$Proxy.includeTypes("ipc/capnp/common-types.h");
+$Proxy.includeTypes("test/ipc_test_types.h");
+
+using Mining = import "../ipc/capnp/mining.capnp";
interface FooInterface $Proxy.wrap("FooImplementation") {
add @0 (a :Int32, b :Int32) -> (result :Int32);
passOutPoint @1 (arg :Data) -> (result :Data);
passUniValue @2 (arg :Text) -> (result :Text);
+ passTransaction @3 (arg :Data) -> (result :Data);
+ passVectorChar @4 (arg :Data) -> (result :Data);
+ passBlockState @5 (arg :Mining.BlockValidationState) -> (result :Mining.BlockValidationState);
}
diff --git a/src/test/ipc_test.cpp b/src/test/ipc_test.cpp
index ce4edaceb0..91eba9214f 100644
--- a/src/test/ipc_test.cpp
+++ b/src/test/ipc_test.cpp
@@ -2,19 +2,47 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <interfaces/init.h>
+#include <ipc/capnp/protocol.h>
+#include <ipc/process.h>
+#include <ipc/protocol.h>
#include <logging.h>
#include <mp/proxy-types.h>
#include <test/ipc_test.capnp.h>
#include <test/ipc_test.capnp.proxy.h>
#include <test/ipc_test.h>
+#include <tinyformat.h>
+#include <validation.h>
#include <future>
+#include <thread>
#include <kj/common.h>
#include <kj/memory.h>
#include <kj/test.h>
+#include <stdexcept>
#include <boost/test/unit_test.hpp>
+//! Remote init class.
+class TestInit : public interfaces::Init
+{
+public:
+ std::unique_ptr<interfaces::Echo> makeEcho() override { return interfaces::MakeEcho(); }
+};
+
+//! Generate a temporary path with temp_directory_path and mkstemp
+static std::string TempPath(std::string_view pattern)
+{
+ std::string temp{fs::PathToString(fs::path{fs::temp_directory_path()} / fs::PathFromString(std::string{pattern}))};
+ temp.push_back('\0');
+ int fd{mkstemp(temp.data())};
+ BOOST_CHECK_GE(fd, 0);
+ BOOST_CHECK_EQUAL(close(fd), 0);
+ temp.resize(temp.size() - 1);
+ fs::remove(fs::PathFromString(temp));
+ return temp;
+}
+
//! Unit test that tests execution of IPC calls without actually creating a
//! separate process. This test is primarily intended to verify behavior of type
//! conversion code that converts C++ objects to Cap'n Proto messages and vice
@@ -23,13 +51,13 @@
//! The test creates a thread which creates a FooImplementation object (defined
//! in ipc_test.h) and a two-way pipe accepting IPC requests which call methods
//! on the object through FooInterface (defined in ipc_test.capnp).
-void IpcTest()
+void IpcPipeTest()
{
// Setup: create FooImplemention object and listen for FooInterface requests
std::promise<std::unique_ptr<mp::ProxyClient<gen::FooInterface>>> foo_promise;
std::function<void()> disconnect_client;
std::thread thread([&]() {
- mp::EventLoop loop("IpcTest", [](bool raise, const std::string& log) { LogPrintf("LOG%i: %s\n", raise, log); });
+ mp::EventLoop loop("IpcPipeTest", [](bool raise, const std::string& log) { LogPrintf("LOG%i: %s\n", raise, log); });
auto pipe = loop.m_io_context.provider->newTwoWayPipe();
auto connection_client = std::make_unique<mp::Connection>(loop, kj::mv(pipe.ends[0]));
@@ -61,7 +89,107 @@ void IpcTest()
UniValue uni2{foo->passUniValue(uni1)};
BOOST_CHECK_EQUAL(uni1.write(), uni2.write());
+ CMutableTransaction mtx;
+ mtx.version = 2;
+ mtx.nLockTime = 3;
+ mtx.vin.emplace_back(txout1);
+ mtx.vout.emplace_back(COIN, CScript());
+ CTransactionRef tx1{MakeTransactionRef(mtx)};
+ CTransactionRef tx2{foo->passTransaction(tx1)};
+ BOOST_CHECK(*Assert(tx1) == *Assert(tx2));
+
+ std::vector<char> vec1{'H', 'e', 'l', 'l', 'o'};
+ std::vector<char> vec2{foo->passVectorChar(vec1)};
+ BOOST_CHECK_EQUAL(std::string_view(vec1.begin(), vec1.end()), std::string_view(vec2.begin(), vec2.end()));
+
+ BlockValidationState bs1;
+ bs1.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "reject reason", "debug message");
+ BlockValidationState bs2{foo->passBlockState(bs1)};
+ BOOST_CHECK_EQUAL(bs1.IsValid(), bs2.IsValid());
+ BOOST_CHECK_EQUAL(bs1.IsError(), bs2.IsError());
+ BOOST_CHECK_EQUAL(bs1.IsInvalid(), bs2.IsInvalid());
+ BOOST_CHECK_EQUAL(static_cast<int>(bs1.GetResult()), static_cast<int>(bs2.GetResult()));
+ BOOST_CHECK_EQUAL(bs1.GetRejectReason(), bs2.GetRejectReason());
+ BOOST_CHECK_EQUAL(bs1.GetDebugMessage(), bs2.GetDebugMessage());
+
+ BlockValidationState bs3;
+ BlockValidationState bs4{foo->passBlockState(bs3)};
+ BOOST_CHECK_EQUAL(bs3.IsValid(), bs4.IsValid());
+ BOOST_CHECK_EQUAL(bs3.IsError(), bs4.IsError());
+ BOOST_CHECK_EQUAL(bs3.IsInvalid(), bs4.IsInvalid());
+ BOOST_CHECK_EQUAL(static_cast<int>(bs3.GetResult()), static_cast<int>(bs4.GetResult()));
+ BOOST_CHECK_EQUAL(bs3.GetRejectReason(), bs4.GetRejectReason());
+ BOOST_CHECK_EQUAL(bs3.GetDebugMessage(), bs4.GetDebugMessage());
+
// Test cleanup: disconnect pipe and join thread
disconnect_client();
thread.join();
}
+
+//! Test ipc::Protocol connect() and serve() methods connecting over a socketpair.
+void IpcSocketPairTest()
+{
+ int fds[2];
+ BOOST_CHECK_EQUAL(socketpair(AF_UNIX, SOCK_STREAM, 0, fds), 0);
+ std::unique_ptr<interfaces::Init> init{std::make_unique<TestInit>()};
+ std::unique_ptr<ipc::Protocol> protocol{ipc::capnp::MakeCapnpProtocol()};
+ std::promise<void> promise;
+ std::thread thread([&]() {
+ protocol->serve(fds[0], "test-serve", *init, [&] { promise.set_value(); });
+ });
+ promise.get_future().wait();
+ std::unique_ptr<interfaces::Init> remote_init{protocol->connect(fds[1], "test-connect")};
+ std::unique_ptr<interfaces::Echo> remote_echo{remote_init->makeEcho()};
+ BOOST_CHECK_EQUAL(remote_echo->echo("echo test"), "echo test");
+ remote_echo.reset();
+ remote_init.reset();
+ thread.join();
+}
+
+//! Test ipc::Process bind() and connect() methods connecting over a unix socket.
+void IpcSocketTest(const fs::path& datadir)
+{
+ std::unique_ptr<interfaces::Init> init{std::make_unique<TestInit>()};
+ std::unique_ptr<ipc::Protocol> protocol{ipc::capnp::MakeCapnpProtocol()};
+ std::unique_ptr<ipc::Process> process{ipc::MakeProcess()};
+
+ std::string invalid_bind{"invalid:"};
+ BOOST_CHECK_THROW(process->bind(datadir, "test_bitcoin", invalid_bind), std::invalid_argument);
+ BOOST_CHECK_THROW(process->connect(datadir, "test_bitcoin", invalid_bind), std::invalid_argument);
+
+ auto bind_and_listen{[&](const std::string& bind_address) {
+ std::string address{bind_address};
+ int serve_fd = process->bind(datadir, "test_bitcoin", address);
+ BOOST_CHECK_GE(serve_fd, 0);
+ BOOST_CHECK_EQUAL(address, bind_address);
+ protocol->listen(serve_fd, "test-serve", *init);
+ }};
+
+ auto connect_and_test{[&](const std::string& connect_address) {
+ std::string address{connect_address};
+ int connect_fd{process->connect(datadir, "test_bitcoin", address)};
+ BOOST_CHECK_EQUAL(address, connect_address);
+ std::unique_ptr<interfaces::Init> remote_init{protocol->connect(connect_fd, "test-connect")};
+ std::unique_ptr<interfaces::Echo> remote_echo{remote_init->makeEcho()};
+ BOOST_CHECK_EQUAL(remote_echo->echo("echo test"), "echo test");
+ }};
+
+ // Need to specify explicit socket addresses outside the data directory, because the data
+ // directory path is so long that the default socket address and any other
+ // addresses in the data directory would fail with errors like:
+ // Address 'unix' path '"/tmp/test_common_Bitcoin Core/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/test_bitcoin.sock"' exceeded maximum socket path length
+ std::vector<std::string> addresses{
+ strprintf("unix:%s", TempPath("bitcoin_sock0_XXXXXX")),
+ strprintf("unix:%s", TempPath("bitcoin_sock1_XXXXXX")),
+ };
+
+ // Bind and listen on multiple addresses
+ for (const auto& address : addresses) {
+ bind_and_listen(address);
+ }
+
+ // Connect and test each address multiple times.
+ for (int i : {0, 1, 0, 0, 1}) {
+ connect_and_test(addresses[i]);
+ }
+}
diff --git a/src/test/ipc_test.h b/src/test/ipc_test.h
index bcfcc2125c..2d215a20f1 100644
--- a/src/test/ipc_test.h
+++ b/src/test/ipc_test.h
@@ -7,6 +7,8 @@
#include <primitives/transaction.h>
#include <univalue.h>
+#include <util/fs.h>
+#include <validation.h>
class FooImplementation
{
@@ -14,8 +16,13 @@ public:
int add(int a, int b) { return a + b; }
COutPoint passOutPoint(COutPoint o) { return o; }
UniValue passUniValue(UniValue v) { return v; }
+ CTransactionRef passTransaction(CTransactionRef t) { return t; }
+ std::vector<char> passVectorChar(std::vector<char> v) { return v; }
+ BlockValidationState passBlockState(BlockValidationState s) { return s; }
};
-void IpcTest();
+void IpcPipeTest();
+void IpcSocketPairTest();
+void IpcSocketTest(const fs::path& datadir);
#endif // BITCOIN_TEST_IPC_TEST_H
diff --git a/src/test/ipc_test_types.h b/src/test/ipc_test_types.h
new file mode 100644
index 0000000000..b1d4829aa7
--- /dev/null
+++ b/src/test/ipc_test_types.h
@@ -0,0 +1,12 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_IPC_TEST_TYPES_H
+#define BITCOIN_TEST_IPC_TEST_TYPES_H
+
+#include <ipc/capnp/common-types.h>
+#include <ipc/capnp/mining-types.h>
+#include <test/ipc_test.capnp.h>
+
+#endif // BITCOIN_TEST_IPC_TEST_TYPES_H
diff --git a/src/test/ipc_tests.cpp b/src/test/ipc_tests.cpp
index 6e144b0f41..35a4f61117 100644
--- a/src/test/ipc_tests.cpp
+++ b/src/test/ipc_tests.cpp
@@ -2,12 +2,41 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <ipc/process.h>
#include <test/ipc_test.h>
+
+#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
-BOOST_AUTO_TEST_SUITE(ipc_tests)
+BOOST_FIXTURE_TEST_SUITE(ipc_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(ipc_tests)
{
- IpcTest();
+ IpcPipeTest();
+ IpcSocketPairTest();
+ IpcSocketTest(m_args.GetDataDirNet());
}
+
+// Test address parsing.
+BOOST_AUTO_TEST_CASE(parse_address_test)
+{
+ std::unique_ptr<ipc::Process> process{ipc::MakeProcess()};
+ fs::path datadir{"/var/empty/notexist"};
+ auto check_notexist{[](const std::system_error& e) { return e.code() == std::errc::no_such_file_or_directory; }};
+ auto check_address{[&](std::string address, std::string expect_address, std::string expect_error) {
+ if (expect_error.empty()) {
+ BOOST_CHECK_EXCEPTION(process->connect(datadir, "test_bitcoin", address), std::system_error, check_notexist);
+ } else {
+ BOOST_CHECK_EXCEPTION(process->connect(datadir, "test_bitcoin", address), std::invalid_argument, HasReason(expect_error));
+ }
+ BOOST_CHECK_EQUAL(address, expect_address);
+ }};
+ check_address("unix", "unix:/var/empty/notexist/test_bitcoin.sock", "");
+ check_address("unix:", "unix:/var/empty/notexist/test_bitcoin.sock", "");
+ check_address("unix:path.sock", "unix:/var/empty/notexist/path.sock", "");
+ check_address("unix:0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.sock",
+ "unix:/var/empty/notexist/0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.sock",
+ "Unix address path \"/var/empty/notexist/0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.sock\" exceeded maximum socket path length");
+ check_address("invalid", "invalid", "Unrecognized address 'invalid'");
+}
+
BOOST_AUTO_TEST_SUITE_END()
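
Both the TempPath() helper in ipc_test.cpp and the "exceeded maximum socket path length" expectation above come down to the same OS limit: an AF_UNIX socket path must fit in sockaddr_un::sun_path, which is roughly 108 bytes on Linux and 104 on macOS. A POSIX-only sketch that prints the limit on the build platform:

// POSIX-only sketch: report the maximum AF_UNIX socket path length, the limit
// behind the "exceeded maximum socket path length" errors checked above.
#include <sys/un.h>
#include <cstdio>

int main()
{
    sockaddr_un sa;
    std::printf("sun_path capacity: %zu bytes\n", sizeof(sa.sun_path));
}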
diff --git a/src/test/logging_tests.cpp b/src/test/logging_tests.cpp
index fdac760d7f..77ec81e597 100644
--- a/src/test/logging_tests.cpp
+++ b/src/test/logging_tests.cpp
@@ -83,15 +83,15 @@ BOOST_AUTO_TEST_CASE(logging_timer)
BOOST_CHECK_EQUAL(micro_timer.LogMsg("msg").substr(0, result_prefix.size()), result_prefix);
}
-BOOST_FIXTURE_TEST_CASE(logging_LogPrintf_, LogSetup)
+BOOST_FIXTURE_TEST_CASE(logging_LogPrintStr, LogSetup)
{
LogInstance().m_log_sourcelocations = true;
- LogPrintf_("fn1", "src1", 1, BCLog::LogFlags::NET, BCLog::Level::Debug, "foo1: %s\n", "bar1");
- LogPrintf_("fn2", "src2", 2, BCLog::LogFlags::NET, BCLog::Level::Info, "foo2: %s\n", "bar2");
- LogPrintf_("fn3", "src3", 3, BCLog::LogFlags::ALL, BCLog::Level::Debug, "foo3: %s\n", "bar3");
- LogPrintf_("fn4", "src4", 4, BCLog::LogFlags::ALL, BCLog::Level::Info, "foo4: %s\n", "bar4");
- LogPrintf_("fn5", "src5", 5, BCLog::LogFlags::NONE, BCLog::Level::Debug, "foo5: %s\n", "bar5");
- LogPrintf_("fn6", "src6", 6, BCLog::LogFlags::NONE, BCLog::Level::Info, "foo6: %s\n", "bar6");
+ LogInstance().LogPrintStr("foo1: bar1", "fn1", "src1", 1, BCLog::LogFlags::NET, BCLog::Level::Debug);
+ LogInstance().LogPrintStr("foo2: bar2", "fn2", "src2", 2, BCLog::LogFlags::NET, BCLog::Level::Info);
+ LogInstance().LogPrintStr("foo3: bar3", "fn3", "src3", 3, BCLog::LogFlags::ALL, BCLog::Level::Debug);
+ LogInstance().LogPrintStr("foo4: bar4", "fn4", "src4", 4, BCLog::LogFlags::ALL, BCLog::Level::Info);
+ LogInstance().LogPrintStr("foo5: bar5", "fn5", "src5", 5, BCLog::LogFlags::NONE, BCLog::Level::Debug);
+ LogInstance().LogPrintStr("foo6: bar6", "fn6", "src6", 6, BCLog::LogFlags::NONE, BCLog::Level::Info);
std::ifstream file{tmp_log_path};
std::vector<std::string> log_lines;
for (std::string log; std::getline(file, log);) {
@@ -116,7 +116,6 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacrosDeprecated, LogSetup)
LogPrintLevel(BCLog::NET, BCLog::Level::Info, "foo8: %s\n", "bar8");
LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "foo9: %s\n", "bar9");
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "foo10: %s\n", "bar10");
- LogPrintfCategory(BCLog::VALIDATION, "foo11: %s\n", "bar11");
std::ifstream file{tmp_log_path};
std::vector<std::string> log_lines;
for (std::string log; std::getline(file, log);) {
@@ -128,18 +127,17 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacrosDeprecated, LogSetup)
"[net:info] foo8: bar8",
"[net:warning] foo9: bar9",
"[net:error] foo10: bar10",
- "[validation:info] foo11: bar11",
};
BOOST_CHECK_EQUAL_COLLECTIONS(log_lines.begin(), log_lines.end(), expected.begin(), expected.end());
}
BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacros, LogSetup)
{
- LogTrace(BCLog::NET, "foo6: %s\n", "bar6"); // not logged
- LogDebug(BCLog::NET, "foo7: %s\n", "bar7");
- LogInfo("foo8: %s\n", "bar8");
- LogWarning("foo9: %s\n", "bar9");
- LogError("foo10: %s\n", "bar10");
+ LogTrace(BCLog::NET, "foo6: %s", "bar6"); // not logged
+ LogDebug(BCLog::NET, "foo7: %s", "bar7");
+ LogInfo("foo8: %s", "bar8");
+ LogWarning("foo9: %s", "bar9");
+ LogError("foo10: %s", "bar10");
std::ifstream file{tmp_log_path};
std::vector<std::string> log_lines;
for (std::string log; std::getline(file, log);) {
diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp
index 70308cb29a..2b1cf8595d 100644
--- a/src/test/merkle_tests.cpp
+++ b/src/test/merkle_tests.cpp
@@ -23,110 +23,6 @@ static uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vecto
return hash;
}
-/* This implements a constant-space merkle root/path calculator, limited to 2^32 leaves. */
-static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot, bool* pmutated, uint32_t branchpos, std::vector<uint256>* pbranch) {
- if (pbranch) pbranch->clear();
- if (leaves.size() == 0) {
- if (pmutated) *pmutated = false;
- if (proot) *proot = uint256();
- return;
- }
- bool mutated = false;
- // count is the number of leaves processed so far.
- uint32_t count = 0;
- // inner is an array of eagerly computed subtree hashes, indexed by tree
- // level (0 being the leaves).
- // For example, when count is 25 (11001 in binary), inner[4] is the hash of
- // the first 16 leaves, inner[3] of the next 8 leaves, and inner[0] equal to
- // the last leaf. The other inner entries are undefined.
- uint256 inner[32];
- // Which position in inner is a hash that depends on the matching leaf.
- int matchlevel = -1;
- // First process all leaves into 'inner' values.
- while (count < leaves.size()) {
- uint256 h = leaves[count];
- bool matchh = count == branchpos;
- count++;
- int level;
- // For each of the lower bits in count that are 0, do 1 step. Each
- // corresponds to an inner value that existed before processing the
- // current leaf, and each needs a hash to combine it.
- for (level = 0; !(count & ((uint32_t{1}) << level)); level++) {
- if (pbranch) {
- if (matchh) {
- pbranch->push_back(inner[level]);
- } else if (matchlevel == level) {
- pbranch->push_back(h);
- matchh = true;
- }
- }
- mutated |= (inner[level] == h);
- h = Hash(inner[level], h);
- }
- // Store the resulting hash at inner position level.
- inner[level] = h;
- if (matchh) {
- matchlevel = level;
- }
- }
- // Do a final 'sweep' over the rightmost branch of the tree to process
- // odd levels, and reduce everything to a single top value.
- // Level is the level (counted from the bottom) up to which we've sweeped.
- int level = 0;
- // As long as bit number level in count is zero, skip it. It means there
- // is nothing left at this level.
- while (!(count & ((uint32_t{1}) << level))) {
- level++;
- }
- uint256 h = inner[level];
- bool matchh = matchlevel == level;
- while (count != ((uint32_t{1}) << level)) {
- // If we reach this point, h is an inner value that is not the top.
- // We combine it with itself (Bitcoin's special rule for odd levels in
- // the tree) to produce a higher level one.
- if (pbranch && matchh) {
- pbranch->push_back(h);
- }
- h = Hash(h, h);
- // Increment count to the value it would have if two entries at this
- // level had existed.
- count += ((uint32_t{1}) << level);
- level++;
- // And propagate the result upwards accordingly.
- while (!(count & ((uint32_t{1}) << level))) {
- if (pbranch) {
- if (matchh) {
- pbranch->push_back(inner[level]);
- } else if (matchlevel == level) {
- pbranch->push_back(h);
- matchh = true;
- }
- }
- h = Hash(inner[level], h);
- level++;
- }
- }
- // Return result.
- if (pmutated) *pmutated = mutated;
- if (proot) *proot = h;
-}
-
-static std::vector<uint256> ComputeMerkleBranch(const std::vector<uint256>& leaves, uint32_t position) {
- std::vector<uint256> ret;
- MerkleComputation(leaves, nullptr, nullptr, position, &ret);
- return ret;
-}
-
-static std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position)
-{
- std::vector<uint256> leaves;
- leaves.resize(block.vtx.size());
- for (size_t s = 0; s < block.vtx.size(); s++) {
- leaves[s] = block.vtx[s]->GetHash();
- }
- return ComputeMerkleBranch(leaves, position);
-}
-
// Older version of the merkle root computation code, for comparison.
static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::vector<uint256>& vMerkleTree)
{
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 9f35690460..6cf2757b2d 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -608,7 +608,7 @@ void MinerTestingSetup::TestPrioritisedMining(const CScript& scriptPubKey, const
BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
{
// Note that by default, these tests run with size accounting enabled.
- CScript scriptPubKey = CScript() << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"_hex_v_u8 << OP_CHECKSIG;
+ CScript scriptPubKey = CScript() << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"_hex << OP_CHECKSIG;
std::unique_ptr<CBlockTemplate> pblocktemplate;
CTxMemPool& tx_mempool{*m_node.mempool};
diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp
index 7a3e8e3a47..29a73d03d2 100644
--- a/src/test/multisig_tests.cpp
+++ b/src/test/multisig_tests.cpp
@@ -10,6 +10,7 @@
#include <script/sign.h>
#include <script/signingprovider.h>
#include <test/util/setup_common.h>
+#include <test/util/transaction_utils.h>
#include <tinyformat.h>
#include <uint256.h>
diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp
index d4c52d7fe1..799f2c0fec 100644
--- a/src/test/orphanage_tests.cpp
+++ b/src/test/orphanage_tests.cpp
@@ -3,12 +3,15 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <arith_uint256.h>
+#include <consensus/validation.h>
+#include <policy/policy.h>
#include <primitives/transaction.h>
#include <pubkey.h>
#include <script/sign.h>
#include <script/signingprovider.h>
#include <test/util/random.h>
#include <test/util/setup_common.h>
+#include <test/util/transaction_utils.h>
#include <txorphanage.h>
#include <array>
@@ -370,4 +373,21 @@ BOOST_AUTO_TEST_CASE(get_children)
}
}
+BOOST_AUTO_TEST_CASE(too_large_orphan_tx)
+{
+ TxOrphanage orphanage;
+ CMutableTransaction tx;
+ tx.vin.resize(1);
+
+ // check that txs larger than MAX_STANDARD_TX_WEIGHT are not added to the orphanage
+ BulkTransaction(tx, MAX_STANDARD_TX_WEIGHT + 4);
+ BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(tx)), MAX_STANDARD_TX_WEIGHT + 4);
+ BOOST_CHECK(!orphanage.AddTx(MakeTransactionRef(tx), 0));
+
+ tx.vout.clear();
+ BulkTransaction(tx, MAX_STANDARD_TX_WEIGHT);
+ BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(tx)), MAX_STANDARD_TX_WEIGHT);
+ BOOST_CHECK(orphanage.AddTx(MakeTransactionRef(tx), 0));
+}
+
BOOST_AUTO_TEST_SUITE_END()
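
The new boundary test leans on the BIP141 weight formula, weight = 3 * base_size + total_size. For a transaction without witness data the two sizes coincide, so the weight is exactly 4 * serialized_size and always a multiple of four, which is presumably why the over-limit case targets MAX_STANDARD_TX_WEIGHT + 4. A tiny sketch of the arithmetic, with the constants restated locally rather than taken from policy.h:

// Sketch of the weight arithmetic behind the MAX_STANDARD_TX_WEIGHT checks.
constexpr int WITNESS_SCALE_FACTOR_SKETCH = 4;
constexpr int MAX_STANDARD_TX_WEIGHT_SKETCH = 400'000;

constexpr int Weight(int base_size, int total_size)
{
    return (WITNESS_SCALE_FACTOR_SKETCH - 1) * base_size + total_size;
}

int main()
{
    // Without witness data, base and total serialized sizes coincide, so the
    // weight is 4 * size and can only move in steps of 4.
    constexpr int size_at_limit = MAX_STANDARD_TX_WEIGHT_SKETCH / WITNESS_SCALE_FACTOR_SKETCH;
    static_assert(Weight(size_at_limit, size_at_limit) == MAX_STANDARD_TX_WEIGHT_SKETCH);
    static_assert(Weight(size_at_limit + 1, size_at_limit + 1) == MAX_STANDARD_TX_WEIGHT_SKETCH + 4);
}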
diff --git a/src/test/raii_event_tests.cpp b/src/test/raii_event_tests.cpp
index ada61029ee..7d1079fbbe 100644
--- a/src/test/raii_event_tests.cpp
+++ b/src/test/raii_event_tests.cpp
@@ -86,14 +86,6 @@ BOOST_AUTO_TEST_CASE(raii_event_order)
event_set_mem_functions(malloc, realloc, free);
}
-#else
-
-BOOST_AUTO_TEST_CASE(raii_event_tests_SKIPPED)
-{
- // It would probably be ideal to report skipped, but boost::test doesn't seem to make that practical (at least not in versions available with common distros)
- BOOST_TEST_MESSAGE("Skipping raii_event_tess: libevent doesn't support event_set_mem_functions");
-}
-
#endif // EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/script_p2sh_tests.cpp b/src/test/script_p2sh_tests.cpp
index f91203cc48..096de0724f 100644
--- a/src/test/script_p2sh_tests.cpp
+++ b/src/test/script_p2sh_tests.cpp
@@ -11,6 +11,7 @@
#include <script/sign.h>
#include <script/signingprovider.h>
#include <test/util/setup_common.h>
+#include <test/util/transaction_utils.h>
#include <validation.h>
#include <vector>
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 0e2a1631ce..59eb90bd27 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -123,7 +123,6 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
ScriptError err;
const CTransaction txCredit{BuildCreditingTransaction(scriptPubKey, nValue)};
CMutableTransaction tx = BuildSpendingTransaction(scriptSig, scriptWitness, txCredit);
- CMutableTransaction tx2 = tx;
BOOST_CHECK_MESSAGE(VerifyScript(scriptSig, scriptPubKey, &scriptWitness, flags, MutableTransactionSignatureChecker(&tx, 0, txCredit.vout[0].nValue, MissingDataBehavior::ASSERT_FAIL), &err) == expect, message);
BOOST_CHECK_MESSAGE(err == scriptError, FormatScriptError(err) + " where " + FormatScriptError((ScriptError_t)scriptError) + " expected: " + message);
@@ -1368,6 +1367,13 @@ static CScript ScriptFromHex(const std::string& str)
return ToScript(*Assert(TryParseHex(str)));
}
+BOOST_AUTO_TEST_CASE(script_byte_array_u8_vector_equivalence)
+{
+ const CScript scriptPubKey1 = CScript() << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"_hex_v_u8 << OP_CHECKSIG;
+ const CScript scriptPubKey2 = CScript() << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"_hex << OP_CHECKSIG;
+ BOOST_CHECK(scriptPubKey1 == scriptPubKey2);
+}
+
BOOST_AUTO_TEST_CASE(script_FindAndDelete)
{
// Exercise the FindAndDelete functionality
@@ -1421,7 +1427,7 @@ BOOST_AUTO_TEST_CASE(script_FindAndDelete)
// prefix, leaving 02ff03 which is push-two-bytes:
s = ToScript("0302ff030302ff03"_hex);
d = ToScript("03"_hex);
- expect = CScript() << "ff03"_hex_v_u8 << "ff03"_hex_v_u8;
+ expect = CScript() << "ff03"_hex << "ff03"_hex;
BOOST_CHECK_EQUAL(FindAndDelete(s, d), 2);
BOOST_CHECK(s == expect);
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 9217f05945..777122df6d 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -261,7 +261,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file)
for (uint8_t j = 0; j < 40; ++j) {
file << j;
}
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
// The buffer size (second arg) must be greater than the rewind
// amount (third arg).
@@ -391,7 +391,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_skip)
for (uint8_t j = 0; j < 40; ++j) {
file << j;
}
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
// The buffer is 25 bytes, allow rewinding 10 bytes.
BufferedFile bf{file, 25, 10};
@@ -444,7 +444,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
for (uint8_t i = 0; i < fileSize; ++i) {
file << i;
}
- std::rewind(file.Get());
+ file.seek(0, SEEK_SET);
size_t bufSize = m_rng.randrange(300) + 1;
size_t rewindSize = m_rng.randrange(bufSize);
diff --git a/src/test/system_tests.cpp b/src/test/system_tests.cpp
index baa759e42c..a5d9be07d5 100644
--- a/src/test/system_tests.cpp
+++ b/src/test/system_tests.cpp
@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <test/util/setup_common.h>
#include <common/run_command.h>
#include <univalue.h>
@@ -16,13 +16,6 @@
BOOST_FIXTURE_TEST_SUITE(system_tests, BasicTestingSetup)
-// At least one test is required (in case ENABLE_EXTERNAL_SIGNER is not defined).
-// Workaround for https://github.com/bitcoin/bitcoin/issues/19128
-BOOST_AUTO_TEST_CASE(dummy)
-{
- BOOST_CHECK(true);
-}
-
#ifdef ENABLE_EXTERNAL_SIGNER
BOOST_AUTO_TEST_CASE(run_command)
@@ -54,8 +47,8 @@ BOOST_AUTO_TEST_CASE(run_command)
}
{
// Return non-zero exit code, with error message for stderr
- const std::string command{"ls nosuchfile"};
- const std::string expected{"No such file or directory"};
+ const std::string command{"sh -c 'echo err 1>&2 && false'"};
+ const std::string expected{"err"};
BOOST_CHECK_EXCEPTION(RunCommandParseJSON(command), std::runtime_error, [&](const std::runtime_error& e) {
const std::string what(e.what());
BOOST_CHECK(what.find(strprintf("RunCommandParseJSON error: process(%s) returned", command)) != std::string::npos);
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 462abd5222..3430a5bbfa 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -852,24 +852,24 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
CheckIsNotStandard(t, "scriptpubkey");
// MAX_OP_RETURN_RELAY-byte TxoutType::NULL_DATA (standard)
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex_v_u8;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex;
BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY, t.vout[0].scriptPubKey.size());
CheckIsStandard(t);
// MAX_OP_RETURN_RELAY+1-byte TxoutType::NULL_DATA (non-standard)
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3800"_hex_v_u8;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3800"_hex;
BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY + 1, t.vout[0].scriptPubKey.size());
CheckIsNotStandard(t, "scriptpubkey");
// Data payload can be encoded in any way...
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << ""_hex_v_u8;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << ""_hex;
CheckIsStandard(t);
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << "00"_hex_v_u8 << "01"_hex_v_u8;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << "00"_hex << "01"_hex;
CheckIsStandard(t);
// OP_RESERVED *is* considered to be a PUSHDATA type opcode by IsPushOnly()!
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << OP_RESERVED << -1 << 0 << "01"_hex_v_u8 << 2 << 3 << 4 << 5 << 6 << 7 << 8 << 9 << 10 << 11 << 12 << 13 << 14 << 15 << 16;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << OP_RESERVED << -1 << 0 << "01"_hex << 2 << 3 << 4 << 5 << 6 << 7 << 8 << 9 << 10 << 11 << 12 << 13 << 14 << 15 << 16;
CheckIsStandard(t);
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << 0 << "01"_hex_v_u8 << 2 << "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"_hex_v_u8;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << 0 << "01"_hex << 2 << "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"_hex;
CheckIsStandard(t);
// ...so long as it only contains PUSHDATA's
@@ -883,13 +883,13 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
// Only one TxoutType::NULL_DATA permitted in all cases
t.vout.resize(2);
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex_v_u8;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex;
t.vout[0].nValue = 0;
- t.vout[1].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex_v_u8;
+ t.vout[1].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex;
t.vout[1].nValue = 0;
CheckIsNotStandard(t, "multi-op-return");
- t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex_v_u8;
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"_hex;
t.vout[1].scriptPubKey = CScript() << OP_RETURN;
CheckIsNotStandard(t, "multi-op-return");
diff --git a/src/test/txindex_tests.cpp b/src/test/txindex_tests.cpp
index 5a32b02ad9..9ee5387830 100644
--- a/src/test/txindex_tests.cpp
+++ b/src/test/txindex_tests.cpp
@@ -33,7 +33,7 @@ BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
BOOST_REQUIRE(txindex.StartBackgroundSync());
// Allow tx index to catch up with the block index.
- IndexWaitSynced(txindex, *Assert(m_node.shutdown));
+ IndexWaitSynced(txindex, *Assert(m_node.shutdown_signal));
// Check that txindex excludes genesis block transactions.
const CBlock& genesis_block = Params().GenesisBlock();
diff --git a/src/test/uint256_tests.cpp b/src/test/uint256_tests.cpp
index 8b76e0865a..142d7a6fde 100644
--- a/src/test/uint256_tests.cpp
+++ b/src/test/uint256_tests.cpp
@@ -2,7 +2,6 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <arith_uint256.h>
#include <streams.h>
#include <test/util/setup_common.h>
#include <uint256.h>
@@ -61,14 +60,6 @@ static std::string ArrayToString(const unsigned char A[], unsigned int width)
return Stream.str();
}
-// Takes hex string in reverse byte order.
-inline uint160 uint160S(std::string_view str)
-{
- uint160 rv;
- rv.SetHexDeprecated(str);
- return rv;
-}
-
BOOST_AUTO_TEST_CASE( basics ) // constructors, equality, inequality
{
// constructor uint256(vector<char>):
@@ -92,33 +83,22 @@ BOOST_AUTO_TEST_CASE( basics ) // constructors, equality, inequality
BOOST_CHECK_NE(MaxL, ZeroL); BOOST_CHECK_NE(MaxS, ZeroS);
// String Constructor and Copy Constructor
- BOOST_CHECK_EQUAL(uint256S("0x"+R1L.ToString()), R1L);
- BOOST_CHECK_EQUAL(uint256S("0x"+R2L.ToString()), R2L);
- BOOST_CHECK_EQUAL(uint256S("0x"+ZeroL.ToString()), ZeroL);
- BOOST_CHECK_EQUAL(uint256S("0x"+OneL.ToString()), OneL);
- BOOST_CHECK_EQUAL(uint256S("0x"+MaxL.ToString()), MaxL);
- BOOST_CHECK_EQUAL(uint256S(R1L.ToString()), R1L);
- BOOST_CHECK_EQUAL(uint256S(" 0x"+R1L.ToString()+" "), R1L);
- BOOST_CHECK_EQUAL(uint256S(" 0x"+R1L.ToString()+"-trash;%^& "), R1L);
- BOOST_CHECK_EQUAL(uint256S("\t \n \n \f\n\r\t\v\t 0x"+R1L.ToString()+" \t \n \n \f\n\r\t\v\t "), R1L);
- BOOST_CHECK_EQUAL(uint256S(""), ZeroL);
- BOOST_CHECK_EQUAL(uint256S("1"), OneL);
- BOOST_CHECK_EQUAL(R1L, uint256S(R1ArrayHex));
+ BOOST_CHECK_EQUAL(uint256::FromHex(R1L.ToString()).value(), R1L);
+ BOOST_CHECK_EQUAL(uint256::FromHex(R2L.ToString()).value(), R2L);
+ BOOST_CHECK_EQUAL(uint256::FromHex(ZeroL.ToString()).value(), ZeroL);
+ BOOST_CHECK_EQUAL(uint256::FromHex(OneL.ToString()).value(), OneL);
+ BOOST_CHECK_EQUAL(uint256::FromHex(MaxL.ToString()).value(), MaxL);
+ BOOST_CHECK_EQUAL(uint256::FromHex(R1ArrayHex).value(), R1L);
BOOST_CHECK_EQUAL(uint256(R1L), R1L);
BOOST_CHECK_EQUAL(uint256(ZeroL), ZeroL);
BOOST_CHECK_EQUAL(uint256(OneL), OneL);
- BOOST_CHECK_EQUAL(uint160S("0x"+R1S.ToString()), R1S);
- BOOST_CHECK_EQUAL(uint160S("0x"+R2S.ToString()), R2S);
- BOOST_CHECK_EQUAL(uint160S("0x"+ZeroS.ToString()), ZeroS);
- BOOST_CHECK_EQUAL(uint160S("0x"+OneS.ToString()), OneS);
- BOOST_CHECK_EQUAL(uint160S("0x"+MaxS.ToString()), MaxS);
- BOOST_CHECK_EQUAL(uint160S(R1S.ToString()), R1S);
- BOOST_CHECK_EQUAL(uint160S(" 0x"+R1S.ToString()+" "), R1S);
- BOOST_CHECK_EQUAL(uint160S(" 0x"+R1S.ToString()+"-trash;%^& "), R1S);
- BOOST_CHECK_EQUAL(uint160S(" \t \n \n \f\n\r\t\v\t 0x"+R1S.ToString()+" \t \n \n \f\n\r\t\v\t"), R1S);
- BOOST_CHECK_EQUAL(uint160S(""), ZeroS);
- BOOST_CHECK_EQUAL(R1S, uint160S(R1ArrayHex));
+ BOOST_CHECK_EQUAL(uint160::FromHex(R1S.ToString()).value(), R1S);
+ BOOST_CHECK_EQUAL(uint160::FromHex(R2S.ToString()).value(), R2S);
+ BOOST_CHECK_EQUAL(uint160::FromHex(ZeroS.ToString()).value(), ZeroS);
+ BOOST_CHECK_EQUAL(uint160::FromHex(OneS.ToString()).value(), OneS);
+ BOOST_CHECK_EQUAL(uint160::FromHex(MaxS.ToString()).value(), MaxS);
+ BOOST_CHECK_EQUAL(uint160::FromHex(std::string_view{R1ArrayHex + 24, 40}).value(), R1S);
BOOST_CHECK_EQUAL(uint160(R1S), R1S);
BOOST_CHECK_EQUAL(uint160(ZeroS), ZeroS);
@@ -264,82 +244,6 @@ BOOST_AUTO_TEST_CASE(methods) // GetHex SetHexDeprecated FromHex begin() end() s
ss.clear();
}
-BOOST_AUTO_TEST_CASE( conversion )
-{
- BOOST_CHECK_EQUAL(ArithToUint256(UintToArith256(ZeroL)), ZeroL);
- BOOST_CHECK_EQUAL(ArithToUint256(UintToArith256(OneL)), OneL);
- BOOST_CHECK_EQUAL(ArithToUint256(UintToArith256(R1L)), R1L);
- BOOST_CHECK_EQUAL(ArithToUint256(UintToArith256(R2L)), R2L);
- BOOST_CHECK_EQUAL(UintToArith256(ZeroL), 0);
- BOOST_CHECK_EQUAL(UintToArith256(OneL), 1);
- BOOST_CHECK_EQUAL(ArithToUint256(0), ZeroL);
- BOOST_CHECK_EQUAL(ArithToUint256(1), OneL);
- BOOST_CHECK_EQUAL(arith_uint256(UintToArith256(uint256S(R1L.GetHex()))), UintToArith256(R1L));
- BOOST_CHECK_EQUAL(arith_uint256(UintToArith256(uint256S(R2L.GetHex()))), UintToArith256(R2L));
- BOOST_CHECK_EQUAL(R1L.GetHex(), UintToArith256(R1L).GetHex());
- BOOST_CHECK_EQUAL(R2L.GetHex(), UintToArith256(R2L).GetHex());
-}
-
-BOOST_AUTO_TEST_CASE( operator_with_self )
-{
-
-/* Clang 16 and earlier detects v -= v and v /= v as self-assignments
- to 0 and 1 respectively.
- See: https://github.com/llvm/llvm-project/issues/42469
- and the fix in commit c5302325b2a62d77cf13dd16cd5c19141862fed0 .
-
- This makes some sense for arithmetic classes, but could be considered a bug
- elsewhere. Disable the warning here so that the code can be tested, but the
- warning should remain on as there will likely always be a better way to
- express this.
-*/
-
-#if defined(__clang__)
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wself-assign-overloaded"
-#endif
- arith_uint256 v = UintToArith256(uint256S("02"));
- v *= v;
- BOOST_CHECK_EQUAL(v, UintToArith256(uint256S("04")));
- v /= v;
- BOOST_CHECK_EQUAL(v, UintToArith256(uint256S("01")));
- v += v;
- BOOST_CHECK_EQUAL(v, UintToArith256(uint256S("02")));
- v -= v;
- BOOST_CHECK_EQUAL(v, UintToArith256(uint256S("0")));
-#if defined(__clang__)
-# pragma clang diagnostic pop
-#endif
-}
-
-BOOST_AUTO_TEST_CASE(parse)
-{
- {
- std::string s_12{"0000000000000000000000000000000000000000000000000000000000000012"};
- BOOST_CHECK_EQUAL(uint256S("12\0").GetHex(), s_12);
- BOOST_CHECK_EQUAL(uint256S(std::string_view{"12\0", 3}).GetHex(), s_12);
- BOOST_CHECK_EQUAL(uint256S("0x12").GetHex(), s_12);
- BOOST_CHECK_EQUAL(uint256S(" 0x12").GetHex(), s_12);
- BOOST_CHECK_EQUAL(uint256S(" 12").GetHex(), s_12);
- }
- {
- std::string s_1{uint256::ONE.GetHex()};
- BOOST_CHECK_EQUAL(uint256S("1\0").GetHex(), s_1);
- BOOST_CHECK_EQUAL(uint256S(std::string_view{"1\0", 2}).GetHex(), s_1);
- BOOST_CHECK_EQUAL(uint256S("0x1").GetHex(), s_1);
- BOOST_CHECK_EQUAL(uint256S(" 0x1").GetHex(), s_1);
- BOOST_CHECK_EQUAL(uint256S(" 1").GetHex(), s_1);
- }
- {
- std::string s_0{uint256::ZERO.GetHex()};
- BOOST_CHECK_EQUAL(uint256S("\0").GetHex(), s_0);
- BOOST_CHECK_EQUAL(uint256S(std::string_view{"\0", 1}).GetHex(), s_0);
- BOOST_CHECK_EQUAL(uint256S("0x").GetHex(), s_0);
- BOOST_CHECK_EQUAL(uint256S(" 0x").GetHex(), s_0);
- BOOST_CHECK_EQUAL(uint256S(" ").GetHex(), s_0);
- }
-}
-
/**
* Implemented as a templated function so it can be reused by other classes that have a FromHex()
* method that wraps base_blob::FromHex(), such as transaction_identifier::FromHex().
@@ -395,15 +299,15 @@ BOOST_AUTO_TEST_CASE(from_hex)
BOOST_AUTO_TEST_CASE(from_user_hex)
{
- BOOST_CHECK_EQUAL(uint256::FromUserHex("").value(), uint256::ZERO);
- BOOST_CHECK_EQUAL(uint256::FromUserHex("0x").value(), uint256::ZERO);
- BOOST_CHECK_EQUAL(uint256::FromUserHex("0").value(), uint256::ZERO);
- BOOST_CHECK_EQUAL(uint256::FromUserHex("00").value(), uint256::ZERO);
- BOOST_CHECK_EQUAL(uint256::FromUserHex("1").value(), uint256::ONE);
- BOOST_CHECK_EQUAL(uint256::FromUserHex("0x10").value(), uint256{0x10});
- BOOST_CHECK_EQUAL(uint256::FromUserHex("10").value(), uint256{0x10});
- BOOST_CHECK_EQUAL(uint256::FromUserHex("0xFf").value(), uint256{0xff});
- BOOST_CHECK_EQUAL(uint256::FromUserHex("Ff").value(), uint256{0xff});
+ BOOST_CHECK_EQUAL(uint256::FromUserHex(""), uint256::ZERO);
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("0x"), uint256::ZERO);
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("0"), uint256::ZERO);
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("00"), uint256::ZERO);
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("1"), uint256::ONE);
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("0x10"), uint256{0x10});
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("10"), uint256{0x10});
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("0xFf"), uint256{0xff});
+ BOOST_CHECK_EQUAL(uint256::FromUserHex("Ff"), uint256{0xff});
const std::string valid_hex_64{"0x0123456789abcdef0123456789abcdef0123456789ABDCEF0123456789ABCDEF"};
BOOST_REQUIRE_EQUAL(valid_hex_64.size(), 2 + 64); // 0x prefix and 64 hex digits
BOOST_CHECK_EQUAL(uint256::FromUserHex(valid_hex_64.substr(2)).value().ToString(), ToLower(valid_hex_64.substr(2)));
@@ -430,7 +334,7 @@ BOOST_AUTO_TEST_CASE( check_ONE )
BOOST_AUTO_TEST_CASE(FromHex_vs_uint256)
{
- auto runtime_uint{uint256::FromHex("4A5E1E4BAAB89F3A32518A88C31BC87F618f76673e2cc77ab2127b7afdeda33b").value()};
+ auto runtime_uint{uint256::FromHex("4A5E1E4BAAB89F3A32518A88C31BC87F618f76673e2cc77ab2127b7afdeda33b")};
constexpr uint256 consteval_uint{ "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"};
BOOST_CHECK_EQUAL(consteval_uint, runtime_uint);
}
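The rewritten uint256 tests above go through uint256::FromHex and uint256::FromUserHex, which return a std::optional and reject malformed input outright instead of silently ignoring stray characters the way the removed uint256S/SetHexDeprecated helpers did. A minimal standalone sketch of that optional-returning parse style (FromHex32 is a hypothetical helper for illustration, not the uint256 implementation):

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>

// Parse exactly 64 hex characters into 32 bytes; any deviation yields std::nullopt.
std::optional<std::array<uint8_t, 32>> FromHex32(std::string_view hex)
{
    if (hex.size() != 64) return std::nullopt;
    auto nibble = [](char c) -> int {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
    };
    std::array<uint8_t, 32> out{};
    for (size_t i = 0; i < 32; ++i) {
        const int hi = nibble(hex[2 * i]), lo = nibble(hex[2 * i + 1]);
        if (hi < 0 || lo < 0) return std::nullopt; // a single bad character fails the whole parse
        out[i] = uint8_t(hi << 4 | lo);
    }
    return out;
}

int main()
{
    assert(FromHex32(std::string(64, '0')).has_value());
    assert(!FromHex32("0x12").has_value()); // wrong length and non-hex characters are rejected
}
```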
diff --git a/src/test/util/cluster_linearize.h b/src/test/util/cluster_linearize.h
index 9477d2ed41..871aa9d74e 100644
--- a/src/test/util/cluster_linearize.h
+++ b/src/test/util/cluster_linearize.h
@@ -27,7 +27,7 @@ using TestBitSet = BitSet<32>;
template<typename SetType>
bool IsAcyclic(const DepGraph<SetType>& depgraph) noexcept
{
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
+ for (ClusterIndex i : depgraph.Positions()) {
if ((depgraph.Ancestors(i) & depgraph.Descendants(i)) != SetType::Singleton(i)) {
return false;
}
@@ -57,11 +57,14 @@ bool IsAcyclic(const DepGraph<SetType>& depgraph) noexcept
* by parent relations that were serialized before it).
* - The various insertion positions in the cluster, from the very end of the cluster, to the
* front.
+ * - The appending of 1, 2, 3, ... holes at the end of the cluster, followed by appending the new
+ * transaction.
*
- * Let's say you have a 7-transaction cluster, consisting of transactions F,A,C,B,G,E,D, but
- * serialized in order A,B,C,D,E,F,G, because that happens to be a topological ordering. By the
- * time G gets serialized, what has been serialized already represents the cluster F,A,C,B,E,D (in
- * that order). G has B and E as direct parents, and E depends on C.
+ * Let's say you have a 7-transaction cluster, consisting of transactions F,A,C,B,_,G,E,_,D
+ * (where _ represents a hole; an unused position within the DepGraph) but serialized in order
+ * A,B,C,D,E,F,G, because that happens to be a topological ordering. By the time G gets serialized,
+ * what has been serialized already represents the cluster F,A,C,B,_,E,_,D (in that order). G has B
+ * and E as direct parents, and E depends on C.
*
* In this case, the possibilities are, in order:
* - [ ] the dependency G->F
@@ -71,17 +74,23 @@ bool IsAcyclic(const DepGraph<SetType>& depgraph) noexcept
* - [ ] the dependency G->A
* - [ ] put G at the end of the cluster
* - [ ] put G before D
+ * - [ ] put G before the hole before D
* - [X] put G before E
+ * - [ ] put G before the hole before E
* - [ ] put G before B
* - [ ] put G before C
* - [ ] put G before A
* - [ ] put G before F
+ * - [ ] add 1 hole at the end of the cluster, followed by G
+ * - [ ] add 2 holes at the end of the cluster, followed by G
+ * - [ ] add ...
*
- * The skip values in this case are 1 (G->F), 1 (G->D), 3 (G->A, G at end, G before D). No skip
- * after 3 is needed (or permitted), because there can only be one position for G. Also note that
- * G->C is not included in the list of possibilities, as it is implied by the included G->E and
- * E->C that came before it. On deserialization, if the last skip value was 8 or larger (putting
- * G before the beginning of the cluster), it is interpreted as wrapping around back to the end.
+ * The skip values in this case are 1 (G->F), 1 (G->D), 4 (G->A, G at end, G before D, G before
+ * hole). No skip after 4 is needed (or permitted), because there can only be one position for G.
+ * Also note that G->C is not included in the list of possibilities, as it is implied by the
+ * included G->E and E->C that came before it. On deserialization, if the last skip value was 8 or
+ * larger (putting G before the beginning of the cluster), it is interpreted as wrapping around
+ * back to the end.
*
*
* Rationale:
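The worked example boils down to a skip-count encoding: walk the ordered list of possibilities and, for each possibility actually taken, emit how many untaken ones were passed over since the previous taken one. A standalone sketch of just that counting step (SkipEncode is a hypothetical name; this is not the DepGraphFormatter serializer itself), reproducing the 1, 1, 4 values from the example:

```cpp
#include <cstdio>
#include <vector>

// For each taken possibility, record how many untaken possibilities were skipped since the
// previous taken one (or since the start of the list for the first).
std::vector<unsigned> SkipEncode(const std::vector<bool>& taken)
{
    std::vector<unsigned> skips;
    unsigned skipped = 0;
    for (bool t : taken) {
        if (t) {
            skips.push_back(skipped);
            skipped = 0;
        } else {
            ++skipped;
        }
    }
    return skips;
}

int main()
{
    // The example above: G->F, G->E [X], G->D, G->B [X], G->A, G at end, G before D,
    // G before the hole before D, G before E [X].
    const std::vector<bool> taken{false, true, false, true, false, false, false, false, true};
    for (unsigned s : SkipEncode(taken)) std::printf("%u ", s); // prints: 1 1 4
    std::printf("\n");
}
```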
@@ -102,7 +111,7 @@ bool IsAcyclic(const DepGraph<SetType>& depgraph) noexcept
struct DepGraphFormatter
{
/** Convert x>=0 to 2x (even), x<0 to -2x-1 (odd). */
- static uint64_t SignedToUnsigned(int64_t x) noexcept
+ [[maybe_unused]] static uint64_t SignedToUnsigned(int64_t x) noexcept
{
if (x < 0) {
return 2 * uint64_t(-(x + 1)) + 1;
@@ -112,7 +121,7 @@ struct DepGraphFormatter
}
/** Convert even x to x/2 (>=0), odd x to -(x/2)-1 (<0). */
- static int64_t UnsignedToSigned(uint64_t x) noexcept
+ [[maybe_unused]] static int64_t UnsignedToSigned(uint64_t x) noexcept
{
if (x & 1) {
return -int64_t(x / 2) - 1;
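The two helpers above are the usual zigzag mapping between signed and unsigned integers, so that small magnitudes get short varint encodings. A standalone sketch of the full mapping and its roundtrip; the positive-value branches, which the hunks above cut off, are reconstructed from the documented rule (x>=0 maps to 2x, even x maps back to x/2):

```cpp
#include <cassert>
#include <cstdint>

uint64_t SignedToUnsigned(int64_t x) noexcept
{
    // x >= 0 maps to 2x (even); x < 0 maps to -2x-1 (odd).
    return x < 0 ? 2 * uint64_t(-(x + 1)) + 1 : 2 * uint64_t(x);
}

int64_t UnsignedToSigned(uint64_t x) noexcept
{
    // Even x maps back to x/2 (>= 0); odd x maps back to -(x/2)-1 (< 0).
    return (x & 1) ? -int64_t(x / 2) - 1 : int64_t(x / 2);
}

int main()
{
    assert(SignedToUnsigned(0) == 0 && SignedToUnsigned(-1) == 1 && SignedToUnsigned(1) == 2);
    for (int64_t x = -1000; x <= 1000; ++x) {
        assert(UnsignedToSigned(SignedToUnsigned(x)) == x); // roundtrip holds
    }
}
```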
@@ -125,18 +134,18 @@ struct DepGraphFormatter
static void Ser(Stream& s, const DepGraph<SetType>& depgraph)
{
/** Construct a topological order to serialize the transactions in. */
- std::vector<ClusterIndex> topo_order(depgraph.TxCount());
- std::iota(topo_order.begin(), topo_order.end(), ClusterIndex{0});
+ std::vector<ClusterIndex> topo_order;
+ topo_order.reserve(depgraph.TxCount());
+ for (auto i : depgraph.Positions()) topo_order.push_back(i);
std::sort(topo_order.begin(), topo_order.end(), [&](ClusterIndex a, ClusterIndex b) {
auto anc_a = depgraph.Ancestors(a).Count(), anc_b = depgraph.Ancestors(b).Count();
if (anc_a != anc_b) return anc_a < anc_b;
return a < b;
});
- /** Which transactions the deserializer already knows when it has deserialized what has
- * been serialized here so far, and in what order. */
- std::vector<ClusterIndex> rebuilt_order;
- rebuilt_order.reserve(depgraph.TxCount());
+ /** Which positions (incl. holes) the deserializer already knows when it has deserialized
+ * what has been serialized here so far. */
+ SetType done;
// Loop over the transactions in topological order.
for (ClusterIndex topo_idx = 0; topo_idx < topo_order.size(); ++topo_idx) {
@@ -166,14 +175,20 @@ struct DepGraphFormatter
}
}
// Write position information.
- ClusterIndex insert_distance = 0;
- while (insert_distance < rebuilt_order.size()) {
- // Loop to find how far from the end in rebuilt_order to insert.
- if (idx > *(rebuilt_order.end() - 1 - insert_distance)) break;
- ++insert_distance;
+ auto add_holes = SetType::Fill(idx) - done - depgraph.Positions();
+ if (add_holes.None()) {
+ // The new transaction is to be inserted N positions back from the end of the
+ // cluster. Emit N to indicate that that many insertion choices are skipped.
+ auto skips = (done - SetType::Fill(idx)).Count();
+ s << VARINT(diff + skips);
+ } else {
+ // The new transaction is to be appended at the end of the cluster, after N holes.
+ // Emit current_cluster_size + N, to indicate all insertion choices are skipped,
+ // plus N possibilities for the number of holes.
+ s << VARINT(diff + done.Count() + add_holes.Count());
+ done |= add_holes;
}
- rebuilt_order.insert(rebuilt_order.end() - insert_distance, idx);
- s << VARINT(diff + insert_distance);
+ done.Set(idx);
}
// Output a final 0 to denote the end of the graph.
@@ -186,13 +201,19 @@ struct DepGraphFormatter
/** The dependency graph which we deserialize into first, with transactions in
* topological serialization order, not original cluster order. */
DepGraph<SetType> topo_depgraph;
- /** Mapping from cluster order to serialization order, used later to reconstruct the
+ /** Mapping from serialization order to cluster order, used later to reconstruct the
* cluster order. */
std::vector<ClusterIndex> reordering;
+ /** How big the entries vector in the reconstructed depgraph will be (including holes). */
+ ClusterIndex total_size{0};
// Read transactions in topological order.
- try {
- while (true) {
+ while (true) {
+ FeeFrac new_feerate; //!< The new transaction's fee and size.
+ SetType new_ancestors; //!< The new transaction's ancestors (excluding itself).
+ uint64_t diff{0}; //!< How many potential parents/insertions we have to skip.
+ bool read_error{false};
+ try {
// Read size. Size 0 signifies the end of the DepGraph.
int32_t size;
s >> VARINT_MODE(size, VarIntMode::NONNEGATIVE_SIGNED);
@@ -204,21 +225,18 @@ struct DepGraphFormatter
s >> VARINT(coded_fee);
coded_fee &= 0xFFFFFFFFFFFFF; // Enough for fee between -21M...21M BTC.
static_assert(0xFFFFFFFFFFFFF > uint64_t{2} * 21000000 * 100000000);
- auto fee = UnsignedToSigned(coded_fee);
- // Extend topo_depgraph with the new transaction (at the end).
- auto topo_idx = topo_depgraph.AddTransaction({fee, size});
- reordering.push_back(topo_idx);
+ new_feerate = {UnsignedToSigned(coded_fee), size};
// Read dependency information.
- uint64_t diff = 0; //!< How many potential parents we have to skip.
+ auto topo_idx = reordering.size();
s >> VARINT(diff);
for (ClusterIndex dep_dist = 0; dep_dist < topo_idx; ++dep_dist) {
/** Which topo_depgraph index we are currently considering as parent of topo_idx. */
ClusterIndex dep_topo_idx = topo_idx - 1 - dep_dist;
// Ignore transactions which are already known ancestors of topo_idx.
- if (topo_depgraph.Descendants(dep_topo_idx)[topo_idx]) continue;
+ if (new_ancestors[dep_topo_idx]) continue;
if (diff == 0) {
// When the skip counter has reached 0, add an actual dependency.
- topo_depgraph.AddDependency(dep_topo_idx, topo_idx);
+ new_ancestors |= topo_depgraph.Ancestors(dep_topo_idx);
// And read the number of skips after it.
s >> VARINT(diff);
} else {
@@ -226,31 +244,52 @@ struct DepGraphFormatter
--diff;
}
}
- // If we reach this point, we can interpret the remaining skip value as how far from the
- // end of reordering topo_idx should be placed (wrapping around), so move it to its
- // correct location. The preliminary reordering.push_back(topo_idx) above was to make
- // sure that if a deserialization exception occurs, topo_idx still appears somewhere.
- reordering.pop_back();
- reordering.insert(reordering.end() - (diff % (reordering.size() + 1)), topo_idx);
+ } catch (const std::ios_base::failure&) {
+ // Continue even if a read error was encountered.
+ read_error = true;
}
- } catch (const std::ios_base::failure&) {}
-
- // Construct the original cluster order depgraph.
- depgraph = {};
- // Add transactions to depgraph in the original cluster order.
- for (auto topo_idx : reordering) {
- depgraph.AddTransaction(topo_depgraph.FeeRate(topo_idx));
- }
- // Translate dependencies from topological to cluster order.
- for (ClusterIndex idx = 0; idx < reordering.size(); ++idx) {
- ClusterIndex topo_idx = reordering[idx];
- for (ClusterIndex dep_idx = 0; dep_idx < reordering.size(); ++dep_idx) {
- ClusterIndex dep_topo_idx = reordering[dep_idx];
- if (topo_depgraph.Ancestors(topo_idx)[dep_topo_idx]) {
- depgraph.AddDependency(dep_idx, idx);
+ // Construct a new transaction whenever we made it past the new_feerate construction.
+ if (new_feerate.IsEmpty()) break;
+ assert(reordering.size() < SetType::Size());
+ auto topo_idx = topo_depgraph.AddTransaction(new_feerate);
+ topo_depgraph.AddDependencies(new_ancestors, topo_idx);
+ if (total_size < SetType::Size()) {
+ // Normal case.
+ diff %= SetType::Size();
+ if (diff <= total_size) {
+ // Insert the new transaction at distance diff back from the end.
+ for (auto& pos : reordering) {
+ pos += (pos >= total_size - diff);
+ }
+ reordering.push_back(total_size++ - diff);
+ } else {
+ // Append diff - total_size holes at the end, plus the new transaction.
+ total_size = diff;
+ reordering.push_back(total_size++);
+ }
+ } else {
+ // In case total_size == SetType::Size, it is not possible to insert the new
+ // transaction without exceeding SetType's size. Instead, interpret diff as an
+ // index into the holes, and overwrite a position there. This branch is never used
+ // when deserializing the output of the serializer, but gives meaning to otherwise
+ // invalid input.
+ diff %= (SetType::Size() - reordering.size());
+ SetType holes = SetType::Fill(SetType::Size());
+ for (auto pos : reordering) holes.Reset(pos);
+ for (auto pos : holes) {
+ if (diff == 0) {
+ reordering.push_back(pos);
+ break;
+ }
+ --diff;
}
}
+ // Stop if a read error was encountered during deserialization.
+ if (read_error) break;
}
+
+ // Construct the original cluster order depgraph.
+ depgraph = DepGraph(topo_depgraph, reordering, total_size);
}
};
@@ -258,8 +297,19 @@ struct DepGraphFormatter
template<typename SetType>
void SanityCheck(const DepGraph<SetType>& depgraph)
{
+ // Verify Positions and PositionRange consistency.
+ ClusterIndex num_positions{0};
+ ClusterIndex position_range{0};
+ for (ClusterIndex i : depgraph.Positions()) {
+ ++num_positions;
+ position_range = i + 1;
+ }
+ assert(num_positions == depgraph.TxCount());
+ assert(position_range == depgraph.PositionRange());
+ assert(position_range >= num_positions);
+ assert(position_range <= SetType::Size());
// Consistency check between ancestors internally.
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
+ for (ClusterIndex i : depgraph.Positions()) {
// Transactions include themselves as ancestors.
assert(depgraph.Ancestors(i)[i]);
// If a is an ancestor of b, then b's ancestors must include all of a's ancestors.
@@ -268,13 +318,27 @@ void SanityCheck(const DepGraph<SetType>& depgraph)
}
}
// Consistency check between ancestors and descendants.
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
- for (ClusterIndex j = 0; j < depgraph.TxCount(); ++j) {
+ for (ClusterIndex i : depgraph.Positions()) {
+ for (ClusterIndex j : depgraph.Positions()) {
assert(depgraph.Ancestors(i)[j] == depgraph.Descendants(j)[i]);
}
+ // No transaction is a parent or child of itself.
+ auto parents = depgraph.GetReducedParents(i);
+ auto children = depgraph.GetReducedChildren(i);
+ assert(!parents[i]);
+ assert(!children[i]);
+ // Parents of a transaction do not have ancestors inside those parents (except itself).
+ // Note that even the transaction itself may be missing (if it is part of a cycle).
+ for (auto parent : parents) {
+ assert((depgraph.Ancestors(parent) & parents).IsSubsetOf(SetType::Singleton(parent)));
+ }
+ // Similar for children and descendants.
+ for (auto child : children) {
+ assert((depgraph.Descendants(child) & children).IsSubsetOf(SetType::Singleton(child)));
+ }
}
- // If DepGraph is acyclic, serialize + deserialize must roundtrip.
if (IsAcyclic(depgraph)) {
+ // If DepGraph is acyclic, serialize + deserialize must roundtrip.
std::vector<unsigned char> ser;
VectorWriter writer(ser, 0);
writer << Using<DepGraphFormatter>(depgraph);
@@ -292,42 +356,36 @@ void SanityCheck(const DepGraph<SetType>& depgraph)
reader >> Using<DepGraphFormatter>(decoded_depgraph);
assert(depgraph == decoded_depgraph);
assert(reader.empty());
- }
-}
-/** Verify that a DepGraph corresponds to the information in a cluster. */
-template<typename SetType>
-void VerifyDepGraphFromCluster(const Cluster<SetType>& cluster, const DepGraph<SetType>& depgraph)
-{
- // Sanity check the depgraph, which includes a check for correspondence between ancestors and
- // descendants, so it suffices to check just ancestors below.
- SanityCheck(depgraph);
- // Verify transaction count.
- assert(cluster.size() == depgraph.TxCount());
- // Verify feerates.
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
- assert(depgraph.FeeRate(i) == cluster[i].first);
- }
- // Verify ancestors.
- for (ClusterIndex i = 0; i < depgraph.TxCount(); ++i) {
- // Start with the transaction having itself as ancestor.
- auto ancestors = SetType::Singleton(i);
- // Add parents of ancestors to the set of ancestors until it stops changing.
- while (true) {
- const auto old_ancestors = ancestors;
- for (auto ancestor : ancestors) {
- ancestors |= cluster[ancestor].second;
- }
- if (old_ancestors == ancestors) break;
+ // In acyclic graphs, the union of parents with parents of parents etc. yields the
+ // full ancestor set (and similar for children and descendants).
+ std::vector<SetType> parents(depgraph.PositionRange()), children(depgraph.PositionRange());
+ for (ClusterIndex i : depgraph.Positions()) {
+ parents[i] = depgraph.GetReducedParents(i);
+ children[i] = depgraph.GetReducedChildren(i);
}
- // Compare against depgraph.
- assert(depgraph.Ancestors(i) == ancestors);
- // Some additional sanity tests:
- // - Every transaction has itself as ancestor.
- assert(ancestors[i]);
- // - Every transaction has its direct parents as ancestors.
- for (auto parent : cluster[i].second) {
- assert(ancestors[parent]);
+ for (auto i : depgraph.Positions()) {
+ // Initialize the set of ancestors with just the current transaction itself.
+ SetType ancestors = SetType::Singleton(i);
+ // Iteratively add parents of all transactions in the ancestor set to itself.
+ while (true) {
+ const auto old_ancestors = ancestors;
+ for (auto j : ancestors) ancestors |= parents[j];
+ // Stop when no more changes are being made.
+ if (old_ancestors == ancestors) break;
+ }
+ assert(ancestors == depgraph.Ancestors(i));
+
+ // Initialize the set of descendants with just the current transaction itself.
+ SetType descendants = SetType::Singleton(i);
+ // Iteratively add children of all transactions in the descendant set to itself.
+ while (true) {
+ const auto old_descendants = descendants;
+ for (auto j : descendants) descendants |= children[j];
+ // Stop when no more changes are being made.
+ if (old_descendants == descendants) break;
+ }
+ assert(descendants == depgraph.Descendants(i));
}
}
}
@@ -341,7 +399,7 @@ void SanityCheck(const DepGraph<SetType>& depgraph, Span<const ClusterIndex> lin
TestBitSet done;
for (auto i : linearization) {
// Check transaction position is in range.
- assert(i < depgraph.TxCount());
+ assert(depgraph.Positions()[i]);
// Check topology and lack of duplicates.
assert((depgraph.Ancestors(i) - done) == TestBitSet::Singleton(i));
done.Set(i);
diff --git a/src/test/util/json.cpp b/src/test/util/json.cpp
index ad3c346c84..46a4a9f9a1 100644
--- a/src/test/util/json.cpp
+++ b/src/test/util/json.cpp
@@ -1,15 +1,15 @@
-// Copyright (c) 2023 The Bitcoin Core developers
+// Copyright (c) 2023-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <test/util/json.h>
-#include <string>
+#include <univalue.h>
#include <util/check.h>
-#include <univalue.h>
+#include <string_view>
-UniValue read_json(const std::string& jsondata)
+UniValue read_json(std::string_view jsondata)
{
UniValue v;
Assert(v.read(jsondata) && v.isArray());
diff --git a/src/test/util/json.h b/src/test/util/json.h
index 5b1026762e..f6f4e6ab71 100644
--- a/src/test/util/json.h
+++ b/src/test/util/json.h
@@ -1,14 +1,14 @@
-// Copyright (c) 2023 The Bitcoin Core developers
+// Copyright (c) 2023-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_TEST_UTIL_JSON_H
#define BITCOIN_TEST_UTIL_JSON_H
-#include <string>
-
#include <univalue.h>
-UniValue read_json(const std::string& jsondata);
+#include <string_view>
+
+UniValue read_json(std::string_view jsondata);
#endif // BITCOIN_TEST_UTIL_JSON_H
diff --git a/src/test/util/random.cpp b/src/test/util/random.cpp
index b568f275a5..32d785e45d 100644
--- a/src/test/util/random.cpp
+++ b/src/test/util/random.cpp
@@ -36,7 +36,7 @@ void SeedRandomStateForTest(SeedRand seedtype)
return GetRandHash();
}();
- const uint256& seed{seedtype == SeedRand::SEED ? ctx_seed : uint256::ZERO};
+ const uint256& seed{seedtype == SeedRand::FIXED_SEED ? ctx_seed : uint256::ZERO};
LogInfo("Setting random seed for current tests to %s=%s\n", RANDOM_CTX_SEED, seed.GetHex());
MakeRandDeterministicDANGEROUS(seed);
}
diff --git a/src/test/util/random.h b/src/test/util/random.h
index c458534d48..441150e666 100644
--- a/src/test/util/random.h
+++ b/src/test/util/random.h
@@ -12,8 +12,16 @@
#include <cstdint>
enum class SeedRand {
- ZEROS, //!< Seed with a compile time constant of zeros
- SEED, //!< Use (and report) random seed from environment, or a (truly) random one.
+ /**
+ * Seed with a compile time constant of zeros.
+ */
+ ZEROS,
+ /**
+ * Seed with a fixed value that never changes over the lifetime of this
+ * process. The seed is read from the RANDOM_CTX_SEED environment variable
+ * if set, otherwise generated randomly once, saved, and reused.
+ */
+ FIXED_SEED,
};
/** Seed the global RNG state for testing and log the seed value. This affects all randomness, except GetStrongRandBytes(). */
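A standalone sketch of the FIXED_SEED behaviour the new enum comment describes: use the RANDOM_CTX_SEED environment variable if it is set, otherwise draw one random value the first time and reuse it for the rest of the process. Only the variable name comes from the comment above; FixedSeed and everything else here are illustrative assumptions, not the test framework's code:

```cpp
#include <cstdint>
#include <cstdlib>
#include <random>
#include <string>

// Returns the same seed string for the whole lifetime of the process.
const std::string& FixedSeed()
{
    static const std::string seed = [] {
        if (const char* env = std::getenv("RANDOM_CTX_SEED")) return std::string{env};
        std::random_device rd;
        return std::to_string((uint64_t{rd()} << 32) | rd()); // drawn once, then cached
    }();
    return seed;
}
```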
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 11bc5e2e8b..7465846356 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -2,8 +2,6 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
-
#include <test/util/setup_common.h>
#include <addrman.h>
@@ -76,27 +74,10 @@ using node::VerifyLoadedChainstate;
const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr;
+constexpr inline auto TEST_DIR_PATH_ELEMENT{"test_common bitcoin"}; // Includes a space to catch possible path escape issues.
/** Random context to get unique temp data dirs. Separate from m_rng, which can be seeded from a const env var */
static FastRandomContext g_rng_temp_path;
-std::ostream& operator<<(std::ostream& os, const arith_uint256& num)
-{
- os << num.ToString();
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const uint160& num)
-{
- os << num.ToString();
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const uint256& num)
-{
- os << num.ToString();
- return os;
-}
-
struct NetworkSetup
{
NetworkSetup()
@@ -109,7 +90,7 @@ static NetworkSetup g_networksetup_instance;
/** Register test-only arguments */
static void SetupUnitTestArgs(ArgsManager& argsman)
{
- argsman.AddArg("-testdatadir", strprintf("Custom data directory (default: %s<random_string>)", fs::PathToString(fs::temp_directory_path() / "test_common_" PACKAGE_NAME / "")),
+ argsman.AddArg("-testdatadir", strprintf("Custom data directory (default: %s<random_string>)", fs::PathToString(fs::temp_directory_path() / TEST_DIR_PATH_ELEMENT / "")),
ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
}
@@ -123,7 +104,8 @@ static void ExitFailure(std::string_view str_err)
BasicTestingSetup::BasicTestingSetup(const ChainType chainType, TestOpts opts)
: m_args{}
{
- m_node.shutdown = &m_interrupt;
+ m_node.shutdown_signal = &m_interrupt;
+ m_node.shutdown_request = [this]{ return m_interrupt(); };
m_node.args = &gArgs;
std::vector<const char*> arguments = Cat(
{
@@ -155,12 +137,12 @@ BasicTestingSetup::BasicTestingSetup(const ChainType chainType, TestOpts opts)
// Use randomly chosen seed for deterministic PRNG, so that (by default) test
// data directories use a random name that doesn't overlap with other tests.
- SeedRandomForTest(SeedRand::SEED);
+ SeedRandomForTest(SeedRand::FIXED_SEED);
if (!m_node.args->IsArgSet("-testdatadir")) {
// By default, the data directory has a random name
const auto rand_str{g_rng_temp_path.rand256().ToString()};
- m_path_root = fs::temp_directory_path() / "test_common_" PACKAGE_NAME / rand_str;
+ m_path_root = fs::temp_directory_path() / TEST_DIR_PATH_ELEMENT / rand_str;
TryCreateDirectories(m_path_root);
} else {
// Custom data directory
@@ -170,7 +152,7 @@ BasicTestingSetup::BasicTestingSetup(const ChainType chainType, TestOpts opts)
root_dir = fs::absolute(root_dir);
const std::string test_path{G_TEST_GET_FULL_NAME ? G_TEST_GET_FULL_NAME() : ""};
- m_path_lock = root_dir / "test_common_" PACKAGE_NAME / fs::PathFromString(test_path);
+ m_path_lock = root_dir / TEST_DIR_PATH_ELEMENT / fs::PathFromString(test_path);
m_path_root = m_path_lock / "datadir";
// Try to obtain the lock; if unsuccessful don't disturb the existing test.
@@ -243,7 +225,7 @@ ChainTestingSetup::ChainTestingSetup(const ChainType chainType, TestOpts opts)
m_cache_sizes = CalculateCacheSizes(m_args);
- m_node.notifications = std::make_unique<KernelNotifications>(*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings));
+ m_node.notifications = std::make_unique<KernelNotifications>(Assert(m_node.shutdown_request), m_node.exit_status, *Assert(m_node.warnings));
m_make_chainman = [this, &chainparams, opts] {
Assert(!m_node.chainman);
@@ -264,7 +246,7 @@ ChainTestingSetup::ChainTestingSetup(const ChainType chainType, TestOpts opts)
.blocks_dir = m_args.GetBlocksDirPath(),
.notifications = chainman_opts.notifications,
};
- m_node.chainman = std::make_unique<ChainstateManager>(*Assert(m_node.shutdown), chainman_opts, blockman_opts);
+ m_node.chainman = std::make_unique<ChainstateManager>(*Assert(m_node.shutdown_signal), chainman_opts, blockman_opts);
LOCK(m_node.chainman->GetMutex());
m_node.chainman->m_blockman.m_block_tree_db = std::make_unique<BlockTreeDB>(DBParams{
.path = m_args.GetDataDirNet() / "blocks" / "index",
@@ -607,3 +589,18 @@ CBlock getBlock13b8a()
stream >> TX_WITH_WITNESS(block);
return block;
}
+
+std::ostream& operator<<(std::ostream& os, const arith_uint256& num)
+{
+ return os << num.ToString();
+}
+
+std::ostream& operator<<(std::ostream& os, const uint160& num)
+{
+ return os << num.ToString();
+}
+
+std::ostream& operator<<(std::ostream& os, const uint256& num)
+{
+ return os << num.ToString();
+}
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index d995549ca6..f9cf5d9157 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -10,6 +10,8 @@
#include <key.h>
#include <node/caches.h>
#include <node/context.h> // IWYU pragma: export
+#include <optional>
+#include <ostream>
#include <primitives/transaction.h>
#include <pubkey.h>
#include <stdexcept>
@@ -29,6 +31,8 @@ class arith_uint256;
class CFeeRate;
class Chainstate;
class FastRandomContext;
+class uint160;
+class uint256;
/** This is connected to the logger. Can be used to redirect logs to any other log */
extern const std::function<void(const std::string&)> G_TEST_LOG_FUN;
@@ -39,15 +43,6 @@ extern const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUM
/** Retrieve the unit test name. */
extern const std::function<std::string()> G_TEST_GET_FULL_NAME;
-// Enable BOOST_CHECK_EQUAL for enum class types
-namespace std {
-template <typename T>
-std::ostream& operator<<(typename std::enable_if<std::is_enum<T>::value, std::ostream>::type& stream, const T& e)
-{
- return stream << static_cast<typename std::underlying_type<T>::type>(e);
-}
-} // namespace std
-
static constexpr CAmount CENT{1000000};
struct TestOpts {
@@ -68,7 +63,7 @@ struct BasicTestingSetup {
FastRandomContext m_rng;
/** Seed the global RNG state and m_rng for testing and log the seed value. This affects all randomness, except GetStrongRandBytes(). */
- void SeedRandomForTest(SeedRand seed = SeedRand::SEED)
+ void SeedRandomForTest(SeedRand seed)
{
SeedRandomStateForTest(seed);
m_rng.Reseed(GetRandHash());
@@ -80,6 +75,23 @@ struct BasicTestingSetup {
fs::path m_path_root;
fs::path m_path_lock;
bool m_has_custom_datadir{false};
+ /** @brief Test-specific arguments and settings.
+ *
+ * This member is intended to be the primary source of settings for code
+ * being tested by unit tests. It exists to make tests more self-contained
+ * and reduce reliance on global state.
+ *
+ * Usage guidelines:
+ * 1. Prefer using m_args where possible in test code.
+ * 2. If m_args is not accessible, use m_node.args as a fallback.
+ * 3. Avoid direct references to gArgs in test code.
+ *
+ * Note: Currently, m_node.args points to gArgs for backwards
+ * compatibility. In the future, it will point to m_args to further isolate
+ * test environments.
+ *
+ * @see https://github.com/bitcoin/bitcoin/issues/25055 for additional context.
+ */
ArgsManager m_args;
};
@@ -250,10 +262,26 @@ std::unique_ptr<T> MakeNoLogFileContext(const ChainType chain_type = ChainType::
CBlock getBlock13b8a();
-// Make types usable in BOOST_CHECK_*
+// Make types usable in BOOST_CHECK_* @{
+namespace std {
+template <typename T> requires std::is_enum_v<T>
+inline std::ostream& operator<<(std::ostream& os, const T& e)
+{
+ return os << static_cast<std::underlying_type_t<T>>(e);
+}
+
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, const std::optional<T>& v)
+{
+ return v ? os << *v
+ : os << "std::nullopt";
+}
+} // namespace std
+
std::ostream& operator<<(std::ostream& os, const arith_uint256& num);
std::ostream& operator<<(std::ostream& os, const uint160& num);
std::ostream& operator<<(std::ostream& os, const uint256& num);
+// @}
/**
* BOOST_CHECK_EXCEPTION predicates to check the specific validation error.
@@ -263,11 +291,9 @@ std::ostream& operator<<(std::ostream& os, const uint256& num);
class HasReason
{
public:
- explicit HasReason(const std::string& reason) : m_reason(reason) {}
- bool operator()(const std::exception& e) const
- {
- return std::string(e.what()).find(m_reason) != std::string::npos;
- };
+ explicit HasReason(std::string_view reason) : m_reason(reason) {}
+ bool operator()(std::string_view s) const { return s.find(m_reason) != std::string_view::npos; }
+ bool operator()(const std::exception& e) const { return (*this)(e.what()); }
private:
const std::string m_reason;
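The stream operators declared above exist so BOOST_CHECK_EQUAL can print enum class values and std::optional operands when a check fails. A minimal standalone illustration of the same pattern (Fruit is a made-up type, and the operators live in the global namespace here purely for the sketch):

```cpp
#include <iostream>
#include <optional>
#include <type_traits>

enum class Fruit { Apple = 1, Pear = 2 };

// Print any enum class as its underlying integer value.
template <typename T> requires std::is_enum_v<T>
std::ostream& operator<<(std::ostream& os, const T& e)
{
    return os << static_cast<std::underlying_type_t<T>>(e);
}

// Print an optional as its value, or "std::nullopt" when empty.
template <typename T>
std::ostream& operator<<(std::ostream& os, const std::optional<T>& v)
{
    return v ? os << *v : os << "std::nullopt";
}

int main()
{
    std::cout << Fruit::Pear << '\n';          // prints 2
    std::cout << std::optional<int>{} << '\n'; // prints std::nullopt
}
```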
diff --git a/src/test/util/transaction_utils.cpp b/src/test/util/transaction_utils.cpp
index 300caa577c..a588e61944 100644
--- a/src/test/util/transaction_utils.cpp
+++ b/src/test/util/transaction_utils.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <coins.h>
+#include <consensus/validation.h>
#include <script/signingprovider.h>
#include <test/util/transaction_utils.h>
@@ -69,3 +70,44 @@ std::vector<CMutableTransaction> SetupDummyInputs(FillableSigningProvider& keyst
return dummyTransactions;
}
+
+void BulkTransaction(CMutableTransaction& tx, int32_t target_weight)
+{
+ tx.vout.emplace_back(0, CScript() << OP_RETURN);
+ auto unpadded_weight{GetTransactionWeight(CTransaction(tx))};
+ assert(target_weight >= unpadded_weight);
+
+ // determine number of needed padding bytes by converting weight difference to vbytes
+ auto dummy_vbytes = (target_weight - unpadded_weight + (WITNESS_SCALE_FACTOR - 1)) / WITNESS_SCALE_FACTOR;
+ // compensate for the increase of the compact-size encoded script length
+ // (note that the length encoding of the unpadded output script needs one byte)
+ dummy_vbytes -= GetSizeOfCompactSize(dummy_vbytes) - 1;
+
+ // pad transaction by repeatedly appending a dummy opcode to the output script
+ tx.vout[0].scriptPubKey.insert(tx.vout[0].scriptPubKey.end(), dummy_vbytes, OP_1);
+
+ // actual weight should be at most 3 higher than target weight
+ assert(GetTransactionWeight(CTransaction(tx)) >= target_weight);
+ assert(GetTransactionWeight(CTransaction(tx)) <= target_weight + 3);
+}
+
+bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data)
+{
+ assert(nIn < txTo.vin.size());
+
+ MutableTransactionSignatureCreator creator(txTo, nIn, amount, nHashType);
+
+ bool ret = ProduceSignature(provider, creator, fromPubKey, sig_data);
+ UpdateInput(txTo.vin.at(nIn), sig_data);
+ return ret;
+}
+
+bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType, SignatureData& sig_data)
+{
+ assert(nIn < txTo.vin.size());
+ const CTxIn& txin = txTo.vin[nIn];
+ assert(txin.prevout.n < txFrom.vout.size());
+ const CTxOut& txout = txFrom.vout[txin.prevout.n];
+
+ return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType, sig_data);
+}
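The padding arithmetic in BulkTransaction above converts the missing weight into vbytes (rounding up by WITNESS_SCALE_FACTOR, which is 4) and then subtracts the extra bytes the compact-size length prefix of the enlarged script will consume. A standalone sketch of just that calculation; SizeOfCompactSize restates the standard Bitcoin compact-size lengths, and PaddingVbytes is a hypothetical name for illustration:

```cpp
#include <cstdint>
#include <cstdio>

// Standard compact-size encoding lengths: 1, 3, 5 or 9 bytes depending on the value.
uint64_t SizeOfCompactSize(uint64_t n)
{
    if (n < 253) return 1;
    if (n <= 0xFFFFu) return 3;
    if (n <= 0xFFFFFFFFu) return 5;
    return 9;
}

int64_t PaddingVbytes(int64_t target_weight, int64_t unpadded_weight, int witness_scale_factor = 4)
{
    // Round the weight shortfall up to whole vbytes.
    int64_t dummy = (target_weight - unpadded_weight + (witness_scale_factor - 1)) / witness_scale_factor;
    // The unpadded script's length prefix already used one byte; subtract only the growth.
    dummy -= int64_t(SizeOfCompactSize(uint64_t(dummy))) - 1;
    return dummy;
}

int main()
{
    // (4000 - 400 + 3) / 4 = 900 vbytes, minus 2 extra length-prefix bytes -> 898.
    std::printf("%lld\n", (long long)PaddingVbytes(/*target_weight=*/4000, /*unpadded_weight=*/400));
}
```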
diff --git a/src/test/util/transaction_utils.h b/src/test/util/transaction_utils.h
index 6f2faeec6c..4a18ab6ab4 100644
--- a/src/test/util/transaction_utils.h
+++ b/src/test/util/transaction_utils.h
@@ -6,6 +6,7 @@
#define BITCOIN_TEST_UTIL_TRANSACTION_UTILS_H
#include <primitives/transaction.h>
+#include <script/sign.h>
#include <array>
@@ -26,4 +27,27 @@ CMutableTransaction BuildSpendingTransaction(const CScript& scriptSig, const CSc
// the second nValues[2] and nValues[3] outputs paid to a TxoutType::PUBKEYHASH.
std::vector<CMutableTransaction> SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet, const std::array<CAmount,4>& nValues);
+// bulk transaction to reach a certain target weight,
+// by appending a single output with padded output script
+void BulkTransaction(CMutableTransaction& tx, int32_t target_weight);
+
+/**
+ * Produce a satisfying script (scriptSig or witness).
+ *
+ * @param provider Utility containing the information necessary to solve a script.
+ * @param fromPubKey The script to produce a satisfaction for.
+ * @param txTo The spending transaction.
+ * @param nIn The index of the input in `txTo` referring to the output being spent.
+ * @param amount The value of the output being spent.
+ * @param nHashType Signature hash type.
+ * @param sig_data Additional data provided to solve a script. Filled with the resulting satisfying
+ * script and whether the satisfaction is complete.
+ *
+ * @return True if the produced script entirely satisfies `fromPubKey`.
+ **/
+bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo,
+ unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data);
+bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo,
+ unsigned int nIn, int nHashType, SignatureData& sig_data);
+
#endif // BITCOIN_TEST_UTIL_TRANSACTION_UTILS_H
diff --git a/src/test/util_string_tests.cpp b/src/test/util_string_tests.cpp
new file mode 100644
index 0000000000..1574fe2358
--- /dev/null
+++ b/src/test/util_string_tests.cpp
@@ -0,0 +1,85 @@
+// Copyright (c) 2024-present The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <util/string.h>
+
+#include <boost/test/unit_test.hpp>
+#include <test/util/setup_common.h>
+
+using namespace util;
+
+BOOST_AUTO_TEST_SUITE(util_string_tests)
+
+// Helper to allow compile-time sanity checks while providing the number of
+// args directly. Normally PassFmt<sizeof...(Args)> would be used.
+template <unsigned NumArgs>
+inline void PassFmt(util::ConstevalFormatString<NumArgs> fmt)
+{
+ // This was already executed at compile-time, but is executed again at run-time to avoid -Wunused.
+ decltype(fmt)::Detail_CheckNumFormatSpecifiers(fmt.fmt);
+}
+template <unsigned WrongNumArgs>
+inline void FailFmtWithError(std::string_view wrong_fmt, std::string_view error)
+{
+ BOOST_CHECK_EXCEPTION(util::ConstevalFormatString<WrongNumArgs>::Detail_CheckNumFormatSpecifiers(wrong_fmt), const char*, HasReason(error));
+}
+
+BOOST_AUTO_TEST_CASE(ConstevalFormatString_NumSpec)
+{
+ PassFmt<0>("");
+ PassFmt<0>("%%");
+ PassFmt<1>("%s");
+ PassFmt<0>("%%s");
+ PassFmt<0>("s%%");
+ PassFmt<1>("%%%s");
+ PassFmt<1>("%s%%");
+ PassFmt<0>(" 1$s");
+ PassFmt<1>("%1$s");
+ PassFmt<1>("%1$s%1$s");
+ PassFmt<2>("%2$s");
+ PassFmt<2>("%2$s 4$s %2$s");
+ PassFmt<129>("%129$s 999$s %2$s");
+ PassFmt<1>("%02d");
+ PassFmt<1>("%+2s");
+ PassFmt<1>("%.6i");
+ PassFmt<1>("%5.2f");
+ PassFmt<1>("%#x");
+ PassFmt<1>("%1$5i");
+ PassFmt<1>("%1$-5i");
+ PassFmt<1>("%1$.5i");
+ // tinyformat accepts almost any "type" spec, even '%', or '_', or '\n'.
+ PassFmt<1>("%123%");
+ PassFmt<1>("%123%s");
+ PassFmt<1>("%_");
+ PassFmt<1>("%\n");
+
+ // The `*` specifier behavior is unsupported and can lead to runtime
+ // errors when used in a ConstevalFormatString. Please refer to the
+ // note in the ConstevalFormatString docs.
+ PassFmt<1>("%*c");
+ PassFmt<2>("%2$*3$d");
+ PassFmt<1>("%.*f");
+
+ auto err_mix{"Format specifiers must be all positional or all non-positional!"};
+ FailFmtWithError<1>("%s%1$s", err_mix);
+
+ auto err_num{"Format specifier count must match the argument count!"};
+ FailFmtWithError<1>("", err_num);
+ FailFmtWithError<0>("%s", err_num);
+ FailFmtWithError<2>("%s", err_num);
+ FailFmtWithError<0>("%1$s", err_num);
+ FailFmtWithError<2>("%1$s", err_num);
+
+ auto err_0_pos{"Positional format specifier must have position of at least 1"};
+ FailFmtWithError<1>("%$s", err_0_pos);
+ FailFmtWithError<1>("%$", err_0_pos);
+ FailFmtWithError<0>("%0$", err_0_pos);
+ FailFmtWithError<0>("%0$s", err_0_pos);
+
+ auto err_term{"Format specifier incorrectly terminated by end of string"};
+ FailFmtWithError<1>("%", err_term);
+ FailFmtWithError<1>("%1$", err_term);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index 015a5941ed..f5c4204c55 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -101,7 +101,7 @@ std::shared_ptr<CBlock> MinerTestingSetup::FinalizeBlock(std::shared_ptr<CBlock>
// submit block header, so that miner can get the block height from the
// global state and the node has the topology of the chain
BlockValidationState ignored;
- BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders({pblock->GetBlockHeader()}, true, ignored));
+ BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders({{pblock->GetBlockHeader()}}, true, ignored));
return pblock;
}
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
index 30c5982b17..c9cca8af04 100644
--- a/src/test/validation_chainstate_tests.cpp
+++ b/src/test/validation_chainstate_tests.cpp
@@ -4,6 +4,7 @@
//
#include <chainparams.h>
#include <consensus/validation.h>
+#include <node/kernel_notifications.h>
#include <random.h>
#include <rpc/blockchain.h>
#include <sync.h>
@@ -69,14 +70,18 @@ BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
{
ChainstateManager& chainman = *Assert(m_node.chainman);
- uint256 curr_tip = ::g_best_block;
+ const auto get_notify_tip{[&]() {
+ LOCK(m_node.notifications->m_tip_block_mutex);
+ return m_node.notifications->m_tip_block;
+ }};
+ uint256 curr_tip = get_notify_tip();
// Mine 10 more blocks, putting at us height 110 where a valid assumeutxo value can
// be found.
mineBlocks(10);
// After adding some blocks to the tip, best block should have changed.
- BOOST_CHECK(::g_best_block != curr_tip);
+ BOOST_CHECK(get_notify_tip() != curr_tip);
// Grab block 1 from disk; we'll add it to the background chain later.
std::shared_ptr<CBlock> pblockone = std::make_shared<CBlock>();
@@ -91,15 +96,15 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
// Ensure our active chain is the snapshot chainstate.
BOOST_CHECK(WITH_LOCK(::cs_main, return chainman.IsSnapshotActive()));
- curr_tip = ::g_best_block;
+ curr_tip = get_notify_tip();
// Mine a new block on top of the activated snapshot chainstate.
mineBlocks(1); // Defined in TestChain100Setup.
// After adding some blocks to the snapshot tip, best block should have changed.
- BOOST_CHECK(::g_best_block != curr_tip);
+ BOOST_CHECK(get_notify_tip() != curr_tip);
- curr_tip = ::g_best_block;
+ curr_tip = get_notify_tip();
BOOST_CHECK_EQUAL(chainman.GetAll().size(), 2);
@@ -135,10 +140,10 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
// Ensure tip is as expected
BOOST_CHECK_EQUAL(background_cs.m_chain.Tip()->GetBlockHash(), pblockone->GetHash());
- // g_best_block should be unchanged after adding a block to the background
+ // get_notify_tip() should be unchanged after adding a block to the background
// validation chain.
BOOST_CHECK(block_added);
- BOOST_CHECK_EQUAL(curr_tip, ::g_best_block);
+ BOOST_CHECK_EQUAL(curr_tip, get_notify_tip());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 68563f9c7d..6c2a825e64 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -155,10 +155,10 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup)
manager.MaybeRebalanceCaches();
}
- BOOST_CHECK_CLOSE(c1.m_coinstip_cache_size_bytes, max_cache * 0.05, 1);
- BOOST_CHECK_CLOSE(c1.m_coinsdb_cache_size_bytes, max_cache * 0.05, 1);
- BOOST_CHECK_CLOSE(c2.m_coinstip_cache_size_bytes, max_cache * 0.95, 1);
- BOOST_CHECK_CLOSE(c2.m_coinsdb_cache_size_bytes, max_cache * 0.95, 1);
+ BOOST_CHECK_CLOSE(double(c1.m_coinstip_cache_size_bytes), max_cache * 0.05, 1);
+ BOOST_CHECK_CLOSE(double(c1.m_coinsdb_cache_size_bytes), max_cache * 0.05, 1);
+ BOOST_CHECK_CLOSE(double(c2.m_coinstip_cache_size_bytes), max_cache * 0.95, 1);
+ BOOST_CHECK_CLOSE(double(c2.m_coinsdb_cache_size_bytes), max_cache * 0.95, 1);
}
struct SnapshotTestSetup : TestChain100Setup {
@@ -382,7 +382,7 @@ struct SnapshotTestSetup : TestChain100Setup {
LOCK(::cs_main);
chainman.ResetChainstates();
BOOST_CHECK_EQUAL(chainman.GetAll().size(), 0);
- m_node.notifications = std::make_unique<KernelNotifications>(*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings));
+ m_node.notifications = std::make_unique<KernelNotifications>(Assert(m_node.shutdown_request), m_node.exit_status, *Assert(m_node.warnings));
const ChainstateManager::Options chainman_opts{
.chainparams = ::Params(),
.datadir = chainman.m_options.datadir,
@@ -397,7 +397,7 @@ struct SnapshotTestSetup : TestChain100Setup {
// For robustness, ensure the old manager is destroyed before creating a
// new one.
m_node.chainman.reset();
- m_node.chainman = std::make_unique<ChainstateManager>(*Assert(m_node.shutdown), chainman_opts, blockman_opts);
+ m_node.chainman = std::make_unique<ChainstateManager>(*Assert(m_node.shutdown_signal), chainman_opts, blockman_opts);
}
return *Assert(m_node.chainman);
}
@@ -806,22 +806,26 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_args, BasicTestingSetup)
};
// test -assumevalid
- BOOST_CHECK(!get_valid_opts({}).assumed_valid_block.has_value());
- BOOST_CHECK(get_valid_opts({"-assumevalid="}).assumed_valid_block.value().IsNull());
- BOOST_CHECK(get_valid_opts({"-assumevalid=0"}).assumed_valid_block.value().IsNull());
- BOOST_CHECK(get_valid_opts({"-noassumevalid"}).assumed_valid_block.value().IsNull());
- BOOST_CHECK_EQUAL(get_valid_opts({"-assumevalid=0x1234"}).assumed_valid_block.value().ToString(), std::string(60, '0') + "1234");
- const std::string cmd{"-assumevalid=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"};
- BOOST_CHECK_EQUAL(get_valid_opts({cmd.c_str()}).assumed_valid_block.value().ToString(), cmd.substr(13, cmd.size()));
+ BOOST_CHECK(!get_valid_opts({}).assumed_valid_block);
+ BOOST_CHECK_EQUAL(get_valid_opts({"-assumevalid="}).assumed_valid_block, uint256::ZERO);
+ BOOST_CHECK_EQUAL(get_valid_opts({"-assumevalid=0"}).assumed_valid_block, uint256::ZERO);
+ BOOST_CHECK_EQUAL(get_valid_opts({"-noassumevalid"}).assumed_valid_block, uint256::ZERO);
+ BOOST_CHECK_EQUAL(get_valid_opts({"-assumevalid=0x12"}).assumed_valid_block, uint256{0x12});
+
+ std::string assume_valid{"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"};
+ BOOST_CHECK_EQUAL(get_valid_opts({("-assumevalid=" + assume_valid).c_str()}).assumed_valid_block, uint256::FromHex(assume_valid));
BOOST_CHECK(!get_opts({"-assumevalid=xyz"})); // invalid hex characters
BOOST_CHECK(!get_opts({"-assumevalid=01234567890123456789012345678901234567890123456789012345678901234"})); // > 64 hex chars
// test -minimumchainwork
- BOOST_CHECK(!get_valid_opts({}).minimum_chain_work.has_value());
- BOOST_CHECK_EQUAL(get_valid_opts({"-minimumchainwork=0"}).minimum_chain_work.value().GetCompact(), 0U);
- BOOST_CHECK_EQUAL(get_valid_opts({"-nominimumchainwork"}).minimum_chain_work.value().GetCompact(), 0U);
- BOOST_CHECK_EQUAL(get_valid_opts({"-minimumchainwork=0x1234"}).minimum_chain_work.value().GetCompact(), 0x02123400U);
+ BOOST_CHECK(!get_valid_opts({}).minimum_chain_work);
+ BOOST_CHECK_EQUAL(get_valid_opts({"-minimumchainwork=0"}).minimum_chain_work, arith_uint256());
+ BOOST_CHECK_EQUAL(get_valid_opts({"-nominimumchainwork"}).minimum_chain_work, arith_uint256());
+ BOOST_CHECK_EQUAL(get_valid_opts({"-minimumchainwork=0x1234"}).minimum_chain_work, arith_uint256{0x1234});
+
+ std::string minimum_chainwork{"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"};
+ BOOST_CHECK_EQUAL(get_valid_opts({("-minimumchainwork=" + minimum_chainwork).c_str()}).minimum_chain_work, UintToArith256(uint256::FromHex(minimum_chainwork).value()));
BOOST_CHECK(!get_opts({"-minimumchainwork=xyz"})); // invalid hex characters
BOOST_CHECK(!get_opts({"-minimumchainwork=01234567890123456789012345678901234567890123456789012345678901234"})); // > 64 hex chars
diff --git a/src/txdb.h b/src/txdb.h
index e0acb09e98..412d6c6009 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -25,8 +25,6 @@ class uint256;
static const int64_t nDefaultDbCache = 450;
//! -dbbatchsize default (bytes)
static const int64_t nDefaultDbBatchSize = 16 << 20;
-//! max. -dbcache (MiB)
-static const int64_t nMaxDbCache = sizeof(void*) > 4 ? 16384 : 1024;
//! min. -dbcache (MiB)
static const int64_t nMinDbCache = 4;
//! Max memory allocated to block tree DB specific cache, if no -txindex (MiB)
diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp
index 35a215c88a..ba4ba6c3b6 100644
--- a/src/txorphanage.cpp
+++ b/src/txorphanage.cpp
@@ -33,7 +33,7 @@ bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer)
return false;
}
- auto ret = m_orphans.emplace(wtxid, OrphanTx{tx, peer, Now<NodeSeconds>() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size()});
+ auto ret = m_orphans.emplace(wtxid, OrphanTx{{tx, peer, Now<NodeSeconds>() + ORPHAN_TX_EXPIRE_TIME}, m_orphan_list.size()});
assert(ret.second);
m_orphan_list.push_back(ret.first);
for (const CTxIn& txin : tx->vin) {
@@ -277,3 +277,13 @@ std::vector<std::pair<CTransactionRef, NodeId>> TxOrphanage::GetChildrenFromDiff
}
return children_found;
}
+
+std::vector<TxOrphanage::OrphanTxBase> TxOrphanage::GetOrphanTransactions() const
+{
+ std::vector<OrphanTxBase> ret;
+ ret.reserve(m_orphans.size());
+ for (auto const& o : m_orphans) {
+ ret.push_back({o.second.tx, o.second.fromPeer, o.second.nTimeExpire});
+ }
+ return ret;
+}
diff --git a/src/txorphanage.h b/src/txorphanage.h
index 2c53d1d40f..5501d10922 100644
--- a/src/txorphanage.h
+++ b/src/txorphanage.h
@@ -72,11 +72,17 @@ public:
return m_orphans.size();
}
-protected:
- struct OrphanTx {
+ /** Allows providing orphan information externally */
+ struct OrphanTxBase {
CTransactionRef tx;
NodeId fromPeer;
NodeSeconds nTimeExpire;
+ };
+
+ std::vector<OrphanTxBase> GetOrphanTransactions() const;
+
+protected:
+ struct OrphanTx : public OrphanTxBase {
size_t list_pos;
};
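
A minimal usage sketch of the new accessor, assuming the in-tree <txorphanage.h> header; the `orphanage` instance is illustrative:

    // OrphanTxBase exposes the transaction, announcing peer and expiry time,
    // while the internal list_pos bookkeeping stays protected.
    TxOrphanage orphanage;
    for (const TxOrphanage::OrphanTxBase& o : orphanage.GetOrphanTransactions()) {
        const auto& wtxid{o.tx->GetWitnessHash()};
        (void)wtxid; (void)o.fromPeer; (void)o.nTimeExpire;
    }
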
diff --git a/src/uint256.h b/src/uint256.h
index c255f49ad6..8223787041 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -199,14 +199,4 @@ public:
static const uint256 ONE;
};
-/* uint256 from std::string_view, containing byte-reversed hex encoding.
- * DEPRECATED. Unlike FromHex this accepts any invalid input, thus it is fragile and deprecated!
- */
-inline uint256 uint256S(std::string_view str)
-{
- uint256 rv;
- rv.SetHexDeprecated(str);
- return rv;
-}
-
#endif // BITCOIN_UINT256_H
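
With uint256S removed, hex parsing goes through uint256::FromHex, which rejects malformed input instead of silently accepting it. A minimal sketch of the replacement pattern (variable names are illustrative):

    // FromHex returns std::nullopt unless the input is exactly 64 hex characters.
    std::optional<uint256> hash{uint256::FromHex("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")};
    if (!hash) {
        // handle the parse failure explicitly instead of continuing with a garbage value
    }
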
diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt
index 26c6271f9b..4999dbf13f 100644
--- a/src/util/CMakeLists.txt
+++ b/src/util/CMakeLists.txt
@@ -42,4 +42,5 @@ target_link_libraries(bitcoin_util
bitcoin_clientversion
bitcoin_crypto
$<$<PLATFORM_ID:Windows>:ws2_32>
+ $<$<PLATFORM_ID:Windows>:iphlpapi>
)
diff --git a/src/util/asmap.cpp b/src/util/asmap.cpp
index f50cd8a28c..04b0673c49 100644
--- a/src/util/asmap.cpp
+++ b/src/util/asmap.cpp
@@ -203,10 +203,10 @@ std::vector<bool> DecodeAsmap(fs::path path)
LogPrintf("Failed to open asmap file from disk\n");
return bits;
}
- fseek(filestr, 0, SEEK_END);
- int length = ftell(filestr);
+ file.seek(0, SEEK_END);
+ int length = file.tell();
LogPrintf("Opened asmap file %s (%d bytes) from disk\n", fs::quoted(fs::PathToString(path)), length);
- fseek(filestr, 0, SEEK_SET);
+ file.seek(0, SEEK_SET);
uint8_t cur_byte;
for (int i = 0; i < length; ++i) {
file >> cur_byte;
diff --git a/src/util/check.cpp b/src/util/check.cpp
index eb3885832b..e1956042c3 100644
--- a/src/util/check.cpp
+++ b/src/util/check.cpp
@@ -4,7 +4,7 @@
#include <util/check.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <clientversion.h>
#include <tinyformat.h>
diff --git a/src/util/check.h b/src/util/check.h
index a02a1de8dc..8f28f5dc94 100644
--- a/src/util/check.h
+++ b/src/util/check.h
@@ -40,7 +40,7 @@ void assertion_fail(std::string_view file, int line, std::string_view func, std:
/** Helper for Assert()/Assume() */
template <bool IS_ASSERT, typename T>
-T&& inline_assertion_check(LIFETIMEBOUND T&& val, [[maybe_unused]] const char* file, [[maybe_unused]] int line, [[maybe_unused]] const char* func, [[maybe_unused]] const char* assertion)
+constexpr T&& inline_assertion_check(LIFETIMEBOUND T&& val, [[maybe_unused]] const char* file, [[maybe_unused]] int line, [[maybe_unused]] const char* func, [[maybe_unused]] const char* assertion)
{
if constexpr (IS_ASSERT
#ifdef ABORT_ON_FAILED_ASSUME
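
Making inline_assertion_check constexpr lets Assert()/Assume() participate in constant expressions. A minimal sketch, assuming <util/check.h> from this branch (function name is illustrative):

    // The failure branch calls a non-constexpr function, which only matters if
    // the checked value is actually falsy during constant evaluation.
    constexpr int CheckedAnswer()
    {
        return Assert(42);
    }
    static_assert(CheckedAnswer() == 42);
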
diff --git a/src/util/feefrac.h b/src/util/feefrac.h
index 9772162010..161322b50a 100644
--- a/src/util/feefrac.h
+++ b/src/util/feefrac.h
@@ -64,13 +64,13 @@ struct FeeFrac
int32_t size;
/** Construct an IsEmpty() FeeFrac. */
- inline FeeFrac() noexcept : fee{0}, size{0} {}
+ constexpr inline FeeFrac() noexcept : fee{0}, size{0} {}
/** Construct a FeeFrac with specified fee and size. */
- inline FeeFrac(int64_t f, int32_t s) noexcept : fee{f}, size{s} {}
+ constexpr inline FeeFrac(int64_t f, int32_t s) noexcept : fee{f}, size{s} {}
- inline FeeFrac(const FeeFrac&) noexcept = default;
- inline FeeFrac& operator=(const FeeFrac&) noexcept = default;
+ constexpr inline FeeFrac(const FeeFrac&) noexcept = default;
+ constexpr inline FeeFrac& operator=(const FeeFrac&) noexcept = default;
/** Check if this is empty (size and fee are 0). */
bool inline IsEmpty() const noexcept {
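
The constexpr constructors allow FeeFrac values to be formed at compile time. A minimal sketch (constant names are illustrative):

    constexpr FeeFrac kEmpty;                 // fee=0, size=0
    constexpr FeeFrac kExample{1000, 100};    // 1000 satoshis over 100 size units
    static_assert(kEmpty.fee == 0 && kEmpty.size == 0);
    static_assert(kExample.fee == 1000 && kExample.size == 100);
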
diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp
index 41c8fe3b8f..7ac7b829d8 100644
--- a/src/util/fs_helpers.cpp
+++ b/src/util/fs_helpers.cpp
@@ -5,7 +5,7 @@
#include <util/fs_helpers.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <logging.h>
#include <sync.h>
@@ -22,7 +22,7 @@
#include <utility>
#ifndef WIN32
-// for posix_fallocate, in configure.ac we check if it is present after this
+// for posix_fallocate, in cmake/introspection.cmake we check if it is present after this
#ifdef __linux__
#ifdef _POSIX_C_SOURCE
diff --git a/src/util/string.h b/src/util/string.h
index 30c0a0d6c1..c5183d6c80 100644
--- a/src/util/string.h
+++ b/src/util/string.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2022 The Bitcoin Core developers
+// Copyright (c) 2019-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -6,6 +6,7 @@
#define BITCOIN_UTIL_STRING_H
#include <span.h>
+#include <tinyformat.h>
#include <array>
#include <cstdint>
@@ -17,6 +18,67 @@
#include <vector>
namespace util {
+/**
+ * @brief A wrapper for a compile-time partially validated format string
+ *
+ * This struct can be used to enforce partial compile-time validation of format
+ * strings, to reduce the likelihood of tinyformat throwing exceptions at
+ * run-time. Validation is partial to try and prevent the most common errors
+ * while avoiding re-implementing the entire parsing logic.
+ *
+ * @note Counting of `*` dynamic width and precision fields (such as `%*c`,
+ * `%2$*3$d`, `%.*f`) is not implemented, to keep the code simple, since these
+ * fields are not currently used in the codebase. Their usage is not counted and
+ * can lead to run-time exceptions. Code wanting to use the `*` specifier can
+ * side-step this struct and call tinyformat directly.
+ */
+template <unsigned num_params>
+struct ConstevalFormatString {
+ const char* const fmt;
+ consteval ConstevalFormatString(const char* str) : fmt{str} { Detail_CheckNumFormatSpecifiers(fmt); }
+ constexpr static void Detail_CheckNumFormatSpecifiers(std::string_view str)
+ {
+ unsigned count_normal{0}; // Number of "normal" specifiers, like %s
+ unsigned count_pos{0}; // Max number in positional specifier, like %8$s
+ for (auto it{str.begin()}; it < str.end();) {
+ if (*it != '%') {
+ ++it;
+ continue;
+ }
+
+ if (++it >= str.end()) throw "Format specifier incorrectly terminated by end of string";
+ if (*it == '%') {
+ // Percent escape: %%
+ ++it;
+ continue;
+ }
+
+ unsigned maybe_num{0};
+ while ('0' <= *it && *it <= '9') {
+ maybe_num *= 10;
+ maybe_num += *it - '0';
+ ++it;
+ };
+
+ if (*it == '$') {
+ // Positional specifier, like %8$s
+ if (maybe_num == 0) throw "Positional format specifier must have position of at least 1";
+ count_pos = std::max(count_pos, maybe_num);
+ if (++it >= str.end()) throw "Format specifier incorrectly terminated by end of string";
+ } else {
+ // Non-positional specifier, like %s
+ ++count_normal;
+ ++it;
+ }
+ // The remainder "[flags][width][.precision][length]type" of the
+ // specifier is not checked. Parsing continues with the next '%'.
+ }
+ if (count_normal && count_pos) throw "Format specifiers must be all positional or all non-positional!";
+ unsigned count{count_normal | count_pos};
+ if (num_params != count) throw "Format specifier count must match the argument count!";
+ }
+};
+
void ReplaceAll(std::string& in_out, const std::string& search, const std::string& substitute);
/** Split a string on any char found in separators, returning a vector.
@@ -173,4 +235,12 @@ template <typename T1, size_t PREFIX_LEN>
}
} // namespace util
+namespace tinyformat {
+template <typename... Args>
+std::string format(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
+{
+ return format(fmt.fmt, args...);
+}
+} // namespace tinyformat
+
#endif // BITCOIN_UTIL_STRING_H
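
A minimal usage sketch of the consteval check, assuming <util/string.h> and <tinyformat.h> from this branch (the strings and variable names are illustrative):

    util::ConstevalFormatString<2> ok{"%s connected %d peers"};   // accepted: two specifiers, two expected arguments
    // util::ConstevalFormatString<2> bad{"%s / %d / %s"};        // rejected at compile time: three specifiers for two arguments
    std::string line{tfm::format(ok, "node0", 8)};                // forwards the validated string to tinyformat
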
diff --git a/src/util/syserror.cpp b/src/util/syserror.cpp
index 6f3a724483..a902826f8e 100644
--- a/src/util/syserror.cpp
+++ b/src/util/syserror.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <tinyformat.h>
#include <util/syserror.h>
diff --git a/src/util/threadnames.cpp b/src/util/threadnames.cpp
index 0249de37e3..37c5b8f617 100644
--- a/src/util/threadnames.cpp
+++ b/src/util/threadnames.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <cstring>
#include <string>
diff --git a/src/util/tokenpipe.cpp b/src/util/tokenpipe.cpp
index 16fbb664ea..9425c62ebf 100644
--- a/src/util/tokenpipe.cpp
+++ b/src/util/tokenpipe.cpp
@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <util/tokenpipe.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#ifndef WIN32
diff --git a/src/util/trace.h b/src/util/trace.h
index d9ed65e3aa..72a486d562 100644
--- a/src/util/trace.h
+++ b/src/util/trace.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_UTIL_TRACE_H
#define BITCOIN_UTIL_TRACE_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#ifdef ENABLE_TRACING
diff --git a/src/validation.cpp b/src/validation.cpp
index 3a64be753b..fe07686209 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <validation.h>
@@ -70,6 +70,7 @@
#include <numeric>
#include <optional>
#include <ranges>
+#include <span>
#include <string>
#include <tuple>
#include <utility>
@@ -107,10 +108,6 @@ const std::vector<std::string> CHECKLEVEL_DOC {
* */
static constexpr int PRUNE_LOCK_BUFFER{10};
-GlobalMutex g_best_block_mutex;
-std::condition_variable g_best_block_cv;
-uint256 g_best_block;
-
const CBlockIndex* Chainstate::FindForkInGlobalIndex(const CBlockLocator& locator) const
{
AssertLockHeld(cs_main);
@@ -2023,7 +2020,8 @@ void Chainstate::CheckForkWarningConditions()
// Before we get past initial download, we cannot reliably alert about forks
// (we assume we don't get stuck on a fork before finishing our initial sync)
- if (m_chainman.IsInitialBlockDownload()) {
+ // Also not applicable to the background chainstate
+ if (m_chainman.IsInitialBlockDownload() || this->GetRole() == ChainstateRole::BACKGROUND) {
return;
}
@@ -2987,12 +2985,6 @@ void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
m_mempool->AddTransactionsUpdated(1);
}
- {
- LOCK(g_best_block_mutex);
- g_best_block = pindexNew->GetBlockHash();
- g_best_block_cv.notify_all();
- }
-
std::vector<bilingual_str> warning_messages;
if (!m_chainman.IsInitialBlockDownload()) {
const CBlockIndex* pindex = pindexNew;
@@ -3541,7 +3533,6 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<
m_chainman.m_options.signals->UpdatedBlockTip(pindexNewTip, pindexFork, still_in_ibd);
}
- // Always notify the UI if a new block tip was connected
if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(still_in_ibd, m_chainman.m_blockman.m_blockfiles_indexed), *pindexNewTip))) {
// Just breaking and returning success for now. This could
// be changed to bubble up the kernel::Interrupted value to
@@ -3574,8 +3565,8 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<
//
// This cannot be done while holding cs_main (within
// MaybeCompleteSnapshotValidation) or a cs_main deadlock will occur.
- if (m_chainman.restart_indexes) {
- m_chainman.restart_indexes();
+ if (m_chainman.snapshot_download_completed) {
+ m_chainman.snapshot_download_completed();
}
break;
}
@@ -4136,7 +4127,7 @@ bool IsBlockMutated(const CBlock& block, bool check_witness_root)
return false;
}
-arith_uint256 CalculateClaimedHeadersWork(const std::vector<CBlockHeader>& headers)
+arith_uint256 CalculateClaimedHeadersWork(std::span<const CBlockHeader> headers)
{
arith_uint256 total_work{0};
for (const CBlockHeader& header : headers) {
@@ -4384,7 +4375,7 @@ bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValida
}
// Exposed wrapper for AcceptBlockHeader
-bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex)
+bool ChainstateManager::ProcessNewBlockHeaders(std::span<const CBlockHeader> headers, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex)
{
AssertLockNotHeld(cs_main);
{
@@ -6013,7 +6004,7 @@ util::Result<void> ChainstateManager::PopulateAndValidateSnapshot(
index = snapshot_chainstate.m_chain[i];
// Fake BLOCK_OPT_WITNESS so that Chainstate::NeedsRedownload()
- // won't ask to rewind the entire assumed-valid chain on startup.
+ // won't ask for -reindex on startup.
if (DeploymentActiveAt(*index, *this, Consensus::DEPLOYMENT_SEGWIT)) {
index->nStatus |= BLOCK_OPT_WITNESS;
}
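
The renamed hook is a plain std::function<void()> member assigned from outside the kernel, so the callback body stays out of validation code. An illustrative (hypothetical) wiring; `chainman` and the lambda body are assumptions, not code from this change:

    chainman.snapshot_download_completed = [&] {
        // e.g. restart indexes and update the advertised local services
        // once historical blocks up to the snapshot are available (hypothetical body)
    };
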
diff --git a/src/validation.h b/src/validation.h
index f905d6e624..f6aeea3faa 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -39,9 +39,9 @@
#include <memory>
#include <optional>
#include <set>
+#include <span>
#include <stdint.h>
#include <string>
-#include <thread>
#include <type_traits>
#include <utility>
#include <vector>
@@ -85,11 +85,6 @@ enum class SynchronizationState {
POST_INIT
};
-extern GlobalMutex g_best_block_mutex;
-extern std::condition_variable g_best_block_cv;
-/** Used to notify getblocktemplate RPC of new tips. */
-extern uint256 g_best_block;
-
/** Documentation for argument 'checklevel'. */
extern const std::vector<std::string> CHECKLEVEL_DOC;
@@ -407,7 +402,7 @@ bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consens
bool IsBlockMutated(const CBlock& block, bool check_witness_root);
/** Return the sum of the claimed work on a given set of headers. No verification of PoW is done. */
-arith_uint256 CalculateClaimedHeadersWork(const std::vector<CBlockHeader>& headers);
+arith_uint256 CalculateClaimedHeadersWork(std::span<const CBlockHeader> headers);
enum class VerifyDBResult {
SUCCESS,
@@ -914,7 +909,7 @@ private:
//! Internal helper for ActivateSnapshot().
//!
//! De-serialization of a snapshot that is created with
- //! CreateUTXOSnapshot() in rpc/blockchain.cpp.
+ //! the dumptxoutset RPC.
//! To reduce space the serialization format of the snapshot avoids
//! duplication of tx hashes. The code takes advantage of the guarantee by
//! leveldb that keys are lexicographically sorted.
@@ -976,7 +971,7 @@ public:
//! Function to restart active indexes; set dynamically to avoid a circular
//! dependency on `base/index.cpp`.
- std::function<void()> restart_indexes = std::function<void()>();
+ std::function<void()> snapshot_download_completed = std::function<void()>();
const CChainParams& GetParams() const { return m_options.chainparams; }
const Consensus::Params& GetConsensus() const { return m_options.chainparams.GetConsensus(); }
@@ -1007,7 +1002,6 @@ public:
const util::SignalInterrupt& m_interrupt;
const Options m_options;
- std::thread m_thread_load;
//! A single BlockManager instance is shared across each constructed
//! chainstate to avoid duplicating block metadata.
node::BlockManager m_blockman;
@@ -1217,12 +1211,12 @@ public:
* May not be called in a
* validationinterface callback.
*
- * @param[in] block The block headers themselves
+ * @param[in] headers The block headers themselves
* @param[in] min_pow_checked True if proof-of-work anti-DoS checks have been done by caller for headers chain
* @param[out] state This may be set to an Error state if any error occurred processing them
* @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers
*/
- bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex = nullptr) LOCKS_EXCLUDED(cs_main);
+ bool ProcessNewBlockHeaders(std::span<const CBlockHeader> headers, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex = nullptr) LOCKS_EXCLUDED(cs_main);
/**
* Sufficiently validate a block for disk storage (and store on disk).
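
Since std::span<const CBlockHeader> binds to any contiguous sequence, callers no longer need to build a std::vector just to pass headers along. A minimal sketch (`chainman` and `header` are illustrative):

    CBlockHeader header;                           // assume the fields are filled in elsewhere
    std::array<CBlockHeader, 1> headers{header};   // any contiguous container works
    BlockValidationState state;
    chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state);
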
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index e8ff1d78e3..da2685d771 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -9,6 +9,7 @@
#include <consensus/validation.h>
#include <kernel/chain.h>
#include <kernel/mempool_entry.h>
+#include <kernel/mempool_removal_reason.h>
#include <logging.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
@@ -19,8 +20,6 @@
#include <unordered_map>
#include <utility>
-std::string RemovalReasonToString(const MemPoolRemovalReason& r) noexcept;
-
/**
* ValidationSignalsImpl manages a list of shared_ptr<CValidationInterface> callbacks.
*
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 14d22bb54e..cfd09a2e10 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <common/args.h>
#include <init.h>
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp
index 129b5c7c2a..2b5c021cda 100644
--- a/src/wallet/load.cpp
+++ b/src/wallet/load.cpp
@@ -69,7 +69,7 @@ bool VerifyWallets(WalletContext& context)
// Pass write=false because no need to write file and probably
// better not to. If unnamed wallet needs to be added next startup
// and the setting is empty, this code will just run again.
- chain.overwriteRwSetting("wallet", wallets, /*write=*/false);
+ chain.overwriteRwSetting("wallet", std::move(wallets), interfaces::SettingsAction::SKIP_WRITE);
}
}
@@ -77,6 +77,11 @@ bool VerifyWallets(WalletContext& context)
std::set<fs::path> wallet_paths;
for (const auto& wallet : chain.getSettingsList("wallet")) {
+ if (!wallet.isStr()) {
+ chain.initError(_("Invalid value detected for '-wallet' or '-nowallet'. "
+ "'-wallet' requires a string value, while '-nowallet' accepts only '1' to disable all wallets"));
+ return false;
+ }
const auto& wallet_file = wallet.get_str();
const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), fs::PathFromString(wallet_file));
@@ -110,6 +115,11 @@ bool LoadWallets(WalletContext& context)
try {
std::set<fs::path> wallet_paths;
for (const auto& wallet : chain.getSettingsList("wallet")) {
+ if (!wallet.isStr()) {
+ chain.initError(_("Invalid value detected for '-wallet' or '-nowallet'. "
+ "'-wallet' requires a string value, while '-nowallet' accepts only '1' to disable all wallets"));
+ return false;
+ }
const auto& name = wallet.get_str();
if (!wallet_paths.insert(fs::PathFromString(name)).second) {
continue;
diff --git a/src/wallet/rpc/addresses.cpp b/src/wallet/rpc/addresses.cpp
index 838d062108..1c2951deee 100644
--- a/src/wallet/rpc/addresses.cpp
+++ b/src/wallet/rpc/addresses.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <core_io.h>
#include <key_io.h>
diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp
index 20d09b1d9a..4ffc6f1e0d 100644
--- a/src/wallet/rpc/backup.cpp
+++ b/src/wallet/rpc/backup.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <chain.h>
#include <clientversion.h>
diff --git a/src/wallet/rpc/util.cpp b/src/wallet/rpc/util.cpp
index 67b5ae0fe2..ec3b7c1085 100644
--- a/src/wallet/rpc/util.cpp
+++ b/src/wallet/rpc/util.cpp
@@ -91,7 +91,7 @@ std::shared_ptr<CWallet> GetWalletForJSONRPCRequest(const JSONRPCRequest& reques
RPC_WALLET_NOT_FOUND, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)");
}
throw JSONRPCError(RPC_WALLET_NOT_SPECIFIED,
- "Wallet file not specified (must request wallet RPC through /wallet/<filename> uri-path).");
+ "Multiple wallets are loaded. Please select which wallet to use by requesting the RPC through the /wallet/<walletname> URI path.");
}
void EnsureWalletIsUnlocked(const CWallet& wallet)
diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp
index 39582b3f6a..5140ac8c05 100644
--- a/src/wallet/rpc/wallet.cpp
+++ b/src/wallet/rpc/wallet.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <core_io.h>
#include <key_io.h>
diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h
index ba3562c638..cf7b7eaf31 100644
--- a/src/wallet/scriptpubkeyman.h
+++ b/src/wallet/scriptpubkeyman.h
@@ -254,9 +254,9 @@ public:
/** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */
template <typename... Params>
- void WalletLogPrintf(const char* fmt, Params... parameters) const
+ void WalletLogPrintf(util::ConstevalFormatString<sizeof...(Params)> wallet_fmt, const Params&... params) const
{
- LogPrintf(("%s " + std::string{fmt}).c_str(), m_storage.GetDisplayName(), parameters...);
+ LogInfo("%s %s", m_storage.GetDisplayName(), tfm::format(wallet_fmt, params...));
};
/** Watch-only address added */
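
With the format string now a util::ConstevalFormatString, an argument-count mismatch in wallet logging is caught at compile time instead of surfacing as a tinyformat exception at run time. An illustrative call from inside a wallet or ScriptPubKeyMan member function (the message is made up):

    WalletLogPrintf("Keypool has %d keys left\n", 5);   // accepted: one specifier, one argument
    // WalletLogPrintf("Keypool has %d keys left\n");   // would no longer compile: one specifier, zero arguments
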
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index 7abf7f59c0..aceed24a86 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -1167,6 +1167,7 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal(
result.GetSelectedValue());
// vouts to the payees
+ txNew.vout.reserve(vecSend.size() + 1); // + 1 because of possible later insert
for (const auto& recipient : vecSend)
{
txNew.vout.emplace_back(recipient.nAmount, GetScriptForDestination(recipient.dest));
@@ -1217,6 +1218,7 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal(
// behavior."
bool use_anti_fee_sniping = true;
const uint32_t default_sequence{coin_control.m_signal_bip125_rbf.value_or(wallet.m_signal_rbf) ? MAX_BIP125_RBF_SEQUENCE : CTxIn::MAX_SEQUENCE_NONFINAL};
+ txNew.vin.reserve(selected_coins.size());
for (const auto& coin : selected_coins) {
std::optional<uint32_t> sequence = coin_control.GetSequence(coin->outpoint);
if (sequence) {
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index f2110ea3f7..ab082327de 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <wallet/sqlite.h>
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index 2fac356263..ea32199497 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <boost/test/unit_test.hpp>
@@ -28,7 +28,7 @@ inline std::ostream& operator<<(std::ostream& os, const std::pair<const Serializ
{
Span key{kv.first}, value{kv.second};
os << "(\"" << std::string_view{reinterpret_cast<const char*>(key.data()), key.size()} << "\", \""
- << std::string_view{reinterpret_cast<const char*>(key.data()), key.size()} << "\")";
+ << std::string_view{reinterpret_cast<const char*>(value.data()), value.size()} << "\")";
return os;
}
diff --git a/src/wallet/test/fuzz/coinselection.cpp b/src/wallet/test/fuzz/coinselection.cpp
index 209c87fd42..31fa00c0a2 100644
--- a/src/wallet/test/fuzz/coinselection.cpp
+++ b/src/wallet/test/fuzz/coinselection.cpp
@@ -252,7 +252,7 @@ FUZZ_TARGET(coinselection)
GroupCoins(fuzzed_data_provider, utxo_pool, coin_params, /*positive_only=*/false, group_all);
for (const OutputGroup& group : group_all) {
- const CoinEligibilityFilter filter(fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>());
+ const CoinEligibilityFilter filter{fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeIntegral<int>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>()};
(void)group.EligibleForSpending(filter);
}
diff --git a/src/wallet/test/fuzz/scriptpubkeyman.cpp b/src/wallet/test/fuzz/scriptpubkeyman.cpp
index 88f8c151e7..091d42f6cf 100644
--- a/src/wallet/test/fuzz/scriptpubkeyman.cpp
+++ b/src/wallet/test/fuzz/scriptpubkeyman.cpp
@@ -186,7 +186,10 @@ FUZZ_TARGET(scriptpubkeyman, .init = initialize_spkm)
auto psbt{*opt_psbt};
const PrecomputedTransactionData txdata{PrecomputePSBTData(psbt)};
const int sighash_type{fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 150)};
- (void)spk_manager->FillPSBT(psbt, txdata, sighash_type, fuzzed_data_provider.ConsumeBool(), fuzzed_data_provider.ConsumeBool(), nullptr, fuzzed_data_provider.ConsumeBool());
+ auto sign = fuzzed_data_provider.ConsumeBool();
+ auto bip32derivs = fuzzed_data_provider.ConsumeBool();
+ auto finalize = fuzzed_data_provider.ConsumeBool();
+ (void)spk_manager->FillPSBT(psbt, txdata, sighash_type, sign, bip32derivs, nullptr, finalize);
}
);
}
diff --git a/src/wallet/test/fuzz/wallet_bdb_parser.cpp b/src/wallet/test/fuzz/wallet_bdb_parser.cpp
index 6fbd695fc5..6482b65d06 100644
--- a/src/wallet/test/fuzz/wallet_bdb_parser.cpp
+++ b/src/wallet/test/fuzz/wallet_bdb_parser.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -18,6 +18,13 @@
#include <fstream>
#include <iostream>
+// There is an inconsistency in BDB on Windows.
+// See: https://github.com/bitcoin/bitcoin/pull/26606#issuecomment-2322763212
+#undef USE_BDB_NON_MSVC
+#if defined(USE_BDB) && !defined(_MSC_VER)
+#define USE_BDB_NON_MSVC
+#endif
+
using wallet::DatabaseOptions;
using wallet::DatabaseStatus;
@@ -50,7 +57,7 @@ FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
}
g_setup->m_args.ForceSetArg("-dumpfile", fs::PathToString(bdb_ro_dumpfile));
-#ifdef USE_BDB
+#ifdef USE_BDB_NON_MSVC
bool bdb_ro_err = false;
bool bdb_ro_strict_err = false;
#endif
@@ -58,7 +65,7 @@ FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
if (db) {
assert(DumpWallet(g_setup->m_args, *db, error));
} else {
-#ifdef USE_BDB
+#ifdef USE_BDB_NON_MSVC
bdb_ro_err = true;
#endif
if (error.original.starts_with("AutoFile::ignore: end of file") ||
@@ -90,7 +97,7 @@ FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
error.original == "Subdatabase has an unexpected name" ||
error.original == "Unsupported BDB data file version number" ||
error.original == "BDB builtin encryption is not supported") {
-#ifdef USE_BDB
+#ifdef USE_BDB_NON_MSVC
bdb_ro_strict_err = true;
#endif
} else {
@@ -98,7 +105,7 @@ FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
}
}
-#ifdef USE_BDB
+#ifdef USE_BDB_NON_MSVC
// Try opening with BDB
fs::path bdb_dumpfile{g_setup->m_args.GetDataDirNet() / "fuzzed_dumpfile_bdb.dump"};
if (fs::exists(bdb_dumpfile)) { // Writing into an existing dump file will throw an exception
diff --git a/src/wallet/test/ismine_tests.cpp b/src/wallet/test/ismine_tests.cpp
index 8deab74fac..f6688ed30a 100644
--- a/src/wallet/test/ismine_tests.cpp
+++ b/src/wallet/test/ismine_tests.cpp
@@ -684,7 +684,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
scriptPubKey.clear();
- scriptPubKey << OP_0 << "aabb"_hex_v_u8;
+ scriptPubKey << OP_0 << "aabb"_hex;
result = keystore.GetLegacyScriptPubKeyMan()->IsMine(scriptPubKey);
BOOST_CHECK_EQUAL(result, ISMINE_NO);
@@ -699,7 +699,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
scriptPubKey.clear();
- scriptPubKey << OP_16 << "aabb"_hex_v_u8;
+ scriptPubKey << OP_16 << "aabb"_hex;
result = keystore.GetLegacyScriptPubKeyMan()->IsMine(scriptPubKey);
BOOST_CHECK_EQUAL(result, ISMINE_NO);
diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h
index fc7674e961..ba12f5f6bf 100644
--- a/src/wallet/test/util.h
+++ b/src/wallet/test/util.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_WALLET_TEST_UTIL_H
#define BITCOIN_WALLET_TEST_UTIL_H
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <addresstype.h>
#include <wallet/db.h>
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 5a520cbfe9..b5de4b4b3d 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -334,12 +334,11 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// concurrently, ensuring no race conditions occur during either process.
BOOST_FIXTURE_TEST_CASE(write_wallet_settings_concurrently, TestingSetup)
{
- WalletContext context;
- context.chain = m_node.chain.get();
+ auto chain = m_node.chain.get();
const auto NUM_WALLETS{5};
// Since we're counting the number of wallets, ensure we start without any.
- BOOST_REQUIRE(context.chain->getRwSetting("wallet").isNull());
+ BOOST_REQUIRE(chain->getRwSetting("wallet").isNull());
const auto& check_concurrent_wallet = [&](const auto& settings_function, int num_expected_wallets) {
std::vector<std::thread> threads;
@@ -347,19 +346,19 @@ BOOST_FIXTURE_TEST_CASE(write_wallet_settings_concurrently, TestingSetup)
for (auto i{0}; i < NUM_WALLETS; ++i) threads.emplace_back(settings_function, i);
for (auto& t : threads) t.join();
- auto wallets = context.chain->getRwSetting("wallet");
+ auto wallets = chain->getRwSetting("wallet");
BOOST_CHECK_EQUAL(wallets.getValues().size(), num_expected_wallets);
};
// Add NUM_WALLETS wallets concurrently, ensure we end up with NUM_WALLETS stored.
- check_concurrent_wallet([&context](int i) {
- Assert(AddWalletSetting(*context.chain, strprintf("wallet_%d", i)));
+ check_concurrent_wallet([&chain](int i) {
+ Assert(AddWalletSetting(*chain, strprintf("wallet_%d", i)));
},
/*num_expected_wallets=*/NUM_WALLETS);
// Remove NUM_WALLETS wallets concurrently, ensure we end up with 0 wallets.
- check_concurrent_wallet([&context](int i) {
- Assert(RemoveWalletSetting(*context.chain, strprintf("wallet_%d", i)));
+ check_concurrent_wallet([&chain](int i) {
+ Assert(RemoveWalletSetting(*chain, strprintf("wallet_%d", i)));
},
/*num_expected_wallets=*/0);
}
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 83e96adf07..de565102cc 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -5,7 +5,7 @@
#include <wallet/wallet.h>
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <addresstype.h>
#include <blockfilter.h>
#include <chain.h>
@@ -3410,6 +3410,14 @@ void CWallet::postInitProcess()
bool CWallet::BackupWallet(const std::string& strDest) const
{
+ if (m_chain) {
+ CBlockLocator loc;
+ WITH_LOCK(cs_wallet, chain().findBlock(m_last_block_processed, FoundBlock().locator(loc)));
+ if (!loc.IsNull()) {
+ WalletBatch batch(GetDatabase());
+ batch.WriteBestBlock(loc);
+ }
+ }
return GetDatabase().Backup(strDest);
}
@@ -4390,6 +4398,11 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle
return util::Error{_("Error: This wallet is already a descriptor wallet")};
}
+ // Flush chain state before unloading wallet
+ CBlockLocator locator;
+ WITH_LOCK(wallet->cs_wallet, context.chain->findBlock(wallet->GetLastBlockHash(), FoundBlock().locator(locator)));
+ if (!locator.IsNull()) wallet->chainStateFlushed(ChainstateRole::NORMAL, locator);
+
if (!RemoveWallet(context, wallet, /*load_on_start=*/std::nullopt, warnings)) {
return util::Error{_("Unable to unload the wallet before migrating")};
}
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 485eed11fa..d3a7208b15 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -927,9 +927,9 @@ public:
/** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */
template <typename... Params>
- void WalletLogPrintf(const char* fmt, Params... parameters) const
+ void WalletLogPrintf(util::ConstevalFormatString<sizeof...(Params)> wallet_fmt, const Params&... params) const
{
- LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...);
+ LogInfo("%s %s", GetDisplayName(), tfm::format(wallet_fmt, params...));
};
/** Upgrade the wallet */
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 47b84f7e6a..597a4ef9a4 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -3,7 +3,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <wallet/walletdb.h>
diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp
index 10785ad354..b78985264a 100644
--- a/src/wallet/wallettool.cpp
+++ b/src/wallet/wallettool.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
#include <wallet/wallettool.h>
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 9fd4e6e84e..3a5998697d 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -27,7 +27,7 @@ function(create_test_config)
set_configure_variable(ENABLE_EXTERNAL_SIGNER ENABLE_EXTERNAL_SIGNER)
set_configure_variable(WITH_USDT ENABLE_USDT_TRACEPOINTS)
- configure_file(config.ini.in config.ini @ONLY)
+ configure_file(config.ini.in config.ini USE_SOURCE_PERMISSIONS @ONLY)
endfunction()
create_test_config()
diff --git a/test/functional/README.md b/test/functional/README.md
index a4994f2e7c..a34bf1827c 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -10,7 +10,8 @@ that file and modify to fit your needs.
#### Coverage
-Running `test/functional/test_runner.py` with the `--coverage` argument tracks which RPCs are
+Assuming the build directory is `build`,
+running `build/test/functional/test_runner.py` with the `--coverage` argument tracks which RPCs are
called by the tests and prints a report of uncovered RPCs in the summary. This
can be used (along with the `--extended` argument) to find out which RPCs we
don't have test cases for.
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
index a212704311..2995ece42f 100755
--- a/test/functional/feature_assumeutxo.py
+++ b/test/functional/feature_assumeutxo.py
@@ -9,6 +9,7 @@ to a hash that has been compiled into bitcoind.
The assumeutxo value generated and used here is committed to in
`CRegTestParams::m_assumeutxo_data` in `src/kernel/chainparams.cpp`.
"""
+import time
from shutil import rmtree
from dataclasses import dataclass
@@ -16,12 +17,22 @@ from test_framework.blocktools import (
create_block,
create_coinbase
)
-from test_framework.messages import tx_from_hex
+from test_framework.messages import (
+ CBlockHeader,
+ from_hex,
+ msg_headers,
+ tx_from_hex
+)
+from test_framework.p2p import (
+ P2PInterface,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_raises_rpc_error,
+ sha256sum_file,
+ try_rpc,
)
from test_framework.wallet import (
getnewdestination,
@@ -247,6 +258,74 @@ class AssumeutxoTest(BitcoinTestFramework):
node1.submitheader(main_block1)
node1.submitheader(main_block2)
+ def test_sync_from_assumeutxo_node(self, snapshot):
+ """
+ This test verifies that:
+ 1. An IBD node can sync headers from an AssumeUTXO node at any time.
+ 2. IBD nodes do not request historical blocks from AssumeUTXO nodes while they are syncing the background-chain.
+ 3. The assumeUTXO node dynamically adjusts the network services it offers according to its state.
+ 4. IBD nodes can fully sync from AssumeUTXO nodes after they finish the background-chain sync.
+ """
+ self.log.info("Testing IBD-sync from assumeUTXO node")
+ # Node2 starts clean and loads the snapshot.
+ # Node3 starts clean and seeks to sync up from snapshot_node.
+ miner = self.nodes[0]
+ snapshot_node = self.nodes[2]
+ ibd_node = self.nodes[3]
+
+ # Start test fresh by cleaning up node directories
+ for node in (snapshot_node, ibd_node):
+ self.stop_node(node.index)
+ rmtree(node.chain_path)
+ self.start_node(node.index, extra_args=self.extra_args[node.index])
+
+ # Sync up the headers chain on snapshot_node so it can load the snapshot
+ headers_provider_conn = snapshot_node.add_p2p_connection(P2PInterface())
+ headers_provider_conn.wait_for_getheaders()
+ msg = msg_headers()
+ for block_num in range(1, miner.getblockcount()+1):
+ msg.headers.append(from_hex(CBlockHeader(), miner.getblockheader(miner.getblockhash(block_num), verbose=False)))
+ headers_provider_conn.send_message(msg)
+
+ # Ensure headers arrived
+ default_value = {'status': ''} # No status
+ headers_tip_hash = miner.getbestblockhash()
+ self.wait_until(lambda: next(filter(lambda x: x['hash'] == headers_tip_hash, snapshot_node.getchaintips()), default_value)['status'] == "headers-only")
+ snapshot_node.disconnect_p2ps()
+
+ # Load snapshot
+ snapshot_node.loadtxoutset(snapshot['path'])
+
+ # Connect nodes and verify the ibd_node can sync up the headers-chain from the snapshot_node
+ self.connect_nodes(ibd_node.index, snapshot_node.index)
+ snapshot_block_hash = snapshot['base_hash']
+ self.wait_until(lambda: next(filter(lambda x: x['hash'] == snapshot_block_hash, ibd_node.getchaintips()), default_value)['status'] == "headers-only")
+
+ # Once the headers-chain is synced, the ibd_node must avoid requesting historical blocks from the snapshot_node.
+ # If it does request such blocks, the snapshot_node will ignore requests it cannot fulfill, causing the ibd_node
+ # to stall. This stall could last for up to 10 min, ultimately resulting in an abrupt disconnection due to the
+ # ibd_node's perceived unresponsiveness.
+ time.sleep(3) # Sleep here because we can't detect when a node avoids requesting blocks from another peer.
+ assert_equal(len(ibd_node.getpeerinfo()[0]['inflight']), 0)
+
+ # Now disconnect nodes and finish background chain sync
+ self.disconnect_nodes(ibd_node.index, snapshot_node.index)
+ self.connect_nodes(snapshot_node.index, miner.index)
+ self.sync_blocks(nodes=(miner, snapshot_node))
+ # Check the base snapshot block was stored and ensure node signals full-node service support
+ self.wait_until(lambda: not try_rpc(-1, "Block not available (not fully downloaded)", snapshot_node.getblock, snapshot_block_hash))
+ self.wait_until(lambda: 'NETWORK' in snapshot_node.getnetworkinfo()['localservicesnames'])
+
+ # Now that the snapshot_node is synced, verify the ibd_node can sync from it
+ self.connect_nodes(snapshot_node.index, ibd_node.index)
+ assert 'NETWORK' in ibd_node.getpeerinfo()[0]['servicesnames']
+ self.sync_blocks(nodes=(ibd_node, snapshot_node))
+
+ def assert_only_network_limited_service(self, node):
+ node_services = node.getnetworkinfo()['localservicesnames']
+ assert 'NETWORK' not in node_services
+ assert 'NETWORK_LIMITED' in node_services
+
def run_test(self):
"""
Bring up two (disconnected) nodes, mine some new blocks on the first,
@@ -295,7 +374,7 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(n1.getblockcount(), START_HEIGHT)
self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}")
- dump_output = n0.dumptxoutset('utxos.dat')
+ dump_output = n0.dumptxoutset('utxos.dat', "latest")
self.log.info("Test loading snapshot when the node tip is on the same block as the snapshot")
assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT)
@@ -320,12 +399,16 @@ class AssumeutxoTest(BitcoinTestFramework):
for n in self.nodes:
assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT)
- assert_equal(
- dump_output['txoutset_hash'],
- "a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27")
- assert_equal(dump_output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx)
assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
+ def check_dump_output(output):
+ assert_equal(
+ output['txoutset_hash'],
+ "a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27")
+ assert_equal(output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx)
+
+ check_dump_output(dump_output)
+
# Mine more blocks on top of the snapshot that n1 hasn't yet seen. This
# will allow us to test n1's sync-to-tip on top of a snapshot.
self.generate(n0, nblocks=100, sync_fun=self.no_op)
@@ -335,6 +418,39 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+ self.log.info(f"Check that dumptxoutset works for past block heights")
+ # rollback defaults to the snapshot base height
+ dump_output2 = n0.dumptxoutset('utxos2.dat', "rollback")
+ check_dump_output(dump_output2)
+ assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output2['path']))
+
+ # Rollback with specific height
+ dump_output3 = n0.dumptxoutset('utxos3.dat', rollback=SNAPSHOT_BASE_HEIGHT)
+ check_dump_output(dump_output3)
+ assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output3['path']))
+
+ # Specified height that is not a snapshot height
+ prev_snap_height = SNAPSHOT_BASE_HEIGHT - 1
+ dump_output4 = n0.dumptxoutset(path='utxos4.dat', rollback=prev_snap_height)
+ assert_equal(
+ dump_output4['txoutset_hash'],
+ "8a1db0d6e958ce0d7c963bc6fc91ead596c027129bacec68acc40351037b09d7")
+ assert sha256sum_file(dump_output['path']) != sha256sum_file(dump_output4['path'])
+
+ # Use a hash instead of a height
+ prev_snap_hash = n0.getblockhash(prev_snap_height)
+ dump_output5 = n0.dumptxoutset('utxos5.dat', rollback=prev_snap_hash)
+ assert_equal(sha256sum_file(dump_output4['path']), sha256sum_file(dump_output5['path']))
+
+ # TODO: This is a hack to set m_best_header to the correct value after
+ # dumptxoutset/reconsiderblock. Otherwise the wrong error messages are
+ # returned in following tests. It can be removed once this bug is
+ # fixed. See also https://github.com/bitcoin/bitcoin/issues/26245
+ self.restart_node(0, ["-reindex"])
+
+ # Ensure n0 is back at the tip
+ assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+
self.test_snapshot_with_less_work(dump_output['path'])
self.test_invalid_mempool_state(dump_output['path'])
self.test_invalid_snapshot_scenarios(dump_output['path'])
@@ -343,6 +459,9 @@ class AssumeutxoTest(BitcoinTestFramework):
self.test_snapshot_block_invalidated(dump_output['path'])
self.test_snapshot_not_on_most_work_chain(dump_output['path'])
+ # Prune-node sanity check
+ assert 'NETWORK' not in n1.getnetworkinfo()['localservicesnames']
+
self.log.info(f"Loading snapshot into second node from {dump_output['path']}")
# This node's tip is on an ancestor block of the snapshot, which should
# be the normal case
@@ -350,6 +469,10 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+ self.log.info("Confirm that local services remain unchanged")
+ # Since n1 is a pruned node, the 'NETWORK' service flag must always be unset.
+ self.assert_only_network_limited_service(n1)
+
self.log.info("Check that UTXO-querying RPCs operate on snapshot chainstate")
snapshot_hash = loaded['tip_hash']
snapshot_num_coins = loaded['coins_loaded']
@@ -362,7 +485,7 @@ class AssumeutxoTest(BitcoinTestFramework):
# find coinbase output at snapshot height on node0 and scan for it on node1,
# where the block is not available, but the snapshot was loaded successfully
coinbase_tx = n0.getblock(snapshot_hash, verbosity=2)['tx'][0]
- assert_raises_rpc_error(-1, "Block not found on disk", n1.getblock, snapshot_hash)
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, snapshot_hash)
coinbase_output_descriptor = coinbase_tx['vout'][0]['scriptPubKey']['desc']
scan_result = n1.scantxoutset('start', [coinbase_output_descriptor])
assert_equal(scan_result['success'], True)
@@ -434,7 +557,7 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info("Submit a spending transaction for a snapshot chainstate coin to the mempool")
# spend the coinbase output of the first block that is not available on node1
spend_coin_blockhash = n1.getblockhash(START_HEIGHT + 1)
- assert_raises_rpc_error(-1, "Block not found on disk", n1.getblock, spend_coin_blockhash)
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, spend_coin_blockhash)
prev_tx = n0.getblock(spend_coin_blockhash, 3)['tx'][0]
prevout = {"txid": prev_tx['txid'], "vout": 0, "scriptPubKey": prev_tx['vout'][0]['scriptPubKey']['hex']}
privkey = n0.get_deterministic_priv_key().key
@@ -453,6 +576,9 @@ class AssumeutxoTest(BitcoinTestFramework):
self.restart_node(1, extra_args=[
f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]])
+ # Upon restart during snapshot tip sync, the node must remain in 'limited' mode.
+ self.assert_only_network_limited_service(n1)
+
# Finally connect the nodes and let them sync.
#
# Set `wait_for_connect=False` to avoid a race between performing connection
@@ -469,6 +595,9 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info("Restarted node before snapshot validation completed, reloading...")
self.restart_node(1, extra_args=self.extra_args[1])
+ # Upon restart, the node must remain in 'limited' mode
+ self.assert_only_network_limited_service(n1)
+
# Send snapshot block to n1 out of order. This makes the test less
# realistic because normally the snapshot block is one of the last
# blocks downloaded, but its useful to test because it triggers more
@@ -487,6 +616,10 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info("Ensuring background validation completes")
self.wait_until(lambda: len(n1.getchainstates()['chainstates']) == 1)
+ # Since n1 is a pruned node, it will not signal NODE_NETWORK after
+ # completing the background sync.
+ self.assert_only_network_limited_service(n1)
+
# Ensure indexes have synced.
completed_idx_state = {
'basic block filter index': COMPLETE_IDX,
@@ -517,12 +650,18 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info("-- Testing all indexes + reindex")
assert_equal(n2.getblockcount(), START_HEIGHT)
+ assert 'NETWORK' in n2.getnetworkinfo()['localservicesnames'] # sanity check
self.log.info(f"Loading snapshot into third node from {dump_output['path']}")
loaded = n2.loadtxoutset(dump_output['path'])
assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+ # Even though n2 is a full node, it will unset the 'NETWORK' service flag during snapshot loading.
+ # This indicates to other peers that the node will temporarily not provide historical blocks.
+ self.log.info("Check node2 updated the local services during snapshot load")
+ self.assert_only_network_limited_service(n2)
+
for reindex_arg in ['-reindex=1', '-reindex-chainstate=1']:
self.log.info(f"Check that restarting with {reindex_arg} will delete the snapshot chainstate")
self.restart_node(2, extra_args=[reindex_arg, *self.extra_args[2]])
@@ -546,6 +685,11 @@ class AssumeutxoTest(BitcoinTestFramework):
msg = "Unable to load UTXO snapshot: Can't activate a snapshot-based chainstate more than once"
assert_raises_rpc_error(-32603, msg, n2.loadtxoutset, dump_output['path'])
+ # Upon restart, the node must stay in 'limited' mode until the background
+ # chain sync completes.
+ self.restart_node(2, extra_args=self.extra_args[2])
+ self.assert_only_network_limited_service(n2)
+
self.connect_nodes(0, 2)
self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
self.sync_blocks(nodes=(n0, n2))
@@ -553,6 +697,9 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info("Ensuring background validation completes")
self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)
+ # Once background chain sync completes, the full node must start offering historical blocks again.
+ self.wait_until(lambda: {'NETWORK', 'NETWORK_LIMITED'}.issubset(n2.getnetworkinfo()['localservicesnames']))
+
completed_idx_state = {
'basic block filter index': COMPLETE_IDX,
'coinstatsindex': COMPLETE_IDX,
@@ -587,6 +734,9 @@ class AssumeutxoTest(BitcoinTestFramework):
self.test_snapshot_in_a_divergent_chain(dump_output['path'])
+ # The following test cleans node2 and node3 chain directories.
+ self.test_sync_from_assumeutxo_node(snapshot=dump_output)
+
@dataclass
class Block:
hash: str
diff --git a/test/functional/feature_blocksxor.py b/test/functional/feature_blocksxor.py
index 7698a66ec4..9824bf9715 100755
--- a/test/functional/feature_blocksxor.py
+++ b/test/functional/feature_blocksxor.py
@@ -31,7 +31,7 @@ class BlocksXORTest(BitcoinTestFramework):
node = self.nodes[0]
wallet = MiniWallet(node)
for _ in range(5):
- wallet.send_self_transfer(from_node=node, target_weight=80000)
+ wallet.send_self_transfer(from_node=node, target_vsize=20000)
self.generate(wallet, 1)
block_files = list(node.blocks_path.glob('blk[0-9][0-9][0-9][0-9][0-9].dat'))
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index bb20e2baa8..44c7edf962 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -153,6 +153,13 @@ class ConfArgsTest(BitcoinTestFramework):
expected_msg='Error: Error parsing command line arguments: Can not set -proxy with no value. Please specify value with -proxy=value.',
extra_args=['-proxy'],
)
+ # Provide a value other than 1 to the negated -wallet option
+ if self.is_wallet_compiled():
+ for value in [0, 'not_a_boolean']:
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg="Error: Invalid value detected for '-wallet' or '-nowallet'. '-wallet' requires a string value, while '-nowallet' accepts only '1' to disable all wallets",
+ extra_args=[f'-nowallet={value}'],
+ )
def test_log_buffer(self):
self.stop_node(0)
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index a3dcb7afda..974d8268a2 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from copy import deepcopy
-from decimal import Decimal
+from decimal import Decimal, ROUND_DOWN
import os
import random
import time
@@ -40,7 +40,7 @@ def small_txpuzzle_randfee(
# Exponentially distributed from 1-128 * fee_increment
rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
- fee = min_fee - fee_increment + satoshi_round(rand_fee)
+ fee = min_fee - fee_increment + satoshi_round(rand_fee, rounding=ROUND_DOWN)
utxos_to_spend = []
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
@@ -398,6 +398,7 @@ class EstimateFeeTest(BitcoinTestFramework):
self.start_node(0)
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
+ self.sync_blocks()
assert_equal(self.nodes[0].estimatesmartfee(1)["errors"], ["Insufficient data or no feerate found"])
def broadcast_and_mine(self, broadcaster, miner, feerate, count):
diff --git a/test/functional/feature_framework_miniwallet.py b/test/functional/feature_framework_miniwallet.py
index d1aa24e7cd..f723f7f31e 100755
--- a/test/functional/feature_framework_miniwallet.py
+++ b/test/functional/feature_framework_miniwallet.py
@@ -9,7 +9,7 @@ import string
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
- assert_greater_than_or_equal,
+ assert_equal,
)
from test_framework.wallet import (
MiniWallet,
@@ -22,17 +22,15 @@ class FeatureFrameworkMiniWalletTest(BitcoinTestFramework):
self.num_nodes = 1
def test_tx_padding(self):
- """Verify that MiniWallet's transaction padding (`target_weight` parameter)
- works accurately enough (i.e. at most 3 WUs higher) with all modes."""
+ """Verify that MiniWallet's transaction padding (`target_vsize` parameter)
+ works accurately with all modes."""
for mode_name, wallet in self.wallets:
self.log.info(f"Test tx padding with MiniWallet mode {mode_name}...")
utxo = wallet.get_utxo(mark_as_spent=False)
- for target_weight in [1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 4000000,
- 989, 2001, 4337, 13371, 23219, 49153, 102035, 223419, 3999989]:
- tx = wallet.create_self_transfer(utxo_to_spend=utxo, target_weight=target_weight)["tx"]
- self.log.debug(f"-> target weight: {target_weight}, actual weight: {tx.get_weight()}")
- assert_greater_than_or_equal(tx.get_weight(), target_weight)
- assert_greater_than_or_equal(target_weight + 3, tx.get_weight())
+ for target_vsize in [250, 500, 1250, 2500, 5000, 12500, 25000, 50000, 1000000,
+ 248, 501, 1085, 3343, 5805, 12289, 25509, 55855, 999998]:
+ tx = wallet.create_self_transfer(utxo_to_spend=utxo, target_vsize=target_vsize)["tx"]
+ assert_equal(tx.get_vsize(), target_vsize)
def test_wallet_tagging(self):
"""Verify that tagged wallet instances are able to send funds."""
diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py
index 2189eac7dd..a7294944bf 100755
--- a/test/functional/feature_settings.py
+++ b/test/functional/feature_settings.py
@@ -13,11 +13,32 @@ from test_framework.util import assert_equal
class SettingsTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ self.add_wallet_options(parser)
+
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.wallet_names = []
+ def test_wallet_settings(self, settings_path):
+ if not self.is_wallet_compiled():
+ return
+
+ self.log.info("Testing wallet settings..")
+ node = self.nodes[0]
+ # Create wallet to use it during tests
+ self.start_node(0)
+ node.createwallet(wallet_name='w1')
+ self.stop_node(0)
+
+ # Verify wallet settings can only be strings (either names or paths), not booleans, numbers, or anything else.
+ for wallets_data in [[10], [True], [[]], [{}], ["w1", 10], ["w1", False]]:
+ with settings_path.open("w") as fp:
+ json.dump({"wallet": wallets_data}, fp)
+ node.assert_start_raises_init_error(expected_msg="Error: Invalid value detected for '-wallet' or '-nowallet'. '-wallet' requires a string value, while '-nowallet' accepts only '1' to disable all wallets",
+ extra_args=[f'-settings={settings_path}'])
+
def run_test(self):
node, = self.nodes
settings = node.chain_path / "settings.json"
@@ -86,6 +107,8 @@ class SettingsTest(BitcoinTestFramework):
self.start_node(0, extra_args=[f"-settings={altsettings}"])
self.stop_node(0)
+ self.test_wallet_settings(settings)
+
if __name__ == '__main__':
SettingsTest(__file__).main()
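For context on what test_wallet_settings writes to disk, the sketch below shows a settings.json shape the node accepts next to the shapes the new check rejects (plain Python; the file path is hypothetical):

    import json
    from pathlib import Path

    settings_path = Path("settings.json")  # hypothetical location, for illustration only

    # Accepted: the "wallet" setting is a list of strings (wallet names or paths).
    with settings_path.open("w") as fp:
        json.dump({"wallet": ["w1", "wallets/w2"]}, fp)

    # Rejected by the init check exercised above: any non-string entry.
    for bad in [[10], [True], [[]], [{}], ["w1", 10], ["w1", False]]:
        with settings_path.open("w") as fp:
            json.dump({"wallet": bad}, fp)
        # Starting bitcoind with -settings=<settings_path> would now fail with the
        # "Invalid value detected for '-wallet' or '-nowallet'" init error.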
diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py
index e7113f8335..7194c8ece4 100755
--- a/test/functional/interface_bitcoin_cli.py
+++ b/test/functional/interface_bitcoin_cli.py
@@ -30,7 +30,12 @@ JSON_PARSING_ERROR = 'error: Error parsing JSON: foo'
BLOCKS_VALUE_OF_ZERO = 'error: the first argument (number of blocks to generate, default: 1) must be an integer value greater than zero'
TOO_MANY_ARGS = 'error: too many arguments (maximum 2 for nblocks and maxtries)'
WALLET_NOT_LOADED = 'Requested wallet does not exist or is not loaded'
-WALLET_NOT_SPECIFIED = 'Wallet file not specified'
+WALLET_NOT_SPECIFIED = (
+ "Multiple wallets are loaded. Please select which wallet to use by requesting the RPC "
+ "through the /wallet/<walletname> URI path. Or for the CLI, specify the \"-rpcwallet=<walletname>\" "
+ "option before the command (run \"bitcoin-cli -h\" for help or \"bitcoin-cli listwallets\" to see "
+ "which wallets are currently loaded)."
+)
def cli_get_info_string_to_dict(cli_get_info_string):
@@ -331,6 +336,10 @@ class TestBitcoinCli(BitcoinTestFramework):
n4 = 10
blocks = self.nodes[0].getblockcount()
+ self.log.info('Test that -generate -rpcwallet=<filename> raises an RPC error')
+ wallet2_path = f'-rpcwallet={self.nodes[0].wallets_path / wallets[2] / self.wallet_data_filename}'
+ assert_raises_rpc_error(-18, WALLET_NOT_LOADED, self.nodes[0].cli(wallet2_path, '-generate').echo)
+
self.log.info('Test -generate -rpcwallet with no args')
generate = self.nodes[0].cli(rpcwallet2, '-generate').send_cli()
assert_equal(set(generate.keys()), {'address', 'blocks'})
@@ -381,6 +390,9 @@ class TestBitcoinCli(BitcoinTestFramework):
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcwait', '-rpcwaittimeout=5').echo)
assert_greater_than_or_equal(time.time(), start_time + 5)
+ self.log.info("Test that only one of -addrinfo, -generate, -getinfo, -netinfo may be specified at a time")
+ assert_raises_process_error(1, "Only one of -getinfo, -netinfo may be specified", self.nodes[0].cli('-getinfo', '-netinfo').send_cli)
+
if __name__ == '__main__':
TestBitcoinCli(__file__).main()
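The new assertion relies on bitcoin-cli rejecting more than one of its info-style modes in a single invocation. A rough sketch of that kind of mutual-exclusion check (plain Python; not the actual bitcoin-cli argument handling):

    # Illustrative only: reject combinations of mutually exclusive CLI modes.
    def check_exclusive_modes(args):
        modes = [a for a in ("-addrinfo", "-generate", "-getinfo", "-netinfo") if a in args]
        if len(modes) > 1:
            raise SystemExit(f"Only one of {', '.join(modes)} may be specified")

    check_exclusive_modes(["-getinfo"])  # fine
    try:
        check_exclusive_modes(["-getinfo", "-netinfo"])
    except SystemExit as err:
        print(err)  # Only one of -getinfo, -netinfo may be specified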
diff --git a/test/functional/interface_usdt_coinselection.py b/test/functional/interface_usdt_coinselection.py
index dc40986a75..f684848aed 100755
--- a/test/functional/interface_usdt_coinselection.py
+++ b/test/functional/interface_usdt_coinselection.py
@@ -181,7 +181,7 @@ class CoinSelectionTracepointTest(BitcoinTestFramework):
# 5. aps_create_tx_internal (type 4)
wallet.sendtoaddress(wallet.getnewaddress(), 10)
events = self.get_tracepoints([1, 2, 3, 1, 4])
- success, use_aps, algo, waste, change_pos = self.determine_selection_from_usdt(events)
+ success, use_aps, _algo, _waste, change_pos = self.determine_selection_from_usdt(events)
assert_equal(success, True)
assert_greater_than(change_pos, -1)
@@ -190,7 +190,7 @@ class CoinSelectionTracepointTest(BitcoinTestFramework):
# 1. normal_create_tx_internal (type 2)
assert_raises_rpc_error(-6, "Insufficient funds", wallet.sendtoaddress, wallet.getnewaddress(), 102 * 50)
events = self.get_tracepoints([2])
- success, use_aps, algo, waste, change_pos = self.determine_selection_from_usdt(events)
+ success, use_aps, _algo, _waste, change_pos = self.determine_selection_from_usdt(events)
assert_equal(success, False)
self.log.info("Explicitly enabling APS results in 2 tracepoints")
@@ -200,7 +200,7 @@ class CoinSelectionTracepointTest(BitcoinTestFramework):
wallet.setwalletflag("avoid_reuse")
wallet.sendtoaddress(address=wallet.getnewaddress(), amount=10, avoid_reuse=True)
events = self.get_tracepoints([1, 2])
- success, use_aps, algo, waste, change_pos = self.determine_selection_from_usdt(events)
+ success, use_aps, _algo, _waste, change_pos = self.determine_selection_from_usdt(events)
assert_equal(success, True)
assert_equal(use_aps, None)
@@ -213,7 +213,7 @@ class CoinSelectionTracepointTest(BitcoinTestFramework):
# 5. aps_create_tx_internal (type 4)
wallet.sendtoaddress(address=wallet.getnewaddress(), amount=wallet.getbalance(), subtractfeefromamount=True, avoid_reuse=False)
events = self.get_tracepoints([1, 2, 3, 1, 4])
- success, use_aps, algo, waste, change_pos = self.determine_selection_from_usdt(events)
+ success, use_aps, _algo, _waste, change_pos = self.determine_selection_from_usdt(events)
assert_equal(success, True)
assert_equal(change_pos, -1)
@@ -223,7 +223,7 @@ class CoinSelectionTracepointTest(BitcoinTestFramework):
# 2. normal_create_tx_internal (type 2)
wallet.sendtoaddress(address=wallet.getnewaddress(), amount=wallet.getbalance(), subtractfeefromamount=True)
events = self.get_tracepoints([1, 2])
- success, use_aps, algo, waste, change_pos = self.determine_selection_from_usdt(events)
+ success, use_aps, _algo, _waste, change_pos = self.determine_selection_from_usdt(events)
assert_equal(success, True)
assert_equal(change_pos, -1)
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
index 626928a49a..a29c103c3f 100755
--- a/test/functional/mempool_limit.py
+++ b/test/functional/mempool_limit.py
@@ -55,12 +55,12 @@ class MempoolLimitTest(BitcoinTestFramework):
self.generate(node, 1)
# tx_A needs to be RBF'd, set minfee at set size
- A_weight = 1000
+ A_vsize = 250
mempoolmin_feerate = node.getmempoolinfo()["mempoolminfee"]
tx_A = self.wallet.send_self_transfer(
from_node=node,
fee_rate=mempoolmin_feerate,
- target_weight=A_weight,
+ target_vsize=A_vsize,
utxo_to_spend=rbf_utxo,
confirmed_only=True
)
@@ -68,15 +68,15 @@ class MempoolLimitTest(BitcoinTestFramework):
# RBF's tx_A, is not yet submitted
tx_B = self.wallet.create_self_transfer(
fee=tx_A["fee"] * 4,
- target_weight=A_weight,
+ target_vsize=A_vsize,
utxo_to_spend=rbf_utxo,
confirmed_only=True
)
# Spends tx_B's output, too big for cpfp carveout (because that would also increase the descendant limit by 1)
- non_cpfp_carveout_weight = 40001 # EXTRA_DESCENDANT_TX_SIZE_LIMIT + 1
+ non_cpfp_carveout_vsize = 10001 # EXTRA_DESCENDANT_TX_SIZE_LIMIT + 1
tx_C = self.wallet.create_self_transfer(
- target_weight=non_cpfp_carveout_weight,
+ target_vsize=non_cpfp_carveout_vsize,
fee_rate=mempoolmin_feerate,
utxo_to_spend=tx_B["new_utxo"],
confirmed_only=True
@@ -103,14 +103,14 @@ class MempoolLimitTest(BitcoinTestFramework):
# UTXOs to be spent by the ultimate child transaction
parent_utxos = []
- evicted_weight = 8000
+ evicted_vsize = 2000
# Mempool transaction which is evicted due to being at the "bottom" of the mempool when the
# mempool overflows and evicts by descendant score. It's important that the eviction doesn't
# happen in the middle of package evaluation, as it can invalidate the coins cache.
mempool_evicted_tx = self.wallet.send_self_transfer(
from_node=node,
fee_rate=mempoolmin_feerate,
- target_weight=evicted_weight,
+ target_vsize=evicted_vsize,
confirmed_only=True
)
# Already in mempool when package is submitted.
@@ -132,14 +132,16 @@ class MempoolLimitTest(BitcoinTestFramework):
# Series of parents that don't need CPFP and are submitted individually. Each one is large and
# high feerate, which means they should trigger eviction but not be evicted.
- parent_weight = 100000
+ parent_vsize = 25000
num_big_parents = 3
- assert_greater_than(parent_weight * num_big_parents, current_info["maxmempool"] - current_info["bytes"])
+ # The parents need to be large enough to trigger eviction
+ # (note that the mempool usage of a tx is about three times its vsize)
+ assert_greater_than(parent_vsize * num_big_parents * 3, current_info["maxmempool"] - current_info["bytes"])
parent_feerate = 100 * mempoolmin_feerate
big_parent_txids = []
for i in range(num_big_parents):
- parent = self.wallet.create_self_transfer(fee_rate=parent_feerate, target_weight=parent_weight, confirmed_only=True)
+ parent = self.wallet.create_self_transfer(fee_rate=parent_feerate, target_vsize=parent_vsize, confirmed_only=True)
parent_utxos.append(parent["new_utxo"])
package_hex.append(parent["hex"])
big_parent_txids.append(parent["txid"])
@@ -311,8 +313,9 @@ class MempoolLimitTest(BitcoinTestFramework):
entry = node.getmempoolentry(txid)
worst_feerate_btcvb = min(worst_feerate_btcvb, entry["fees"]["descendant"] / entry["descendantsize"])
# Needs to be large enough to trigger eviction
- target_weight_each = 200000
- assert_greater_than(target_weight_each * 2, node.getmempoolinfo()["maxmempool"] - node.getmempoolinfo()["bytes"])
+ # (note that the mempool usage of a tx is about three times its vsize)
+ target_vsize_each = 50000
+ assert_greater_than(target_vsize_each * 2 * 3, node.getmempoolinfo()["maxmempool"] - node.getmempoolinfo()["bytes"])
# Should be a true CPFP: parent's feerate is just below mempool min feerate
parent_feerate = mempoolmin_feerate - Decimal("0.000001") # 0.1 sats/vbyte below min feerate
# Parent + child is above mempool minimum feerate
@@ -320,8 +323,8 @@ class MempoolLimitTest(BitcoinTestFramework):
# However, when eviction is triggered, these transactions should be at the bottom.
# This assertion assumes parent and child are the same size.
miniwallet.rescan_utxos()
- tx_parent_just_below = miniwallet.create_self_transfer(fee_rate=parent_feerate, target_weight=target_weight_each)
- tx_child_just_above = miniwallet.create_self_transfer(utxo_to_spend=tx_parent_just_below["new_utxo"], fee_rate=child_feerate, target_weight=target_weight_each)
+ tx_parent_just_below = miniwallet.create_self_transfer(fee_rate=parent_feerate, target_vsize=target_vsize_each)
+ tx_child_just_above = miniwallet.create_self_transfer(utxo_to_spend=tx_parent_just_below["new_utxo"], fee_rate=child_feerate, target_vsize=target_vsize_each)
# This package ranks below the lowest descendant package in the mempool
package_fee = tx_parent_just_below["fee"] + tx_child_just_above["fee"]
package_vsize = tx_parent_just_below["tx"].get_vsize() + tx_child_just_above["tx"].get_vsize()
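The converted values keep the transaction sizes unchanged: each former target_weight is the new target_vsize times the witness scale factor of 4 (rounded up), and the eviction checks gain an explicit factor of 3 because a transaction's in-memory mempool usage is roughly three times its serialized vsize. A short check of the numbers used in this file (plain Python):

    WITNESS_SCALE_FACTOR = 4
    MEMPOOL_USAGE_FACTOR = 3  # rough in-memory overhead per vbyte, per the comments above

    old_target_weights = {"A": 1000, "non_cpfp_carveout": 40001, "evicted": 8000, "parent": 100000}
    new_target_vsizes = {name: -(-w // WITNESS_SCALE_FACTOR) for name, w in old_target_weights.items()}
    assert new_target_vsizes == {"A": 250, "non_cpfp_carveout": 10001, "evicted": 2000, "parent": 25000}

    # The eviction trigger now compares estimated memory usage rather than raw vsize:
    parent_vsize, num_big_parents = 25000, 3
    assert parent_vsize * num_big_parents * MEMPOOL_USAGE_FACTOR == 225_000  # compared against the mempool headroom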
diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py
index 6e26a684e2..3290ff43c4 100755
--- a/test/functional/mempool_package_limits.py
+++ b/test/functional/mempool_package_limits.py
@@ -4,9 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for limiting mempool and package ancestors/descendants."""
from test_framework.blocktools import COINBASE_MATURITY
-from test_framework.messages import (
- WITNESS_SCALE_FACTOR,
-)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@@ -290,19 +287,18 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
parent_utxos = []
target_vsize = 30_000
high_fee = 10 * target_vsize # 10 sats/vB
- target_weight = target_vsize * WITNESS_SCALE_FACTOR
self.log.info("Check that in-mempool and in-package ancestor size limits are calculated properly in packages")
# Mempool transactions A and B
for _ in range(2):
- bulked_tx = self.wallet.create_self_transfer(target_weight=target_weight)
+ bulked_tx = self.wallet.create_self_transfer(target_vsize=target_vsize)
self.wallet.sendrawtransaction(from_node=node, tx_hex=bulked_tx["hex"])
parent_utxos.append(bulked_tx["new_utxo"])
# Package transaction C
- pc_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=parent_utxos, fee_per_output=high_fee, target_weight=target_weight)
+ pc_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=parent_utxos, fee_per_output=high_fee, target_vsize=target_vsize)
# Package transaction D
- pd_tx = self.wallet.create_self_transfer(utxo_to_spend=pc_tx["new_utxos"][0], target_weight=target_weight)
+ pd_tx = self.wallet.create_self_transfer(utxo_to_spend=pc_tx["new_utxos"][0], target_vsize=target_vsize)
assert_equal(2, node.getmempoolinfo()["size"])
return [pc_tx["hex"], pd_tx["hex"]]
@@ -321,20 +317,19 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
node = self.nodes[0]
target_vsize = 21_000
high_fee = 10 * target_vsize # 10 sats/vB
- target_weight = target_vsize * WITNESS_SCALE_FACTOR
self.log.info("Check that in-mempool and in-package descendant sizes are calculated properly in packages")
# Top parent in mempool, Ma
- ma_tx = self.wallet.create_self_transfer_multi(num_outputs=2, fee_per_output=high_fee // 2, target_weight=target_weight)
+ ma_tx = self.wallet.create_self_transfer_multi(num_outputs=2, fee_per_output=high_fee // 2, target_vsize=target_vsize)
self.wallet.sendrawtransaction(from_node=node, tx_hex=ma_tx["hex"])
package_hex = []
for j in range(2): # Two legs (left and right)
# Mempool transaction (Mb and Mc)
- mempool_tx = self.wallet.create_self_transfer(utxo_to_spend=ma_tx["new_utxos"][j], target_weight=target_weight)
+ mempool_tx = self.wallet.create_self_transfer(utxo_to_spend=ma_tx["new_utxos"][j], target_vsize=target_vsize)
self.wallet.sendrawtransaction(from_node=node, tx_hex=mempool_tx["hex"])
# Package transaction (Pd and Pe)
- package_tx = self.wallet.create_self_transfer(utxo_to_spend=mempool_tx["new_utxo"], target_weight=target_weight)
+ package_tx = self.wallet.create_self_transfer(utxo_to_spend=mempool_tx["new_utxo"], target_vsize=target_vsize)
package_hex.append(package_tx["hex"])
assert_equal(3, node.getmempoolinfo()["size"])
diff --git a/test/functional/mempool_package_rbf.py b/test/functional/mempool_package_rbf.py
index 9b4269f0a0..a5b8fa5f87 100755
--- a/test/functional/mempool_package_rbf.py
+++ b/test/functional/mempool_package_rbf.py
@@ -189,7 +189,7 @@ class PackageRBFTest(BitcoinTestFramework):
package_hex4, package_txns4 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE)
node.submitpackage(package_hex4)
self.assert_mempool_contents(expected=package_txns4)
- package_hex5, package_txns5 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE, child_fee=DEFAULT_CHILD_FEE)
+ package_hex5, _package_txns5 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE, child_fee=DEFAULT_CHILD_FEE)
pkg_results5 = node.submitpackage(package_hex5)
assert 'package RBF failed: package feerate is less than or equal to parent feerate' in pkg_results5["package_msg"]
self.assert_mempool_contents(expected=package_txns4)
@@ -336,16 +336,16 @@ class PackageRBFTest(BitcoinTestFramework):
self.assert_mempool_contents(expected=expected_txns)
# Now make conflicting packages for each coin
- package_hex1, package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex1, _package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex1)
assert_equal(f"package RBF failed: {parent_result['tx'].rehash()} has 2 descendants, max 1 allowed", package_result["package_msg"])
- package_hex2, package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex2, _package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex2)
assert_equal(f"package RBF failed: {child_result['tx'].rehash()} has both ancestor and descendant, exceeding cluster limit of 2", package_result["package_msg"])
- package_hex3, package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex3, _package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex3)
assert_equal(f"package RBF failed: {grandchild_result['tx'].rehash()} has 2 ancestors, max 1 allowed", package_result["package_msg"])
@@ -389,15 +389,15 @@ class PackageRBFTest(BitcoinTestFramework):
self.assert_mempool_contents(expected=expected_txns)
# Now make conflicting packages for each coin
- package_hex1, package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex1, _package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex1)
assert_equal(f"package RBF failed: {parent1_result['tx'].rehash()} is not the only parent of child {child_result['tx'].rehash()}", package_result["package_msg"])
- package_hex2, package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex2, _package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex2)
assert_equal(f"package RBF failed: {parent2_result['tx'].rehash()} is not the only parent of child {child_result['tx'].rehash()}", package_result["package_msg"])
- package_hex3, package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex3, _package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex3)
assert_equal(f"package RBF failed: {child_result['tx'].rehash()} has 2 ancestors, max 1 allowed", package_result["package_msg"])
@@ -443,15 +443,15 @@ class PackageRBFTest(BitcoinTestFramework):
self.assert_mempool_contents(expected=expected_txns)
# Now make conflicting packages for each coin
- package_hex1, package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex1, _package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex1)
assert_equal(f"package RBF failed: {parent_result['tx'].rehash()} has 2 descendants, max 1 allowed", package_result["package_msg"])
- package_hex2, package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex2, _package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex2)
assert_equal(f"package RBF failed: {child1_result['tx'].rehash()} is not the only child of parent {parent_result['tx'].rehash()}", package_result["package_msg"])
- package_hex3, package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex3, _package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
package_result = node.submitpackage(package_hex3)
assert_equal(f"package RBF failed: {child2_result['tx'].rehash()} is not the only child of parent {parent_result['tx'].rehash()}", package_result["package_msg"])
@@ -519,7 +519,7 @@ class PackageRBFTest(BitcoinTestFramework):
# Package 2 feerate is below the feerate of directly conflicted parent, so it fails even though
# total fees are higher than the original package
- package_hex2, package_txns2 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE - Decimal("0.00000001"), child_fee=DEFAULT_CHILD_FEE)
+ package_hex2, _package_txns2 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE - Decimal("0.00000001"), child_fee=DEFAULT_CHILD_FEE)
pkg_results2 = node.submitpackage(package_hex2)
assert_equal(pkg_results2["package_msg"], 'package RBF failed: insufficient feerate: does not improve feerate diagram')
self.assert_mempool_contents(expected=package_txns1)
@@ -554,7 +554,7 @@ class PackageRBFTest(BitcoinTestFramework):
self.generate(node, 1)
def test_child_conflicts_parent_mempool_ancestor(self):
- fill_mempool(self, self.nodes[0])
+ fill_mempool(self, self.nodes[0], tx_sync_fun=self.no_op)
# Reset coins since we filled the mempool with current coins
self.coins = self.wallet.get_utxos(mark_as_spent=False, confirmed_only=True)
diff --git a/test/functional/mempool_sigoplimit.py b/test/functional/mempool_sigoplimit.py
index 4656176a75..47df0c614a 100755
--- a/test/functional/mempool_sigoplimit.py
+++ b/test/functional/mempool_sigoplimit.py
@@ -154,7 +154,7 @@ class BytesPerSigOpTest(BitcoinTestFramework):
return (tx_utxo, tx)
tx_parent_utxo, tx_parent = create_bare_multisig_tx()
- tx_child_utxo, tx_child = create_bare_multisig_tx(tx_parent_utxo)
+ _tx_child_utxo, tx_child = create_bare_multisig_tx(tx_parent_utxo)
# Separately, the parent tx is ok
parent_individual_testres = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex()])[0]
diff --git a/test/functional/mempool_truc.py b/test/functional/mempool_truc.py
index 28f3256ef1..54a258215d 100755
--- a/test/functional/mempool_truc.py
+++ b/test/functional/mempool_truc.py
@@ -6,7 +6,6 @@ from decimal import Decimal
from test_framework.messages import (
MAX_BIP125_RBF_SEQUENCE,
- WITNESS_SCALE_FACTOR,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -23,6 +22,7 @@ from test_framework.wallet import (
MAX_REPLACEMENT_CANDIDATES = 100
TRUC_MAX_VSIZE = 10000
+TRUC_CHILD_MAX_VSIZE = 1000
def cleanup(extra_args=None):
def decorator(func):
@@ -55,14 +55,14 @@ class MempoolTRUC(BitcoinTestFramework):
def test_truc_max_vsize(self):
node = self.nodes[0]
self.log.info("Test TRUC-specific maximum transaction vsize")
- tx_v3_heavy = self.wallet.create_self_transfer(target_weight=(TRUC_MAX_VSIZE + 1) * WITNESS_SCALE_FACTOR, version=3)
+ tx_v3_heavy = self.wallet.create_self_transfer(target_vsize=TRUC_MAX_VSIZE + 1, version=3)
assert_greater_than_or_equal(tx_v3_heavy["tx"].get_vsize(), TRUC_MAX_VSIZE)
expected_error_heavy = f"TRUC-violation, version=3 tx {tx_v3_heavy['txid']} (wtxid={tx_v3_heavy['wtxid']}) is too big"
assert_raises_rpc_error(-26, expected_error_heavy, node.sendrawtransaction, tx_v3_heavy["hex"])
self.check_mempool([])
# Ensure we are hitting the TRUC-specific limit and not something else
- tx_v2_heavy = self.wallet.send_self_transfer(from_node=node, target_weight=(TRUC_MAX_VSIZE + 1) * WITNESS_SCALE_FACTOR, version=2)
+ tx_v2_heavy = self.wallet.send_self_transfer(from_node=node, target_vsize=TRUC_MAX_VSIZE + 1, version=2)
self.check_mempool([tx_v2_heavy["txid"]])
@cleanup(extra_args=["-datacarriersize=1000"])
@@ -73,10 +73,10 @@ class MempoolTRUC(BitcoinTestFramework):
self.check_mempool([tx_v3_parent_normal["txid"]])
tx_v3_child_heavy = self.wallet.create_self_transfer(
utxo_to_spend=tx_v3_parent_normal["new_utxo"],
- target_weight=4004,
+ target_vsize=TRUC_CHILD_MAX_VSIZE + 1,
version=3
)
- assert_greater_than_or_equal(tx_v3_child_heavy["tx"].get_vsize(), 1000)
+ assert_greater_than_or_equal(tx_v3_child_heavy["tx"].get_vsize(), TRUC_CHILD_MAX_VSIZE)
expected_error_child_heavy = f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big"
assert_raises_rpc_error(-26, expected_error_child_heavy, node.sendrawtransaction, tx_v3_child_heavy["hex"])
self.check_mempool([tx_v3_parent_normal["txid"]])
@@ -88,20 +88,21 @@ class MempoolTRUC(BitcoinTestFramework):
from_node=node,
fee_rate=DEFAULT_FEE,
utxo_to_spend=tx_v3_parent_normal["new_utxo"],
- target_weight=3987,
+ target_vsize=TRUC_CHILD_MAX_VSIZE - 3,
version=3
)
- assert_greater_than_or_equal(1000, tx_v3_child_almost_heavy["tx"].get_vsize())
+ assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_almost_heavy["tx"].get_vsize())
self.check_mempool([tx_v3_parent_normal["txid"], tx_v3_child_almost_heavy["txid"]])
assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2)
tx_v3_child_almost_heavy_rbf = self.wallet.send_self_transfer(
from_node=node,
fee_rate=DEFAULT_FEE * 2,
utxo_to_spend=tx_v3_parent_normal["new_utxo"],
- target_weight=3500,
+ target_vsize=875,
version=3
)
- assert_greater_than_or_equal(tx_v3_child_almost_heavy["tx"].get_vsize() + tx_v3_child_almost_heavy_rbf["tx"].get_vsize(), 1000)
+ assert_greater_than_or_equal(tx_v3_child_almost_heavy["tx"].get_vsize() + tx_v3_child_almost_heavy_rbf["tx"].get_vsize(),
+ TRUC_CHILD_MAX_VSIZE)
self.check_mempool([tx_v3_parent_normal["txid"], tx_v3_child_almost_heavy_rbf["txid"]])
assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2)
@@ -199,8 +200,8 @@ class MempoolTRUC(BitcoinTestFramework):
self.check_mempool([])
tx_v2_from_v3 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block["new_utxo"], version=2)
tx_v3_from_v2 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v2_block["new_utxo"], version=3)
- tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_weight=5000, version=3)
- assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], 1000)
+ tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_vsize=1250, version=3)
+ assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], TRUC_CHILD_MAX_VSIZE)
self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
node.invalidateblock(block[0])
self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
@@ -217,22 +218,22 @@ class MempoolTRUC(BitcoinTestFramework):
"""
node = self.nodes[0]
self.log.info("Test that a decreased limitdescendantsize also applies to TRUC child")
- parent_target_weight = 9990 * WITNESS_SCALE_FACTOR
- child_target_weight = 500 * WITNESS_SCALE_FACTOR
+ parent_target_vsize = 9990
+ child_target_vsize = 500
tx_v3_parent_large1 = self.wallet.send_self_transfer(
from_node=node,
- target_weight=parent_target_weight,
+ target_vsize=parent_target_vsize,
version=3
)
tx_v3_child_large1 = self.wallet.create_self_transfer(
utxo_to_spend=tx_v3_parent_large1["new_utxo"],
- target_weight=child_target_weight,
+ target_vsize=child_target_vsize,
version=3
)
# Parent and child are within v3 limits, but parent's 10kvB descendant limit is exceeded
assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large1["tx"].get_vsize())
- assert_greater_than_or_equal(1000, tx_v3_child_large1["tx"].get_vsize())
+ assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_large1["tx"].get_vsize())
assert_greater_than(tx_v3_parent_large1["tx"].get_vsize() + tx_v3_child_large1["tx"].get_vsize(), 10000)
assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds descendant size limit for tx {tx_v3_parent_large1['txid']}", node.sendrawtransaction, tx_v3_child_large1["hex"])
@@ -244,18 +245,18 @@ class MempoolTRUC(BitcoinTestFramework):
self.restart_node(0, extra_args=["-limitancestorsize=10", "-datacarriersize=40000"])
tx_v3_parent_large2 = self.wallet.send_self_transfer(
from_node=node,
- target_weight=parent_target_weight,
+ target_vsize=parent_target_vsize,
version=3
)
tx_v3_child_large2 = self.wallet.create_self_transfer(
utxo_to_spend=tx_v3_parent_large2["new_utxo"],
- target_weight=child_target_weight,
+ target_vsize=child_target_vsize,
version=3
)
# Parent and child are within TRUC limits
assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large2["tx"].get_vsize())
- assert_greater_than_or_equal(1000, tx_v3_child_large2["tx"].get_vsize())
+ assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_large2["tx"].get_vsize())
assert_greater_than(tx_v3_parent_large2["tx"].get_vsize() + tx_v3_child_large2["tx"].get_vsize(), 10000)
assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds ancestor size limit", node.sendrawtransaction, tx_v3_child_large2["hex"])
@@ -267,12 +268,12 @@ class MempoolTRUC(BitcoinTestFramework):
node = self.nodes[0]
tx_v3_parent_normal = self.wallet.create_self_transfer(
fee_rate=0,
- target_weight=4004,
+ target_vsize=1001,
version=3
)
tx_v3_parent_2_normal = self.wallet.create_self_transfer(
fee_rate=0,
- target_weight=4004,
+ target_vsize=1001,
version=3
)
tx_v3_child_multiparent = self.wallet.create_self_transfer_multi(
@@ -282,7 +283,7 @@ class MempoolTRUC(BitcoinTestFramework):
)
tx_v3_child_heavy = self.wallet.create_self_transfer_multi(
utxos_to_spend=[tx_v3_parent_normal["new_utxo"]],
- target_weight=4004,
+ target_vsize=TRUC_CHILD_MAX_VSIZE + 1,
fee_per_output=10000,
version=3
)
@@ -294,7 +295,7 @@ class MempoolTRUC(BitcoinTestFramework):
self.check_mempool([])
result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_child_heavy["hex"]])
- # tx_v3_child_heavy is heavy based on weight, not sigops.
+ # tx_v3_child_heavy is heavy based on vsize, not sigops.
assert_equal(result['package_msg'], f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big: {tx_v3_child_heavy['tx'].get_vsize()} > 1000 virtual bytes")
self.check_mempool([])
@@ -416,7 +417,7 @@ class MempoolTRUC(BitcoinTestFramework):
node = self.nodes[0]
tx_v3_parent = self.wallet.create_self_transfer(
fee_rate=0,
- target_weight=4004,
+ target_vsize=1001,
version=3
)
tx_v2_child = self.wallet.create_self_transfer_multi(
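The new TRUC_CHILD_MAX_VSIZE constant makes the old magic numbers legible: 4004 WU is exactly one vbyte over the 1000 vB TRUC child limit, (TRUC_MAX_VSIZE + 1) * 4 WU is one vbyte over the 10000 vB parent limit, and 3500 WU is the 875 vB RBF child. A quick check of those equivalences (plain Python):

    WITNESS_SCALE_FACTOR = 4
    TRUC_MAX_VSIZE = 10000
    TRUC_CHILD_MAX_VSIZE = 1000

    def vsize_from_weight(weight):
        return (weight + WITNESS_SCALE_FACTOR - 1) // WITNESS_SCALE_FACTOR

    assert vsize_from_weight(4004) == TRUC_CHILD_MAX_VSIZE + 1                                   # old "heavy child" target
    assert vsize_from_weight((TRUC_MAX_VSIZE + 1) * WITNESS_SCALE_FACTOR) == TRUC_MAX_VSIZE + 1  # old "heavy parent" target
    assert vsize_from_weight(3500) == 875                                                        # old RBF child target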
diff --git a/test/functional/p2p_1p1c_network.py b/test/functional/p2p_1p1c_network.py
index c3cdb3e0b3..cdc4e1691d 100755
--- a/test/functional/p2p_1p1c_network.py
+++ b/test/functional/p2p_1p1c_network.py
@@ -49,9 +49,6 @@ class PackageRelayTest(BitcoinTestFramework):
def raise_network_minfee(self):
fill_mempool(self, self.nodes[0])
- self.log.debug("Wait for the network to sync mempools")
- self.sync_mempools()
-
self.log.debug("Check that all nodes' mempool minimum feerates are above min relay feerate")
for node in self.nodes:
assert_equal(node.getmempoolinfo()['minrelaytxfee'], FEERATE_1SAT_VB)
@@ -107,7 +104,7 @@ class PackageRelayTest(BitcoinTestFramework):
# 3: 2-parent-1-child package. Both parents are above mempool min feerate. No package submission happens.
# We require packages to be child-with-unconfirmed-parents and only allow 1-parent-1-child packages.
- package_hex_3, parent_31, parent_32, child_3 = self.create_package_2p1c(self.wallet)
+ package_hex_3, parent_31, _parent_32, child_3 = self.create_package_2p1c(self.wallet)
# 4: parent + child package where the child spends 2 different outputs from the parent.
package_hex_4, parent_4, child_4 = self.create_package_2outs(self.wallet)
diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py
index df6e6a2e28..7788be6adb 100755
--- a/test/functional/p2p_node_network_limited.py
+++ b/test/functional/p2p_node_network_limited.py
@@ -102,10 +102,10 @@ class NodeNetworkLimitedTest(BitcoinTestFramework):
tip_height = pruned_node.getblockcount()
limit_buffer = 2
# Prevent races by waiting for the tip to arrive first
- self.wait_until(lambda: not try_rpc(-1, "Block not found", full_node.getblock, pruned_node.getbestblockhash()))
+ self.wait_until(lambda: not try_rpc(-1, "Block not available (not fully downloaded)", full_node.getblock, pruned_node.getbestblockhash()))
for height in range(start_height_full_node + 1, tip_height + 1):
if height <= tip_height - (NODE_NETWORK_LIMITED_MIN_BLOCKS - limit_buffer):
- assert_raises_rpc_error(-1, "Block not found on disk", full_node.getblock, pruned_node.getblockhash(height))
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", full_node.getblock, pruned_node.getblockhash(height))
else:
full_node.getblock(pruned_node.getblockhash(height)) # just assert it does not throw an exception
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
index c881dd6ff4..c37061c307 100755
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -14,8 +14,10 @@ from test_framework.p2p import P2PDataStore
from test_framework.test_node import ErrorMatch
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
+ append_config,
assert_equal,
p2p_port,
+ tor_port,
)
from test_framework.wallet import MiniWallet
@@ -57,11 +59,14 @@ class P2PPermissionsTests(BitcoinTestFramework):
# by modifying the configuration file.
ip_port = "127.0.0.1:{}".format(p2p_port(1))
self.nodes[1].replace_in_config([("bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)])
+ # Explicitly bind the tor port to prevent collisions with the default tor port
+ append_config(self.nodes[1].datadir_path, [f"bind=127.0.0.1:{tor_port(self.nodes[1].index)}=onion"])
self.checkpermission(
["-whitelist=noban@127.0.0.1"],
# Check parameter interaction forcerelay should activate relay
["noban", "bloomfilter", "forcerelay", "relay", "download"])
self.nodes[1].replace_in_config([("whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")])
+ self.nodes[1].replace_in_config([(f"bind=127.0.0.1:{tor_port(self.nodes[1].index)}=onion", "")])
self.checkpermission(
# legacy whitelistrelay should be ignored
diff --git a/test/functional/p2p_seednode.py b/test/functional/p2p_seednode.py
new file mode 100755
index 0000000000..6c510a6a0b
--- /dev/null
+++ b/test/functional/p2p_seednode.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# Copyright (c) 2019-2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+"""
+Test seednode interaction with the AddrMan
+"""
+import random
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+
+ADD_NEXT_SEEDNODE = 10
+
+
+class P2PSeedNodes(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.disable_autoconnect = False
+
+ def test_no_seednode(self):
+ # Check that if no seednode is provided, the node proceeds as usual (without waiting)
+ with self.nodes[0].assert_debug_log(expected_msgs=[], unexpected_msgs=["Empty addrman, adding seednode", f"Couldn't connect to peers from addrman after {ADD_NEXT_SEEDNODE} seconds. Adding seednode"], timeout=ADD_NEXT_SEEDNODE):
+ self.restart_node(0)
+
+ def test_seednode_empty_addrman(self):
+ seed_node = "0.0.0.1"
+ # Check that the seednode is added to m_addr_fetches on bootstrap on an empty addrman
+ with self.nodes[0].assert_debug_log(expected_msgs=[f"Empty addrman, adding seednode ({seed_node}) to addrfetch"], timeout=ADD_NEXT_SEEDNODE):
+ self.restart_node(0, extra_args=[f'-seednode={seed_node}'])
+
+ def test_seednode_addrman_unreachable_peers(self):
+ seed_node = "0.0.0.2"
+ node = self.nodes[0]
+ # Fill the addrman with unreachable nodes
+ for i in range(10):
+ ip = f"{random.randrange(128,169)}.{random.randrange(1,255)}.{random.randrange(1,255)}.{random.randrange(1,255)}"
+ port = 8333 + i
+ node.addpeeraddress(ip, port)
+
+ # Restart the node so seednode is processed again
+ with node.assert_debug_log(expected_msgs=[f"Couldn't connect to peers from addrman after {ADD_NEXT_SEEDNODE} seconds. Adding seednode ({seed_node}) to addrfetch"], unexpected_msgs=["Empty addrman, adding seednode"], timeout=ADD_NEXT_SEEDNODE * 1.5):
+ self.restart_node(0, extra_args=[f'-seednode={seed_node}'])
+ node.setmocktime(int(time.time()) + ADD_NEXT_SEEDNODE + 1)
+
+ def run_test(self):
+ self.test_no_seednode()
+ self.test_seednode_empty_addrman()
+ self.test_seednode_addrman_unreachable_peers()
+
+
+if __name__ == '__main__':
+ P2PSeedNodes(__file__).main()
+
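The unreachable-peer case seeds AddrMan with addresses the regtest node can never actually connect to, then lets the ADD_NEXT_SEEDNODE delay elapse so the fallback to -seednode fires. A standalone sketch of the address generation used above (plain Python; the ranges are simply the ones the test picks):

    import random

    ADD_NEXT_SEEDNODE = 10  # seconds spent trying addrman peers before falling back to -seednode

    def random_unconnectable_address(i):
        # Arbitrary public-looking IPv4 addresses with distinct ports, so addrman is
        # non-empty but no outbound connection can succeed in an isolated regtest node.
        ip = ".".join(str(random.randrange(lo, hi)) for lo, hi in [(128, 169), (1, 255), (1, 255), (1, 255)])
        return ip, 8333 + i

    for i in range(10):
        print(random_unconnectable_address(i))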
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 11b4d9cc3b..c69d6ff405 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -156,9 +156,9 @@ class TxDownloadTest(BitcoinTestFramework):
# One of the peers is asked for the tx
peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
with p2p_lock:
- peer_expiry, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
+ _peer_expiry, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
assert_equal(peer_fallback.tx_getdata_count, 0)
- self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1) # Wait for request to peer_expiry to expire
+ self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1) # Wait for request to _peer_expiry to expire
peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
self.restart_node(0) # reset mocktime
@@ -250,7 +250,7 @@ class TxDownloadTest(BitcoinTestFramework):
def test_rejects_filter_reset(self):
self.log.info('Check that rejected tx is not requested again')
node = self.nodes[0]
- fill_mempool(self, node)
+ fill_mempool(self, node, tx_sync_fun=self.no_op)
self.wallet.rescan_utxos()
mempoolminfee = node.getmempoolinfo()['mempoolminfee']
peer = node.add_p2p_connection(TestP2PConn())
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
index 835ecbf184..1430131a97 100755
--- a/test/functional/p2p_unrequested_blocks.py
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -119,7 +119,7 @@ class AcceptBlockTest(BitcoinTestFramework):
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
- assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
@@ -191,7 +191,7 @@ class AcceptBlockTest(BitcoinTestFramework):
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
- assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
@@ -230,7 +230,7 @@ class AcceptBlockTest(BitcoinTestFramework):
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
- assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain")
# 8. Create a chain which is invalid at a height longer than the
@@ -260,7 +260,7 @@ class AcceptBlockTest(BitcoinTestFramework):
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
- assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_and_ping(msg_block(block_290f))
diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py
index 8c76c1f5f5..69afd45b9a 100755
--- a/test/functional/rpc_bind.py
+++ b/test/functional/rpc_bind.py
@@ -45,6 +45,19 @@ class RPCBindTest(BitcoinTestFramework):
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
+ def run_invalid_bind_test(self, allow_ips, addresses):
+ '''
+ Attempt to start a node with requested rpcallowip and rpcbind
+ parameters, expecting that the node will fail.
+ '''
+ self.log.info(f'Invalid bind test for {addresses}')
+ base_args = ['-disablewallet', '-nolisten']
+ if allow_ips:
+ base_args += ['-rpcallowip=' + x for x in allow_ips]
+ init_error = 'Error: Invalid port specified in -rpcbind: '
+ for addr in addresses:
+ self.nodes[0].assert_start_raises_init_error(base_args + [f'-rpcbind={addr}'], init_error + f"'{addr}'")
+
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
@@ -84,6 +97,10 @@ class RPCBindTest(BitcoinTestFramework):
if not self.options.run_nonloopback:
self._run_loopback_tests()
+ if self.options.run_ipv4:
+ self.run_invalid_bind_test(['127.0.0.1'], ['127.0.0.1:notaport', '127.0.0.1:-18443', '127.0.0.1:0', '127.0.0.1:65536'])
+ if self.options.run_ipv6:
+ self.run_invalid_bind_test(['[::1]'], ['[::1]:notaport', '[::1]:-18443', '[::1]:0', '[::1]:65536'])
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
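The four rejected addresses cover the main ways a -rpcbind port can be malformed: non-numeric, negative, zero, and above 65535. A tiny validator with the same cases (plain Python; not the actual init code):

    def valid_rpc_port(port_str):
        # Illustrative check only: a port must be a decimal integer in [1, 65535].
        return port_str.isdigit() and 1 <= int(port_str) <= 65535

    for addr in ["127.0.0.1:notaport", "127.0.0.1:-18443", "127.0.0.1:0", "127.0.0.1:65536"]:
        _host, _, port = addr.rpartition(":")
        assert not valid_rpc_port(port), addr

    assert valid_rpc_port("18443")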
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index 98147237b1..f02e6914ef 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -32,14 +32,16 @@ from test_framework.blocktools import (
TIME_GENESIS_BLOCK,
create_block,
create_coinbase,
+ create_tx_with_script,
)
from test_framework.messages import (
CBlockHeader,
+ COIN,
from_hex,
msg_block,
)
from test_framework.p2p import P2PInterface
-from test_framework.script import hash256
+from test_framework.script import hash256, OP_TRUE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@@ -88,6 +90,7 @@ class BlockchainTest(BitcoinTestFramework):
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
+ self._test_waitforblock() # also tests waitfornewblock
self._test_waitforblockheight()
self._test_getblock()
self._test_getdeploymentinfo()
@@ -505,6 +508,38 @@ class BlockchainTest(BitcoinTestFramework):
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), HEIGHT + 7)
+ def _test_waitforblock(self):
+ self.log.info("Test waitforblock and waitfornewblock")
+ node = self.nodes[0]
+
+ current_height = node.getblock(node.getbestblockhash())['height']
+ current_hash = node.getblock(node.getbestblockhash())['hash']
+
+ self.log.debug("Roll the chain back a few blocks and then reconsider it")
+ rollback_height = current_height - 100
+ rollback_hash = node.getblockhash(rollback_height)
+ rollback_header = node.getblockheader(rollback_hash)
+
+ node.invalidateblock(rollback_hash)
+ assert_equal(node.getblockcount(), rollback_height - 1)
+
+ self.log.debug("waitforblock should return the same block after its timeout")
+ assert_equal(node.waitforblock(blockhash=current_hash, timeout=1)['hash'], rollback_header['previousblockhash'])
+
+ node.reconsiderblock(rollback_hash)
+ # The chain has probably already been restored by the time reconsiderblock returns,
+ # but poll anyway.
+ self.wait_until(lambda: node.waitforblock(blockhash=current_hash, timeout=100)['hash'] == current_hash)
+
+ # roll back again
+ node.invalidateblock(rollback_hash)
+ assert_equal(node.getblockcount(), rollback_height - 1)
+
+ node.reconsiderblock(rollback_hash)
+ # The chain has probably already been restored by the time reconsiderblock returns,
+ # but poll anyway.
+ self.wait_until(lambda: node.waitfornewblock(timeout=100)['hash'] == current_hash)
+
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
@@ -556,12 +591,12 @@ class BlockchainTest(BitcoinTestFramework):
block = node.getblock(blockhash, verbosity)
assert_equal(blockhash, hash256(bytes.fromhex(block[:160]))[::-1].hex())
- def assert_fee_not_in_block(verbosity):
- block = node.getblock(blockhash, verbosity)
+ def assert_fee_not_in_block(hash, verbosity):
+ block = node.getblock(hash, verbosity)
assert 'fee' not in block['tx'][1]
- def assert_fee_in_block(verbosity):
- block = node.getblock(blockhash, verbosity)
+ def assert_fee_in_block(hash, verbosity):
+ block = node.getblock(hash, verbosity)
tx = block['tx'][1]
assert 'fee' in tx
assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)
@@ -580,8 +615,8 @@ class BlockchainTest(BitcoinTestFramework):
total_vout += vout["value"]
assert_equal(total_vin, total_vout + tx["fee"])
- def assert_vin_does_not_contain_prevout(verbosity):
- block = node.getblock(blockhash, verbosity)
+ def assert_vin_does_not_contain_prevout(hash, verbosity):
+ block = node.getblock(hash, verbosity)
tx = block["tx"][1]
if isinstance(tx, str):
# In verbosity level 1, only the transaction hashes are written
@@ -595,16 +630,16 @@ class BlockchainTest(BitcoinTestFramework):
assert_hexblock_hashes(False)
self.log.info("Test that getblock with verbosity 1 doesn't include fee")
- assert_fee_not_in_block(1)
- assert_fee_not_in_block(True)
+ assert_fee_not_in_block(blockhash, 1)
+ assert_fee_not_in_block(blockhash, True)
self.log.info('Test that getblock with verbosity 2 and 3 includes expected fee')
- assert_fee_in_block(2)
- assert_fee_in_block(3)
+ assert_fee_in_block(blockhash, 2)
+ assert_fee_in_block(blockhash, 3)
self.log.info("Test that getblock with verbosity 1 and 2 does not include prevout")
- assert_vin_does_not_contain_prevout(1)
- assert_vin_does_not_contain_prevout(2)
+ assert_vin_does_not_contain_prevout(blockhash, 1)
+ assert_vin_does_not_contain_prevout(blockhash, 2)
self.log.info("Test that getblock with verbosity 3 includes prevout")
assert_vin_contains_prevout(3)
@@ -612,7 +647,7 @@ class BlockchainTest(BitcoinTestFramework):
self.log.info("Test getblock with invalid verbosity type returns proper error message")
assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", node.getblock, blockhash, "2")
- self.log.info("Test that getblock with verbosity 2 and 3 still works with pruned Undo data")
+ self.log.info("Test that getblock doesn't work with deleted Undo data")
def move_block_file(old, new):
old_path = self.nodes[0].blocks_path / old
@@ -622,10 +657,8 @@ class BlockchainTest(BitcoinTestFramework):
# Move instead of deleting so we can restore chain state afterwards
move_block_file('rev00000.dat', 'rev_wrong')
- assert_fee_not_in_block(2)
- assert_fee_not_in_block(3)
- assert_vin_does_not_contain_prevout(2)
- assert_vin_does_not_contain_prevout(3)
+ assert_raises_rpc_error(-32603, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.", lambda: node.getblock(blockhash, 2))
+ assert_raises_rpc_error(-32603, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.", lambda: node.getblock(blockhash, 3))
# Restore chain state
move_block_file('rev_wrong', 'rev00000.dat')
@@ -633,6 +666,31 @@ class BlockchainTest(BitcoinTestFramework):
assert 'previousblockhash' not in node.getblock(node.getblockhash(0))
assert 'nextblockhash' not in node.getblock(node.getbestblockhash())
+ self.log.info("Test getblock when only header is known")
+ current_height = node.getblock(node.getbestblockhash())['height']
+ block_time = node.getblock(node.getbestblockhash())['time'] + 1
+ block = create_block(int(blockhash, 16), create_coinbase(current_height + 1, nValue=100), block_time)
+ block.solve()
+ node.submitheader(block.serialize().hex())
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", lambda: node.getblock(block.hash))
+
+ self.log.info("Test getblock when block data is available but undo data isn't")
+ # Submits a block building on the header-only block, so it can't be connected and has no undo data
+ tx = create_tx_with_script(block.vtx[0], 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)
+ block_noundo = create_block(block.sha256, create_coinbase(current_height + 2, nValue=100), block_time + 1, txlist=[tx])
+ block_noundo.solve()
+ node.submitblock(block_noundo.serialize().hex())
+
+ assert_fee_not_in_block(block_noundo.hash, 2)
+ assert_fee_not_in_block(block_noundo.hash, 3)
+ assert_vin_does_not_contain_prevout(block_noundo.hash, 2)
+ assert_vin_does_not_contain_prevout(block_noundo.hash, 3)
+
+ self.log.info("Test getblock when block is missing")
+ move_block_file('blk00000.dat', 'blk00000.dat.bak')
+ assert_raises_rpc_error(-1, "Block not found on disk", node.getblock, blockhash)
+ move_block_file('blk00000.dat.bak', 'blk00000.dat')
+
if __name__ == '__main__':
BlockchainTest(__file__).main()
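The reworked getblock checks distinguish four situations: only the header is known, the block file is missing, the block was connected but its undo data can't be read, and the block is on disk but was never connected (so no undo data is expected). A condensed summary of the outcomes asserted above, under those assumptions (plain Python; the strings mirror the test's expected errors, not the node's internal logic):

    def expected_getblock_result(header_only, on_disk, was_connected, undo_readable, verbosity):
        if header_only:
            return "error: Block not available (not fully downloaded)"
        if not on_disk:
            return "error: Block not found on disk"
        if verbosity >= 2 and was_connected and not undo_readable:
            return "error: Undo data expected but can't be read"
        return "block returned (fee/prevout fields only when undo data is readable)"

    assert "not fully downloaded" in expected_getblock_result(True, False, False, False, 2)  # submitheader-only block
    assert "not found on disk" in expected_getblock_result(False, False, True, False, 1)     # blk file moved away
    assert "Undo data expected" in expected_getblock_result(False, True, True, False, 3)     # rev file moved away
    assert "block returned" in expected_getblock_result(False, True, False, False, 2)        # block_noundo case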
diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py
index 9f4e17a328..d95820bbf8 100755
--- a/test/functional/rpc_createmultisig.py
+++ b/test/functional/rpc_createmultisig.py
@@ -47,7 +47,7 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
return node.get_wallet_rpc(wallet_name)
def run_test(self):
- node0, node1, node2 = self.nodes
+ node0, node1, _node2 = self.nodes
self.wallet = MiniWallet(test_node=node0)
if self.is_wallet_compiled():
@@ -122,7 +122,7 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
assert_raises_rpc_error(-4, "Unsupported multisig script size for legacy wallet. Upgrade to descriptors to overcome this limitation for p2sh-segwit or bech32 scripts", wallet_multi.addmultisigaddress, 16, pubkeys, '', 'bech32')
def do_multisig(self, nkeys, nsigs, output_type, wallet_multi):
- node0, node1, node2 = self.nodes
+ node0, _node1, node2 = self.nodes
pub_keys = self.pub[0: nkeys]
priv_keys = self.priv[0: nkeys]
diff --git a/test/functional/rpc_dumptxoutset.py b/test/functional/rpc_dumptxoutset.py
index aa12da6ceb..ad05060210 100755
--- a/test/functional/rpc_dumptxoutset.py
+++ b/test/functional/rpc_dumptxoutset.py
@@ -19,6 +19,17 @@ class DumptxoutsetTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 1
+ def check_expected_network(self, node, active):
+ rev_file = node.blocks_path / "rev00000.dat"
+ bogus_file = node.blocks_path / "bogus.dat"
+ rev_file.rename(bogus_file)
+ assert_raises_rpc_error(
+ -1, 'Could not roll back to requested height.', node.dumptxoutset, 'utxos.dat', rollback=99)
+ assert_equal(node.getnetworkinfo()['networkactive'], active)
+
+ # Cleanup
+ bogus_file.rename(rev_file)
+
def run_test(self):
"""Test a trivial usage of the dumptxoutset RPC command."""
node = self.nodes[0]
@@ -27,8 +38,8 @@ class DumptxoutsetTest(BitcoinTestFramework):
self.generate(node, COINBASE_MATURITY)
FILENAME = 'txoutset.dat'
- out = node.dumptxoutset(FILENAME)
- expected_path = node.datadir_path / self.chain / FILENAME
+ out = node.dumptxoutset(FILENAME, "latest")
+ expected_path = node.chain_path / FILENAME
assert expected_path.is_file()
@@ -51,10 +62,22 @@ class DumptxoutsetTest(BitcoinTestFramework):
# Specifying a path to an existing or invalid file will fail.
assert_raises_rpc_error(
- -8, '{} already exists'.format(FILENAME), node.dumptxoutset, FILENAME)
+ -8, '{} already exists'.format(FILENAME), node.dumptxoutset, FILENAME, "latest")
invalid_path = node.datadir_path / "invalid" / "path"
assert_raises_rpc_error(
- -8, "Couldn't open file {}.incomplete for writing".format(invalid_path), node.dumptxoutset, invalid_path)
+ -8, "Couldn't open file {}.incomplete for writing".format(invalid_path), node.dumptxoutset, invalid_path, "latest")
+
+ self.log.info(f"Test that dumptxoutset with unknown dump type fails")
+ assert_raises_rpc_error(
+ -8, 'Invalid snapshot type "bogus" specified. Please specify "rollback" or "latest"', node.dumptxoutset, 'utxos.dat', "bogus")
+
+ self.log.info(f"Test that dumptxoutset failure does not leave the network activity suspended when it was on previously")
+ self.check_expected_network(node, True)
+
+ self.log.info(f"Test that dumptxoutset failure leaves the network activity suspended when it was off")
+ node.setnetworkactive(False)
+ self.check_expected_network(node, False)
+ node.setnetworkactive(True)
if __name__ == '__main__':
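dumptxoutset now takes an explicit type: "latest" dumps the UTXO set at the current tip, while the rollback form temporarily suspends network activity, rewinds to the requested height, dumps, and then restores the chain and the prior network state. check_expected_network verifies that the restore step also runs when the rollback fails. The shape of that guarantee is the usual restore-on-failure pattern, sketched below with hypothetical names (not the actual RPC implementation):

    # Illustrative only: preserve the caller's network-activity state across a rollback dump.
    class FakeNode:
        def __init__(self):
            self.network_active = True

        def set_network_active(self, active):
            self.network_active = active

    def dump_with_rollback(node, height):
        was_active = node.network_active
        node.set_network_active(False)  # suspend networking while rewinding the chain
        try:
            raise RuntimeError(f"Could not roll back to requested height ({height}).")  # simulated failure
        finally:
            node.set_network_active(was_active)  # restore regardless of success or failure

    node = FakeNode()
    try:
        dump_with_rollback(node, 99)
    except RuntimeError:
        pass
    assert node.network_active  # the failure did not leave networking suspended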
diff --git a/test/functional/rpc_getblockfrompeer.py b/test/functional/rpc_getblockfrompeer.py
index e309018516..62b3d664e0 100755
--- a/test/functional/rpc_getblockfrompeer.py
+++ b/test/functional/rpc_getblockfrompeer.py
@@ -58,7 +58,7 @@ class GetBlockFromPeerTest(BitcoinTestFramework):
self.log.info("Node 0 should only have the header for node 1's block 3")
x = next(filter(lambda x: x['hash'] == short_tip, self.nodes[0].getchaintips()))
assert_equal(x['status'], "headers-only")
- assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, short_tip)
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", self.nodes[0].getblock, short_tip)
self.log.info("Fetch block from node 1")
peers = self.nodes[0].getpeerinfo()
diff --git a/test/functional/rpc_getblockstats.py b/test/functional/rpc_getblockstats.py
index d1e4895eb6..002763201a 100755
--- a/test/functional/rpc_getblockstats.py
+++ b/test/functional/rpc_getblockstats.py
@@ -114,7 +114,7 @@ class GetblockstatsTest(BitcoinTestFramework):
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
- self.log.info('Checking block %d\n' % (i))
+ self.log.info('Checking block %d' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
@@ -182,5 +182,16 @@ class GetblockstatsTest(BitcoinTestFramework):
assert_equal(tip_stats["utxo_increase_actual"], 4)
assert_equal(tip_stats["utxo_size_inc_actual"], 300)
+ self.log.info("Test when only header is known")
+ block = self.generateblock(self.nodes[0], output="raw(55)", transactions=[], submit=False)
+ self.nodes[0].submitheader(block["hex"])
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", lambda: self.nodes[0].getblockstats(block['hash']))
+
+ self.log.info('Test when block is missing')
+ (self.nodes[0].blocks_path / 'blk00000.dat').rename(self.nodes[0].blocks_path / 'blk00000.dat.backup')
+ assert_raises_rpc_error(-1, 'Block not found on disk', self.nodes[0].getblockstats, hash_or_height=1)
+ (self.nodes[0].blocks_path / 'blk00000.dat.backup').rename(self.nodes[0].blocks_path / 'blk00000.dat')
+
+
if __name__ == '__main__':
GetblockstatsTest(__file__).main()
diff --git a/test/functional/rpc_getorphantxs.py b/test/functional/rpc_getorphantxs.py
new file mode 100755
index 0000000000..8d32ce1638
--- /dev/null
+++ b/test/functional/rpc_getorphantxs.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the getorphantxs RPC."""
+
+from test_framework.mempool_util import tx_in_orphanage
+from test_framework.messages import msg_tx
+from test_framework.p2p import P2PInterface
+from test_framework.util import assert_equal
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.wallet import MiniWallet
+
+
+class GetOrphanTxsTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def run_test(self):
+ self.wallet = MiniWallet(self.nodes[0])
+ self.test_orphan_activity()
+ self.test_orphan_details()
+
+ def test_orphan_activity(self):
+ self.log.info("Check that orphaned transactions are returned with getorphantxs")
+ node = self.nodes[0]
+
+ self.log.info("Create two 1P1C packages, but only broadcast the children")
+ tx_parent_1 = self.wallet.create_self_transfer()
+ tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])
+ tx_parent_2 = self.wallet.create_self_transfer()
+ tx_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_2["new_utxo"])
+ peer = node.add_p2p_connection(P2PInterface())
+ peer.send_and_ping(msg_tx(tx_child_1["tx"]))
+ peer.send_and_ping(msg_tx(tx_child_2["tx"]))
+
+ self.log.info("Check that neither parent is in the mempool")
+ assert_equal(node.getmempoolinfo()["size"], 0)
+
+ self.log.info("Check that both children are in the orphanage")
+
+ orphanage = node.getorphantxs(verbosity=0)
+ self.log.info("Check the size of the orphanage")
+ assert_equal(len(orphanage), 2)
+ self.log.info("Check that negative verbosity is treated as 0")
+ assert_equal(orphanage, node.getorphantxs(verbosity=-1))
+ assert tx_in_orphanage(node, tx_child_1["tx"])
+ assert tx_in_orphanage(node, tx_child_2["tx"])
+
+ self.log.info("Broadcast parent 1")
+ peer.send_and_ping(msg_tx(tx_parent_1["tx"]))
+ self.log.info("Check that parent 1 and child 1 are in the mempool")
+ raw_mempool = node.getrawmempool()
+ assert_equal(len(raw_mempool), 2)
+ assert tx_parent_1["txid"] in raw_mempool
+ assert tx_child_1["txid"] in raw_mempool
+
+ self.log.info("Check that orphanage only contains child 2")
+ orphanage = node.getorphantxs()
+ assert_equal(len(orphanage), 1)
+ assert tx_in_orphanage(node, tx_child_2["tx"])
+
+ peer.send_and_ping(msg_tx(tx_parent_2["tx"]))
+ self.log.info("Check that all parents and children are now in the mempool")
+ raw_mempool = node.getrawmempool()
+ assert_equal(len(raw_mempool), 4)
+ assert tx_parent_1["txid"] in raw_mempool
+ assert tx_child_1["txid"] in raw_mempool
+ assert tx_parent_2["txid"] in raw_mempool
+ assert tx_child_2["txid"] in raw_mempool
+ self.log.info("Check that the orphanage is empty")
+ assert_equal(len(node.getorphantxs()), 0)
+
+ self.log.info("Confirm the transactions (clears mempool)")
+ self.generate(node, 1)
+ assert_equal(node.getmempoolinfo()["size"], 0)
+
+ def test_orphan_details(self):
+ self.log.info("Check the transaction details returned from getorphantxs")
+ node = self.nodes[0]
+
+ self.log.info("Create two orphans, from different peers")
+ tx_parent_1 = self.wallet.create_self_transfer()
+ tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])
+ tx_parent_2 = self.wallet.create_self_transfer()
+ tx_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_2["new_utxo"])
+ peer_1 = node.add_p2p_connection(P2PInterface())
+ peer_2 = node.add_p2p_connection(P2PInterface())
+ peer_1.send_and_ping(msg_tx(tx_child_1["tx"]))
+ peer_2.send_and_ping(msg_tx(tx_child_2["tx"]))
+
+ orphanage = node.getorphantxs(verbosity=2)
+ assert tx_in_orphanage(node, tx_child_1["tx"])
+ assert tx_in_orphanage(node, tx_child_2["tx"])
+
+ self.log.info("Check that orphan 1 and 2 were from different peers")
+ assert orphanage[0]["from"][0] != orphanage[1]["from"][0]
+
+ self.log.info("Unorphan child 2")
+ peer_2.send_and_ping(msg_tx(tx_parent_2["tx"]))
+ assert not tx_in_orphanage(node, tx_child_2["tx"])
+
+ self.log.info("Checking orphan details")
+ orphanage = node.getorphantxs(verbosity=1)
+ assert_equal(len(node.getorphantxs()), 1)
+ orphan_1 = orphanage[0]
+ self.orphan_details_match(orphan_1, tx_child_1, verbosity=1)
+
+ self.log.info("Checking orphan details (verbosity 2)")
+ orphanage = node.getorphantxs(verbosity=2)
+ orphan_1 = orphanage[0]
+ self.orphan_details_match(orphan_1, tx_child_1, verbosity=2)
+
+ def orphan_details_match(self, orphan, tx, verbosity):
+ self.log.info("Check txid/wtxid of orphan")
+ assert_equal(orphan["txid"], tx["txid"])
+ assert_equal(orphan["wtxid"], tx["wtxid"])
+
+ self.log.info("Check the sizes of orphan")
+ assert_equal(orphan["bytes"], len(tx["tx"].serialize()))
+ assert_equal(orphan["vsize"], tx["tx"].get_vsize())
+ assert_equal(orphan["weight"], tx["tx"].get_weight())
+
+ if verbosity == 2:
+ self.log.info("Check the transaction hex of orphan")
+ assert_equal(orphan["hex"], tx["hex"])
+
+
+if __name__ == '__main__':
+ GetOrphanTxsTest(__file__).main()
diff --git a/test/functional/rpc_txoutproof.py b/test/functional/rpc_txoutproof.py
index 387132b680..90572245d6 100755
--- a/test/functional/rpc_txoutproof.py
+++ b/test/functional/rpc_txoutproof.py
@@ -67,6 +67,10 @@ class MerkleBlockTest(BitcoinTestFramework):
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# We can't get the proof if we specify a non-existent block
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].gettxoutproof, [txid_spent], "0000000000000000000000000000000000000000000000000000000000000000")
+ # We can't get the proof if we only have the header of the specified block
+ block = self.generateblock(self.nodes[0], output="raw(55)", transactions=[], submit=False)
+ self.nodes[0].submitheader(block["hex"])
+ assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", self.nodes[0].gettxoutproof, [txid_spent], block['hash'])
# We can get the proof if the transaction is unspent
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_unspent])), [txid_unspent])
# We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py
index 44187ce790..49eb64abad 100755
--- a/test/functional/rpc_users.py
+++ b/test/functional/rpc_users.py
@@ -139,15 +139,32 @@ class HTTPBasicsTest(BitcoinTestFramework):
init_error = 'Error: Unable to start HTTP server. See debug log for details.'
self.log.info('Check -rpcauth are validated')
- # Empty -rpcauth= are ignored
- self.restart_node(0, extra_args=['-rpcauth='])
+ self.log.info('Empty -rpcauth values are treated as errors')
self.stop_node(0)
+ self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth'])
+ self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth='])
+ self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=""'])
+ self.log.info('Check malformed -rpcauth')
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo:bar'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo:bar:baz'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo$bar:baz'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo$bar$baz'])
+ self.log.info('Check interactions between blank and non-blank rpcauth')
+ # pw = bitcoin
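+ # Each -rpcauth value has the form <user>:<salt>$<hash>, where <hash> is the hex-encoded
+ # HMAC-SHA256 of the password keyed with <salt> (as generated by share/rpcauth/rpcauth.py)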
+ rpcauth_user1 = '-rpcauth=user1:6dd184e5e69271fdd69103464630014f$eb3d7ce67c4d1ff3564270519b03b636c0291012692a5fa3dd1d2075daedd07b'
+ rpcauth_user2 = '-rpcauth=user2:57b2f77c919eece63cfa46c2f06e46ae$266b63902f99f97eeaab882d4a87f8667ab84435c3799f2ce042ef5a994d620b'
+ self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=[rpcauth_user1, rpcauth_user2, '-rpcauth='])
+ self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=[rpcauth_user1, '-rpcauth=', rpcauth_user2])
+ self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=', rpcauth_user1, rpcauth_user2])
+
+ self.log.info('Check -norpcauth disables previous -rpcauth params')
+ self.restart_node(0, extra_args=[rpcauth_user1, rpcauth_user2, '-norpcauth'])
+ assert_equal(401, call_with_auth(self.nodes[0], 'user1', 'bitcoin').status)
+ assert_equal(401, call_with_auth(self.nodes[0], 'rt', self.rtpassword).status)
+ self.stop_node(0)
+
self.log.info('Check that failure to write cookie file will abort the node gracefully')
(self.nodes[0].chain_path / ".cookie.tmp").mkdir()
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error)
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index 5c2fa28a31..705b8e8fe5 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2022 The Bitcoin Core developers
+# Copyright (c) 2015-present The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
@@ -74,7 +74,7 @@ def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl
block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION
block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
- if tmpl and not tmpl.get('bits') is None:
+ if tmpl and tmpl.get('bits') is not None:
block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
else:
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
diff --git a/test/functional/test_framework/mempool_util.py b/test/functional/test_framework/mempool_util.py
index 148cc935ed..a6a7940c60 100644
--- a/test/functional/test_framework/mempool_util.py
+++ b/test/functional/test_framework/mempool_util.py
@@ -8,6 +8,7 @@ from decimal import Decimal
from .blocktools import (
COINBASE_MATURITY,
)
+from .messages import CTransaction
from .util import (
assert_equal,
assert_greater_than,
@@ -19,14 +20,11 @@ from .wallet import (
)
-def fill_mempool(test_framework, node):
+def fill_mempool(test_framework, node, *, tx_sync_fun=None):
"""Fill mempool until eviction.
Allows for simpler testing of scenarios with floating mempoolminfee > minrelay
- Requires -datacarriersize=100000 and
- -maxmempool=5.
- It will not ensure mempools become synced as it
- is based on a single node and assumes -minrelaytxfee
+ Requires -datacarriersize=100000 and -maxmempool=5 and assumes -minrelaytxfee
is 1 sat/vbyte.
To avoid unintentional tx dependencies, the mempool filling txs are created with a
tagged ephemeral miniwallet instance.
@@ -57,18 +55,25 @@ def fill_mempool(test_framework, node):
tx_to_be_evicted_id = ephemeral_miniwallet.send_self_transfer(
from_node=node, utxo_to_spend=confirmed_utxos.pop(0), fee_rate=relayfee)["txid"]
+ def send_batch(fee):
+ utxos = confirmed_utxos[:tx_batch_size]
+ create_lots_of_big_transactions(ephemeral_miniwallet, node, fee, tx_batch_size, txouts, utxos)
+ del confirmed_utxos[:tx_batch_size]
+
# Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool
# The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB)
# by 130 should result in a fee that corresponds to 2x of that fee rate
base_fee = relayfee * 130
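+ # e.g. with a relayfee of 0.00001000 BTC/kvB (1 sat/vbyte): 0.00001000 * 130 = 0.0013 BTC,
+ # which spread over a ~65,000-vbyte transaction is roughly 2 sat/vbyte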
+ batch_fees = [(i + 1) * base_fee for i in range(num_of_batches)]
test_framework.log.debug("Fill up the mempool with txs with higher fee rate")
- with node.assert_debug_log(["rolling minimum fee bumped"]):
- for batch_of_txid in range(num_of_batches):
- fee = (batch_of_txid + 1) * base_fee
- utxos = confirmed_utxos[:tx_batch_size]
- create_lots_of_big_transactions(ephemeral_miniwallet, node, fee, tx_batch_size, txouts, utxos)
- del confirmed_utxos[:tx_batch_size]
+ for fee in batch_fees[:-3]:
+ send_batch(fee)
+ tx_sync_fun() if tx_sync_fun else test_framework.sync_mempools() # sync before any eviction
+ assert_equal(node.getmempoolinfo()["mempoolminfee"], Decimal("0.00001000"))
+ for fee in batch_fees[-3:]:
+ send_batch(fee)
+ tx_sync_fun() if tx_sync_fun else test_framework.sync_mempools() # sync after all evictions
test_framework.log.debug("The tx should be evicted by now")
# The number of transactions created should be greater than the ones present in the mempool
@@ -79,3 +84,8 @@ def fill_mempool(test_framework, node):
test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee")
assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
+
+def tx_in_orphanage(node, tx: CTransaction) -> bool:
+ """Returns true if the transaction is in the orphanage."""
+ found = [o for o in node.getorphantxs(verbosity=1) if o["txid"] == tx.rehash() and o["wtxid"] == tx.getwtxid()]
+ return len(found) == 1
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 00fe5b08e4..ce68de7eaa 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -5,7 +5,7 @@
"""Helpful routines for regression testing."""
from base64 import b64encode
-from decimal import Decimal, ROUND_DOWN
+from decimal import Decimal
from subprocess import CalledProcessError
import hashlib
import inspect
@@ -21,7 +21,9 @@ import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from collections.abc import Callable
-from typing import Optional
+from typing import Optional, Union
+
+SATOSHI_PRECISION = Decimal('0.00000001')
logger = logging.getLogger("TestFramework.utils")
@@ -261,8 +263,9 @@ def get_fee(tx_size, feerate_btc_kvb):
return target_fee_sat / Decimal(1e8) # Return result in BTC
-def satoshi_round(amount):
- return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
+def satoshi_round(amount: Union[int, float, str], *, rounding: str) -> Decimal:
+ """Rounds a Decimal amount to the nearest satoshi using the specified rounding mode."""
+ return Decimal(amount).quantize(SATOSHI_PRECISION, rounding=rounding)
def wait_until_helper_internal(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index f3713f297e..1cef714705 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -7,7 +7,6 @@
from copy import deepcopy
from decimal import Decimal
from enum import Enum
-import math
from typing import (
Any,
Optional,
@@ -35,7 +34,6 @@ from test_framework.messages import (
CTxOut,
hash256,
ser_compact_size,
- WITNESS_SCALE_FACTOR,
)
from test_framework.script import (
CScript,
@@ -119,20 +117,18 @@ class MiniWallet:
def _create_utxo(self, *, txid, vout, value, height, coinbase, confirmations):
return {"txid": txid, "vout": vout, "value": value, "height": height, "coinbase": coinbase, "confirmations": confirmations}
- def _bulk_tx(self, tx, target_weight):
- """Pad a transaction with extra outputs until it reaches a target weight (or higher).
+ def _bulk_tx(self, tx, target_vsize):
+ """Pad a transaction with extra outputs until it reaches a target vsize.
returns the tx
"""
tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN])))
- # determine number of needed padding bytes by converting weight difference to vbytes
- dummy_vbytes = (target_weight - tx.get_weight() + 3) // 4
+ # determine number of needed padding bytes
+ dummy_vbytes = target_vsize - tx.get_vsize()
# compensate for the increase of the compact-size encoded script length
# (note that the length encoding of the unpadded output script needs one byte)
dummy_vbytes -= len(ser_compact_size(dummy_vbytes)) - 1
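+ # e.g. if 300 extra vbytes are needed, ser_compact_size(300) takes 3 bytes, so 298 OP_1 bytes
+ # are appended: the script grows by 298 bytes and its compact-size length encoding by 2 bytes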
tx.vout[-1].scriptPubKey = CScript([OP_RETURN] + [OP_1] * dummy_vbytes)
- # Actual weight should be at most 3 higher than target weight
- assert_greater_than_or_equal(tx.get_weight(), target_weight)
- assert_greater_than_or_equal(target_weight + 3, tx.get_weight())
+ assert_equal(tx.get_vsize(), target_vsize)
def get_balance(self):
return sum(u['value'] for u in self._utxos)
@@ -309,7 +305,7 @@ class MiniWallet:
locktime=0,
sequence=0,
fee_per_output=1000,
- target_weight=0,
+ target_vsize=0,
confirmed_only=False,
):
"""
@@ -338,8 +334,8 @@ class MiniWallet:
self.sign_tx(tx)
- if target_weight:
- self._bulk_tx(tx, target_weight)
+ if target_vsize:
+ self._bulk_tx(tx, target_vsize)
txid = tx.rehash()
return {
@@ -364,7 +360,7 @@ class MiniWallet:
fee_rate=Decimal("0.003"),
fee=Decimal("0"),
utxo_to_spend=None,
- target_weight=0,
+ target_vsize=0,
confirmed_only=False,
**kwargs,
):
@@ -379,20 +375,18 @@ class MiniWallet:
vsize = Decimal(168) # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other)
else:
assert False
- if target_weight and not fee: # respect fee_rate if target weight is passed
- # the actual weight might be off by 3 WUs, so calculate based on that (see self._bulk_tx)
- max_actual_weight = target_weight + 3
- fee = get_fee(math.ceil(max_actual_weight / WITNESS_SCALE_FACTOR), fee_rate)
+ if target_vsize and not fee: # respect fee_rate if target vsize is passed
+ fee = get_fee(target_vsize, fee_rate)
send_value = utxo_to_spend["value"] - (fee or (fee_rate * vsize / 1000))
# create tx
tx = self.create_self_transfer_multi(
utxos_to_spend=[utxo_to_spend],
amount_per_output=int(COIN * send_value),
- target_weight=target_weight,
+ target_vsize=target_vsize,
**kwargs,
)
- if not target_weight:
+ if not target_vsize:
assert_equal(tx["tx"].get_vsize(), vsize)
tx["new_utxo"] = tx.pop("new_utxos")[0]
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 59c37aa18f..3d8c230066 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -96,10 +96,13 @@ BASE_SCRIPTS = [
'feature_fee_estimation.py',
'feature_taproot.py',
'feature_block.py',
+ 'p2p_node_network_limited.py --v1transport',
+ 'p2p_node_network_limited.py --v2transport',
# vv Tests less than 2m vv
'mining_getblocktemplate_longpoll.py',
'p2p_segwit.py',
'feature_maxuploadtarget.py',
+ 'feature_assumeutxo.py',
'mempool_updatefromblock.py',
'mempool_persist.py --descriptors',
# vv Tests less than 60s vv
@@ -157,6 +160,7 @@ BASE_SCRIPTS = [
'wallet_importmulti.py --legacy-wallet',
'mempool_limit.py',
'rpc_txoutproof.py',
+ 'rpc_getorphantxs.py',
'wallet_listreceivedby.py --legacy-wallet',
'wallet_listreceivedby.py --descriptors',
'wallet_abandonconflict.py --legacy-wallet',
@@ -354,7 +358,6 @@ BASE_SCRIPTS = [
'wallet_coinbase_category.py --descriptors',
'feature_filelock.py',
'feature_loadblock.py',
- 'feature_assumeutxo.py',
'wallet_assumeutxo.py --descriptors',
'p2p_dos_header_tree.py',
'p2p_add_connections.py',
@@ -385,8 +388,6 @@ BASE_SCRIPTS = [
'feature_coinstatsindex.py',
'wallet_orphanedreward.py',
'wallet_timelock.py',
- 'p2p_node_network_limited.py --v1transport',
- 'p2p_node_network_limited.py --v2transport',
'p2p_permissions.py',
'feature_blocksdir.py',
'wallet_startup.py',
@@ -406,6 +407,7 @@ BASE_SCRIPTS = [
'feature_shutdown.py',
'wallet_migration.py',
'p2p_ibd_txrelay.py',
+ 'p2p_seednode.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
@@ -445,8 +447,8 @@ def main():
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument('--resultsfile', '-r', help='store test results (as CSV) to the provided file')
-
args, unknown_args = parser.parse_known_args()
+ fail_on_warn = args.ci
if not args.ansi:
global DEFAULT, BOLD, GREEN, RED
DEFAULT = ("", "")
@@ -487,7 +489,7 @@ def main():
if not enable_bitcoind:
print("No functional tests to run.")
- print("Rerun ./configure with --with-daemon and then make")
+ print("Re-compile with the -DBUILD_DAEMON=ON build option")
sys.exit(1)
# Build list of tests
@@ -521,15 +523,28 @@ def main():
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
+ # The user can specify a test case with or without the .py extension.
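+ # e.g. --exclude="rpc_users,wallet_basic.py --descriptors" removes every variant of
+ # rpc_users.py but only the descriptor variant of wallet_basic.py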
if args.exclude:
- exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
- for exclude_test in exclude_tests:
- # Remove <test_name>.py and <test_name>.py --arg from the test list
- exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
+
+ def print_warning_missing_test(test_name):
+ print("{}WARNING!{} Test '{}' not found in current test list. Check the --exclude list.".format(BOLD[1], BOLD[0], test_name))
+ if fail_on_warn:
+ sys.exit(1)
+
+ def remove_tests(exclude_list):
+ if not exclude_list:
+ print_warning_missing_test(exclude_test)
for exclude_item in exclude_list:
test_list.remove(exclude_item)
- if not exclude_list:
- print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
+
+ exclude_tests = [test.strip() for test in args.exclude.split(",")]
+ for exclude_test in exclude_tests:
+ # A space in the name indicates it has arguments such as "wallet_basic.py --descriptors"
+ if ' ' in exclude_test:
+ remove_tests([test for test in test_list if test.replace('.py', '') == exclude_test.replace('.py', '')])
+ else:
+ # Exclude all variants of a test
+ remove_tests([test for test in test_list if test.split('.py')[0] == exclude_test.split('.py')[0]])
if args.filter:
test_list = list(filter(re.compile(args.filter).search, test_list))
@@ -552,7 +567,7 @@ def main():
f"A minimum of {MIN_NO_CLEANUP_SPACE // (1024 * 1024 * 1024)} GB of free space is required.")
passon_args.append("--nocleanup")
- check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
+ check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=fail_on_warn)
check_script_prefixes()
if not args.keepcache:
@@ -560,7 +575,6 @@ def main():
run_tests(
test_list=test_list,
- src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
@@ -572,7 +586,7 @@ def main():
results_filepath=results_filepath,
)
-def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control, results_filepath=None):
+def run_tests(*, test_list, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control, results_filepath=None):
args = args or []
# Warn if bitcoind is already running
@@ -595,7 +609,7 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
print(f"{BOLD[1]}WARNING!{BOLD[0]} There may be insufficient free space in {tmpdir} to run the Bitcoin functional test suite. "
f"Running the test suite with fewer than {min_space // (1024 * 1024)} MB of free space might cause tests to fail.")
- tests_dir = build_dir + '/test/functional/'
+ tests_dir = f"{build_dir}/test/functional/"
# This allows `test_runner.py` to work from an out-of-source build directory using a symlink,
# a hard link or a copy on any platform. See https://github.com/bitcoin/bitcoin/pull/27561.
sys.path.append(tests_dir)
@@ -862,7 +876,6 @@ def check_script_list(*, src_dir, fail_on_warn):
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
- # On CI this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
diff --git a/test/functional/tool_signet_miner.py b/test/functional/tool_signet_miner.py
index bdefb92ae6..67fb5c9f94 100755
--- a/test/functional/tool_signet_miner.py
+++ b/test/functional/tool_signet_miner.py
@@ -57,6 +57,7 @@ class SignetMinerTest(BitcoinTestFramework):
f'--grind-cmd={self.options.bitcoinutil} grind',
'--nbits=1d00ffff',
f'--set-block-time={int(time.time())}',
+ '--poolnum=99',
], check=True, stderr=subprocess.STDOUT)
assert_equal(node.getblockcount(), 1)
diff --git a/test/functional/wallet_assumeutxo.py b/test/functional/wallet_assumeutxo.py
index 0bce2f137c..76cd2097a3 100755
--- a/test/functional/wallet_assumeutxo.py
+++ b/test/functional/wallet_assumeutxo.py
@@ -11,7 +11,9 @@ See feature_assumeutxo.py for background.
- TODO: test loading a wallet (backup) on a pruned node
"""
+from test_framework.address import address_to_scriptpubkey
from test_framework.test_framework import BitcoinTestFramework
+from test_framework.messages import COIN
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
@@ -62,8 +64,16 @@ class AssumeutxoTest(BitcoinTestFramework):
for n in self.nodes:
n.setmocktime(n.getblockheader(n.getbestblockhash())['time'])
+ # Create a wallet that we will back up later (at the snapshot height)
n0.createwallet('w')
w = n0.get_wallet_rpc("w")
+ w_address = w.getnewaddress()
+
+ # Create another wallet and back it up now (before the snapshot height)
+ n0.createwallet('w2')
+ w2 = n0.get_wallet_rpc("w2")
+ w2_address = w2.getnewaddress()
+ w2.backupwallet("backup_w2.dat")
# Generate a series of blocks that `n0` will have in the snapshot,
# but that n1 doesn't yet see. In order for the snapshot to activate,
@@ -84,6 +94,8 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(n.getblockchaininfo()[
"headers"], SNAPSHOT_BASE_HEIGHT)
+ # This backup is created at the snapshot height, so it's
+ # not part of the background sync anymore
w.backupwallet("backup_w.dat")
self.log.info("-- Testing assumeutxo")
@@ -93,7 +105,7 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info(
f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}")
- dump_output = n0.dumptxoutset('utxos.dat')
+ dump_output = n0.dumptxoutset('utxos.dat', "latest")
assert_equal(
dump_output['txoutset_hash'],
@@ -103,7 +115,13 @@ class AssumeutxoTest(BitcoinTestFramework):
# Mine more blocks on top of the snapshot that n1 hasn't yet seen. This
# will allow us to test n1's sync-to-tip on top of a snapshot.
- self.generate(n0, nblocks=100, sync_fun=self.no_op)
+ w_skp = address_to_scriptpubkey(w_address)
+ w2_skp = address_to_scriptpubkey(w2_address)
+ for i in range(100):
+ if i % 3 == 0:
+ self.mini_wallet.send_to(from_node=n0, scriptPubKey=w_skp, amount=1 * COIN)
+ self.mini_wallet.send_to(from_node=n0, scriptPubKey=w2_skp, amount=10 * COIN)
+ self.generate(n0, nblocks=1, sync_fun=self.no_op)
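+ # i % 3 == 0 holds for 34 of the 100 iterations, so w receives 34 * 1 BTC and w2 receives
+ # 34 * 10 BTC in total (matching the balance checks further below)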
assert_equal(n0.getblockcount(), FINAL_HEIGHT)
assert_equal(n1.getblockcount(), START_HEIGHT)
@@ -126,8 +144,13 @@ class AssumeutxoTest(BitcoinTestFramework):
assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
- self.log.info("Backup can't be loaded during background sync")
- assert_raises_rpc_error(-4, "Wallet loading failed. Error loading wallet. Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. Wallet should be able to load successfully after node sync reaches height 299", n1.restorewallet, "w", "backup_w.dat")
+ self.log.info("Backup from the snapshot height can be loaded during background sync")
+ n1.restorewallet("w", "backup_w.dat")
+ # Balance of the w wallet is still 0 because n1 has not synced yet
+ assert_equal(n1.getbalance(), 0)
+
+ self.log.info("Backup from before the snapshot height can't be loaded during background sync")
+ assert_raises_rpc_error(-4, "Wallet loading failed. Error loading wallet. Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. Wallet should be able to load successfully after node sync reaches height 299", n1.restorewallet, "w2", "backup_w2.dat")
PAUSE_HEIGHT = FINAL_HEIGHT - 40
@@ -159,8 +182,15 @@ class AssumeutxoTest(BitcoinTestFramework):
self.log.info("Ensuring background validation completes")
self.wait_until(lambda: len(n1.getchainstates()['chainstates']) == 1)
- self.log.info("Ensuring wallet can be restored from backup")
- n1.restorewallet("w", "backup_w.dat")
+ self.log.info("Ensuring wallet can be restored from a backup that was created before the snapshot height")
+ n1.restorewallet("w2", "backup_w2.dat")
+ # Check balance of w2 wallet
+ assert_equal(n1.getbalance(), 340)
+
+ # Check balance of w wallet after node is synced
+ n1.loadwallet("w")
+ w = n1.get_wallet_rpc("w")
+ assert_equal(w.getbalance(), 34)
if __name__ == '__main__':
diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py
index a639c34377..83267f77e1 100755
--- a/test/functional/wallet_backup.py
+++ b/test/functional/wallet_backup.py
@@ -140,6 +140,25 @@ class WalletBackupTest(BitcoinTestFramework):
assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file)
assert wallet_file.exists()
+ def test_pruned_wallet_backup(self):
+ self.log.info("Test loading backup on a pruned node when the backup was created close to the prune height of the restoring node")
+ node = self.nodes[3]
+ self.restart_node(3, ["-prune=1", "-fastprune=1"])
+ # Ensure the chain tip is at height 214, because this test assumes it is.
+ assert_equal(node.getchaintips()[0]["height"], 214)
+ # We need a few more blocks so we can actually get above a realistic
+ # minimal prune height
+ self.generate(node, 50, sync_fun=self.no_op)
+ # Backup created at block height 264
+ node.backupwallet(node.datadir_path / 'wallet_pruned.bak')
+ # Generate more blocks so we can actually prune the older blocks
+ self.generate(node, 300, sync_fun=self.no_op)
+ # This gives us an actual prune height roughly in the range of 220 - 240
+ node.pruneblockchain(250)
+ # The backup should be updated with the latest height (locator) for
+ # the backup to load successfully this close to the prune height
+ node.restorewallet('pruned', node.datadir_path / 'wallet_pruned.bak')
+
def run_test(self):
self.log.info("Generating initial blockchain")
self.generate(self.nodes[0], 1)
@@ -242,6 +261,8 @@ class WalletBackupTest(BitcoinTestFramework):
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
+ self.test_pruned_wallet_backup()
+
if __name__ == '__main__':
WalletBackupTest(__file__).main()
diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py
index e71283b928..775786fbb1 100755
--- a/test/functional/wallet_backwards_compatibility.py
+++ b/test/functional/wallet_backwards_compatibility.py
@@ -33,7 +33,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 12
+ self.num_nodes = 11
# Add new version after each release:
self.extra_args = [
["-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # Pre-release: use to mine blocks. noban for immediate tx relay
@@ -47,7 +47,6 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v0.19.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=127.0.0.1"], # v0.18.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=127.0.0.1"], # v0.17.2
- ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=127.0.0.1", "-wallet=wallet.dat"], # v0.16.3
]
self.wallet_names = [self.default_wallet_name]
@@ -68,7 +67,6 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
190100,
180100,
170200,
- 160300,
])
self.start_nodes()
@@ -133,18 +131,17 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
def run_test(self):
node_miner = self.nodes[0]
node_master = self.nodes[1]
- node_v21 = self.nodes[self.num_nodes - 6]
- node_v17 = self.nodes[self.num_nodes - 2]
- node_v16 = self.nodes[self.num_nodes - 1]
+ node_v21 = self.nodes[self.num_nodes - 5]
+ node_v17 = self.nodes[self.num_nodes - 1]
legacy_nodes = self.nodes[2:] # Nodes that support legacy wallets
- legacy_only_nodes = self.nodes[-5:] # Nodes that only support legacy wallets
- descriptors_nodes = self.nodes[2:-5] # Nodes that support descriptor wallets
+ legacy_only_nodes = self.nodes[-4:] # Nodes that only support legacy wallets
+ descriptors_nodes = self.nodes[2:-4] # Nodes that support descriptor wallets
self.generatetoaddress(node_miner, COINBASE_MATURITY + 1, node_miner.getnewaddress())
# Sanity check the test framework:
- res = node_v16.getblockchaininfo()
+ res = node_v17.getblockchaininfo()
assert_equal(res['blocks'], COINBASE_MATURITY + 1)
self.log.info("Test wallet backwards compatibility...")
@@ -215,9 +212,6 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
# In descriptors wallet mode, run this test on the nodes that support descriptor wallets
# In legacy wallets mode, run this test on the nodes that support legacy wallets
for node in descriptors_nodes if self.options.descriptors else legacy_nodes:
- if self.major_version_less_than(node, 17):
- # loadwallet was introduced in v0.17.0
- continue
self.log.info(f"- {node.version}")
for wallet_name in ["w1", "w2", "w3"]:
if self.major_version_less_than(node, 18) and wallet_name == "w3":
@@ -290,15 +284,6 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Bitcoin Core")
self.start_node(node_v17.index)
- # No wallet created in master can be opened in 0.16
- self.log.info("Test that wallets created in master are too new for 0.16")
- self.stop_node(node_v16.index)
- for wallet_name in ["w1", "w2", "w3"]:
- if self.options.descriptors:
- node_v16.assert_start_raises_init_error([f"-wallet={wallet_name}"], f"Error: {wallet_name} corrupt, salvage failed")
- else:
- node_v16.assert_start_raises_init_error([f"-wallet={wallet_name}"], f"Error: Error loading {wallet_name}: Wallet requires newer version of Bitcoin Core")
-
# When descriptors are enabled, w1 cannot be opened by 0.21 since it contains a taproot descriptor
if self.options.descriptors:
self.log.info("Test that 0.21 cannot open wallet containing tr() descriptors")
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index 156f4279b4..149b1246d8 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -229,7 +229,7 @@ class MultiWalletTest(BitcoinTestFramework):
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
- assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
+ assert_raises_rpc_error(-19, "Multiple wallets are loaded. Please select which wallet", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
self.generatetoaddress(node, nblocks=COINBASE_MATURITY + 1, address=w1.getnewaddress(), sync_fun=self.no_op)
@@ -275,7 +275,7 @@ class MultiWalletTest(BitcoinTestFramework):
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
- assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
+ assert_raises_rpc_error(-19, "Multiple wallets are loaded. Please select which wallet", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py
index 7d1d244dff..c909336a25 100755
--- a/test/functional/wallet_upgradewallet.py
+++ b/test/functional/wallet_upgradewallet.py
@@ -185,6 +185,7 @@ class UpgradeWalletTest(BitcoinTestFramework):
self.restart_node(0)
copy_v16()
wallet = node_master.get_wallet_rpc(self.default_wallet_name)
+ assert_equal(wallet.getbalance(), v16_3_balance)
self.log.info("Test upgradewallet without a version argument")
self.test_upgradewallet(wallet, previous_version=159900, expected_version=169900)
# wallet should still contain the same balance
@@ -231,7 +232,7 @@ class UpgradeWalletTest(BitcoinTestFramework):
assert b'\x07hdchain' in new_kvs
hd_chain = new_kvs[b'\x07hdchain']
assert_equal(28, len(hd_chain))
- hd_chain_version, external_counter, seed_id = struct.unpack('<iI20s', hd_chain)
+ hd_chain_version, _external_counter, seed_id = struct.unpack('<iI20s', hd_chain)
assert_equal(1, hd_chain_version)
seed_id = bytearray(seed_id)
seed_id.reverse()
@@ -258,7 +259,7 @@ class UpgradeWalletTest(BitcoinTestFramework):
new_kvs = dump_bdb_kv(node_master_wallet)
hd_chain = new_kvs[b'\x07hdchain']
assert_equal(32, len(hd_chain))
- hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
+ hd_chain_version, _external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
assert_equal(2, hd_chain_version)
assert_equal(0, internal_counter)
seed_id = bytearray(seed_id)
@@ -284,7 +285,7 @@ class UpgradeWalletTest(BitcoinTestFramework):
new_kvs = dump_bdb_kv(node_master_wallet)
hd_chain = new_kvs[b'\x07hdchain']
assert_equal(32, len(hd_chain))
- hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
+ hd_chain_version, _external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
assert_equal(2, hd_chain_version)
assert_equal(2, internal_counter)
# The next addresses are HD and should be on different HD chains (the one remaining key in each pool should have been flushed)
@@ -301,8 +302,8 @@ class UpgradeWalletTest(BitcoinTestFramework):
new_kvs = dump_bdb_kv(node_master_wallet)
for k, old_v in old_kvs.items():
if k.startswith(b'\x07keymeta'):
- new_ver, new_create_time, new_kp_str, new_seed_id, new_fpr, new_path_len, new_path, new_has_key_orig = deser_keymeta(BytesIO(new_kvs[k]))
- old_ver, old_create_time, old_kp_str, old_seed_id, old_fpr, old_path_len, old_path, old_has_key_orig = deser_keymeta(BytesIO(old_v))
+ new_ver, new_create_time, new_kp_str, new_seed_id, _new_fpr, new_path_len, new_path, new_has_key_orig = deser_keymeta(BytesIO(new_kvs[k]))
+ old_ver, old_create_time, old_kp_str, old_seed_id, _old_fpr, old_path_len, old_path, old_has_key_orig = deser_keymeta(BytesIO(old_v))
assert_equal(10, old_ver)
if old_kp_str == b"": # imported things that don't have keymeta (i.e. imported coinbase privkeys) won't be upgraded
assert_equal(new_kvs[k], old_v)
diff --git a/test/lint/README.md b/test/lint/README.md
index 04a836c4d2..8c1f0fedf0 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -45,13 +45,13 @@ or `--help`:
| Lint test | Dependency |
|-----------|:----------:|
-| [`lint-python.py`](/test/lint/lint-python.py) | [flake8](https://github.com/PyCQA/flake8)
| [`lint-python.py`](/test/lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF)
| [`lint-python.py`](/test/lint/lint-python.py) | [mypy](https://github.com/python/mypy)
| [`lint-python.py`](/test/lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq)
| [`lint-python-dead-code.py`](/test/lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture)
| [`lint-shell.py`](/test/lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck)
| [`lint-spelling.py`](/test/lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell)
+| `py_lint` | [ruff](https://github.com/astral-sh/ruff)
| markdown link check | [mlc](https://github.com/becheran/mlc)
In use versions and install instructions are available in the [CI setup](../../ci/lint/04_install.sh).
diff --git a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py
index 002c59e9a3..86a17fb0f8 100755
--- a/test/lint/lint-format-strings.py
+++ b/test/lint/lint-format-strings.py
@@ -16,27 +16,8 @@ import re
import sys
FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS = [
- 'FatalErrorf,0',
- 'fprintf,1',
'tfm::format,1', # Assuming tfm::format(std::ostream&, ...
- 'LogConnectFailure,1',
- 'LogError,0',
- 'LogWarning,0',
- 'LogInfo,0',
- 'LogDebug,1',
- 'LogTrace,1',
- 'LogPrintf,0',
- 'LogPrintfCategory,1',
- 'LogPrintLevel,2',
- 'printf,0',
- 'snprintf,2',
- 'sprintf,1',
'strprintf,0',
- 'vfprintf,1',
- 'vprintf,1',
- 'vsnprintf,1',
- 'vsprintf,1',
- 'WalletLogPrintf,0',
]
RUN_LINT_FILE = 'test/lint/run-lint-format-strings.py'
@@ -81,7 +62,7 @@ def main():
matching_files_filtered = []
for matching_file in matching_files:
- if not re.search('^src/(leveldb|secp256k1|minisketch|tinyformat|test/fuzz/strprintf.cpp)|contrib/devtools/bitcoin-tidy/example_logprintf.cpp', matching_file):
+ if not re.search('^src/(leveldb|secp256k1|minisketch|tinyformat|test/fuzz/strprintf.cpp)', matching_file):
matching_files_filtered.append(matching_file)
matching_files_filtered.sort()
diff --git a/test/lint/lint-python.py b/test/lint/lint-python.py
index eabd13322e..e2dbe25b88 100755
--- a/test/lint/lint-python.py
+++ b/test/lint/lint-python.py
@@ -5,13 +5,12 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
-Check for specified flake8 and mypy warnings in python files.
+Check for specified mypy warnings in python files.
"""
import os
from pathlib import Path
import subprocess
-import sys
from importlib.metadata import metadata, PackageNotFoundError
@@ -19,89 +18,12 @@ from importlib.metadata import metadata, PackageNotFoundError
cache_dir = Path(__file__).parent.parent / ".mypy_cache"
os.environ["MYPY_CACHE_DIR"] = str(cache_dir)
-DEPS = ['flake8', 'lief', 'mypy', 'pyzmq']
-
-# All .py files, except those in src/ (to exclude subtrees there)
-FLAKE_FILES_ARGS = ['git', 'ls-files', '*.py', ':!:src/*.py']
+DEPS = ['lief', 'mypy', 'pyzmq']
# Only .py files in test/functional and contrib/devtools have type annotations
# enforced.
MYPY_FILES_ARGS = ['git', 'ls-files', 'test/functional/*.py', 'contrib/devtools/*.py']
-ENABLED = (
- 'E101,' # indentation contains mixed spaces and tabs
- 'E112,' # expected an indented block
- 'E113,' # unexpected indentation
- 'E115,' # expected an indented block (comment)
- 'E116,' # unexpected indentation (comment)
- 'E125,' # continuation line with same indent as next logical line
- 'E129,' # visually indented line with same indent as next logical line
- 'E131,' # continuation line unaligned for hanging indent
- 'E133,' # closing bracket is missing indentation
- 'E223,' # tab before operator
- 'E224,' # tab after operator
- 'E242,' # tab after ','
- 'E266,' # too many leading '#' for block comment
- 'E271,' # multiple spaces after keyword
- 'E272,' # multiple spaces before keyword
- 'E273,' # tab after keyword
- 'E274,' # tab before keyword
- 'E275,' # missing whitespace after keyword
- 'E304,' # blank lines found after function decorator
- 'E306,' # expected 1 blank line before a nested definition
- 'E401,' # multiple imports on one line
- 'E402,' # module level import not at top of file
- 'E502,' # the backslash is redundant between brackets
- 'E701,' # multiple statements on one line (colon)
- 'E702,' # multiple statements on one line (semicolon)
- 'E703,' # statement ends with a semicolon
- 'E711,' # comparison to None should be 'if cond is None:'
- 'E714,' # test for object identity should be "is not"
- 'E721,' # do not compare types, use "isinstance()"
- 'E722,' # do not use bare 'except'
- 'E742,' # do not define classes named "l", "O", or "I"
- 'E743,' # do not define functions named "l", "O", or "I"
- 'E901,' # SyntaxError: invalid syntax
- 'E902,' # TokenError: EOF in multi-line string
- 'F401,' # module imported but unused
- 'F402,' # import module from line N shadowed by loop variable
- 'F403,' # 'from foo_module import *' used; unable to detect undefined names
- 'F404,' # future import(s) name after other statements
- 'F405,' # foo_function may be undefined, or defined from star imports: bar_module
- 'F406,' # "from module import *" only allowed at module level
- 'F407,' # an undefined __future__ feature name was imported
- 'F601,' # dictionary key name repeated with different values
- 'F602,' # dictionary key variable name repeated with different values
- 'F621,' # too many expressions in an assignment with star-unpacking
- 'F622,' # two or more starred expressions in an assignment (a, *b, *c = d)
- 'F631,' # assertion test is a tuple, which are always True
- 'F632,' # use ==/!= to compare str, bytes, and int literals
- 'F701,' # a break statement outside of a while or for loop
- 'F702,' # a continue statement outside of a while or for loop
- 'F703,' # a continue statement in a finally block in a loop
- 'F704,' # a yield or yield from statement outside of a function
- 'F705,' # a return statement with arguments inside a generator
- 'F706,' # a return statement outside of a function/method
- 'F707,' # an except: block as not the last exception handler
- 'F811,' # redefinition of unused name from line N
- 'F812,' # list comprehension redefines 'foo' from line N
- 'F821,' # undefined name 'Foo'
- 'F822,' # undefined name name in __all__
- 'F823,' # local variable name … referenced before assignment
- 'F831,' # duplicate argument name in function definition
- 'F841,' # local variable 'foo' is assigned to but never used
- 'W191,' # indentation contains tabs
- 'W291,' # trailing whitespace
- 'W292,' # no newline at end of file
- 'W293,' # blank line contains whitespace
- 'W601,' # .has_key() is deprecated, use "in"
- 'W602,' # deprecated form of raising exception
- 'W603,' # "<>" is deprecated, use "!="
- 'W604,' # backticks are deprecated, use "repr()"
- 'W605,' # invalid escape sequence "x"
- 'W606,' # 'async' and 'await' are reserved keywords starting with Python 3.7
-)
-
def check_dependencies():
for dep in DEPS:
@@ -115,20 +37,6 @@ def check_dependencies():
def main():
check_dependencies()
- if len(sys.argv) > 1:
- flake8_files = sys.argv[1:]
- else:
- flake8_files = subprocess.check_output(FLAKE_FILES_ARGS).decode("utf-8").splitlines()
-
- flake8_args = ['flake8', '--ignore=B,C,E,F,I,N,W', f'--select={ENABLED}'] + flake8_files
- flake8_env = os.environ.copy()
- flake8_env["PYTHONWARNINGS"] = "ignore"
-
- try:
- subprocess.check_call(flake8_args, env=flake8_env)
- except subprocess.CalledProcessError:
- exit(1)
-
mypy_files = subprocess.check_output(MYPY_FILES_ARGS).decode("utf-8").splitlines()
mypy_args = ['mypy', '--show-error-codes'] + mypy_files
diff --git a/test/lint/lint-spelling.py b/test/lint/lint-spelling.py
index 3e578b218f..945288a3dd 100755
--- a/test/lint/lint-spelling.py
+++ b/test/lint/lint-spelling.py
@@ -14,7 +14,7 @@ from subprocess import check_output, STDOUT, CalledProcessError
from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES
IGNORE_WORDS_FILE = 'test/lint/spelling.ignore-words.txt'
-FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)contrib/guix/patches"]
+FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)contrib/guix/patches"]
FILES_ARGS += [f":(exclude){dir}" for dir in SHARED_EXCLUDED_SUBTREES]
diff --git a/test/lint/run-lint-format-strings.py b/test/lint/run-lint-format-strings.py
index 09a2503452..d3c0ac92e5 100755
--- a/test/lint/run-lint-format-strings.py
+++ b/test/lint/run-lint-format-strings.py
@@ -13,17 +13,8 @@ import re
import sys
FALSE_POSITIVES = [
- ("src/dbwrapper.cpp", "vsnprintf(p, limit - p, format, backup_ap)"),
- ("src/index/base.cpp", "FatalErrorf(const char* fmt, const Args&... args)"),
- ("src/index/base.h", "FatalErrorf(const char* fmt, const Args&... args)"),
- ("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args)"),
("src/clientversion.cpp", "strprintf(_(COPYRIGHT_HOLDERS).translated, COPYRIGHT_HOLDERS_SUBSTITUTION)"),
("src/test/translation_tests.cpp", "strprintf(format, arg)"),
- ("src/validationinterface.cpp", "LogDebug(BCLog::VALIDATION, fmt \"\\n\", __VA_ARGS__)"),
- ("src/wallet/wallet.h", "WalletLogPrintf(const char* fmt, Params... parameters)"),
- ("src/wallet/wallet.h", "LogPrintf((\"%s \" + std::string{fmt}).c_str(), GetDisplayName(), parameters...)"),
- ("src/wallet/scriptpubkeyman.h", "WalletLogPrintf(const char* fmt, Params... parameters)"),
- ("src/wallet/scriptpubkeyman.h", "LogPrintf((\"%s \" + std::string{fmt}).c_str(), m_storage.GetDisplayName(), parameters...)"),
]
diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs
index 1a8c11dd42..42c880052e 100644
--- a/test/lint/test_runner/src/main.rs
+++ b/test/lint/test_runner/src/main.rs
@@ -5,9 +5,12 @@
use std::env;
use std::fs;
use std::io::ErrorKind;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
use std::process::{Command, ExitCode, Stdio};
+/// A possible error returned by any of the linters.
+///
+/// The error string should explain the failure type and list all violations.
type LintError = String;
type LintResult = Result<(), LintError>;
type LintFn = fn() -> LintResult;
@@ -26,7 +29,7 @@ fn get_linter_list() -> Vec<&'static Linter> {
lint_fn: lint_doc
},
&Linter {
- description: "Check that no symbol from bitcoin-config.h is used without the header being included",
+ description: "Check that no symbol from bitcoin-build-config.h is used without the header being included",
name: "includes_build_config",
lint_fn: lint_includes_build_config
},
@@ -36,9 +39,9 @@ fn get_linter_list() -> Vec<&'static Linter> {
lint_fn: lint_markdown
},
&Linter {
- description: "Check the default arguments in python",
- name: "py_mut_arg_default",
- lint_fn: lint_py_mut_arg_default,
+ description: "Lint Python code",
+ name: "py_lint",
+ lint_fn: lint_py_lint,
},
&Linter {
description: "Check that std::filesystem is not used directly",
@@ -46,6 +49,11 @@ fn get_linter_list() -> Vec<&'static Linter> {
lint_fn: lint_std_filesystem
},
&Linter {
+ description: "Check that release note snippets are in the right folder",
+ name: "doc_release_note_snippets",
+ lint_fn: lint_doc_release_note_snippets
+ },
+ &Linter {
description: "Check that subtrees are pure subtrees",
name: "subtree",
lint_fn: lint_subtree
@@ -125,20 +133,27 @@ fn parse_lint_args(args: &[String]) -> Vec<&'static Linter> {
}
/// Return the git command
+///
+/// Lint functions should use this command, so that only files tracked by git are considered and
+/// temporary and untracked files are ignored. For example, instead of 'grep', 'git grep' should be
+/// used.
fn git() -> Command {
let mut git = Command::new("git");
git.arg("--no-pager");
git
}
-/// Return stdout
+/// Return stdout on success, or a LintError if the command did not succeed or its output was not
+/// valid UTF8.
fn check_output(cmd: &mut std::process::Command) -> Result<String, LintError> {
let out = cmd.output().expect("command error");
if !out.status.success() {
return Err(String::from_utf8_lossy(&out.stderr).to_string());
}
Ok(String::from_utf8(out.stdout)
- .map_err(|e| format!("{e}"))?
+ .map_err(|e| {
+ format!("All path names, source code, messages, and output must be valid UTF8!\n{e}")
+ })?
.trim()
.to_string())
}
@@ -185,12 +200,50 @@ fn lint_subtree() -> LintResult {
}
}
-fn lint_py_mut_arg_default() -> LintResult {
+fn lint_py_lint() -> LintResult {
let bin_name = "ruff";
- let checks = ["B006", "B008"]
- .iter()
- .map(|c| format!("--select={}", c))
- .collect::<Vec<_>>();
+ let checks = format!(
+ "--select={}",
+ [
+ "B006", // mutable-argument-default
+ "B008", // function-call-in-default-argument
+ "E101", // indentation contains mixed spaces and tabs
+ "E401", // multiple imports on one line
+ "E402", // module level import not at top of file
+ "E701", // multiple statements on one line (colon)
+ "E702", // multiple statements on one line (semicolon)
+ "E703", // statement ends with a semicolon
+ "E711", // comparison to None should be 'if cond is None:'
+ "E714", // test for object identity should be "is not"
+ "E721", // do not compare types, use "isinstance()"
+ "E722", // do not use bare 'except'
+ "E742", // do not define classes named "l", "O", or "I"
+ "E743", // do not define functions named "l", "O", or "I"
+ "F401", // module imported but unused
+ "F402", // import module from line N shadowed by loop variable
+ "F403", // 'from foo_module import *' used; unable to detect undefined names
+ "F404", // future import(s) name after other statements
+ "F405", // foo_function may be undefined, or defined from star imports: bar_module
+ "F406", // "from module import *" only allowed at module level
+ "F407", // an undefined __future__ feature name was imported
+ "F601", // dictionary key name repeated with different values
+ "F602", // dictionary key variable name repeated with different values
+ "F621", // too many expressions in an assignment with star-unpacking
+ "F631", // assertion test is a tuple, which are always True
+ "F632", // use ==/!= to compare str, bytes, and int literals
+ "F811", // redefinition of unused name from line N
+ "F821", // undefined name 'Foo'
+ "F822", // undefined name name in __all__
+ "F823", // local variable name … referenced before assignment
+ "F841", // local variable 'foo' is assigned to but never used
+ "W191", // indentation contains tabs
+ "W291", // trailing whitespace
+ "W292", // no newline at end of file
+ "W293", // blank line contains whitespace
+ "W605", // invalid escape sequence "x"
+ ]
+ .join(",")
+ );
let files = check_output(
git()
.args(["ls-files", "--", "*.py"])
@@ -198,7 +251,7 @@ fn lint_py_mut_arg_default() -> LintResult {
)?;
let mut cmd = Command::new(bin_name);
- cmd.arg("check").args(checks).args(files.lines());
+ cmd.args(["check", &checks]).args(files.lines());
match cmd.status() {
Ok(status) if status.success() => Ok(()),
@@ -238,6 +291,30 @@ fs:: namespace, which has unsafe filesystem functions marked as deleted.
}
}
+fn lint_doc_release_note_snippets() -> LintResult {
+ let non_release_notes = check_output(git().args([
+ "ls-files",
+ "--",
+ "doc/release-notes/",
+ ":(exclude)doc/release-notes/*.*.md", // Assume that at least one dot implies a proper release note
+ ]))?;
+ if non_release_notes.is_empty() {
+ Ok(())
+ } else {
+ Err(format!(
+ r#"
+{}
+^^^
+Release note snippets and other docs must be put into the doc/ folder directly.
+
+The doc/release-notes/ folder is for archived release notes of previous releases only. Snippets are
+expected to follow the naming "/doc/release-notes-<PR number>.md".
+ "#,
+ non_release_notes
+ ))
+ }
+}
+
/// Return the pathspecs for whitespace related excludes
fn get_pathspecs_exclude_whitespace() -> Vec<String> {
let mut list = get_pathspecs_exclude_subtrees();
@@ -318,7 +395,7 @@ Please add any false positives, such as subtrees, or externally sourced files to
}
fn lint_includes_build_config() -> LintResult {
- let config_path = "./cmake/bitcoin-config.h.in";
+ let config_path = "./cmake/bitcoin-build-config.h.in";
let defines_regex = format!(
r"^\s*(?!//).*({})",
check_output(Command::new("grep").args(["define", "--", config_path]))
@@ -352,7 +429,7 @@ fn lint_includes_build_config() -> LintResult {
])
.args(get_pathspecs_exclude_subtrees())
.args([
- // These are exceptions which don't use bitcoin-config.h, rather the Makefile.am adds
+ // These are exceptions which don't use bitcoin-build-config.h; rather, CMakeLists.txt adds
// these cppflags manually.
":(exclude)src/crypto/sha256_arm_shani.cpp",
":(exclude)src/crypto/sha256_avx2.cpp",
@@ -370,9 +447,9 @@ fn lint_includes_build_config() -> LintResult {
"--files-with-matches"
},
if mode {
- "^#include <config/bitcoin-config.h> // IWYU pragma: keep$"
+ "^#include <bitcoin-build-config.h> // IWYU pragma: keep$"
} else {
- "#include <config/bitcoin-config.h>" // Catch redundant includes with and without the IWYU pragma
+ "#include <bitcoin-build-config.h>" // Catch redundant includes with and without the IWYU pragma
},
"--",
])
@@ -386,11 +463,11 @@ fn lint_includes_build_config() -> LintResult {
return Err(format!(
r#"
^^^
-One or more files use a symbol declared in the bitcoin-config.h header. However, they are not
+One or more files use a symbol declared in the bitcoin-build-config.h header. However, they are not
including the header. This is problematic, because the header may or may not be indirectly
included. If the indirect include were to be intentionally or accidentally removed, the build could
still succeed, but silently be buggy. For example, a slower fallback algorithm could be picked,
-even though bitcoin-config.h indicates that a faster feature is available and should be used.
+even though bitcoin-build-config.h indicates that a faster feature is available and should be used.
If you are unsure which symbol is used, you can find it with this command:
git grep --perl-regexp '{}' -- file_name
@@ -398,7 +475,7 @@ git grep --perl-regexp '{}' -- file_name
Make sure to include it with the IWYU pragma. Otherwise, IWYU may falsely instruct to remove the
include again.
-#include <config/bitcoin-config.h> // IWYU pragma: keep
+#include <bitcoin-build-config.h> // IWYU pragma: keep
"#,
defines_regex
));
@@ -407,7 +484,7 @@ include again.
if redundant {
return Err(r#"
^^^
-None of the files use a symbol declared in the bitcoin-config.h header. However, they are including
+None of the files use a symbol declared in the bitcoin-build-config.h header. However, they are including
the header. Consider removing the unused include.
"#
.to_string());
diff --git a/test/util/test_runner.py b/test/util/test_runner.py
index 1cd368f6f4..cac184ca30 100755
--- a/test/util/test_runner.py
+++ b/test/util/test_runner.py
@@ -5,7 +5,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for bitcoin utils.
-Runs automatically during `make check`.
+Runs automatically during `ctest --test-dir build/`.
Can also be run manually."""
@@ -83,13 +83,11 @@ def bctest(testDir, testObj, buildenv):
execrun = [execprog] + execargs
# Read the input data (if there is any)
- stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
with open(filename, encoding="utf8") as f:
inputData = f.read()
- stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
@@ -112,9 +110,8 @@ def bctest(testDir, testObj, buildenv):
raise Exception
# Run the test
- proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
try:
- outs = proc.communicate(input=inputData)
+ res = subprocess.run(execrun, capture_output=True, text=True, input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
@@ -123,9 +120,9 @@ def bctest(testDir, testObj, buildenv):
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
- a_parsed = parse_output(outs[0], outputType)
+ a_parsed = parse_output(res.stdout, outputType)
except Exception as e:
- logging.error('Error parsing command output as %s: %s' % (outputType, e))
+ logging.error(f"Error parsing command output as {outputType}: '{str(e)}'; res: {str(res)}")
raise
try:
b_parsed = parse_output(outputData, outputType)
@@ -134,13 +131,13 @@ def bctest(testDir, testObj, buildenv):
raise
# Compare data
if a_parsed != b_parsed:
- logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
+ logging.error(f"Output data mismatch for {outputFn} (format {outputType}); res: {str(res)}")
data_mismatch = True
# Compare formatting
- if outs[0] != outputData:
- error_message = "Output formatting mismatch for " + outputFn + ":\n"
+ if res.stdout != outputData:
+ error_message = f"Output formatting mismatch for {outputFn}:\nres: {str(res)}\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
- outs[0].splitlines(True),
+ res.stdout.splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
@@ -152,8 +149,8 @@ def bctest(testDir, testObj, buildenv):
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
- if proc.returncode != wantRC:
- logging.error("Return code mismatch for " + outputFn)
+ if res.returncode != wantRC:
+ logging.error(f"Return code mismatch for {outputFn}; res: {str(res)}")
raise Exception
if "error_txt" in testObj:
@@ -164,8 +161,8 @@ def bctest(testDir, testObj, buildenv):
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
- if want_error not in outs[1]:
- logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
+ if want_error not in res.stderr:
+ logging.error(f"Error mismatch:\nExpected: {want_error}\nReceived: {res.stderr.rstrip()}\nres: {str(res)}")
raise Exception
def parse_output(a, fmt):