-rw-r--r--  .appveyor.yml | 63
-rw-r--r--  .cirrus.yml | 121
-rw-r--r--  .editorconfig | 2
-rw-r--r--  CONTRIBUTING.md | 6
-rw-r--r--  Makefile.am | 17
-rw-r--r--  REVIEWERS | 4
-rw-r--r--  build-aux/m4/bitcoin_qt.m4 | 2
-rw-r--r--  build-aux/m4/bitcoin_runtime_lib.m4 | 42
-rw-r--r--  build_msvc/README.md | 13
-rw-r--r--  build_msvc/bitcoin_config.h | 141
-rw-r--r--  build_msvc/common.qt.init.vcxproj | 2
-rw-r--r--  build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj | 2
-rwxr-xr-x  build_msvc/msvc-autogen.py | 2
-rw-r--r--  build_msvc/test_bitcoin/test_bitcoin.vcxproj | 1
-rwxr-xr-x  ci/lint/04_install.sh | 1
-rwxr-xr-x  ci/lint/06_script.sh | 13
-rwxr-xr-x  ci/test/00_setup_env.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_android.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_mac.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_native_fuzz.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_native_multiprocess.sh | 1
-rwxr-xr-x  ci/test/00_setup_env_native_qt5.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_win64.sh | 6
-rw-r--r--  configure.ac | 60
-rw-r--r--  contrib/README.md | 10
-rw-r--r--  contrib/builder-keys/README.md (renamed from contrib/gitian-keys/README.md) | 16
-rw-r--r--  contrib/builder-keys/keys.txt (renamed from contrib/gitian-keys/keys.txt) | 3
-rw-r--r--  contrib/devtools/README.md | 6
-rwxr-xr-x  contrib/devtools/symbol-check.py | 12
-rwxr-xr-x  contrib/devtools/test-security-check.py | 31
-rwxr-xr-x  contrib/devtools/test-symbol-check.py | 54
-rwxr-xr-x  contrib/devtools/utils.py | 22
-rwxr-xr-x  contrib/gitian-build.py | 263
-rw-r--r--  contrib/gitian-descriptors/assign_DISTNAME | 12
-rw-r--r--  contrib/gitian-descriptors/gitian-linux.yml | 167
-rw-r--r--  contrib/gitian-descriptors/gitian-osx-signer.yml | 53
-rw-r--r--  contrib/gitian-descriptors/gitian-osx.yml | 155
-rw-r--r--  contrib/gitian-descriptors/gitian-win-signer.yml | 42
-rw-r--r--  contrib/gitian-descriptors/gitian-win.yml | 157
-rw-r--r--  contrib/guix/INSTALL.md | 801
-rw-r--r--  contrib/guix/README.md | 419
-rwxr-xr-x  contrib/guix/guix-attest | 31
-rwxr-xr-x  contrib/guix/guix-build | 31
-rwxr-xr-x  contrib/guix/guix-codesign | 18
-rwxr-xr-x  contrib/guix/guix-verify | 34
-rwxr-xr-x  contrib/guix/libexec/build.sh | 23
-rwxr-xr-x  contrib/guix/libexec/codesign.sh | 1
-rw-r--r--  contrib/guix/libexec/prelude.bash | 2
-rw-r--r--  contrib/guix/manifest.scm | 53
-rw-r--r--  contrib/guix/patches/binutils-mingw-w64-disable-flags.patch | 171
-rw-r--r--  contrib/guix/patches/gcc-8-sort-libtool-find-output.patch | 8
-rw-r--r--  contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch | 62
-rw-r--r--  contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch | 100
-rw-r--r--  contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include__-to-include-asm-syscalls.h.patch | 72
-rw-r--r--  contrib/guix/patches/glibc-ldd-x86_64.patch | 10
-rw-r--r--  contrib/guix/patches/glibc-versioned-locpath.patch | 240
-rw-r--r--  contrib/guix/patches/nsis-SConstruct-sde-support.patch | 3
-rwxr-xr-x  contrib/install_db4.sh | 8
-rwxr-xr-x  contrib/linearize/linearize-data.py | 3
-rw-r--r--  contrib/macdeploy/README.md | 15
-rwxr-xr-x  contrib/macdeploy/macdeployqtplus | 121
-rw-r--r--  contrib/seeds/nodes_main.txt | 26
-rwxr-xr-x  contrib/signet/getcoins.py | 52
-rwxr-xr-x  contrib/signet/miner | 5
-rw-r--r--  contrib/tracing/README.md | 241
-rwxr-xr-x  contrib/tracing/connectblock_benchmark.bt | 150
-rwxr-xr-x  contrib/tracing/log_p2p_traffic.bt | 28
-rwxr-xr-x  contrib/tracing/log_raw_p2p_msgs.py | 180
-rwxr-xr-x  contrib/tracing/p2p_monitor.py | 250
-rw-r--r--  contrib/verify-commits/README.md | 2
-rwxr-xr-x  contrib/verifybinaries/verify.py | 2
-rwxr-xr-x  contrib/zmq/zmq_sub.py | 11
-rw-r--r--  depends/Makefile | 3
-rw-r--r--  depends/README.md | 8
-rw-r--r--  depends/packages/bdb.mk | 2
-rw-r--r--  depends/packages/boost.mk | 2
-rw-r--r--  depends/packages/libevent.mk | 4
-rw-r--r--  depends/packages/native_cctools.mk | 4
-rw-r--r--  depends/packages/native_clang.mk | 6
-rw-r--r--  depends/packages/qt.mk | 56
-rw-r--r--  depends/packages/sqlite.mk | 4
-rw-r--r--  depends/packages/zeromq.mk | 2
-rw-r--r--  depends/patches/qt/drop_lrelease_dependency.patch | 20
-rw-r--r--  depends/patches/qt/fix_android_pch.patch | 2
-rw-r--r--  depends/patches/qt/fix_android_qmake_conf.patch | 10
-rw-r--r--  depends/patches/qt/mac-qmake.conf | 1
-rw-r--r--  depends/patches/qt/qt.pro | 16
-rw-r--r--  depends/patches/qt/qttools_src.pro | 6
-rw-r--r--  depends/patches/qt/support_new_android_ndks.patch | 122
-rw-r--r--  doc/Doxyfile.in | 4
-rw-r--r--  doc/README.md | 2
-rw-r--r--  doc/benchmarking.md | 27
-rw-r--r--  doc/bitcoin-conf.md | 2
-rw-r--r--  doc/build-openbsd.md | 15
-rw-r--r--  doc/build-unix.md | 17
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/descriptors.md | 2
-rw-r--r--  doc/developer-notes.md | 20
-rw-r--r--  doc/files.md | 2
-rw-r--r--  doc/fuzzing.md | 7
-rw-r--r--  doc/gitian-building.md | 4
-rw-r--r--  doc/i2p.md | 77
-rw-r--r--  doc/managing-wallets.md | 125
-rw-r--r--  doc/release-notes.md | 143
-rw-r--r--  doc/release-process.md | 285
-rw-r--r--  doc/tor.md | 10
-rw-r--r--  doc/tracing.md | 266
-rw-r--r--  doc/translation_process.md | 13
-rwxr-xr-x  share/rpcauth/rpcauth.py | 3
-rw-r--r--  src/.clang-tidy | 2
-rw-r--r--  src/Makefile.am | 15
-rw-r--r--  src/Makefile.bench.include | 1
-rw-r--r--  src/Makefile.leveldb.include | 1
-rw-r--r--  src/Makefile.qt.include | 7
-rw-r--r--  src/Makefile.test.include | 10
-rw-r--r--  src/addrdb.cpp | 77
-rw-r--r--  src/addrdb.h | 54
-rw-r--r--  src/addrman.cpp | 389
-rw-r--r--  src/addrman.h | 475
-rw-r--r--  src/banman.cpp | 2
-rw-r--r--  src/banman.h | 2
-rw-r--r--  src/bench/addrman.cpp | 18
-rw-r--r--  src/bench/bench.h | 18
-rw-r--r--  src/bench/bench_bitcoin.cpp | 8
-rw-r--r--  src/bench/coin_selection.cpp | 13
-rw-r--r--  src/bench/peer_eviction.cpp | 157
-rw-r--r--  src/bench/verify_script.cpp | 2
-rw-r--r--  src/bench/wallet_balance.cpp | 5
-rw-r--r--  src/bitcoin-cli-res.rc | 6
-rw-r--r--  src/bitcoin-cli.cpp | 189
-rw-r--r--  src/bitcoin-tx-res.rc | 6
-rw-r--r--  src/bitcoin-tx.cpp | 11
-rw-r--r--  src/bitcoin-util-res.rc | 6
-rw-r--r--  src/bitcoin-wallet-res.rc | 6
-rw-r--r--  src/bitcoind-res.rc | 6
-rw-r--r--  src/chainparams.cpp | 54
-rw-r--r--  src/chainparams.h | 11
-rw-r--r--  src/chainparamsbase.cpp | 2
-rw-r--r--  src/chainparamsseeds.h | 24
-rw-r--r--  src/clientversion.cpp | 9
-rw-r--r--  src/clientversion.h | 1
-rw-r--r--  src/consensus/params.h | 14
-rw-r--r--  src/consensus/tx_verify.cpp | 2
-rw-r--r--  src/consensus/tx_verify.h | 4
-rw-r--r--  src/crypto/chacha_poly_aead.cpp | 5
-rw-r--r--  src/deploymentstatus.cpp | 17
-rw-r--r--  src/deploymentstatus.h | 2
-rw-r--r--  src/dummywallet.cpp | 1
-rw-r--r--  src/fs.cpp | 12
-rw-r--r--  src/hash.cpp | 2
-rw-r--r--  src/httprpc.cpp | 6
-rw-r--r--  src/i2p.cpp | 11
-rw-r--r--  src/index/coinstatsindex.cpp | 187
-rw-r--r--  src/index/coinstatsindex.h | 16
-rw-r--r--  src/index/txindex.cpp | 2
-rw-r--r--  src/init.cpp | 165
-rw-r--r--  src/init.h | 3
-rw-r--r--  src/interfaces/chain.h | 16
-rw-r--r--  src/interfaces/ipc.h | 7
-rw-r--r--  src/interfaces/wallet.h | 5
-rw-r--r--  src/ipc/capnp/context.h | 23
-rw-r--r--  src/ipc/capnp/protocol.cpp | 7
-rw-r--r--  src/ipc/context.h | 19
-rw-r--r--  src/ipc/interfaces.cpp | 1
-rw-r--r--  src/ipc/protocol.h | 5
-rw-r--r--  src/key.cpp | 3
-rw-r--r--  src/key.h | 13
-rw-r--r--  src/logging.cpp | 27
-rw-r--r--  src/logging.h | 5
-rw-r--r--  src/logging/timer.h | 12
-rw-r--r--  src/miner.cpp | 8
-rw-r--r--  src/net.cpp | 165
-rw-r--r--  src/net.h | 35
-rw-r--r--  src/net_permissions.h | 3
-rw-r--r--  src/net_processing.cpp | 299
-rw-r--r--  src/net_processing.h | 8
-rw-r--r--  src/net_types.cpp | 65
-rw-r--r--  src/net_types.h | 47
-rw-r--r--  src/netaddress.cpp | 2
-rw-r--r--  src/netaddress.h | 832
-rw-r--r--  src/node/blockstorage.cpp | 2
-rw-r--r--  src/node/coinstats.h | 26
-rw-r--r--  src/node/interfaces.cpp | 13
-rw-r--r--  src/node/transaction.cpp | 155
-rw-r--r--  src/node/transaction.h | 20
-rw-r--r--  src/outputtype.cpp | 17
-rw-r--r--  src/outputtype.h | 3
-rw-r--r--  src/policy/rbf.cpp | 35
-rw-r--r--  src/policy/rbf.h | 19
-rw-r--r--  src/protocol.h | 13
-rw-r--r--  src/pubkey.cpp | 20
-rw-r--r--  src/pubkey.h | 5
-rw-r--r--  src/qt/bitcoin.cpp | 163
-rw-r--r--  src/qt/bitcoin.h | 34
-rw-r--r--  src/qt/bitcoingui.cpp | 26
-rw-r--r--  src/qt/clientmodel.cpp | 4
-rw-r--r--  src/qt/createwalletdialog.cpp | 5
-rw-r--r--  src/qt/createwalletdialog.h | 1
-rw-r--r--  src/qt/forms/optionsdialog.ui | 80
-rw-r--r--  src/qt/guiutil.cpp | 2
-rw-r--r--  src/qt/initexecutor.cpp | 66
-rw-r--r--  src/qt/initexecutor.h | 46
-rw-r--r--  src/qt/locale/bitcoin_en.ts | 8
-rw-r--r--  src/qt/optionsdialog.cpp | 1
-rw-r--r--  src/qt/optionsmodel.cpp | 11
-rw-r--r--  src/qt/optionsmodel.h | 3
-rw-r--r--  src/qt/overviewpage.cpp | 19
-rw-r--r--  src/qt/peertablemodel.cpp | 14
-rw-r--r--  src/qt/peertablemodel.h | 7
-rw-r--r--  src/qt/peertablesortproxy.cpp | 4
-rw-r--r--  src/qt/psbtoperationsdialog.cpp | 33
-rw-r--r--  src/qt/recentrequeststablemodel.cpp | 2
-rw-r--r--  src/qt/recentrequeststablemodel.h | 6
-rw-r--r--  src/qt/rpcconsole.cpp | 16
-rw-r--r--  src/qt/sendcoinsentry.cpp | 4
-rw-r--r--  src/qt/test/addressbooktests.cpp | 7
-rw-r--r--  src/qt/test/apptests.cpp | 17
-rw-r--r--  src/qt/test/test_main.cpp | 1
-rw-r--r--  src/qt/test/wallettests.cpp | 7
-rw-r--r--  src/qt/transactionfilterproxy.cpp | 19
-rw-r--r--  src/qt/transactionfilterproxy.h | 13
-rw-r--r--  src/qt/transactiontablemodel.cpp | 4
-rw-r--r--  src/qt/transactionview.cpp | 14
-rw-r--r--  src/qt/walletframe.cpp | 54
-rw-r--r--  src/qt/walletframe.h | 4
-rw-r--r--  src/qt/walletview.cpp | 135
-rw-r--r--  src/qt/walletview.h | 23
-rw-r--r--  src/qt/winshutdownmonitor.h | 2
-rw-r--r--  src/rest.cpp | 1
-rw-r--r--  src/rpc/blockchain.cpp | 86
-rw-r--r--  src/rpc/client.cpp | 2
-rw-r--r--  src/rpc/mining.cpp | 5
-rw-r--r--  src/rpc/misc.cpp | 20
-rw-r--r--  src/rpc/net.cpp | 14
-rw-r--r--  src/rpc/rawtransaction.cpp | 15
-rw-r--r--  src/rpc/rawtransaction_util.cpp | 9
-rw-r--r--  src/rpc/rawtransaction_util.h | 3
-rw-r--r--  src/script/descriptor.cpp | 8
-rw-r--r--  src/script/interpreter.cpp | 14
-rw-r--r--  src/script/interpreter.h | 14
-rw-r--r--  src/script/script.h | 9
-rw-r--r--  src/script/sign.cpp | 66
-rw-r--r--  src/script/sign.h | 7
-rw-r--r--  src/script/signingprovider.h | 24
-rw-r--r--  src/script/standard.cpp | 1
-rw-r--r--  src/script/standard.h | 9
-rw-r--r--  src/secp256k1/.cirrus.yml | 202
-rw-r--r--  src/secp256k1/.gitignore | 9
-rw-r--r--  src/secp256k1/Makefile.am | 22
-rw-r--r--  src/secp256k1/README.md | 4
-rw-r--r--  src/secp256k1/build-aux/m4/bitcoin_secp.m4 | 16
-rwxr-xr-x  src/secp256k1/ci/cirrus.sh | 33
-rw-r--r--  src/secp256k1/ci/linux-debian.Dockerfile | 18
-rw-r--r--  src/secp256k1/configure.ac | 120
-rw-r--r--  src/secp256k1/contrib/lax_der_parsing.c | 5
-rw-r--r--  src/secp256k1/contrib/lax_der_parsing.h | 6
-rw-r--r--  src/secp256k1/contrib/lax_der_privatekey_parsing.c | 3
-rw-r--r--  src/secp256k1/contrib/lax_der_privatekey_parsing.h | 6
-rw-r--r--  src/secp256k1/include/secp256k1.h | 60
-rw-r--r--  src/secp256k1/include/secp256k1_extrakeys.h | 21
-rw-r--r--  src/secp256k1/include/secp256k1_schnorrsig.h | 115
-rw-r--r--  src/secp256k1/obj/.gitignore | 0
-rw-r--r--  src/secp256k1/src/bench_ecdh.c | 4
-rw-r--r--  src/secp256k1/src/bench_ecmult.c | 224
-rw-r--r--  src/secp256k1/src/bench_internal.c | 4
-rw-r--r--  src/secp256k1/src/bench_recover.c | 4
-rw-r--r--  src/secp256k1/src/bench_schnorrsig.c | 19
-rw-r--r--  src/secp256k1/src/bench_sign.c | 2
-rw-r--r--  src/secp256k1/src/bench_verify.c | 2
-rw-r--r--  src/secp256k1/src/ecdsa_impl.h | 2
-rw-r--r--  src/secp256k1/src/ecmult.h | 1
-rw-r--r--  src/secp256k1/src/ecmult_gen.h | 1
-rw-r--r--  src/secp256k1/src/gen_context.c | 8
-rw-r--r--  src/secp256k1/src/group_impl.h | 12
-rw-r--r--  src/secp256k1/src/modules/ecdh/main_impl.h | 4
-rw-r--r--  src/secp256k1/src/modules/extrakeys/main_impl.h | 30
-rw-r--r--  src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h | 2
-rw-r--r--  src/secp256k1/src/modules/extrakeys/tests_impl.h | 40
-rw-r--r--  src/secp256k1/src/modules/recovery/main_impl.h | 2
-rw-r--r--  src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h | 2
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/main_impl.h | 69
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h | 26
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_impl.h | 235
-rw-r--r--  src/secp256k1/src/secp256k1.c | 49
-rw-r--r--  src/secp256k1/src/testrand_impl.h | 2
-rw-r--r--  src/secp256k1/src/tests.c | 182
-rw-r--r--  src/secp256k1/src/tests_exhaustive.c | 9
-rw-r--r--  src/secp256k1/src/valgrind_ctime_test.c | 12
-rw-r--r--  src/signet.cpp | 2
-rw-r--r--  src/sync.cpp | 11
-rw-r--r--  src/sync.h | 19
-rw-r--r--  src/test/addrman_tests.cpp | 443
-rw-r--r--  src/test/bip32_tests.cpp | 34
-rw-r--r--  src/test/crypto_tests.cpp | 4
-rw-r--r--  src/test/denialofservice_tests.cpp | 8
-rw-r--r--  src/test/fuzz/addrdb.cpp | 37
-rw-r--r--  src/test/fuzz/addrman.cpp | 249
-rw-r--r--  src/test/fuzz/banman.cpp | 14
-rw-r--r--  src/test/fuzz/blockfilter.cpp | 5
-rw-r--r--  src/test/fuzz/coins_view.cpp | 2
-rw-r--r--  src/test/fuzz/connman.cpp | 8
-rw-r--r--  src/test/fuzz/crypto.cpp | 7
-rw-r--r--  src/test/fuzz/data_stream.cpp | 2
-rw-r--r--  src/test/fuzz/deserialize.cpp | 279
-rw-r--r--  src/test/fuzz/fuzz.h | 7
-rw-r--r--  src/test/fuzz/integer.cpp | 26
-rw-r--r--  src/test/fuzz/key_io.cpp | 11
-rw-r--r--  src/test/fuzz/kitchen_sink.cpp | 8
-rw-r--r--  src/test/fuzz/multiplication_overflow.cpp | 12
-rw-r--r--  src/test/fuzz/net.cpp | 6
-rw-r--r--  src/test/fuzz/netaddress.cpp | 2
-rw-r--r--  src/test/fuzz/parse_numbers.cpp | 3
-rw-r--r--  src/test/fuzz/prevector.cpp | 3
-rw-r--r--  src/test/fuzz/process_message.cpp | 12
-rw-r--r--  src/test/fuzz/rolling_bloom_filter.cpp | 14
-rw-r--r--  src/test/fuzz/script.cpp | 40
-rw-r--r--  src/test/fuzz/script_sign.cpp | 3
-rw-r--r--  src/test/fuzz/string.cpp | 3
-rw-r--r--  src/test/fuzz/system.cpp | 3
-rw-r--r--  src/test/fuzz/tx_pool.cpp | 6
-rw-r--r--  src/test/fuzz/util.cpp | 194
-rw-r--r--  src/test/fuzz/util.h | 189
-rw-r--r--  src/test/logging_tests.cpp | 2
-rw-r--r--  src/test/miner_tests.cpp | 62
-rw-r--r--  src/test/net_peer_eviction_tests.cpp | 22
-rw-r--r--  src/test/net_tests.cpp | 179
-rw-r--r--  src/test/script_tests.cpp | 16
-rw-r--r--  src/test/serfloat_tests.cpp | 13
-rw-r--r--  src/test/sigopcount_tests.cpp | 4
-rw-r--r--  src/test/streams_tests.cpp | 2
-rw-r--r--  src/test/transaction_tests.cpp | 133
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 11
-rw-r--r--  src/test/util/net.cpp | 25
-rw-r--r--  src/test/util/net.h | 2
-rw-r--r--  src/test/util/setup_common.cpp | 12
-rw-r--r--  src/test/util/wallet.cpp | 3
-rw-r--r--  src/test/util_tests.cpp | 129
-rw-r--r--  src/test/validation_block_tests.cpp | 18
-rw-r--r--  src/test/validation_chainstate_tests.cpp | 3
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp | 8
-rw-r--r--  src/test/validation_flush_tests.cpp | 25
-rw-r--r--  src/tinyformat.h | 10
-rw-r--r--  src/torcontrol.cpp | 23
-rw-r--r--  src/txmempool.cpp | 116
-rw-r--r--  src/txmempool.h | 42
-rw-r--r--  src/txorphanage.h | 7
-rw-r--r--  src/util/epochguard.h | 10
-rw-r--r--  src/util/getuniquepath.cpp | 4
-rw-r--r--  src/util/hasher.h | 4
-rw-r--r--  src/util/moneystr.cpp | 29
-rw-r--r--  src/util/moneystr.h | 3
-rw-r--r--  src/util/rbf.h | 11
-rw-r--r--  src/util/settings.cpp | 8
-rw-r--r--  src/util/string.h | 8
-rw-r--r--  src/util/system.cpp | 9
-rw-r--r--  src/util/system.h | 2
-rw-r--r--  src/util/translation.h | 6
-rw-r--r--  src/validation.cpp | 327
-rw-r--r--  src/validation.h | 86
-rw-r--r--  src/wallet/coinselection.cpp | 31
-rw-r--r--  src/wallet/coinselection.h | 15
-rw-r--r--  src/wallet/context.h | 16
-rw-r--r--  src/wallet/feebumper.cpp | 14
-rw-r--r--  src/wallet/init.cpp | 1
-rw-r--r--  src/wallet/interfaces.cpp | 67
-rw-r--r--  src/wallet/load.cpp | 64
-rw-r--r--  src/wallet/load.h | 13
-rw-r--r--  src/wallet/receive.cpp | 229
-rw-r--r--  src/wallet/receive.h | 44
-rw-r--r--  src/wallet/rpcdump.cpp | 33
-rw-r--r--  src/wallet/rpcwallet.cpp | 237
-rw-r--r--  src/wallet/scriptpubkeyman.cpp | 37
-rw-r--r--  src/wallet/scriptpubkeyman.h | 33
-rw-r--r--  src/wallet/spend.cpp | 238
-rw-r--r--  src/wallet/spend.h | 83
-rw-r--r--  src/wallet/test/coinselector_tests.cpp | 166
-rw-r--r--  src/wallet/test/db_tests.cpp | 4
-rw-r--r--  src/wallet/test/init_test_fixture.cpp | 4
-rw-r--r--  src/wallet/test/psbt_wallet_tests.cpp | 4
-rw-r--r--  src/wallet/test/spend_tests.cpp | 62
-rw-r--r--  src/wallet/test/util.cpp | 38
-rw-r--r--  src/wallet/test/util.h | 19
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 106
-rw-r--r--  src/wallet/transaction.h | 73
-rw-r--r--  src/wallet/wallet.cpp | 395
-rw-r--r--  src/wallet/wallet.h | 158
-rw-r--r--  src/wallet/walletdb.cpp | 6
-rw-r--r--  src/wallet/walletdb.h | 3
-rw-r--r--  test/README.md | 19
-rw-r--r--  test/config.ini.in | 2
-rw-r--r--  test/functional/data/invalid_txs.py | 48
-rwxr-xr-x  test/functional/feature_addrman.py | 87
-rwxr-xr-x  test/functional/feature_anchors.py | 4
-rwxr-xr-x  test/functional/feature_asmap.py | 14
-rwxr-xr-x  test/functional/feature_backwards_compatibility.py | 9
-rwxr-xr-x  test/functional/feature_bind_extra.py | 95
-rwxr-xr-x  test/functional/feature_block.py | 54
-rwxr-xr-x  test/functional/feature_blocksdir.py | 4
-rwxr-xr-x  test/functional/feature_cltv.py | 18
-rwxr-xr-x  test/functional/feature_coinstatsindex.py | 14
-rwxr-xr-x  test/functional/feature_config_args.py | 36
-rwxr-xr-x  test/functional/feature_csv_activation.py | 4
-rwxr-xr-x  test/functional/feature_dbcrash.py | 40
-rwxr-xr-x  test/functional/feature_dersig.py | 11
-rwxr-xr-x  test/functional/feature_fee_estimation.py | 21
-rwxr-xr-x  test/functional/feature_filelock.py | 8
-rwxr-xr-x  test/functional/feature_help.py | 6
-rwxr-xr-x  test/functional/feature_loadblock.py | 24
-rwxr-xr-x  test/functional/feature_logging.py | 12
-rwxr-xr-x  test/functional/feature_maxuploadtarget.py | 4
-rwxr-xr-x  test/functional/feature_minchainwork.py | 10
-rwxr-xr-x  test/functional/feature_notifications.py | 12
-rwxr-xr-x  test/functional/feature_nulldummy.py | 25
-rwxr-xr-x  test/functional/feature_presegwit_node_upgrade.py | 52
-rwxr-xr-x  test/functional/feature_proxy.py | 28
-rwxr-xr-x  test/functional/feature_pruning.py | 45
-rwxr-xr-x  test/functional/feature_rbf.py | 169
-rwxr-xr-x  test/functional/feature_segwit.py | 85
-rwxr-xr-x  test/functional/feature_settings.py | 2
-rwxr-xr-x  test/functional/feature_taproot.py | 43
-rwxr-xr-x  test/functional/feature_versionbits_warning.py | 4
-rwxr-xr-x  test/functional/interface_bitcoin_cli.py | 155
-rwxr-xr-x  test/functional/interface_http.py | 20
-rwxr-xr-x  test/functional/interface_rest.py | 70
-rwxr-xr-x  test/functional/interface_rpc.py | 2
-rwxr-xr-x  test/functional/interface_zmq.py | 14
-rwxr-xr-x  test/functional/mempool_accept.py | 20
-rwxr-xr-x  test/functional/mempool_accept_wtxid.py | 128
-rwxr-xr-x  test/functional/mempool_compatibility.py | 8
-rwxr-xr-x  test/functional/mempool_limit.py | 2
-rwxr-xr-x  test/functional/mempool_package_limits.py | 475
-rwxr-xr-x  test/functional/mempool_package_onemore.py | 4
-rwxr-xr-x  test/functional/mempool_packages.py | 63
-rwxr-xr-x  test/functional/mempool_reorg.py | 2
-rwxr-xr-x  test/functional/mempool_unbroadcast.py | 6
-rwxr-xr-x  test/functional/mempool_updatefromblock.py | 11
-rwxr-xr-x  test/functional/mining_basic.py | 25
-rwxr-xr-x  test/functional/mining_getblocktemplate_longpoll.py | 6
-rwxr-xr-x  test/functional/mining_prioritisetransaction.py | 10
-rwxr-xr-x  test/functional/p2p_addr_relay.py | 179
-rwxr-xr-x  test/functional/p2p_addrfetch.py | 79
-rwxr-xr-x  test/functional/p2p_addrv2_relay.py | 28
-rwxr-xr-x  test/functional/p2p_dns_seeds.py | 129
-rwxr-xr-x  test/functional/p2p_i2p_ports.py | 43
-rwxr-xr-x  test/functional/p2p_invalid_messages.py | 8
-rwxr-xr-x  test/functional/p2p_invalid_tx.py | 6
-rwxr-xr-x  test/functional/p2p_permissions.py | 2
-rwxr-xr-x  test/functional/p2p_segwit.py | 369
-rwxr-xr-x  test/functional/p2p_unrequested_blocks.py | 2
-rwxr-xr-x  test/functional/rpc_addresses_deprecation.py | 3
-rwxr-xr-x  test/functional/rpc_blockchain.py | 95
-rwxr-xr-x  test/functional/rpc_createmultisig.py | 5
-rwxr-xr-x  test/functional/rpc_decodescript.py | 13
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 69
-rwxr-xr-x  test/functional/rpc_misc.py | 16
-rwxr-xr-x  test/functional/rpc_packages.py | 59
-rwxr-xr-x  test/functional/rpc_rawtransaction.py | 625
-rwxr-xr-x  test/functional/rpc_signmessagewithprivkey.py (renamed from test/functional/rpc_signmessage.py) | 27
-rwxr-xr-x  test/functional/rpc_signrawtransaction.py | 23
-rw-r--r--  test/functional/test_framework/address.py | 10
-rw-r--r--  test/functional/test_framework/bdb.py | 8
-rw-r--r--  test/functional/test_framework/blocktools.py | 31
-rw-r--r--  test/functional/test_framework/coverage.py | 21
-rwxr-xr-x  test/functional/test_framework/messages.py | 57
-rw-r--r--  test/functional/test_framework/netutil.py | 3
-rwxr-xr-x  test/functional/test_framework/p2p.py | 1
-rwxr-xr-x  test/functional/test_framework/script_util.py | 7
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 30
-rwxr-xr-x  test/functional/test_framework/test_node.py | 7
-rw-r--r--  test/functional/test_framework/util.py | 49
-rw-r--r--  test/functional/test_framework/wallet.py | 80
-rwxr-xr-x  test/functional/test_framework/wallet_util.py | 41
-rwxr-xr-x  test/functional/test_runner.py | 20
-rwxr-xr-x  test/functional/wallet_backup.py | 45
-rwxr-xr-x  test/functional/wallet_basic.py | 2
-rwxr-xr-x  test/functional/wallet_descriptor.py | 2
-rwxr-xr-x  test/functional/wallet_dump.py | 9
-rwxr-xr-x  test/functional/wallet_importdescriptors.py | 4
-rwxr-xr-x  test/functional/wallet_listdescriptors.py | 35
-rwxr-xr-x  test/functional/wallet_listtransactions.py | 35
-rwxr-xr-x  test/functional/wallet_signmessagewithaddress.py | 45
-rwxr-xr-x  test/functional/wallet_upgradewallet.py | 15
-rwxr-xr-x  test/get_previous_releases.py | 76
-rwxr-xr-x  test/lint/lint-circular-dependencies.sh | 6
-rwxr-xr-x  test/lint/lint-locale-dependence.sh | 2
-rwxr-xr-x  test/lint/lint-python.sh | 2
-rwxr-xr-x  test/lint/lint-shell.sh | 22
-rwxr-xr-x  test/lint/lint-spelling.sh | 2
-rw-r--r--  test/sanitizer_suppressions/ubsan | 1
-rwxr-xr-x  test/util/test_runner.py (renamed from test/util/bitcoin-util-test.py) | 3
490 files changed, 14315 insertions, 8098 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
deleted file mode 100644
index 3ca7818eca..0000000000
--- a/.appveyor.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-version: '{branch}.{build}'
-skip_tags: true
-image: Visual Studio 2019
-configuration: Release
-platform: x64
-clone_depth: 5
-environment:
- PATH: 'C:\Python37-x64;C:\Python37-x64\Scripts;%PATH%'
- PYTHONUTF8: 1
- QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/qt51211x64_static_vs2019_16101/Qt5.12.11_x64_static_vs2019_16101.zip'
- QT_DOWNLOAD_HASH: 'cf1b58107fadbf0d9a957d14dab16cde6b6eb6936a1908472da1f967dda34a3a'
- QT_LOCAL_PATH: 'C:\Qt5.12.11_x64_static_vs2019_16101'
- VCPKG_TAG: '75522bb1f2e7d863078bcd06322348f053a9e33f'
-install:
-# Disable zmq test for now since python zmq library on Windows would cause Access violation sometimes.
-# - cmd: pip install zmq
-# The powershell block below is to set up vcpkg to install the c++ dependencies. The pseudo code is:
-# a. Checkout the vcpkg source (including port files) for the specific checkout and build the vcpkg binary,
-# b. Append a setting to the vcpkg cmake config file to only do release builds of dependencies (skipping deubg builds saves ~5 mins).
-# Note originally this block also installed the dependencies using 'vcpkg install'. Dependencies are now installed
-# as part of the msbuild command using vcpkg mainfests.
-- ps: |
- cd c:\tools\vcpkg
- $env:GIT_REDIRECT_STDERR = '2>&1' # git is writing non-errors to STDERR when doing git pull. Send to STDOUT instead.
- git -c advice.detachedHead=false checkout $env:VCPKG_TAG
- .\bootstrap-vcpkg.bat > $null
- Add-Content "C:\tools\vcpkg\triplets\$env:PLATFORM-windows-static.cmake" "set(VCPKG_BUILD_TYPE release)"
- cd "$env:APPVEYOR_BUILD_FOLDER"
-before_build:
-# Powershell block below is to download and extract the Qt static libraries. The pseudo code is:
-# a. Download the zip file with the prebuilt Qt static libraries.
-# b. Check that the downloaded file matches the expected hash.
-# c. Extract the zip file to the specific destination path expected by the msbuild projects.
-- ps: |
- Write-Host "Downloading Qt binaries.";
- Invoke-WebRequest -Uri $env:QT_DOWNLOAD_URL -Out qtdownload.zip;
- Write-Host "Qt binaries successfully downloaded, checking hash against $env:QT_DOWNLOAD_HASH...";
- if((Get-FileHash qtdownload.zip).Hash -eq $env:QT_DOWNLOAD_HASH) {
- Expand-Archive qtdownload.zip -DestinationPath $env:QT_LOCAL_PATH;
- Write-Host "Qt binary download matched the expected hash.";
- }
- else {
- Write-Host "ERROR: Qt binary download did not match the expected hash.";
- Exit-AppveyorBuild;
- }
-- cmd: python build_msvc\msvc-autogen.py
-build_script:
-- cmd: msbuild /p:TrackFileAccess=false build_msvc\bitcoin.sln /m /v:q /nologo
-after_build:
-#- 7z a bitcoin-%APPVEYOR_BUILD_VERSION%.zip %APPVEYOR_BUILD_FOLDER%\build_msvc\%platform%\%configuration%\*.exe
-test_script:
-- cmd: src\test_bitcoin.exe -l test_suite
-- cmd: src\bench_bitcoin.exe > NUL
-- ps: python test\util\bitcoin-util-test.py
-- cmd: python test\util\rpcauth-test.py
-# Fee estimation test failing on appveyor with: WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted.
-# functional tests disabled for now. See
-# https://github.com/bitcoin/bitcoin/pull/18626#issuecomment-613396202
-# https://github.com/bitcoin/bitcoin/issues/18623
-# - cmd: python test\functional\test_runner.py --ci --quiet --combinedlogslen=4000 --failfast --exclude feature_fee_estimation
-artifacts:
-#- path: bitcoin-%APPVEYOR_BUILD_VERSION%.zip
-deploy: off
diff --git a/.cirrus.yml b/.cirrus.yml
index 26bd27754f..5105e0a490 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -18,19 +18,23 @@ persistent_worker_template: &PERSISTENT_WORKER_TEMPLATE
persistent_worker: {} # https://cirrus-ci.org/guide/persistent-workers/
# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks
-base_template: &BASE_TEMPLATE
+filter_template: &FILTER_TEMPLATE
skip: $CIRRUS_REPO_FULL_NAME == "bitcoin-core/gui" && $CIRRUS_PR == "" # No need to run on the read-only mirror, unless it is a PR. https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution
+ stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks
+
+base_template: &BASE_TEMPLATE
+ << : *FILTER_TEMPLATE
merge_base_script:
- - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
+ # Unconditionally install git (used in fingerprint_script) and set the
+ # default git author name (used in verify-commits.py)
- bash -c "$PACKAGE_MANAGER_INSTALL git"
- - git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH
- git config --global user.email "ci@ci.ci"
- git config --global user.name "ci"
+ - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
+ - git fetch $CIRRUS_REPO_CLONE_URL $CIRRUS_BASE_BRANCH
- git merge FETCH_HEAD # Merge base to detect silent merge conflicts
- stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks
-global_task_template: &GLOBAL_TASK_TEMPLATE
- << : *BASE_TEMPLATE
+main_template: &MAIN_TEMPLATE
timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out
container:
# https://cirrus-ci.org/faq/#are-there-any-limits
@@ -41,9 +45,14 @@ global_task_template: &GLOBAL_TASK_TEMPLATE
folder: "/tmp/ccache_dir"
depends_built_cache:
folder: "depends/built"
+ fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-list -1 HEAD ./depends)
ci_script:
- ./ci/test_run_all.sh
+global_task_template: &GLOBAL_TASK_TEMPLATE
+ << : *BASE_TEMPLATE
+ << : *MAIN_TEMPLATE
+
depends_sdk_cache_template: &DEPENDS_SDK_CACHE_TEMPLATE
depends_sdk_cache:
folder: "depends/sdk-sources"
@@ -53,22 +62,6 @@ compute_credits_template: &CREDITS_TEMPLATE
# Only use credits for pull requests to the main repo
use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != ""
-#task:
-# name: "Windows"
-# windows_container:
-# image: cirrusci/windowsservercore:2019
-# env:
-# CIRRUS_SHELL: powershell
-# PATH: 'C:\Python37;C:\Python37\Scripts;%PATH%'
-# PYTHONUTF8: 1
-# QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/v1.6/Qt5.9.8_x64_static_vs2019.zip'
-# QT_DOWNLOAD_HASH: '9a8c6eb20967873785057fdcd329a657c7f922b0af08c5fde105cc597dd37e21'
-# QT_LOCAL_PATH: 'C:\Qt5.9.8_x64_static_vs2019'
-# VCPKG_INSTALL_PATH: 'C:\tools\vcpkg\installed'
-# VCPKG_COMMIT_ID: 'ed0df8ecc4ed7e755ea03e18aaf285fd9b4b4a74'
-# install_script:
-# - choco install python --version=3.7.7 -y
-
task:
name: 'lint [bionic]'
<< : *BASE_TEMPLATE
@@ -84,13 +77,90 @@ task:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
task:
+ name: "Win64 native [unit tests, no functional tests] [msvc]"
+ << : *FILTER_TEMPLATE
+ windows_container:
+ cpu: 4
+ memory: 16G
+ image: cirrusci/windowsservercore:visualstudio2019
+ timeout_in: 120m
+ env:
+ PATH: 'C:\jom;C:\Python39;C:\Python39\Scripts;C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin;%PATH%'
+ PYTHONUTF8: 1
+ VCPKG_TAG: '75522bb1f2e7d863078bcd06322348f053a9e33f'
+ VCPKG_FEATURE_FLAGS: 'manifests'
+ QT_DOWNLOAD_URL: 'https://download.qt.io/official_releases/qt/5.12/5.12.11/single/qt-everywhere-src-5.12.11.zip'
+ QT_LOCAL_PATH: 'C:\qt-everywhere-src-5.12.11.zip'
+ QT_SOURCE_DIR: 'C:\qt-everywhere-src-5.12.11'
+ QTBASEDIR: 'C:\Qt5.12.11_x64_static_vs2019_160900'
+ x64_NATIVE_TOOLS: '"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvars64.bat"'
+ IgnoreWarnIntDirInTempDetected: 'true'
+ merge_script:
+ - ps: git config --global user.email "ci@ci.ci"
+ - ps: git config --global user.name "ci"
+ # Windows filesystem loses the executable bit, and all of the executable
+ # files are considered "modified" now. It will break the following `git merge`
+ # command. The next two commands make git ignore this issue.
+ - ps: git config core.filemode false
+ - ps: git reset --hard
+ - ps: if ($env:CIRRUS_PR -eq $null) { exit 0; }
+ - ps: git fetch $env:CIRRUS_REPO_CLONE_URL $env:CIRRUS_BASE_BRANCH
+ # Merge base to detect silent merge conflicts.
+ - ps: git merge FETCH_HEAD
+ msvc_qt_built_cache:
+ folder: "%QTBASEDIR%"
+ reupload_on_changes: false
+ fingerprint_script:
+ - echo %QT_DOWNLOAD_URL%
+ - msbuild -version
+ populate_script:
+ - curl -L -o C:\jom.zip http://download.qt.io/official_releases/jom/jom.zip
+ - mkdir C:\jom
+ - tar -xf C:\jom.zip -C C:\jom
+ - curl -L -o %QT_LOCAL_PATH% %QT_DOWNLOAD_URL%
+ - tar -xf %QT_LOCAL_PATH% -C C:\
+ - '%x64_NATIVE_TOOLS%'
+ - cd %QT_SOURCE_DIR%
+ - mkdir build
+ - cd build
+ - ..\configure -release -silent -opensource -confirm-license -opengl desktop -no-shared -static -static-runtime -mp -qt-zlib -qt-pcre -qt-libpng -no-libjpeg -nomake examples -nomake tests -nomake tools -no-dbus -no-libudev -no-icu -no-gtk -no-opengles3 -no-angle -no-sql-sqlite -no-sql-odbc -no-sqlite -no-libudev -no-vulkan -skip qt3d -skip qtactiveqt -skip qtandroidextras -skip qtcanvas3d -skip qtcharts -skip qtconnectivity -skip qtdatavis3d -skip qtdeclarative -skip qtdoc -skip qtgamepad -skip qtgraphicaleffects -skip qtimageformats -skip qtlocation -skip qtmacextras -skip qtmultimedia -skip qtnetworkauth -skip qtpurchasing -skip qtquickcontrols -skip qtquickcontrols2 -skip qtscript -skip qtscxml -skip qtsensors -skip qtserialbus -skip qtserialport -skip qtspeech -skip qtvirtualkeyboard -skip qtwayland -skip qtwebchannel -skip qtwebengine -skip qtwebsockets -skip qtwebview -skip qtx11extras -skip qtxmlpatterns -no-openssl -no-feature-sql -no-feature-sqlmodel -prefix %QTBASEDIR%
+ - jom
+ - jom install
+ vcpkg_cache:
+ folder: 'C:\Users\ContainerAdministrator\AppData\Local\vcpkg\archives'
+ install_python_script:
+ - choco install --yes --no-progress python3 --version=3.9.6
+ - python -VV
+ install_vcpkg_script:
+ - cd ..
+ - git clone --quiet https://github.com/microsoft/vcpkg.git
+ - cd vcpkg
+ - git -c advice.detachedHead=false checkout %VCPKG_TAG%
+ - .\bootstrap-vcpkg -disableMetrics
+ - echo set(VCPKG_BUILD_TYPE release) >> triplets\x64-windows-static.cmake
+ - .\vcpkg integrate install
+ - .\vcpkg version
+ build_script:
+ - cd %CIRRUS_WORKING_DIR%
+ - python build_msvc\msvc-autogen.py
+ - msbuild build_msvc\bitcoin.sln -property:Configuration=Release -maxCpuCount -verbosity:minimal -noLogo
+ unit_tests_script:
+ - src\test_bitcoin.exe -l test_suite
+ - src\bench_bitcoin.exe > NUL
+ - python test\util\test_runner.py
+ - python test\util\rpcauth-test.py
+
+task:
name: 'ARM [unit tests, no functional tests] [buster]'
<< : *GLOBAL_TASK_TEMPLATE
- container:
+ arm_container:
image: debian:buster
+ cpu: 2
+ memory: 8G
env:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
FILE_ENV: "./ci/test/00_setup_env_arm.sh"
+ QEMU_USER_CMD: "" # Disable qemu and run the test natively
task:
name: 'Win64 [unit tests, no gui tests, no boost::process, no functional tests] [focal]'
@@ -153,6 +223,7 @@ task:
task:
name: '[no depends, sanitizers: fuzzer,address,undefined,integer] [focal]'
+ only_if: $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH || $CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH
<< : *GLOBAL_TASK_TEMPLATE
container:
image: ubuntu:focal
@@ -211,9 +282,11 @@ task:
task:
name: 'ARM64 Android APK [focal]'
<< : *DEPENDS_SDK_CACHE_TEMPLATE
+ << : *BASE_TEMPLATE
depends_sources_cache:
folder: "depends/sources"
- << : *GLOBAL_TASK_TEMPLATE
+ fingerprint_script: git rev-list -1 HEAD ./depends
+ << : *MAIN_TEMPLATE
container:
image: ubuntu:focal
env:
diff --git a/.editorconfig b/.editorconfig
index 4967e675f6..ae7e92d1c8 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -13,7 +13,7 @@ trim_trailing_whitespace = true
[*.{h,cpp,py,sh}]
indent_size = 4
-# .cirrus.yml, .appveyor.yml, .fuzzbuzz.yml, etc.
+# .cirrus.yml, .fuzzbuzz.yml, etc.
[*.yml]
indent_size = 2
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5cd4715ef0..e0d3671b07 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -81,7 +81,7 @@ facilitates social contribution, easy testing and peer review.
To contribute a patch, the workflow is as follows:
- 1. Fork repository ([only for the first time](https://help.github.com/en/articles/fork-a-repo))
+ 1. Fork repository ([only for the first time](https://docs.github.com/en/get-started/quickstart/fork-a-repo))
1. Create topic branch
1. Commit patches
@@ -182,7 +182,7 @@ for more information on helping with translations.
### Work in Progress Changes and Requests for Comments
If a pull request is not to be considered for merging (yet), please
-prefix the title with [WIP] or use [Tasks Lists](https://help.github.com/articles/basic-writing-and-formatting-syntax/#task-lists)
+prefix the title with [WIP] or use [Tasks Lists](https://docs.github.com/en/github/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax#task-lists)
in the body of the pull request to indicate tasks are pending.
### Address Feedback
@@ -386,7 +386,7 @@ about:
- It may be because your code is too complex for all but a few people, and those people
may not have realized your pull request even exists. A great way to find people who
are qualified and care about the code you are touching is the
- [Git Blame feature](https://help.github.com/articles/tracing-changes-in-a-file/). Simply
+ [Git Blame feature](https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/tracking-changes-in-a-file). Simply
look up who last modified the code you are changing and see if you can find
them and give them a nudge. Don't be incessant about the nudging, though.
- Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request
diff --git a/Makefile.am b/Makefile.am
index d84dab1842..ce66331910 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -3,7 +3,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Pattern rule to print variables, e.g. make print-top_srcdir
-print-%:
+print-%: FORCE
@echo '$*'='$($*)'
ACLOCAL_AMFLAGS = -I build-aux/m4
@@ -58,6 +58,7 @@ DIST_SHARE = \
BIN_CHECKS=$(top_srcdir)/contrib/devtools/symbol-check.py \
$(top_srcdir)/contrib/devtools/security-check.py \
+ $(top_srcdir)/contrib/devtools/utils.py \
$(top_srcdir)/contrib/devtools/pixie.py
WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/bitcoin.ico \
@@ -285,7 +286,7 @@ EXTRA_DIST += \
test/fuzz
EXTRA_DIST += \
- test/util/bitcoin-util-test.py \
+ test/util/test_runner.py \
test/util/data/bitcoin-util-test.json \
test/util/data/blanktxv1.hex \
test/util/data/blanktxv1.json \
@@ -366,14 +367,14 @@ clean-local: clean-docs
test-security-check:
if TARGET_DARWIN
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
+ $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
+ $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
endif
if TARGET_WINDOWS
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
+ $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
+ $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
endif
if TARGET_LINUX
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
+ $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
+ $(AM_V_at) CC='$(CC)' CPPFILT='$(CPPFILT)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
endif
diff --git a/REVIEWERS b/REVIEWERS
index e048036e42..0caacec440 100644
--- a/REVIEWERS
+++ b/REVIEWERS
@@ -41,7 +41,6 @@
/doc/dependencies.md @fanquake
/doc/developer-notes.md @laanwj
/doc/files.md @hebasto
-/doc/gitian-building.md @laanwj
/doc/reduce-memory.md @fanquake
/doc/reduce-traffic.md @jonasschnelli
/doc/release-process.md @laanwj
@@ -77,8 +76,7 @@
/contrib/devtools/test-security-check.py @fanquake
/contrib/devtools/symbol-check.py @fanquake
-# Gitian/Guix
-/contrib/gitian-build.py @hebasto
+# Guix
/contrib/guix/ @dongcarl
# Compatibility
diff --git a/build-aux/m4/bitcoin_qt.m4 b/build-aux/m4/bitcoin_qt.m4
index 5b5a8ed16e..1e979edf0f 100644
--- a/build-aux/m4/bitcoin_qt.m4
+++ b/build-aux/m4/bitcoin_qt.m4
@@ -350,7 +350,7 @@ AC_DEFUN([_BITCOIN_QT_CHECK_STATIC_LIBS], [
PKG_CHECK_MODULES([QT_FONTDATABASE], [${qt_lib_prefix}FontDatabaseSupport${qt_lib_suffix}], [QT_LIBS="$QT_FONTDATABASE_LIBS $QT_LIBS"])
PKG_CHECK_MODULES([QT_THEME], [${qt_lib_prefix}ThemeSupport${qt_lib_suffix}], [QT_LIBS="$QT_THEME_LIBS $QT_LIBS"])
if test "x$TARGET_OS" = xlinux; then
- PKG_CHECK_MODULES([QT_INPUT], [${qt_lib_prefix}XcbQpa], [QT_LIBS="$QT_INPUT_LIBS $QT_LIBS"])
+ PKG_CHECK_MODULES([QT_INPUT], [${qt_lib_prefix}InputSupport], [QT_LIBS="$QT_INPUT_LIBS $QT_LIBS"])
PKG_CHECK_MODULES([QT_SERVICE], [${qt_lib_prefix}ServiceSupport], [QT_LIBS="$QT_SERVICE_LIBS $QT_LIBS"])
PKG_CHECK_MODULES([QT_XCBQPA], [${qt_lib_prefix}XcbQpa], [QT_LIBS="$QT_XCBQPA_LIBS $QT_LIBS"])
elif test "x$TARGET_OS" = xdarwin; then
diff --git a/build-aux/m4/bitcoin_runtime_lib.m4 b/build-aux/m4/bitcoin_runtime_lib.m4
new file mode 100644
index 0000000000..1a6922deca
--- /dev/null
+++ b/build-aux/m4/bitcoin_runtime_lib.m4
@@ -0,0 +1,42 @@
+# On some platforms clang builtin implementations
+# require compiler-rt as a runtime library to use.
+#
+# See:
+# - https://bugs.llvm.org/show_bug.cgi?id=28629
+
+m4_define([_CHECK_RUNTIME_testbody], [[
+ bool f(long long x, long long y, long long* p)
+ {
+ return __builtin_mul_overflow(x, y, p);
+ }
+ int main() { return 0; }
+]])
+
+AC_DEFUN([CHECK_RUNTIME_LIB], [
+
+ AC_LANG_PUSH([C++])
+
+ AC_MSG_CHECKING([for __builtin_mul_overflow])
+ AC_LINK_IFELSE(
+ [AC_LANG_SOURCE([_CHECK_RUNTIME_testbody])],
+ [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_BUILTIN_MUL_OVERFLOW], [1], [Define if you have a working __builtin_mul_overflow])
+ ],
+ [
+ ax_check_save_flags="$LDFLAGS"
+ LDFLAGS="$LDFLAGS --rtlib=compiler-rt -lgcc_s"
+ AC_LINK_IFELSE(
+ [AC_LANG_SOURCE([_CHECK_RUNTIME_testbody])],
+ [
+ AC_MSG_RESULT([yes, with additional linker flags])
+ RUNTIME_LDFLAGS="--rtlib=compiler-rt -lgcc_s"
+ AC_DEFINE([HAVE_BUILTIN_MUL_OVERFLOW], [1], [Define if you have a working __builtin_mul_overflow])
+ ],
+ [AC_MSG_RESULT([no])])
+ LDFLAGS="$ax_check_save_flags"
+ ])
+
+ AC_LANG_POP
+ AC_SUBST([RUNTIME_LDFLAGS])
+])
diff --git a/build_msvc/README.md b/build_msvc/README.md
index 88a05644a7..c3705f6b03 100644
--- a/build_msvc/README.md
+++ b/build_msvc/README.md
@@ -39,7 +39,7 @@ In order to build Bitcoin Core a static build of Qt is required. The runtime lib
Some prebuilt x64 versions of Qt can be downloaded from [here](https://github.com/sipsorcery/qt_win_binary/releases). Please be aware these downloads are NOT officially sanctioned by Bitcoin Core and are provided for developer convenience only. They should NOT be used for builds that will be used in a production environment or with real funds.
-To determine which Qt prebuilt version to download open the `.appveyor.yml` file and note the `QT_DOWNLOAD_URL`. When extracting the zip file the destination path must be set to `C:\`. This is due to the way that Qt includes, libraries and tools use internal paths.
+To determine which Qt prebuilt version to download open the `.cirrus.yml` file and note the `QT_DOWNLOAD_URL`. When extracting the zip file the destination path must be set to `C:\`. This is due to the way that Qt includes, libraries and tools use internal paths.
To build Bitcoin Core without Qt unload or disable the `bitcoin-qt`, `libbitcoin_qt` and `test_bitcoin-qt` projects.
@@ -65,17 +65,6 @@ msbuild /m bitcoin.sln /p:Platform=x64 /p:Configuration=Release /t:build
- Alternatively, open the `build_msvc/bitcoin.sln` file in Visual Studio 2019.
-AppVeyor
----------------------
-The .appveyor.yml in the root directory is suitable to perform builds on [AppVeyor](https://www.appveyor.com/) Continuous Integration servers. The simplest way to perform an AppVeyor build is to fork Bitcoin Core and then configure a new AppVeyor Project pointing to the forked repository.
-
-For safety reasons the Bitcoin Core .appveyor.yml file has the artifact options disabled. The build will be performed but no executable files will be available. To enable artifacts on a forked repository uncomment the lines shown below:
-
-```
- #- 7z a bitcoin-%APPVEYOR_BUILD_VERSION%.zip %APPVEYOR_BUILD_FOLDER%\build_msvc\%platform%\%configuration%\*.exe
- #- path: bitcoin-%APPVEYOR_BUILD_VERSION%.zip
-```
-
Security
---------------------
[Base address randomization](https://docs.microsoft.com/en-us/cpp/build/reference/dynamicbase-use-address-space-layout-randomization?view=msvc-160) is used to make Bitcoin Core more secure. When building Bitcoin using the `build_msvc` process base address randomization can be disabled by editing `common.init.vcproj` to change `RandomizedBaseAddress` from `true` to `false` and then rebuilding the project.
diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h
index aba5607eb6..e2930f3ea9 100644
--- a/build_msvc/bitcoin_config.h
+++ b/build_msvc/bitcoin_config.h
@@ -5,9 +5,6 @@
#ifndef BITCOIN_BITCOIN_CONFIG_H
#define BITCOIN_BITCOIN_CONFIG_H
-/* Define if building universal (internal helper macro) */
-/* #undef AC_APPLE_UNIVERSAL_BUILD */
-
/* Version Build */
#define CLIENT_VERSION_BUILD 0
@@ -15,7 +12,7 @@
#define CLIENT_VERSION_IS_RELEASE false
/* Major version */
-#define CLIENT_VERSION_MAJOR 21
+#define CLIENT_VERSION_MAJOR 22
/* Minor version */
#define CLIENT_VERSION_MINOR 99
@@ -30,7 +27,7 @@
#define COPYRIGHT_HOLDERS_SUBSTITUTION "Bitcoin Core"
/* Copyright year */
-#define COPYRIGHT_YEAR 2019
+#define COPYRIGHT_YEAR 2021
/* Define to 1 to enable wallet functions */
#define ENABLE_WALLET 1
@@ -59,14 +56,11 @@
/* define if the Boost::Unit_Test_Framework library is available */
#define HAVE_BOOST_UNIT_TEST_FRAMEWORK /**/
-/* Define to 1 if you have the <byteswap.h> header file. */
-/* #undef HAVE_BYTESWAP_H */
-
/* Define this symbol if the consensus lib has been built */
#define HAVE_CONSENSUS_LIB 1
-/* define if the compiler supports basic C++11 syntax */
-#define HAVE_CXX11 1
+/* define if the compiler supports basic C++17 syntax */
+#define HAVE_CXX17 1
/* Define to 1 if you have the declaration of `be16toh', and to 0 if you
don't. */
@@ -144,37 +138,12 @@
don't. */
#define HAVE_DECL_STRNLEN 1
-/* Define to 1 if you have the <dlfcn.h> header file. */
-/* #undef HAVE_DLFCN_H */
-
-/* Define to 1 if you have the <endian.h> header file. */
-/* #undef HAVE_ENDIAN_H */
-
-/* Define to 1 if the system has the `dllexport' function attribute */
-#define HAVE_FUNC_ATTRIBUTE_DLLEXPORT 1
-
-/* Define to 1 if the system has the `dllimport' function attribute */
-#define HAVE_FUNC_ATTRIBUTE_DLLIMPORT 1
-
-/* Define to 1 if the system has the `visibility' function attribute */
-#define HAVE_FUNC_ATTRIBUTE_VISIBILITY 1
-
-/* Define this symbol if the BSD getentropy system call is available */
-/* #undef HAVE_GETENTROPY */
-
-/* Define this symbol if the BSD getentropy system call is available with
- sys/random.h */
-/* #undef HAVE_GETENTROPY_RAND */
+/* Define if the dllexport attribute is supported. */
+#define HAVE_DLLEXPORT_ATTRIBUTE 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
-/* Define this symbol if you have malloc_info */
-/* #undef HAVE_MALLOC_INFO */
-
-/* Define this symbol if you have mallopt with M_ARENA_MAX */
-/* #undef HAVE_MALLOPT_ARENA_MAX */
-
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
@@ -187,18 +156,6 @@
/* Define to 1 if you have the <miniupnpc/upnperrors.h> header file. */
#define HAVE_MINIUPNPC_UPNPERRORS_H 1
-/* Define this symbol if you have MSG_DONTWAIT */
-/* #undef HAVE_MSG_DONTWAIT */
-
-/* Define this symbol if you have MSG_NOSIGNAL */
-/* #undef HAVE_MSG_NOSIGNAL */
-
-/* Define if you have POSIX threads libraries and header files. */
-//#define HAVE_PTHREAD 1
-
-/* Have PTHREAD_PRIO_INHERIT. */
-//#define HAVE_PTHREAD_PRIO_INHERIT 1
-
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
@@ -208,45 +165,18 @@
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
-/* Define to 1 if you have the `strerror_r' function. */
-/* #undef HAVE_STRERROR_R */
-
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
-/* Define this symbol if the BSD sysctl(KERN_ARND) is available */
-/* #undef HAVE_SYSCTL_ARND */
-
-/* Define to 1 if you have the <sys/endian.h> header file. */
-/* #undef HAVE_SYS_ENDIAN_H */
-
-/* Define this symbol if the Linux getrandom system call is available */
-/* #undef HAVE_SYS_GETRANDOM */
-
-/* Define to 1 if you have the <sys/prctl.h> header file. */
-/* #undef HAVE_SYS_PRCTL_H */
-
-/* Define to 1 if you have the <sys/select.h> header file. */
-/* #undef HAVE_SYS_SELECT_H */
-
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
-/* Define to 1 if you have the <unistd.h> header file. */
-//#define HAVE_UNISTD_H 1
-
-/* Define if the visibility attribute is supported. */
-#define HAVE_VISIBILITY_ATTRIBUTE 1
-
-/* Define to the sub-directory where libtool stores uninstalled libraries. */
-#define LT_OBJDIR ".libs/"
-
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "https://github.com/bitcoin/bitcoin/issues"
@@ -254,23 +184,13 @@
#define PACKAGE_NAME "Bitcoin Core"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "Bitcoin Core 21.99.0"
-
-/* Define to the one symbol short name of this package. */
-#define PACKAGE_TARNAME "bitcoin"
+#define PACKAGE_STRING "Bitcoin Core 22.99.0"
/* Define to the home page for this package. */
#define PACKAGE_URL "https://bitcoincore.org/"
/* Define to the version of this package. */
-#define PACKAGE_VERSION "21.99.0"
-
-/* Define to necessary symbol if this constant uses a non-standard name on
- your system. */
-/* #undef PTHREAD_CREATE_JOINABLE */
-
-/* Define this symbol if the qt platform is cocoa */
-/* #undef QT_QPA_PLATFORM_COCOA */
+#define PACKAGE_VERSION "22.99.0"
/* Define this symbol if the minimal qt platform exists */
#define QT_QPA_PLATFORM_MINIMAL 1
@@ -278,54 +198,9 @@
/* Define this symbol if the qt platform is windows */
#define QT_QPA_PLATFORM_WINDOWS 1
-/* Define this symbol if the qt platform is xcb */
-/* #undef QT_QPA_PLATFORM_XCB */
-
/* Define this symbol if qt plugins are static */
#define QT_STATICPLUGIN 1
-/* Define to 1 if you have the ANSI C header files. */
-#define STDC_HEADERS 1
-
-/* Define to 1 if strerror_r returns char *. */
-/* #undef STRERROR_R_CHAR_P */
-
-/* Define this symbol to build in assembly routines */
-//#define USE_ASM 1
-
-/* Define if dbus support should be compiled in */
-/* #undef USE_DBUS */
-
-/* Define if QR support should be compiled in */
-//#define USE_QRCODE 1
-
-/* UPnP support not compiled if undefined, otherwise value (0 or 1) determines
- default state */
-//#define USE_UPNP 0
-
-/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
- significant byte first (like Motorola and SPARC, unlike Intel). */
-#if defined AC_APPLE_UNIVERSAL_BUILD
-# if defined __BIG_ENDIAN__
-# define WORDS_BIGENDIAN 1
-# endif
-#else
-# ifndef WORDS_BIGENDIAN
-/* # undef WORDS_BIGENDIAN */
-# endif
-#endif
-
-/* Enable large inode numbers on Mac OS X 10.5. */
-#ifndef _DARWIN_USE_64_BIT_INODE
-# define _DARWIN_USE_64_BIT_INODE 1
-#endif
-
-/* Number of bits in a file offset, on hosts where this is settable. */
-#define _FILE_OFFSET_BITS 64
-
-/* Define for large files, on AIX-style hosts. */
-/* #undef _LARGE_FILES */
-
/* Windows Universal Platform constraints */
#if !defined(WINAPI_FAMILY) || (WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP)
/* Either a desktop application without API restrictions, or and older system
diff --git a/build_msvc/common.qt.init.vcxproj b/build_msvc/common.qt.init.vcxproj
index ce66a7ab34..df2fd2fb49 100644
--- a/build_msvc/common.qt.init.vcxproj
+++ b/build_msvc/common.qt.init.vcxproj
@@ -2,7 +2,7 @@
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Label="QtGlobals">
- <QtBaseDir>C:\Qt5.12.11_x64_static_vs2019_16101</QtBaseDir>
+ <QtBaseDir>C:\Qt5.12.11_x64_static_vs2019_160900</QtBaseDir>
<QtPluginsLibraryDir>$(QtBaseDir)\plugins</QtPluginsLibraryDir>
<QtLibraryDir>$(QtBaseDir)\lib</QtLibraryDir>
<QtIncludeDir>$(QtBaseDir)\include</QtIncludeDir>
diff --git a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj
index 96bb584375..6c45d4dbd8 100644
--- a/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj
+++ b/build_msvc/libbitcoin_qt/libbitcoin_qt.vcxproj
@@ -24,6 +24,7 @@
<ClCompile Include="..\..\src\qt\csvmodelwriter.cpp" />
<ClCompile Include="..\..\src\qt\editaddressdialog.cpp" />
<ClCompile Include="..\..\src\qt\guiutil.cpp" />
+ <ClCompile Include="..\..\src\qt\initexecutor.cpp" />
<ClCompile Include="..\..\src\qt\intro.cpp" />
<ClCompile Include="..\..\src\qt\modaloverlay.cpp" />
<ClCompile Include="..\..\src\qt\networkstyle.cpp" />
@@ -78,6 +79,7 @@
<ClCompile Include="$(GeneratedFilesOutDir)\moc\moc_csvmodelwriter.cpp" />
<ClCompile Include="$(GeneratedFilesOutDir)\moc\moc_editaddressdialog.cpp" />
<ClCompile Include="$(GeneratedFilesOutDir)\moc\moc_guiutil.cpp" />
+ <ClCompile Include="$(GeneratedFilesOutDir)\moc\moc_initexecutor.cpp" />
<ClCompile Include="$(GeneratedFilesOutDir)\moc\moc_intro.cpp" />
<ClCompile Include="$(GeneratedFilesOutDir)\moc\moc_modaloverlay.cpp" />
<ClCompile Include="$(GeneratedFilesOutDir)\moc\moc_networkstyle.cpp" />
diff --git a/build_msvc/msvc-autogen.py b/build_msvc/msvc-autogen.py
index d99b17d381..a1ed935996 100755
--- a/build_msvc/msvc-autogen.py
+++ b/build_msvc/msvc-autogen.py
@@ -9,7 +9,7 @@ import argparse
from shutil import copyfile
SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src'))
-DEFAULT_PLATFORM_TOOLSET = R'v141'
+DEFAULT_PLATFORM_TOOLSET = R'v142'
libs = [
'libbitcoin_cli',
diff --git a/build_msvc/test_bitcoin/test_bitcoin.vcxproj b/build_msvc/test_bitcoin/test_bitcoin.vcxproj
index 5c4b777d51..bb1a780bfa 100644
--- a/build_msvc/test_bitcoin/test_bitcoin.vcxproj
+++ b/build_msvc/test_bitcoin/test_bitcoin.vcxproj
@@ -16,6 +16,7 @@
<ClCompile Include="..\..\src\test\util\*.cpp" />
<ClCompile Include="..\..\src\wallet\test\*_fixture.cpp" />
<ClCompile Include="..\..\src\wallet\test\*_tests.cpp" />
+ <ClCompile Include="..\..\src\wallet\test\util.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\libbitcoinconsensus\libbitcoinconsensus.vcxproj">
diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh
index 2c63a9efac..5587618f2d 100755
--- a/ci/lint/04_install.sh
+++ b/ci/lint/04_install.sh
@@ -13,7 +13,6 @@ update-alternatives --install /usr/bin/clang-format-diff clang-format-diff $(whi
${CI_RETRY_EXE} pip3 install codespell==2.0.0
${CI_RETRY_EXE} pip3 install flake8==3.8.3
-${CI_RETRY_EXE} pip3 install yq
${CI_RETRY_EXE} pip3 install mypy==0.781
${CI_RETRY_EXE} pip3 install vulture==2.3
diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh
index e38cfe8eef..f7dacd8512 100755
--- a/ci/lint/06_script.sh
+++ b/ci/lint/06_script.sh
@@ -23,10 +23,15 @@ test/lint/git-subtree-check.sh src/crc32c
test/lint/check-doc.py
test/lint/lint-all.sh
-if [ "$CIRRUS_REPO_FULL_NAME" = "bitcoin/bitcoin" ] && [ -n "$CIRRUS_CRON" ]; then
- git log --merges --before="2 days ago" -1 --format='%H' > ./contrib/verify-commits/trusted-sha512-root-commit
- ${CI_RETRY_EXE} gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys $(<contrib/verify-commits/trusted-keys) &&
- ./contrib/verify-commits/verify-commits.py --clean-merge=2;
+if [ "$CIRRUS_REPO_FULL_NAME" = "bitcoin/bitcoin" ] && [ "$CIRRUS_PR" = "" ] ; then
+ # Sanity check only the last few commits to get notified of missing sigs,
+ # missing keys, or expired keys. Usually there is only one new merge commit
+ # per push on the master branch and a few commits on release branches, so
+ # sanity checking only a few (10) commits seems sufficient and cheap.
+ git log HEAD~10 -1 --format='%H' > ./contrib/verify-commits/trusted-sha512-root-commit
+ git log HEAD~10 -1 --format='%H' > ./contrib/verify-commits/trusted-git-root
+ ${CI_RETRY_EXE} gpg --keyserver hkps://keys.openpgp.org --recv-keys $(<contrib/verify-commits/trusted-keys) &&
+ ./contrib/verify-commits/verify-commits.py;
fi
echo
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index fa4d0410fa..8a9d808f5d 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -47,7 +47,7 @@ export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
export EXPECTED_TESTS_DURATION_IN_SECONDS=${EXPECTED_TESTS_DURATION_IN_SECONDS:-1000}
export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed}
-export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:18.04}
+export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:20.04}
# Randomize test order.
# See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/random.html
export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1}
diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh
index f78a84eeac..4ef3ae1ceb 100755
--- a/ci/test/00_setup_env_android.sh
+++ b/ci/test/00_setup_env_android.sh
@@ -16,7 +16,7 @@ export RUN_FUNCTIONAL_TESTS=false
export ANDROID_API_LEVEL=28
export ANDROID_BUILD_TOOLS_VERSION=28.0.3
-export ANDROID_NDK_VERSION=21.1.6352462
+export ANDROID_NDK_VERSION=22.1.7171670
export ANDROID_TOOLS_URL=https://dl.google.com/android/repository/commandlinetools-linux-6609375_latest.zip
export ANDROID_HOME="${DEPENDS_DIR}/SDKs/android"
export ANDROID_NDK_HOME="${ANDROID_HOME}/ndk/${ANDROID_NDK_VERSION}"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index 73ac09c1de..310e964973 100755
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -7,7 +7,7 @@
export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_macos_cross
-export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos (Focal is used in the gitian build as well)
+export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos
export HOST=x86_64-apple-darwin18
export PACKAGES="cmake imagemagick librsvg2-bin libz-dev libtiff-tools libtinfo5 python3-setuptools xorriso"
export XCODE_VERSION=12.1
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index 58388fa928..b8ac691346 100755
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -14,5 +14,5 @@ export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export RUN_FUZZ_TESTS=true
export GOAL="install"
-export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC=clang CXX=clang++"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC='clang -ftrivial-auto-var-init=pattern' CXX='clang++ -ftrivial-auto-var-init=pattern'"
export CCACHE_SIZE=200M
diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh
index 1418dfbc51..8869b2a083 100755
--- a/ci/test/00_setup_env_native_multiprocess.sh
+++ b/ci/test/00_setup_env_native_multiprocess.sh
@@ -13,5 +13,4 @@ export DEP_OPTS="DEBUG=1 MULTIPROCESS=1"
export GOAL="install"
export BITCOIN_CONFIG="--enable-debug CC=clang CXX=clang++" # Use clang to avoid OOM
export TEST_RUNNER_ENV="BITCOIND=bitcoin-node"
-export RUN_SECURITY_TESTS="true"
export PIP_PACKAGES="lief"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 9c57eba62a..b3e967c898 100755
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -14,6 +14,6 @@ export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude fe
export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
-export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1"
+export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1 v0.20.1"
export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports
--enable-debug --disable-fuzz-binary CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index 4d5bde13fd..4dff335e4e 100755
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -7,14 +7,10 @@
export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_win64
-export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to win64 (Focal is used in the gitian build as well)
+export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to win64
export HOST=x86_64-w64-mingw32
export DPKG_ADD_ARCH="i386"
export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 wine32 file"
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --disable-external-signer"
-
-# Compiler for MinGW-w64 causes false -Wreturn-type warning.
-# See https://sourceforge.net/p/mingw-w64/bugs/306/
-export NO_WERROR=1
diff --git a/configure.ac b/configure.ac
index 88f91004af..e50d53f6dd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,5 +1,5 @@
AC_PREREQ([2.69])
-define(_CLIENT_VERSION_MAJOR, 21)
+define(_CLIENT_VERSION_MAJOR, 22)
define(_CLIENT_VERSION_MINOR, 99)
define(_CLIENT_VERSION_BUILD, 0)
define(_CLIENT_VERSION_RC, 0)
@@ -318,13 +318,6 @@ AC_ARG_ENABLE([gprof],
[enable_gprof=$enableval],
[enable_gprof=no])
-dnl Pass compiler & linker flags that make builds deterministic
-AC_ARG_ENABLE([determinism],
- [AS_HELP_STRING([--enable-determinism],
- [Enable compilation flags that make builds deterministic (default is no)])],
- [enable_determinism=$enableval],
- [enable_determinism=no])
-
dnl Turn warnings into errors
AC_ARG_ENABLE([werror],
[AS_HELP_STRING([--enable-werror],
@@ -424,7 +417,13 @@ if test "x$enable_werror" = "xyes"; then
AX_CHECK_COMPILE_FLAG([-Werror=range-loop-analysis],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=range-loop-analysis"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=unused-variable],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unused-variable"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=date-time],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=date-time"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=return-type],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"],,[[$CXXFLAG_WERROR]])
+
+ dnl -Wreturn-type is broken in GCC for MinGW-w64.
+ dnl https://sourceforge.net/p/mingw-w64/bugs/306/
+ AX_CHECK_COMPILE_FLAG([-Werror=return-type], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"], [], [$CXXFLAG_WERROR],
+ [AC_LANG_SOURCE([[#include <cassert>
+ int f(){ assert(false); }]])])
+
AX_CHECK_COMPILE_FLAG([-Werror=conditional-uninitialized],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=conditional-uninitialized"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=sign-compare],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=sign-compare"],,[[$CXXFLAG_WERROR]])
dnl -Wsuggest-override is broken with GCC before 9.2
@@ -433,6 +432,7 @@ if test "x$enable_werror" = "xyes"; then
[AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
AX_CHECK_COMPILE_FLAG([-Werror=unreachable-code-loop-increment],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=mismatched-tags], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=mismatched-tags"], [], [$CXXFLAG_WERROR])
+ AX_CHECK_COMPILE_FLAG([-Werror=implicit-fallthrough], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=implicit-fallthrough"], [], [$CXXFLAG_WERROR])
if test x$suppress_external_warnings != xno ; then
AX_CHECK_COMPILE_FLAG([-Werror=documentation],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=documentation"],,[[$CXXFLAG_WERROR]])
@@ -463,6 +463,7 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wsuggest-override],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsuggest-override"],,[[$CXXFLAG_WERROR]],
[AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
AX_CHECK_COMPILE_FLAG([-Wunreachable-code-loop-increment],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wimplicit-fallthrough"], [], [$CXXFLAG_WERROR])
if test x$suppress_external_warnings != xno ; then
AX_CHECK_COMPILE_FLAG([-Wdocumentation],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdocumentation"],,[[$CXXFLAG_WERROR]])
@@ -474,7 +475,6 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wunused-parameter],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-parameter"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wself-assign],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-self-assign"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wunused-local-typedef],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-local-typedef"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-implicit-fallthrough"],,[[$CXXFLAG_WERROR]])
if test x$suppress_external_warnings != xyes ; then
AX_CHECK_COMPILE_FLAG([-Wdeprecated-copy],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-deprecated-copy"],,[[$CXXFLAG_WERROR]])
fi
@@ -705,6 +705,33 @@ case $host in
if $BREW list --versions qt5 >/dev/null; then
export PKG_CONFIG_PATH="$($BREW --prefix qt5 2>/dev/null)/lib/pkgconfig:$PKG_CONFIG_PATH"
fi
+
+ case $host in
+ *aarch64*)
+ dnl The preferred Homebrew prefix for Apple Silicon is /opt/homebrew.
+ dnl Therefore, as we do not use pkg-config to detect miniupnpc and libnatpmp
+ dnl packages, we should set the CPPFLAGS and LDFLAGS variables for them
+ dnl explicitly.
+ if test "x$use_upnp" != xno && $BREW list --versions miniupnpc >/dev/null; then
+ miniupnpc_prefix=$($BREW --prefix miniupnpc 2>/dev/null)
+ if test "x$suppress_external_warnings" != xno; then
+ CPPFLAGS="$CPPFLAGS -isystem $miniupnpc_prefix/include"
+ else
+ CPPFLAGS="$CPPFLAGS -I$miniupnpc_prefix/include"
+ fi
+ LDFLAGS="$LDFLAGS -L$miniupnpc_prefix/lib"
+ fi
+ if test "x$use_natpmp" != xno && $BREW list --versions libnatpmp >/dev/null; then
+ libnatpmp_prefix=$($BREW --prefix libnatpmp 2>/dev/null)
+ if test "x$suppress_external_warnings" != xno; then
+ CPPFLAGS="$CPPFLAGS -isystem $libnatpmp_prefix/include"
+ else
+ CPPFLAGS="$CPPFLAGS -I$libnatpmp_prefix/include"
+ fi
+ LDFLAGS="$LDFLAGS -L$libnatpmp_prefix/lib"
+ fi
+ ;;
+ esac
fi
else
case $build_os in
@@ -907,6 +934,7 @@ if test x$use_hardening != xno; then
])
fi
+ AX_CHECK_LINK_FLAG([[-Wl,--enable-reloc-section]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--enable-reloc-section"],, [[$LDFLAG_WERROR]])
AX_CHECK_LINK_FLAG([[-Wl,--dynamicbase]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--dynamicbase"],, [[$LDFLAG_WERROR]])
AX_CHECK_LINK_FLAG([[-Wl,--nxcompat]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--nxcompat"],, [[$LDFLAG_WERROR]])
AX_CHECK_LINK_FLAG([[-Wl,--high-entropy-va]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--high-entropy-va"],, [[$LDFLAG_WERROR]])
@@ -931,12 +959,6 @@ if test x$TARGET_OS = xdarwin; then
AX_CHECK_LINK_FLAG([[-Wl,-bind_at_load]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-bind_at_load"],, [[$LDFLAG_WERROR]])
fi
-if test x$enable_determinism = xyes; then
- if test x$TARGET_OS = xwindows; then
- AX_CHECK_LINK_FLAG([[-Wl,--no-insert-timestamp]], [LDFLAGS="$LDFLAGS -Wl,--no-insert-timestamp"],, [[$LDFLAG_WERROR]])
- fi
-fi
-
AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h sys/sysctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h])
AC_CHECK_DECLS([getifaddrs, freeifaddrs],[CHECK_SOCKET],,
@@ -1763,6 +1785,10 @@ if test x$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_
AC_MSG_ERROR([No targets! Please specify at least one of: --with-utils --with-libs --with-daemon --with-gui --enable-bench or --enable-tests])
fi
+if test x$enable_fuzz_binary = xyes; then
+ CHECK_RUNTIME_LIB
+fi
+
AM_CONDITIONAL([TARGET_DARWIN], [test x$TARGET_OS = xdarwin])
AM_CONDITIONAL([BUILD_DARWIN], [test x$BUILD_OS = xdarwin])
AM_CONDITIONAL([TARGET_LINUX], [test x$TARGET_OS = xlinux])
@@ -1875,7 +1901,7 @@ AC_CONFIG_LINKS([contrib/devtools/test-symbol-check.py:contrib/devtools/test-sym
AC_CONFIG_LINKS([contrib/filter-lcov.py:contrib/filter-lcov.py])
AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py])
AC_CONFIG_LINKS([test/fuzz/test_runner.py:test/fuzz/test_runner.py])
-AC_CONFIG_LINKS([test/util/bitcoin-util-test.py:test/util/bitcoin-util-test.py])
+AC_CONFIG_LINKS([test/util/test_runner.py:test/util/test_runner.py])
AC_CONFIG_LINKS([test/util/rpcauth-test.py:test/util/rpcauth-test.py])
dnl boost's m4 checks do something really nasty: they export these vars. As a
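The new compile-flag check feeds AX_CHECK_COMPILE_FLAG a test program so that -Werror=return-type is only enabled when the toolchain does not emit the spurious MinGW-w64 warning, which is also why the win64 CI job above no longer needs NO_WERROR=1. Below is a stand-alone re-creation of that probe, for illustration only; the compiler choice and its g++ default are assumptions, not part of the build system.

```python
#!/usr/bin/env python3
# Illustrative sketch of the configure probe: compile the same test
# program with -Werror=return-type and report whether the toolchain
# trips over the MinGW-w64 false positive. The compiler is read from
# CXX (assumed default: g++) and word-split, since CI may set it to a
# command plus flags.
import os
import subprocess
import tempfile

TEST_PROGRAM = """#include <cassert>
int f(){ assert(false); }
"""

def werror_return_type_usable(cxx_words):
    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "conftest.cpp")
        obj = os.path.join(tmp, "conftest.o")
        with open(src, "w", encoding="utf8") as f:
            f.write(TEST_PROGRAM)
        # Compile only (no link), mirroring what the autoconf check does.
        result = subprocess.run(
            [*cxx_words, "-c", "-Werror=return-type", src, "-o", obj],
            stderr=subprocess.DEVNULL,
        )
        return result.returncode == 0

if __name__ == "__main__":
    cxx = os.getenv("CXX", "g++").split(" ")
    print("-Werror=return-type usable:", werror_return_type_usable(cxx))
```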
diff --git a/contrib/README.md b/contrib/README.md
index 361975baa4..ae1372e95d 100644
--- a/contrib/README.md
+++ b/contrib/README.md
@@ -26,18 +26,12 @@ The [Debian](/contrib/debian) subfolder contains the copyright file.
All other packaging related files can be found in the [bitcoin-core/packaging](https://github.com/bitcoin-core/packaging) repository.
-### [Gitian-descriptors](/contrib/gitian-descriptors) ###
-Files used during the gitian build process. For more information about gitian, see the [the Bitcoin Core documentation repository](https://github.com/bitcoin-core/docs).
-
-### [Gitian-keys](/contrib/gitian-keys)
-PGP keys used for signing Bitcoin Core [Gitian release](/doc/release-process.md) results.
+### [Builder keys](/contrib/builder-keys)
+PGP keys used for signing Bitcoin Core [release](/doc/release-process.md) results.
### [MacDeploy](/contrib/macdeploy) ###
Scripts and notes for Mac builds.
-### [Gitian-build](/contrib/gitian-build.py) ###
-Script for running full Gitian builds.
-
Test and Verify Tools
---------------------
diff --git a/contrib/gitian-keys/README.md b/contrib/builder-keys/README.md
index ffe4fb144b..56bd87d0af 100644
--- a/contrib/gitian-keys/README.md
+++ b/contrib/builder-keys/README.md
@@ -1,10 +1,10 @@
-## PGP keys of Gitian builders and Developers
+## PGP keys of builders and Developers
-The file `keys.txt` contains fingerprints of the public keys of Gitian builders
-and active developers.
+The file `keys.txt` contains fingerprints of the public keys of builders and
+active developers.
The associated keys are mainly used to sign git commits or the build results
-of Gitian builds.
+of Guix builds.
The most recent version of each pgp key can be found on most pgp key servers.
@@ -16,12 +16,12 @@ To fetch the latest version of all pgp keys in your gpg homedir,
gpg --refresh-keys
```
-To fetch keys of Gitian builders and active developers, feed the list of
-fingerprints of the primary keys into gpg:
+To fetch keys of builders and active developers, feed the list of fingerprints
+of the primary keys into gpg:
```sh
-while read fingerprint keyholder_name; do gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys ${fingerprint}; done < ./keys.txt
+while read fingerprint keyholder_name; do gpg --keyserver hkps://keys.openpgp.org --recv-keys ${fingerprint}; done < ./keys.txt
```
-Add your key to the list if you provided Gitian signatures for two major or
+Add your key to the list if you provided Guix attestations for two major or
minor releases of Bitcoin Core.
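The fetch loop now points at hkps://keys.openpgp.org instead of the retired SKS keyserver pool. For illustration only, the same loop expressed in Python (not a script shipped in the repository):

```python
#!/usr/bin/env python3
# Fetch every fingerprint listed in keys.txt from keys.openpgp.org,
# mirroring the while-read loop in the README above.
import subprocess
import sys

def fetch_builder_keys(keys_file="keys.txt",
                       keyserver="hkps://keys.openpgp.org"):
    with open(keys_file, encoding="utf8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            fingerprint = line.split()[0]  # first column is the fingerprint
            subprocess.run(
                ["gpg", "--keyserver", keyserver, "--recv-keys", fingerprint],
                check=False,  # keep going even if one key is unavailable
            )

if __name__ == "__main__":
    fetch_builder_keys(*sys.argv[1:])
```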
diff --git a/contrib/gitian-keys/keys.txt b/contrib/builder-keys/keys.txt
index db28cd07a0..e8032f66ee 100644
--- a/contrib/gitian-keys/keys.txt
+++ b/contrib/builder-keys/keys.txt
@@ -5,6 +5,7 @@ E944AE667CF960B1004BC32FCA662BE18B877A60 Andreas Schildbach (aschildbach)
590B7292695AFFA5B672CBB2E13FC145CD3F4304 Antoine Poinsot (darosior)
0AD83877C1F0CD1EE9BD660AD7CC770B81FD22A8 Ben Carman (benthecarman)
912FD3228387123DC97E0E57D5566241A0295FA9 BtcDrak (btcdrak)
+04017A2A6D9A0CCDC81D8EC296AB007F1A7ED999 Carl Dong (dongcarl)
C519EBCF3B926298946783EFF6430754120EC2F4 Christian Decker (cdecker)
18AE2F798E0D239755DA4FD24B79F986CBDF8736 Chun Kuan Le (ken2812221)
101598DC823C1B5F9A6624ABA5E0907A0380E6C3 CoinForensics (CoinForensics)
@@ -19,6 +20,7 @@ D35176BE9264832E4ACA8986BF0792FBE95DC863 fivepiece (fivepiece)
01CDF4627A3B88AAE4A571C87588242FBE38D3A8 Gavin Andresen (gavinandresen)
D1DBF2C4B96F2DEBF4C16654410108112E7EA81F Hennadii Stepanov (hebasto)
A2FD494D0021AA9B4FA58F759102B7AE654A4A5A Ilyas Ridhuan (IlyasRidhuan)
+2688F5A9A4BE0F295E921E8A25F27A38A47AD566 James O'Beirne (jamesob)
D3F22A3A4C366C2DCB66D3722DA9C5A7FA81EA35 Jarol Rodriguez (jarolrod)
7480909378D544EA6B6DCEB7535B12980BB8A4D3 Jeffri H Frontz (jhfrontz)
D3CC177286005BB8FF673294C5242A1AB3936517 jl2012 (jl2012)
@@ -26,6 +28,7 @@ D3CC177286005BB8FF673294C5242A1AB3936517 jl2012 (jl2012)
32EE5C4C3FA15CCADB46ABE529D4BCB6416F53EC Jonas Schnelli (jonasschnelli)
4B4E840451149DD7FB0D633477DFAB5C3108B9A8 Jorge Timon (jtimon)
C42AFF7C61B3E44A1454CD3557AF762DB3353322 Karl-Johan Alm (kallewoof)
+70A1D47DD44F59DF8B22244333E472FE870C7E5D Kristaps Kaupe (kristapsk)
30DE693AE0DE9E37B3E7EB6BBFF0F67810C1EED1 Lisa Neigut (niftynei)
E463A93F5F3117EEDE6C7316BD02942421F4889F Luke Dashjr (luke-jr)
B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B Marco Falke (marco)
diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md
index 1fa850af1a..afbad096c4 100644
--- a/contrib/devtools/README.md
+++ b/contrib/devtools/README.md
@@ -98,7 +98,7 @@ Perform basic security checks on a series of executables.
symbol-check.py
===============
-A script to check that the executables produced by gitian only contain
+A script to check that release executables only contain
certain symbols and are only linked against allowed libraries.
For Linux this means checking for allowed gcc, glibc and libstdc++ version symbols.
@@ -106,9 +106,9 @@ This makes sure they are still compatible with the minimum supported distributio
For macOS and Windows we check that the executables are only linked against libraries we allow.
-Example usage after a gitian build:
+Example usage:
- find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
+ find ../path/to/executables -type f -executable | xargs python3 contrib/devtools/symbol-check.py
If no errors occur the return value will be 0 and the output will be empty.
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 56e4313d78..61f727fa63 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -3,21 +3,22 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
-A script to check that the executables produced by gitian only contain
-certain symbols and are only linked against allowed libraries.
+A script to check that release executables only contain certain symbols
+and are only linked against allowed libraries.
Example usage:
- find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
+ find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import sys
-import os
from typing import List, Optional
import lief
import pixie
+from utils import determine_wellknown_cmd
+
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
@@ -60,7 +61,6 @@ IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
-CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
@@ -140,7 +140,7 @@ class CPPFilt(object):
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
- self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
+ self.proc = subprocess.Popen(determine_wellknown_cmd('CPPFILT', 'c++filt'), stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
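CPPFilt now resolves c++filt through determine_wellknown_cmd, honouring a CPPFILT override and otherwise falling back to whatever shutil.which finds, rather than assuming /usr/bin/c++filt exists. A minimal sketch of the same pipe-based demangling pattern in isolation, assuming a c++filt binary is on PATH:

```python
#!/usr/bin/env python3
# Demangle a batch of symbols through a c++filt pipe, in the same way
# symbol-check.py's CPPFilt class does (which resolves the tool via
# determine_wellknown_cmd('CPPFILT', 'c++filt')).
import shutil
import subprocess

def demangle(mangled_names):
    cppfilt = shutil.which("c++filt")
    if cppfilt is None:
        raise RuntimeError("c++filt not found")
    proc = subprocess.Popen([cppfilt], stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, universal_newlines=True)
    out, _ = proc.communicate("\n".join(mangled_names) + "\n")
    return out.splitlines()

if __name__ == "__main__":
    # '_ZN7example3fooEv' demangles to 'example::foo()'
    print(demangle(["_ZN7example3fooEv", "main"]))
```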
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index c079fe5b4d..14058e2cc8 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -9,6 +9,8 @@ import os
import subprocess
import unittest
+from utils import determine_wellknown_cmd
+
def write_testcode(filename):
with open(filename, 'w', encoding="utf8") as f:
f.write('''
@@ -25,7 +27,7 @@ def clean_files(source, executable):
os.remove(executable)
def call_security_check(cc, source, executable, options):
- subprocess.run([cc,source,'-o',executable] + options, check=True)
+ subprocess.run([*cc,source,'-o',executable] + options, check=True)
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
@@ -33,7 +35,7 @@ class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
- cc = 'gcc'
+ cc = determine_wellknown_cmd('CC', 'gcc')
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
@@ -54,18 +56,20 @@ class TestSecurityChecks(unittest.TestCase):
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
- cc = 'x86_64-w64-mingw32-gcc'
+ cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc')
write_testcode(source)
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
- (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
- (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
- (1, executable+': failed HIGH_ENTROPY_VA RELOC_SECTION'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-no-pie','-fno-PIE']),
- (1, executable+': failed RELOC_SECTION'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--disable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
+ (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
+ (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
+ (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-pie','-fPIE']),
+ (1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA')) # -pie -fPIE does nothing unless --dynamicbase is also supplied
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--no-high-entropy-va','-pie','-fPIE']),
+ (1, executable+': failed HIGH_ENTROPY_VA'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']),
(0, ''))
clean_files(source, executable)
@@ -73,7 +77,7 @@ class TestSecurityChecks(unittest.TestCase):
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
- cc = 'clang'
+ cc = determine_wellknown_cmd('CC', 'clang')
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
@@ -95,4 +99,3 @@ class TestSecurityChecks(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
-
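The rewritten PE cases pair each combination of hardening options with the new `-Wl,--enable-reloc-section`/`-Wl,--disable-reloc-section` switches, matching the `--enable-reloc-section` probe added to configure.ac. As a rough illustration only (not part of security-check.py), the presence of the base-relocation section in a built Windows executable can be inspected with lief, which these devtools already depend on:

```python
#!/usr/bin/env python3
# Report whether a PE binary still carries a .reloc section, which is
# what the RELOC_SECTION checks above are exercising.
import sys
import lief

def has_reloc_section(path):
    binary = lief.parse(path)
    return any(section.name == ".reloc" for section in binary.sections)

if __name__ == "__main__":
    for path in sys.argv[1:]:
        print(path, "has .reloc:", has_reloc_section(path))
```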
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py
index 6ce2fa3560..2da7ae793d 100755
--- a/contrib/devtools/test-symbol-check.py
+++ b/contrib/devtools/test-symbol-check.py
@@ -7,41 +7,51 @@ Test script for symbol-check.py
'''
import os
import subprocess
+from typing import List
import unittest
-def call_symbol_check(cc, source, executable, options):
- subprocess.run([cc,source,'-o',executable] + options, check=True)
+from utils import determine_wellknown_cmd
+
+def call_symbol_check(cc: List[str], source, executable, options):
+ subprocess.run([*cc,source,'-o',executable] + options, check=True)
p = subprocess.run(['./contrib/devtools/symbol-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
os.remove(source)
os.remove(executable)
return (p.returncode, p.stdout.rstrip())
+def get_machine(cc: List[str]):
+ p = subprocess.run([*cc,'-dumpmachine'], stdout=subprocess.PIPE, universal_newlines=True)
+ return p.stdout.rstrip()
+
class TestSymbolChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
- cc = 'gcc'
+ cc = determine_wellknown_cmd('CC', 'gcc')
- # renameat2 was introduced in GLIBC 2.28, so is newer than the upper limit
- # of glibc for all platforms
+ # there's no way to do this test for RISC-V at the moment; we build for
+ # RISC-V in a glibc 2.27 environment and we allow all symbols from 2.27.
+ if 'riscv' in get_machine(cc):
+ self.skipTest("test not available for RISC-V")
+
+ # nextup was introduced in GLIBC 2.24, so is newer than our supported
+ # glibc (2.17), and available in our release build environment (2.24).
with open(source, 'w', encoding="utf8") as f:
f.write('''
#define _GNU_SOURCE
- #include <stdio.h>
- #include <linux/fs.h>
+ #include <math.h>
- int renameat2(int olddirfd, const char *oldpath,
- int newdirfd, const char *newpath, unsigned int flags);
+ double nextup(double x);
int main()
{
- renameat2(0, "test", 0, "test_", RENAME_EXCHANGE);
+ nextup(3.14);
return 0;
}
''')
- self.assertEqual(call_symbol_check(cc, source, executable, []),
- (1, executable + ': symbol renameat2 from unsupported version GLIBC_2.28\n' +
+ self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']),
+ (1, executable + ': symbol nextup from unsupported version GLIBC_2.24\n' +
executable + ': failed IMPORTED_SYMBOLS'))
# -lutil is part of the libc6 package so a safe bet that it's installed
@@ -63,26 +73,27 @@ class TestSymbolChecks(unittest.TestCase):
(1, executable + ': NEEDED library libutil.so.1 is not allowed\n' +
executable + ': failed LIBRARY_DEPENDENCIES'))
- # finally, check a conforming file that simply uses a math function
+ # finally, check a simple conforming binary
source = 'test3.c'
executable = 'test3'
with open(source, 'w', encoding="utf8") as f:
f.write('''
- #include <math.h>
+ #include <stdio.h>
int main()
{
- return (int)pow(2.0, 4.0);
+ printf("42");
+ return 0;
}
''')
- self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']),
+ self.assertEqual(call_symbol_check(cc, source, executable, []),
(0, ''))
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
- cc = 'clang'
+ cc = determine_wellknown_cmd('CC', 'clang')
with open(source, 'w', encoding="utf8") as f:
f.write('''
@@ -96,7 +107,7 @@ class TestSymbolChecks(unittest.TestCase):
''')
- self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat']),
+ self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']),
(1, 'libexpat.1.dylib is not in ALLOWED_LIBRARIES!\n' +
f'{executable}: failed DYNAMIC_LIBRARIES MIN_OS SDK'))
@@ -113,7 +124,7 @@ class TestSymbolChecks(unittest.TestCase):
}
''')
- self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics']),
+ self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']),
(1, f'{executable}: failed MIN_OS SDK'))
source = 'test3.c'
@@ -126,13 +137,13 @@ class TestSymbolChecks(unittest.TestCase):
}
''')
- self.assertEqual(call_symbol_check(cc, source, executable, ['-mmacosx-version-min=10.14']),
+ self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,10.14', '-Wl,11.4']),
(1, f'{executable}: failed SDK'))
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
- cc = 'x86_64-w64-mingw32-gcc'
+ cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc')
with open(source, 'w', encoding="utf8") as f:
f.write('''
@@ -182,4 +193,3 @@ class TestSymbolChecks(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
-
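The ELF case now exercises `nextup` (GLIBC 2.24) instead of `renameat2` (GLIBC 2.28) and skips RISC-V, whose release baseline is glibc 2.27. To see which versioned glibc symbols a compiled test binary actually imports, binutils' readelf can be used; the helper below is only an illustration and not part of the test suite:

```python
#!/usr/bin/env python3
# List the GLIBC-versioned dynamic symbols of an ELF binary, e.g. to
# confirm that the test1 binary above really imports nextup@GLIBC_2.24.
import subprocess
import sys

def glibc_versioned_symbols(path):
    out = subprocess.run(["readelf", "-W", "--dyn-syms", path],
                         stdout=subprocess.PIPE, universal_newlines=True,
                         check=True).stdout
    symbols = []
    for line in out.splitlines():
        for token in line.split():
            if "@GLIBC_" in token:
                symbols.append(token)
    return symbols

if __name__ == "__main__":
    for sym in glibc_versioned_symbols(sys.argv[1]):
        print(sym)
```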
diff --git a/contrib/devtools/utils.py b/contrib/devtools/utils.py
new file mode 100755
index 0000000000..68ad1c3aba
--- /dev/null
+++ b/contrib/devtools/utils.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+'''
+Common utility functions
+'''
+import shutil
+import sys
+import os
+from typing import List
+
+
+def determine_wellknown_cmd(envvar, progname) -> List[str]:
+ maybe_env = os.getenv(envvar)
+ maybe_which = shutil.which(progname)
+ if maybe_env:
+ return maybe_env.split(' ') # Well-known vars are often meant to be word-split
+ elif maybe_which:
+ return [ maybe_which ]
+ else:
+ sys.exit(f"{progname} not found")
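determine_wellknown_cmd prefers an environment override and word-splits it, since variables such as CC or CPPFILT are commonly set to a command plus flags, and otherwise falls back to shutil.which. A small usage sketch; the concrete paths and values shown are assumptions:

```python
# Illustrative usage of determine_wellknown_cmd:
#
#   CC unset, gcc on PATH          -> ['/usr/bin/gcc']          (assumed path)
#   CC='clang -ftrivial-auto-var-init=pattern'
#                                  -> ['clang', '-ftrivial-auto-var-init=pattern']
#
# Callers splat the result into the subprocess argv, as the updated
# test scripts do:
import subprocess
from utils import determine_wellknown_cmd

cc = determine_wellknown_cmd('CC', 'gcc')
subprocess.run([*cc, 'test1.c', '-o', 'test1'], check=True)
```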
diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py
deleted file mode 100755
index 5df87d9e70..0000000000
--- a/contrib/gitian-build.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2018-2020 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-import argparse
-import os
-import subprocess
-import sys
-
-def setup():
- global args, workdir
- programs = ['ruby', 'git', 'make', 'wget', 'curl']
- if args.kvm:
- programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
- elif args.docker:
- if not os.path.isfile('/lib/systemd/system/docker.service'):
- dockers = ['docker.io', 'docker-ce']
- for i in dockers:
- return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
- if return_code == 0:
- break
- if return_code != 0:
- print('Cannot find any way to install Docker.', file=sys.stderr)
- sys.exit(1)
- else:
- programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
- subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
- if not os.path.isdir('gitian.sigs'):
- subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/gitian.sigs.git'])
- if not os.path.isdir('bitcoin-detached-sigs'):
- subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/bitcoin-detached-sigs.git'])
- if not os.path.isdir('gitian-builder'):
- subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
- if not os.path.isdir('bitcoin'):
- subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin/bitcoin.git'])
- os.chdir('gitian-builder')
- make_image_prog = ['bin/make-base-vm', '--suite', 'focal', '--arch', 'amd64']
- if args.docker:
- make_image_prog += ['--docker']
- elif not args.kvm:
- make_image_prog += ['--lxc', '--disksize', '13000']
- subprocess.check_call(make_image_prog)
- os.chdir(workdir)
- if args.is_focal and not args.kvm and not args.docker:
- subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
- print('Reboot is required')
- sys.exit(0)
-
-def build():
- global args, workdir
-
- os.makedirs('bitcoin-binaries/' + args.version, exist_ok=True)
- print('\nBuilding Dependencies\n')
- os.chdir('gitian-builder')
- os.makedirs('inputs', exist_ok=True)
-
- subprocess.check_call(['wget', '-O', 'inputs/osslsigncode-2.0.tar.gz', 'https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz'])
- subprocess.check_call(["echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c"], shell=True)
- subprocess.check_call(['make', '-C', '../bitcoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
-
- if args.linux:
- print('\nCompiling ' + args.version + ' Linux')
- subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml'])
- subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml'])
- subprocess.check_call('mv build/out/bitcoin-*.tar.gz build/out/src/bitcoin-*.tar.gz ../bitcoin-binaries/'+args.version, shell=True)
-
- if args.windows:
- print('\nCompiling ' + args.version + ' Windows')
- subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-win.yml'])
- subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-win.yml'])
- subprocess.check_call('mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/', shell=True)
- subprocess.check_call('mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe build/out/src/bitcoin-*.tar.gz ../bitcoin-binaries/'+args.version, shell=True)
-
- if args.macos:
- print('\nCompiling ' + args.version + ' MacOS')
- subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin='+args.commit, '--url', 'bitcoin='+args.url, '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml'])
- subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml'])
- subprocess.check_call('mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/', shell=True)
- subprocess.check_call('mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg build/out/src/bitcoin-*.tar.gz ../bitcoin-binaries/'+args.version, shell=True)
-
- os.chdir(workdir)
-
- if args.commit_files:
- print('\nCommitting '+args.version+' Unsigned Sigs\n')
- os.chdir('gitian.sigs')
- subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
- subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
- subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
- subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
- os.chdir(workdir)
-
-def sign():
- global args, workdir
- os.chdir('gitian-builder')
-
- if args.windows:
- print('\nSigning ' + args.version + ' Windows')
- subprocess.check_call('cp inputs/bitcoin-' + args.version + '-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz', shell=True)
- subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
- subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
- subprocess.check_call('mv build/out/bitcoin-*win64-setup.exe ../bitcoin-binaries/'+args.version, shell=True)
-
- if args.macos:
- print('\nSigning ' + args.version + ' MacOS')
- subprocess.check_call('cp inputs/bitcoin-' + args.version + '-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz', shell=True)
- subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
- subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
- subprocess.check_call('mv build/out/bitcoin-osx-signed.dmg ../bitcoin-binaries/'+args.version+'/bitcoin-'+args.version+'-osx.dmg', shell=True)
-
- os.chdir(workdir)
-
- if args.commit_files:
- print('\nCommitting '+args.version+' Signed Sigs\n')
- os.chdir('gitian.sigs')
- subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
- subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
- subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
- os.chdir(workdir)
-
-def verify():
- global args, workdir
- rc = 0
- os.chdir('gitian-builder')
-
- print('\nVerifying v'+args.version+' Linux\n')
- if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../bitcoin/contrib/gitian-descriptors/gitian-linux.yml']):
- print('Verifying v'+args.version+' Linux FAILED\n')
- rc = 1
-
- print('\nVerifying v'+args.version+' Windows\n')
- if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../bitcoin/contrib/gitian-descriptors/gitian-win.yml']):
- print('Verifying v'+args.version+' Windows FAILED\n')
- rc = 1
-
- print('\nVerifying v'+args.version+' MacOS\n')
- if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../bitcoin/contrib/gitian-descriptors/gitian-osx.yml']):
- print('Verifying v'+args.version+' MacOS FAILED\n')
- rc = 1
-
- print('\nVerifying v'+args.version+' Signed Windows\n')
- if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml']):
- print('Verifying v'+args.version+' Signed Windows FAILED\n')
- rc = 1
-
- print('\nVerifying v'+args.version+' Signed MacOS\n')
- if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml']):
- print('Verifying v'+args.version+' Signed MacOS FAILED\n')
- rc = 1
-
- os.chdir(workdir)
- return rc
-
-def main():
- global args, workdir
-
- parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
- parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
- parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
- parser.add_argument('-u', '--url', dest='url', default='https://github.com/bitcoin/bitcoin', help='Specify the URL of the repository. Default is %(default)s')
- parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
- parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
- parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
- parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
- parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
- parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
- parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
- parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
- parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
- parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
- parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
- parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
- parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
- parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
-
- args = parser.parse_args()
- workdir = os.getcwd()
-
- args.is_focal = b'focal' in subprocess.check_output(['lsb_release', '-cs'])
-
- if args.kvm and args.docker:
- raise Exception('Error: cannot have both kvm and docker')
-
- # Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
- # can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
- os.environ['USE_LXC'] = ''
- os.environ['USE_VBOX'] = ''
- os.environ['USE_DOCKER'] = ''
- if args.docker:
- os.environ['USE_DOCKER'] = '1'
- elif not args.kvm:
- os.environ['USE_LXC'] = '1'
- if 'GITIAN_HOST_IP' not in os.environ.keys():
- os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
- if 'LXC_GUEST_IP' not in os.environ.keys():
- os.environ['LXC_GUEST_IP'] = '10.0.3.5'
-
- if args.setup:
- setup()
-
- if args.buildsign:
- args.build = True
- args.sign = True
-
- if not args.build and not args.sign and not args.verify:
- sys.exit(0)
-
- args.linux = 'l' in args.os
- args.windows = 'w' in args.os
- args.macos = 'm' in args.os
-
- # Disable for MacOS if no SDK found
- if args.macos and not os.path.isfile('gitian-builder/inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz'):
- print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
- args.macos = False
-
- args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
-
- script_name = os.path.basename(sys.argv[0])
- if not args.signer:
- print(script_name+': Missing signer')
- print('Try '+script_name+' --help for more information')
- sys.exit(1)
- if not args.version:
- print(script_name+': Missing version')
- print('Try '+script_name+' --help for more information')
- sys.exit(1)
-
- # Add leading 'v' for tags
- if args.commit and args.pull:
- raise Exception('Cannot have both commit and pull')
- args.commit = ('' if args.commit else 'v') + args.version
-
- os.chdir('bitcoin')
- if args.pull:
- subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
- os.chdir('../gitian-builder/inputs/bitcoin')
- subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
- args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
- args.version = 'pull-' + args.version
- print(args.commit)
- subprocess.check_call(['git', 'fetch'])
- subprocess.check_call(['git', 'checkout', args.commit])
- os.chdir(workdir)
-
- os.chdir('gitian-builder')
- subprocess.check_call(['git', 'pull'])
- os.chdir(workdir)
-
- if args.build:
- build()
-
- if args.sign:
- sign()
-
- if args.verify:
- os.chdir('gitian.sigs')
- subprocess.check_call(['git', 'pull'])
- os.chdir(workdir)
- sys.exit(verify())
-
-if __name__ == '__main__':
- main()
diff --git a/contrib/gitian-descriptors/assign_DISTNAME b/contrib/gitian-descriptors/assign_DISTNAME
deleted file mode 100644
index 330fbc041b..0000000000
--- a/contrib/gitian-descriptors/assign_DISTNAME
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (c) 2020 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#
-# A helper script to be sourced into the gitian descriptors
-
-if RECENT_TAG="$(git describe --exact-match HEAD 2> /dev/null)"; then
- VERSION="${RECENT_TAG#v}"
-else
- VERSION="$(git rev-parse --short=12 HEAD)"
-fi
-DISTNAME="bitcoin-${VERSION}"
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
deleted file mode 100644
index e6dce7a8c6..0000000000
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ /dev/null
@@ -1,167 +0,0 @@
----
-name: "bitcoin-core-linux-22"
-enable_cache: true
-distro: "ubuntu"
-suites:
-- "focal"
-architectures:
-- "amd64"
-packages:
-# Common dependencies.
-- "autoconf"
-- "automake"
-- "binutils"
-- "bison"
-- "bsdmainutils"
-- "ca-certificates"
-- "curl"
-- "faketime"
-- "g++-8"
-- "gcc-8"
-- "git"
-- "libtool"
-- "patch"
-- "pkg-config"
-- "python3"
-- "python3-pip"
-# Cross compilation HOSTS:
-# - arm-linux-gnueabihf
-- "binutils-arm-linux-gnueabihf"
-- "g++-8-arm-linux-gnueabihf"
-# - aarch64-linux-gnu
-- "binutils-aarch64-linux-gnu"
-- "g++-8-aarch64-linux-gnu"
-# - powerpc64-linux-gnu
-- "binutils-powerpc64-linux-gnu"
-- "g++-8-powerpc64-linux-gnu"
-# - powerpc64le-linux-gnu
-- "binutils-powerpc64le-linux-gnu"
-- "g++-8-powerpc64le-linux-gnu"
-# - riscv64-linux-gnu
-- "binutils-riscv64-linux-gnu"
-- "g++-8-riscv64-linux-gnu"
-remotes:
-- "url": "https://github.com/bitcoin/bitcoin.git"
- "dir": "bitcoin"
-files: []
-script: |
- set -e -o pipefail
-
- WRAP_DIR=$HOME/wrapped
- HOSTS="x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu riscv64-linux-gnu"
- CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary"
- FAKETIME_HOST_PROGS="gcc g++"
- FAKETIME_PROGS="date ar ranlib nm"
- HOST_CFLAGS="-O2 -g"
- HOST_CXXFLAGS="-O2 -g"
- HOST_LDFLAGS_BASE="-static-libstdc++ -Wl,-O2"
-
- export TZ="UTC"
- export BUILD_DIR="$PWD"
- mkdir -p ${WRAP_DIR}
- if test -n "$GBUILD_CACHE_ENABLED"; then
- export SOURCES_PATH=${GBUILD_COMMON_CACHE}
- export BASE_CACHE=${GBUILD_PACKAGE_CACHE}
- mkdir -p ${BASE_CACHE} ${SOURCES_PATH}
- fi
-
- # Use $LIB in LD_PRELOAD to avoid hardcoding the dir (See `man ld.so`)
- function create_global_faketime_wrappers {
- for prog in ${FAKETIME_PROGS}; do
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog}
- echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog}
- echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog}
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog}
- chmod +x ${WRAP_DIR}/${prog}
- done
- }
-
- function create_per-host_faketime_wrappers {
- for i in $HOSTS; do
- for prog in ${FAKETIME_HOST_PROGS}; do
- if which ${i}-${prog}-8
- then
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog}
- echo "REAL=\`which -a ${i}-${prog}-8 | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
- if [ "${i:0:11}" = "powerpc64le" ]; then
- echo "exec \"\$REAL\" -mcpu=power8 -mtune=power9 \"\$@\"" >> $WRAP_DIR/${i}-${prog}
- elif [ "${i:0:9}" = "powerpc64" ]; then
- echo "exec \"\$REAL\" -mcpu=970 -mtune=power9 \"\$@\"" >> $WRAP_DIR/${i}-${prog}
- else
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog}
- fi
- chmod +x ${WRAP_DIR}/${i}-${prog}
- fi
- done
- done
- }
-
- pip3 install lief==0.11.5
-
- # Faketime for depends so intermediate results are comparable
- export PATH_orig=${PATH}
- create_global_faketime_wrappers "2000-01-01 12:00:00"
- create_per-host_faketime_wrappers "2000-01-01 12:00:00"
- export PATH=${WRAP_DIR}:${PATH}
-
- cd bitcoin
- BASEPREFIX="${PWD}/depends"
- # Build dependencies for each host
- for i in $HOSTS; do
- make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" CC=${i}-gcc-8 CXX=${i}-g++-8
- done
-
- # Faketime for binaries
- export PATH=${PATH_orig}
- create_global_faketime_wrappers "${REFERENCE_DATETIME}"
- create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
- export PATH=${WRAP_DIR}:${PATH}
-
- # Define DISTNAME variable.
- # shellcheck source=contrib/gitian-descriptors/assign_DISTNAME
- source contrib/gitian-descriptors/assign_DISTNAME
-
- GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz"
-
- # Create the source tarball
- mkdir -p "$(dirname "$GIT_ARCHIVE")"
- git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD
-
- ORIGPATH="$PATH"
- # Extract the git archive into a dir for each host and build
- for i in ${HOSTS}; do
- export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH}
- if [ "${i}" = "powerpc64-linux-gnu" ]; then
- # Workaround for https://bugs.launchpad.net/ubuntu/+source/gcc-8-cross-ports/+bug/1853740
- # TODO: remove this when no longer needed
- HOST_LDFLAGS="${HOST_LDFLAGS_BASE} -Wl,-z,noexecstack"
- else
- HOST_LDFLAGS="${HOST_LDFLAGS_BASE}"
- fi
- mkdir -p distsrc-${i}
- cd distsrc-${i}
- INSTALLPATH="${PWD}/installed/${DISTNAME}"
- mkdir -p ${INSTALLPATH}
- tar --strip-components=1 -xf "${GIT_ARCHIVE}"
-
- ./autogen.sh
- CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" CC=${i}-gcc-8 CXX=${i}-g++-8
- make ${MAKEOPTS}
- make ${MAKEOPTS} -C src check-security
- make ${MAKEOPTS} -C src check-symbols
- make install DESTDIR=${INSTALLPATH}
- cd installed
- find . -name "lib*.la" -delete
- find . -name "lib*.a" -delete
- rm -rf ${DISTNAME}/lib/pkgconfig
- find ${DISTNAME}/bin -type f -executable -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg
- find ${DISTNAME}/lib -type f -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg
- cp ../README.md ${DISTNAME}/
- find ${DISTNAME} -not -name "*.dbg" | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz
- find ${DISTNAME} -name "*.dbg" | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}-debug.tar.gz
- cd ../../
- rm -rf distsrc-${i}
- done
diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml
deleted file mode 100644
index addad0a5d2..0000000000
--- a/contrib/gitian-descriptors/gitian-osx-signer.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-name: "bitcoin-dmg-signer"
-distro: "ubuntu"
-suites:
-- "focal"
-architectures:
-- "amd64"
-packages:
-- "faketime"
-- "xorriso"
-- "python3-pip"
-remotes:
-- "url": "https://github.com/bitcoin-core/bitcoin-detached-sigs.git"
- "dir": "signature"
-- "url": "https://github.com/achow101/signapple.git"
- "dir": "signapple"
- "commit": "b084cbbf44d5330448ffce0c7d118f75781b64bd"
-files:
-- "bitcoin-osx-unsigned.tar.gz"
-script: |
- set -e -o pipefail
-
- WRAP_DIR=$HOME/wrapped
- mkdir -p ${WRAP_DIR}
- export PATH="$PWD":$PATH
- FAKETIME_PROGS="dmg xorrisofs"
-
- # Create global faketime wrappers
- for prog in ${FAKETIME_PROGS}; do
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog}
- echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog}
- echo "export FAKETIME=\"${REFERENCE_DATETIME}\"" >> ${WRAP_DIR}/${prog}
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog}
- chmod +x ${WRAP_DIR}/${prog}
- done
-
- # Install signapple
- cd signapple
- python3 -m pip install -U pip setuptools
- python3 -m pip install .
- export PATH="$HOME/.local/bin":$PATH
- cd ..
-
- UNSIGNED_TARBALL=bitcoin-osx-unsigned.tar.gz
- UNSIGNED_APP=dist/Bitcoin-Qt.app
- SIGNED=bitcoin-osx-signed.dmg
-
- tar -xf ${UNSIGNED_TARBALL}
- OSX_VOLNAME="$(cat osx_volname)"
- ./detached-sig-apply.sh ${UNSIGNED_APP} signature/osx/dist
- ${WRAP_DIR}/xorrisofs -D -l -V "${OSX_VOLNAME}" -no-pad -r -dir-mode 0755 -o uncompressed.dmg signed-app
- ${WRAP_DIR}/dmg dmg uncompressed.dmg ${OUTDIR}/${SIGNED}
diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml
deleted file mode 100644
index a39618adb7..0000000000
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ /dev/null
@@ -1,155 +0,0 @@
----
-name: "bitcoin-core-osx-22"
-enable_cache: true
-distro: "ubuntu"
-suites:
-- "focal"
-architectures:
-- "amd64"
-packages:
-- "ca-certificates"
-- "curl"
-- "g++"
-- "git"
-- "pkg-config"
-- "autoconf"
-- "librsvg2-bin"
-- "libtiff-tools"
-- "libtool"
-- "automake"
-- "faketime"
-- "bsdmainutils"
-- "cmake"
-- "imagemagick"
-- "libz-dev"
-- "python3"
-- "python3-pip"
-- "python3-setuptools"
-- "fonts-tuffy"
-- "xorriso"
-- "libtinfo5"
-remotes:
-- "url": "https://github.com/bitcoin/bitcoin.git"
- "dir": "bitcoin"
-files:
-- "Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz"
-script: |
- set -e -o pipefail
-
- WRAP_DIR=$HOME/wrapped
- HOSTS="x86_64-apple-darwin18"
- CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg"
- FAKETIME_HOST_PROGS=""
- FAKETIME_PROGS="ar ranlib date dmg xorrisofs"
-
- export TZ="UTC"
- export BUILD_DIR="$PWD"
- mkdir -p ${WRAP_DIR}
- if test -n "$GBUILD_CACHE_ENABLED"; then
- export SOURCES_PATH=${GBUILD_COMMON_CACHE}
- export BASE_CACHE=${GBUILD_PACKAGE_CACHE}
- mkdir -p ${BASE_CACHE} ${SOURCES_PATH}
- fi
-
- export ZERO_AR_DATE=1
-
- # Use $LIB in LD_PRELOAD to avoid hardcoding the dir (See `man ld.so`)
- function create_global_faketime_wrappers {
- for prog in ${FAKETIME_PROGS}; do
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog}
- echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog}
- echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog}
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog}
- chmod +x ${WRAP_DIR}/${prog}
- done
- }
-
- function create_per-host_faketime_wrappers {
- for i in $HOSTS; do
- for prog in ${FAKETIME_HOST_PROGS}; do
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog}
- echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog}
- chmod +x ${WRAP_DIR}/${i}-${prog}
- done
- done
- }
-
- pip3 install lief==0.11.5
-
- # Faketime for depends so intermediate results are comparable
- export PATH_orig=${PATH}
- create_global_faketime_wrappers "2000-01-01 12:00:00"
- create_per-host_faketime_wrappers "2000-01-01 12:00:00"
- export PATH=${WRAP_DIR}:${PATH}
-
- cd bitcoin
- BASEPREFIX="${PWD}/depends"
-
- mkdir -p ${BASEPREFIX}/SDKs
- tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz
-
- # Build dependencies for each host
- for i in $HOSTS; do
- make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}"
- done
-
- # Faketime for binaries
- export PATH=${PATH_orig}
- create_global_faketime_wrappers "${REFERENCE_DATETIME}"
- create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
- export PATH=${WRAP_DIR}:${PATH}
-
- # Define DISTNAME variable.
- # shellcheck source=contrib/gitian-descriptors/assign_DISTNAME
- source contrib/gitian-descriptors/assign_DISTNAME
-
- GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz"
-
- # Create the source tarball
- mkdir -p "$(dirname "$GIT_ARCHIVE")"
- git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD
-
- ORIGPATH="$PATH"
- # Extract the git archive into a dir for each host and build
- for i in ${HOSTS}; do
- export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH}
- mkdir -p distsrc-${i}
- cd distsrc-${i}
- INSTALLPATH="${PWD}/installed/${DISTNAME}"
- mkdir -p ${INSTALLPATH}
- tar --strip-components=1 -xf "${GIT_ARCHIVE}"
-
- ./autogen.sh
- CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS}
- make ${MAKEOPTS}
- make ${MAKEOPTS} -C src check-security
- make ${MAKEOPTS} -C src check-symbols
- make install-strip DESTDIR=${INSTALLPATH}
-
- make osx_volname
- make deploydir
- mkdir -p unsigned-app-${i}
- cp osx_volname unsigned-app-${i}/
- cp contrib/macdeploy/detached-sig-apply.sh unsigned-app-${i}
- cp contrib/macdeploy/detached-sig-create.sh unsigned-app-${i}
- cp ${BASEPREFIX}/${i}/native/bin/dmg unsigned-app-${i}
- mv dist unsigned-app-${i}
- pushd unsigned-app-${i}
- find . | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-osx-unsigned.tar.gz
- popd
-
- make deploy OSX_DMG="${OUTDIR}/${DISTNAME}-osx-unsigned.dmg"
-
- cd installed
- find . -name "lib*.la" -delete
- find . -name "lib*.a" -delete
- rm -rf ${DISTNAME}/lib/pkgconfig
- find ${DISTNAME} | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz
- cd ../../
- done
-
- mv ${OUTDIR}/${DISTNAME}-x86_64-*.tar.gz ${OUTDIR}/${DISTNAME}-osx64.tar.gz
diff --git a/contrib/gitian-descriptors/gitian-win-signer.yml b/contrib/gitian-descriptors/gitian-win-signer.yml
deleted file mode 100644
index c13c24c3cc..0000000000
--- a/contrib/gitian-descriptors/gitian-win-signer.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-name: "bitcoin-win-signer"
-distro: "ubuntu"
-suites:
-- "focal"
-architectures:
-- "amd64"
-packages:
-- "libssl-dev"
-- "autoconf"
-- "automake"
-- "libtool"
-- "pkg-config"
-remotes:
-- "url": "https://github.com/bitcoin-core/bitcoin-detached-sigs.git"
- "dir": "signature"
-files:
-- "osslsigncode-2.0.tar.gz"
-- "bitcoin-win-unsigned.tar.gz"
-script: |
- set -e -o pipefail
-
- BUILD_DIR="$PWD"
- SIGDIR=${BUILD_DIR}/signature/win
- UNSIGNED_DIR=${BUILD_DIR}/unsigned
-
- echo "5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f osslsigncode-2.0.tar.gz" | sha256sum -c
-
- mkdir -p ${UNSIGNED_DIR}
- tar -C ${UNSIGNED_DIR} -xf bitcoin-win-unsigned.tar.gz
-
- tar xf osslsigncode-2.0.tar.gz
- cd osslsigncode-2.0
-
- ./autogen.sh
- ./configure --without-gsf --without-curl --disable-dependency-tracking
- make
- find ${UNSIGNED_DIR} -name "*-unsigned.exe" | while read i; do
- INFILE="$(basename "${i}")"
- OUTFILE="${INFILE/-unsigned}"
- ./osslsigncode attach-signature -in "${i}" -out "${OUTDIR}/${OUTFILE}" -sigin "${SIGDIR}/${INFILE}.pem"
- done
diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml
deleted file mode 100644
index ffe228a032..0000000000
--- a/contrib/gitian-descriptors/gitian-win.yml
+++ /dev/null
@@ -1,157 +0,0 @@
----
-name: "bitcoin-core-win-22"
-enable_cache: true
-distro: "ubuntu"
-suites:
-- "focal"
-architectures:
-- "amd64"
-packages:
-- "curl"
-- "g++"
-- "git"
-- "pkg-config"
-- "autoconf"
-- "libtool"
-- "automake"
-- "faketime"
-- "bsdmainutils"
-- "mingw-w64"
-- "g++-mingw-w64"
-- "nsis"
-- "zip"
-- "ca-certificates"
-- "python3"
-- "python3-pip"
-remotes:
-- "url": "https://github.com/bitcoin/bitcoin.git"
- "dir": "bitcoin"
-files: []
-script: |
- set -e -o pipefail
-
- WRAP_DIR=$HOME/wrapped
- HOSTS="x86_64-w64-mingw32"
- CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary"
- FAKETIME_HOST_PROGS="ar ranlib nm windres strip objcopy"
- FAKETIME_PROGS="date makensis zip"
- HOST_CFLAGS="-O2 -g -fno-ident"
- HOST_CXXFLAGS="-O2 -g -fno-ident"
-
- export TZ="UTC"
- export BUILD_DIR="$PWD"
- mkdir -p ${WRAP_DIR}
- if test -n "$GBUILD_CACHE_ENABLED"; then
- export SOURCES_PATH=${GBUILD_COMMON_CACHE}
- export BASE_CACHE=${GBUILD_PACKAGE_CACHE}
- mkdir -p ${BASE_CACHE} ${SOURCES_PATH}
- fi
-
- # Use $LIB in LD_PRELOAD to avoid hardcoding the dir (See `man ld.so`)
- function create_global_faketime_wrappers {
- for prog in ${FAKETIME_PROGS}; do
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${prog}
- echo "REAL=\`which -a ${prog} | grep -v ${WRAP_DIR}/${prog} | head -1\`" >> ${WRAP_DIR}/${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${prog}
- echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${prog}
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${prog}
- chmod +x ${WRAP_DIR}/${prog}
- done
- }
-
- function create_per-host_faketime_wrappers {
- for i in $HOSTS; do
- for prog in ${FAKETIME_HOST_PROGS}; do
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog}
- echo "REAL=\`which -a ${i}-${prog} | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog}
- chmod +x ${WRAP_DIR}/${i}-${prog}
- done
- done
- }
-
- function create_per-host_compiler_wrapper {
- # -posix variant is required for c++11 threading.
- for i in $HOSTS; do
- for prog in gcc g++; do
- echo '#!/usr/bin/env bash' > ${WRAP_DIR}/${i}-${prog}
- echo "REAL=\`which -a ${i}-${prog}-posix | grep -v ${WRAP_DIR}/${i}-${prog} | head -1\`" >> ${WRAP_DIR}/${i}-${prog}
- echo "export LD_PRELOAD='/usr/\$LIB/faketime/libfaketime.so.1'" >> ${WRAP_DIR}/${i}-${prog}
- echo "export FAKETIME=\"$1\"" >> ${WRAP_DIR}/${i}-${prog}
- echo "exec \"\$REAL\" \"\$@\"" >> $WRAP_DIR/${i}-${prog}
- chmod +x ${WRAP_DIR}/${i}-${prog}
- done
- done
- }
-
- pip3 install lief==0.11.5
-
- # Faketime for depends so intermediate results are comparable
- export PATH_orig=${PATH}
- create_global_faketime_wrappers "2000-01-01 12:00:00"
- create_per-host_faketime_wrappers "2000-01-01 12:00:00"
- create_per-host_compiler_wrapper "2000-01-01 12:00:00"
- export PATH=${WRAP_DIR}:${PATH}
-
- cd bitcoin
- BASEPREFIX="${PWD}/depends"
- # Build dependencies for each host
- for i in $HOSTS; do
- make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}"
- done
-
- # Faketime for binaries
- export PATH=${PATH_orig}
- create_global_faketime_wrappers "${REFERENCE_DATETIME}"
- create_per-host_faketime_wrappers "${REFERENCE_DATETIME}"
- create_per-host_compiler_wrapper "${REFERENCE_DATETIME}"
- export PATH=${WRAP_DIR}:${PATH}
-
- # Define DISTNAME variable.
- # shellcheck source=contrib/gitian-descriptors/assign_DISTNAME
- source contrib/gitian-descriptors/assign_DISTNAME
-
- GIT_ARCHIVE="${OUTDIR}/src/${DISTNAME}.tar.gz"
-
- # Create the source tarball
- mkdir -p "$(dirname "$GIT_ARCHIVE")"
- git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD
-
- ORIGPATH="$PATH"
- # Extract the git archive into a dir for each host and build
- for i in ${HOSTS}; do
- export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH}
- mkdir -p distsrc-${i}
- cd distsrc-${i}
- INSTALLPATH="${PWD}/installed/${DISTNAME}"
- mkdir -p ${INSTALLPATH}
- tar --strip-components=1 -xf "${GIT_ARCHIVE}"
-
- ./autogen.sh
- CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}"
- make ${MAKEOPTS}
- make ${MAKEOPTS} -C src check-security
- make ${MAKEOPTS} -C src check-symbols
- make deploy BITCOIN_WIN_INSTALLER="${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe"
- make install DESTDIR=${INSTALLPATH}
- cd installed
- mv ${DISTNAME}/bin/*.dll ${DISTNAME}/lib/
- find . -name "lib*.la" -delete
- find . -name "lib*.a" -delete
- rm -rf ${DISTNAME}/lib/pkgconfig
- find ${DISTNAME}/bin -type f -executable -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg
- find ${DISTNAME}/lib -type f -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg
- cp ../doc/README_windows.txt ${DISTNAME}/readme.txt
- find ${DISTNAME} -not -name "*.dbg" -type f | sort | zip -X@ ${OUTDIR}/${DISTNAME}-${i//x86_64-w64-mingw32/win64}.zip
- find ${DISTNAME} -name "*.dbg" -type f | sort | zip -X@ ${OUTDIR}/${DISTNAME}-${i//x86_64-w64-mingw32/win64}-debug.zip
- cd ../../
- rm -rf distsrc-${i}
- done
-
- cp -rf contrib/windeploy $BUILD_DIR
- cd $BUILD_DIR/windeploy
- mkdir unsigned
- cp ${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe unsigned/
- find . | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-win-unsigned.tar.gz
diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md
new file mode 100644
index 0000000000..63aa3e02b2
--- /dev/null
+++ b/contrib/guix/INSTALL.md
@@ -0,0 +1,801 @@
+# Guix Installation and Setup
+
+This only needs to be done once per machine. If you have already completed the
+installation and setup, please proceed to [perform a build](./README.md).
+
+Otherwise, you may choose from one of the following options to install Guix:
+
+1. Using the official **shell installer script** [⤓ skip to section][install-script]
+ - Maintained by Guix developers
+ - Easiest (automatically performs *most* setup)
+ - Works on nearly all Linux distributions
+ - Only installs latest release
+ - Binary installation only, requires high level of trust
+ - Note: The script needs to be run as root, so it should be inspected before it's run
+2. Using the official **binary tarball** [⤓ skip to section][install-bin-tarball]
+ - Maintained by Guix developers
+ - Normal difficulty (full manual setup required)
+ - Works on nearly all Linux distributions
+ - Installs any release
+ - Binary installation only, requires high level of trust
+3. Using fanquake's **Docker image** [↗︎ external instructions][install-fanquake-docker]
+ - Maintained by fanquake
+ - Easy (automatically performs *some* setup)
+ - Works wherever Docker images work
+ - Installs any release
+ - Binary installation only, requires high level of trust
+4. Using a **distribution-maintained package** [⤓ skip to section][install-distro-pkg]
+ - Maintained by distribution's Guix package maintainer
+ - Normal difficulty (manual setup required)
+ - Works only on distributions with Guix packaged, see: https://repology.org/project/guix/versions
+ - Installs a release decided on by package maintainer
+ - Source or binary installation depending on the distribution
+5. Building **from source** [⤓ skip to section][install-source]
+ - Maintained by you
+ - Hard, but rewarding
+ - Can be made to work on most Linux distributions
+ - Installs any commit (more granular)
+ - Source installation, requires lower level of trust
+
+## Options 1 and 2: Using the official shell installer script or binary tarball
+
+The installation instructions for both the official shell installer script and
+the binary tarballs can be found in the GNU Guix Manual's [Binary Installation
+section](https://guix.gnu.org/manual/en/html_node/Binary-Installation.html).
+
+Note that running through the binary tarball installation steps is largely
+equivalent to manually performing what the shell installer script does.
+
+Note that at the time of writing (July 5th, 2021), the shell installer script
+automatically creates an `/etc/profile.d` entry which the binary tarball
+installation instructions do not ask you to create. However, you will likely
+need this entry for better desktop integration. Please see [this
+section](#add-an-etcprofiled-entry) for instructions on how to add a
+`/etc/profile.d/guix.sh` entry.
+
+Regardless of which installation option you chose, the changes to
+`/etc/profile.d` will not take effect until the next shell or desktop session,
+so you should log out and log back in.
+
+## Option 3: Using fanquake's Docker image
+
+Please refer to fanquake's instructions
+[here](https://github.com/fanquake/core-review/tree/master/guix).
+
+Note that the `Dockerfile` is largely equivalent to running through the binary
+tarball installation steps.
+
+## Option 4: Using a distribution-maintained package
+
+Note that this section is based on the distro packaging situation at the time of
+writing (July 2021). Guix is expected to be more widely packaged over time. For
+an up-to-date view on Guix's package status/version across distros, please see:
+https://repology.org/project/guix/versions
+
+### Debian 11 (Bullseye)/Ubuntu 21.04 (Hirsute Hippo)
+
+Guix v1.2.0 is available as a distribution package starting in [Debian
+11](https://packages.debian.org/bullseye/guix) and [Ubuntu
+21.04](https://packages.ubuntu.com/hirsute/guix).
+
+Note that if you intend on using Guix without using any substitutes (more
+details [here][security-model]), v1.2.0 has a known problem when building GnuTLS
+from source. Solutions and workarounds are documented
+[here](#gnutls-test-suite-fail-status-request-revoked).
+
+
+To install:
+```sh
+sudo apt install guix
+```
+
+For up-to-date information on Debian and Ubuntu's release history:
+- [Debian release history](https://www.debian.org/releases/)
+- [Ubuntu release history](https://ubuntu.com/about/release-cycle)
+
+### Arch Linux
+
+Guix is available in the AUR as
+[`guix`](https://aur.archlinux.org/packages/guix/), please follow the
+installation instructions in the Arch Linux Wiki ([live
+link](https://wiki.archlinux.org/index.php/Guix#AUR_Package_Installation),
+[2021/03/30
+permalink](https://wiki.archlinux.org/index.php?title=Guix&oldid=637559#AUR_Package_Installation))
+to install Guix.
+
+At the time of writing (2021/03/30), the `check` phase will fail if the path to
+guix's build directory is longer than 36 characters due to an anachronistic
+character limit on the shebang line. Since the `check` phase happens after the
+`build` phase, which may take quite a long time, it is recommended that users
+either:
+
+1. Skip the `check` phase
+ - For `makepkg`: `makepkg --nocheck ...`
+ - For `yay`: `yay --mflags="--nocheck" ...`
+ - For `paru`: `paru --nocheck ...`
+2. Or, check their build directory's length beforehand
+ - For those building with `makepkg`: `pwd | wc -c`
+
+## Option 5: Building from source
+
+Building Guix from source is a rather involved process but a rewarding one for
+those looking to minimize trust and maximize customizability (e.g. building a
+particular commit of Guix). Previous experience with using autotools-style build
+systems to build packages from source will be helpful. *Here be dragons.*
+
+I strongly urge you to at least skim through the entire section once before you
+start issuing commands, as it will save you a lot of unnecessary pain and
+anguish.
+
+### Installing common build tools
+
+There are a few basic build tools that are required for most things we'll build,
+so let's install them now:
+
+Text transformation/i18n:
+- `autopoint` (sometimes packaged in `gettext`)
+- `help2man`
+- `po4a`
+- `texinfo`
+
+Build system tools:
+- `g++` w/ C++11 support
+- `libtool`
+- `autoconf`
+- `automake`
+- `pkg-config` (sometimes packaged as `pkgconf`)
+- `make`
+- `cmake`
+
+Miscellaneous:
+- `git`
+- `gnupg`
+- `python3`
+
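+For example, on Debian-derived distributions, the tools above can usually be
+installed in one go (package names may differ slightly on other distributions):
+
+```sh
+sudo apt install autopoint help2man po4a texinfo \
+  g++ libtool autoconf automake pkg-config make cmake \
+  git gnupg python3
+```
+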
+### Building and Installing Guix's dependencies
+
+In order to build Guix itself from source, we need to first make sure that the
+necessary dependencies are installed and discoverable. The most up-to-date list
+of Guix's dependencies is kept in the ["Requirements"
+section](https://guix.gnu.org/manual/en/html_node/Requirements.html) of the Guix
+Reference Manual.
+
+Depending on your distribution, most or all of these dependencies may already be
+packaged and installable without manually building and installing.
+
+For reference, the graphic below outlines Guix v1.3.0's dependency graph:
+
+![bootstrap map](https://user-images.githubusercontent.com/6399679/125064185-a9a59880-e0b0-11eb-82c1-9b8e5dc9950d.png)
+
+#### Guile
+
+##### Choosing a Guile version and sticking to it
+
+One of the first things you need to decide is which Guile version you want to
+use: Guile v2.2 or Guile v3.0. Unlike the python2 to python3 transition, Guile
+v2.2 and Guile v3.0 are largely compatible, as evidenced by the fact that most
+Guile packages and even [Guix
+itself](https://guix.gnu.org/en/blog/2020/guile-3-and-guix/) support running on
+both.
+
+What is important here is that you **choose one**, and you **remain consistent**
+with your choice throughout **all Guile-related packages**, no matter if they
+are installed via the distribution's package manager or installed from source.
+This is because the files for Guile packages are installed to directories which
+are separated based on the Guile version.
+
+###### Example: Checking that Ubuntu's `guile-git` is compatible with your chosen Guile version
+
+On Ubuntu Focal:
+
+```sh
+$ apt show guile-git
+Package: guile-git
+...
+Depends: guile-2.2, guile-bytestructures, libgit2-dev
+...
+```
+
+As you can see, the package `guile-git` depends on `guile-2.2`, meaning that it
+was likely built for Guile v2.2. This means that if you decided to use Guile
+v3.0 on Ubuntu Focal, you would need to build guile-git from source instead of
+using the distribution package.
+
+On Ubuntu Hirsute:
+
+```sh
+$ apt show guile-git
+Package: guile-git
+...
+Depends: guile-3.0 | guile-2.2, guile-bytestructures (>= 1.0.7-3~), libgit2-dev (>= 1.0)
+...
+```
+
+In this case, `guile-git` depends on either `guile-3.0` or `guile-2.2`, meaning
+that it would work no matter what Guile version you decided to use.
+
+###### Corner case: Multiple versions of Guile on one system
+
+It is recommended to only install one version of Guile, so that build systems do
+not get confused about which Guile to use.
+
+However, if you insist on having both Guile v2.2 and Guile v3.0 installed on
+your system, then you need to **consistently** specify one of
+`GUILE_EFFECTIVE_VERSION=3.0` or `GUILE_EFFECTIVE_VERSION=2.2` to all
+`./configure` invocations for Guix and its dependencies.
+
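+For illustration, one way to stay consistent is to export the variable once in
+the shell used for all of these builds (this assumes each package's
+`./configure` honours the variable from the environment):
+
+```sh
+# Pick one Guile version and use it for every Guile-related ./configure run
+export GUILE_EFFECTIVE_VERSION=3.0
+./configure --prefix=/usr/local
+```
+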
+##### Installing Guile
+
+Guile is most likely already packaged for your distribution, so after you have
+[chosen a Guile version](#choosing-a-guile-version-and-sticking-to-it), install
+it via your distribution's package manager.
+
+If your distribution splits packages into `-dev`-suffixed and
+non-`-dev`-suffixed sub-packages (as is the case for Debian-derived
+distributions), please make sure to install both. For example, to install Guile
+v2.2 on Debian/Ubuntu:
+
+```sh
+apt install guile-2.2 guile-2.2-dev
+```
+
+#### Mixing distribution packages and source-built packages
+
+At the time of writing, most distributions have _some_ of Guix's dependencies
+packaged, but not all. This means that you may want to install the distribution
+package for some dependencies, and manually build-from-source for others.
+
+Distribution packages usually install to `/usr`, which is different from the
+default `./configure` prefix of source-built packages: `/usr/local`.
+
+This means that if you mix-and-match distribution packages and source-built
+packages and do not specify exactly `--prefix=/usr` to `./configure` for
+source-built packages, you will need to augment the `GUILE_LOAD_PATH` and
+`GUILE_LOAD_COMPILED_PATH` environment variables so that Guile will look
+under the right prefix and find your source-built packages.
+
+For example, if you are using Guile v2.2, and have Guile packages in the
+`/usr/local` prefix, either add the following lines to your `.profile` or
+`.bash_profile` so that the environment variables are properly set for all future
+shell logins, or paste the lines into a POSIX-style shell to temporarily modify
+the environment variables of your current shell session.
+
+```sh
+# Help Guile v2.2.x find packages in /usr/local
+export GUILE_LOAD_PATH="/usr/local/share/guile/site/2.2${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH"
+export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/2.2/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_LOAD_COMPILED_PATH"
+```
+
+Note that these environment variables are used to check for packages during
+`./configure`, so they should be set as soon as possible should you want to use
+a prefix other than `/usr`.
+
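+As an optional sanity check (a sketch that assumes you have installed
+`guile-git` under `/usr/local`), you can verify that Guile now finds a
+source-built package:
+
+```sh
+guile -c '(use-modules (git)) (display "guile-git is on the load path\n")'
+```
+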
+#### Building and installing source-built packages
+
+***IMPORTANT**: A few dependencies have non-obvious quirks/errata which are
+documented in the sub-sections immediately below. Please read these sections
+before proceeding to build and install these packages.*
+
+Although you should always refer to the README or INSTALL files for the most
+accurate information, most of these dependencies use autoconf-style build
+systems (check if there's a `configure.ac` file), and will likely do the right
+thing with the following:
+
+Clone the repository and check out the latest release:
+```sh
+git clone <git-repo-of-dependency>/<dependency>.git
+cd <dependency>
+git tag -l # check for the latest release
+git checkout <latest-release>
+```
+
+For autoconf-based build systems (if `./autogen.sh` or `configure.ac` exists at
+the root of the repository):
+
+```sh
+./autogen.sh || autoreconf -vfi
+./configure --prefix=<prefix>
+make
+sudo make install
+```
+
+For CMake-based build systems (if `CMakeLists.txt` exists at the root of the
+repository):
+
+```sh
+mkdir build && cd build
+cmake .. -DCMAKE_INSTALL_PREFIX=<prefix>
+sudo cmake --build . --target install
+```
+
+If you choose not to specify exactly `--prefix=/usr` to `./configure`, please
+make sure you've carefully read the [previous
+section](#mixing-distribution-packages-and-source-built-packages) on mixing
+distribution packages and source-built packages.
+
+##### Binding packages require `-dev`-suffixed packages
+
+Relevant for:
+- Everyone
+
+When building bindings, the `-dev`-suffixed version of the original package
+needs to be installed. For example, building `Guile-zlib` on Debian-derived
+distributions requires that `zlib1g-dev` is installed.
+
+When using bindings, the `-dev`-suffixed version of the original package still
+needs to be installed. This is particularly problematic when a distribution
+package is mispackaged, as `guile-sqlite3` is in Ubuntu Focal, where installing
+`guile-sqlite3` does not automatically pull in `libsqlite3-dev` as a
+dependency.
+
+Below is a list of relevant Guile bindings and their corresponding `-dev`
+packages in Debian at the time of writing.
+
+| Guile binding package | -dev Debian package |
+|-----------------------|---------------------|
+| guile-gcrypt | libgcrypt-dev |
+| guile-git | libgit2-dev |
+| guile-lzlib | liblz-dev |
+| guile-ssh | libssh-dev |
+| guile-sqlite3 | libsqlite3-dev |
+| guile-zlib | zlib1g-dev |
+
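+On Debian-derived systems, the `-dev` packages from the table above can
+typically be installed in one command (names are taken from the table and may
+differ on other releases):
+
+```sh
+sudo apt install libgcrypt-dev libgit2-dev liblz-dev libssh-dev libsqlite3-dev zlib1g-dev
+```
+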
+##### `guile-git` actually depends on `libgit2 >= 1.1`
+
+Relevant for:
+- Those building `guile-git` from source against `libgit2 < 1.1`
+- Those installing `guile-git` from their distribution where `guile-git` is
+ built against `libgit2 < 1.1`
+
+As of v0.4.0, `guile-git` claims to only require `libgit2 >= 0.28.0`; however,
+it actually requires `libgit2 >= 1.1`. Otherwise, it will be confused by a
+reference to `origin/keyring`: instead of interpreting the reference as "the
+'keyring' branch of the 'origin' remote", the reference is interpreted as "the
+branch literally named 'origin/keyring'".
+
+This is especially notable because Ubuntu Focal packages `libgit2 v0.28.4`, and
+`guile-git` is built against it.
+
+Should you be in this situation, you need to build both `libgit2 v1.1.x` and
+`guile-git` from source.
+
+Source: http://logs.guix.gnu.org/guix/2020-11-12.log#232527
+
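+A rough sketch of building `libgit2 v1.1.x` from source, following the generic
+CMake steps above (the exact tag is illustrative; build `guile-git` afterwards
+using the autoconf steps):
+
+```sh
+git clone https://github.com/libgit2/libgit2.git
+cd libgit2
+git tag -l                 # pick a v1.1.x release
+git checkout v1.1.0
+mkdir build && cd build
+cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local
+sudo cmake --build . --target install
+```
+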
+##### `{scheme,guile}-bytestructures` v1.0.8 and v1.0.9 are broken for Guile v2.2
+
+Relevant for:
+- Those building `{scheme,guile}-bytestructures` from source against Guile v2.2
+
+Commit
+[707eea3](https://github.com/TaylanUB/scheme-bytestructures/commit/707eea3a85e1e375e86702229ebf73d496377669)
+introduced a regression for Guile v2.2 and was first included in v1.0.8; this
+was later corrected in commit
+[ec9a721](https://github.com/TaylanUB/scheme-bytestructures/commit/ec9a721957c17bcda13148f8faa5f06934431ff7)
+and included in v1.1.0.
+
+TL;DR If you decided to use Guile v2.2, do not use `{scheme,guile}-bytestructures` v1.0.8 or v1.0.9.
+
+### Building and Installing Guix itself
+
+Start by cloning Guix:
+
+```
+git clone https://git.savannah.gnu.org/git/guix.git
+cd guix
+```
+
+You will likely want to build the latest release. However, if the latest release
+at the time you read this is still v1.2.0, you may want to use commit 95aca29
+instead to avoid a problem in the GnuTLS test suite.
+
+```
+git branch -a -l 'origin/version-*' # check for the latest release
+git checkout <latest-release>
+```
+
+Bootstrap the build system:
+```
+./bootstrap
+```
+
+Configure with the recommended `--localstatedir` flag:
+```
+./configure --localstatedir=/var
+```
+
+Note: If you intend to hack on Guix in the future, you will need to supply the
+same `--localstatedir=` flag for all future Guix `./configure` invocations. See
+the last paragraph of this
+[section](https://guix.gnu.org/manual/en/html_node/Requirements.html) for more
+details.
+
+Build Guix (this will take a while):
+```
+make -j$(nproc)
+```
+
+Install Guix:
+
+```
+sudo make install
+```
+
+### Post-"build from source" Setup
+
+#### Creating and starting a `guix-daemon-original` service with a fixed `argv[0]`
+
+At this point, guix will be installed to `${bindir}`, which is likely
+`/usr/local/bin` if you did not override directory variables at
+`./configure`-time. More information on standard Automake directory variables
+can be found
+[here](https://www.gnu.org/software/automake/manual/html_node/Standard-Directory-Variables.html).
+
+However, the Guix init scripts and service configurations for Upstart, systemd,
+SysV, and OpenRC are installed (in `${libdir}`) to launch
+`${localstatedir}/guix/profiles/per-user/root/current-guix/bin/guix-daemon`,
+which does not yet exist, and will only exist after [`root` performs their first
+`guix pull`](#guix-pull-as-root).
+
+We need to create a `-original` version of these init scripts that points to
+the binaries we just built and `make install`'ed in `${bindir}` (normally,
+`/usr/local/bin`).
+
+Example for `systemd`, run as `root`:
+
+```sh
+# Create guix-daemon-original.service by modifying guix-daemon.service
+libdir=# set according to your PREFIX (default is /usr/local/lib)
+bindir="$(dirname $(command -v guix-daemon))"
+sed -E -e "s|/\S*/guix/profiles/per-user/root/current-guix/bin/guix-daemon|${bindir}/guix-daemon|" "${libdir}"/systemd/system/guix-daemon.service > /etc/systemd/system/guix-daemon-original.service
+chmod 664 /etc/systemd/system/guix-daemon-original.service
+
+# Make systemd recognize the new service
+systemctl daemon-reload
+
+# Make sure that the non-working guix-daemon.service is stopped and disabled
+systemctl stop guix-daemon
+systemctl disable guix-daemon
+
+# Make sure that the working guix-daemon-original.service is started and enabled
+systemctl enable guix-daemon-original
+systemctl start guix-daemon-original
+```
+
+#### Creating `guix-daemon` users / groups
+
+Please see the [relevant
+section](https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html)
+in the Guix Reference Manual for more details.
+
+## Optional setup
+
+At this point, you are set up to [use Guix to build Bitcoin
+Core](./README.md#usage). However, if you want to polish your setup a bit and
+make it "what Guix intended", then read the next few subsections.
+
+### Add an `/etc/profile.d` entry
+
+This section definitely does not apply to you if you installed Guix using:
+1. The shell installer script
+2. fanquake's Docker image
+3. Debian's `guix` package
+
+#### Background
+
+Although Guix knows how to update itself and its packages, it does so in a
+non-invasive way (it does not modify `/usr/local/bin/guix`).
+
+Instead, it does the following:
+
+- After a `guix pull`, it updates
+ `/var/guix/profiles/per-user/$USER/current-guix`, and creates a symlink
+ targeting this directory at `$HOME/.config/guix/current`
+
+- After a `guix install`, it updates
+ `/var/guix/profiles/per-user/$USER/guix-profile`, and creates a symlink
+ targeting this directory at `$HOME/.guix-profile`
+
+Therefore, in order for these operations to affect your shell/desktop sessions
+(and for the principle of least astonishment to hold), their corresponding
+directories have to be added to well-known environment variables like `$PATH`,
+`$INFOPATH`, `$XDG_DATA_DIRS`, etc.
+
+In other words, if `$HOME/.config/guix/current/bin` does not exist in your
+`$PATH`, a `guix pull` will have no effect on what `guix` you are using. Same
+goes for `$HOME/.guix-profile/bin`, `guix install`, and installed packages.
+
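+As a quick illustration, you can check whether your shell would actually pick up
+a pulled `guix` (this check is a sketch, not part of Guix's own instructions):
+
+```sh
+command -v guix    # ideally prints $HOME/.config/guix/current/bin/guix
+echo "$PATH" | tr ':' '\n' | grep -E 'config/guix/current|\.guix-profile'
+```
+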
+Helpfully, after a `guix pull` or `guix install`, a message will be printed like
+so:
+
+```
+hint: Consider setting the necessary environment variables by running:
+
+ GUIX_PROFILE="$HOME/.guix-profile"
+ . "$GUIX_PROFILE/etc/profile"
+
+Alternately, see `guix package --search-paths -p "$HOME/.guix-profile"'.
+```
+
+However, this is somewhat tedious to do for both `guix pull` and `guix install`
+for each user on the system who wants to properly use `guix`. I recommend that
+you add an entry to `/etc/profile.d` instead. This is done by default when
+installing a Debian package later than 1.2.0-4 and when using the shell
+installer script.
+
+#### Instructions
+
+Create `/etc/profile.d/guix.sh` with the following content:
+```sh
+# _GUIX_PROFILE: `guix pull` profile
+_GUIX_PROFILE="$HOME/.config/guix/current"
+if [ -L $_GUIX_PROFILE ]; then
+ export PATH="$_GUIX_PROFILE/bin${PATH:+:}$PATH"
+ # Export INFOPATH so that the updated info pages can be found
+ # and read by both /usr/bin/info and/or $GUIX_PROFILE/bin/info
+ # When INFOPATH is unset, add a trailing colon so that Emacs
+ # searches 'Info-default-directory-list'.
+ export INFOPATH="$_GUIX_PROFILE/share/info:$INFOPATH"
+fi
+
+# GUIX_PROFILE: User's default profile
+GUIX_PROFILE="$HOME/.guix-profile"
+[ -L $GUIX_PROFILE ] || return
+GUIX_LOCPATH="$GUIX_PROFILE/lib/locale"
+export GUIX_PROFILE GUIX_LOCPATH
+
+[ -f "$GUIX_PROFILE/etc/profile" ] && . "$GUIX_PROFILE/etc/profile"
+
+# set XDG_DATA_DIRS to include Guix installations
+export XDG_DATA_DIRS="$GUIX_PROFILE/share:${XDG_DATA_DIRS:-/usr/local/share/:/usr/share/}"
+```
+
+Please note that this will not take effect until the next shell or desktop
+session (log out and log back in).
+
+### `guix pull` as root
+
+Before you do this, you need to read the section on [choosing your security
+model][security-model] and adjust `guix` and `guix-daemon` flags according to
+your choice, as invoking `guix pull` may pull substitutes from substitute
+servers (which you may not want).
+
+As mentioned in a previous section, Guix expects
+`${localstatedir}/guix/profiles/per-user/root/current-guix` to be populated with
+`root`'s Guix profile, `guix pull`-ed and built by some former version of Guix.
+However, this is not the case when we build from source. Therefore, we need to
+perform a `guix pull` as `root`:
+
+```sh
+sudo --login guix pull --branch=version-<latest-release-version>
+# or
+sudo --login guix pull --commit=<particular-commit>
+```
+
+`guix pull` is quite a long process (especially if you're using
+`--no-substitutes`). If you encounter build problems, please refer to the
+[troubleshooting section](#troubleshooting).
+
+Note that running a bare `guix pull` with no commit or branch specified will
+pull the latest commit on Guix's master branch, which is likely fine, but not
+recommended.
+
+If you installed Guix from source, you may get an error like the following:
+```sh
+error: while creating symlink '/root/.config/guix/current' No such file or directory
+```
+To resolve this, simply:
+```
+sudo mkdir -p /root/.config/guix
+```
+Then try the `guix pull` command again.
+
+After the `guix pull` finishes successfully,
+`${localstatedir}/guix/profiles/per-user/root/current-guix` should be populated.
+
+#### Using the newly-pulled `guix` by restarting the daemon
+
+Depending on how you installed Guix, you should now make sure that your init
+scripts and service configurations point to the newly-pulled `guix-daemon`.
+
+##### If you built Guix from source
+
+If you followed the instructions for [fixing argv\[0\]][fix-argv0], you can now
+do the following:
+
+```sh
+systemctl stop guix-daemon-original
+systemctl disable guix-daemon-original
+
+systemctl enable guix-daemon
+systemctl start guix-daemon
+```
+
+##### If you installed Guix via the Debian/Ubuntu distribution packages
+
+You will need to create a `guix-daemon-latest` service which points to the new
+`guix` rather than a pinned one.
+
+```sh
+# Create guix-daemon-latest.service by modifying guix-daemon.service
+sed -E -e "s|/usr/bin/guix-daemon|/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon|" /etc/systemd/system/guix-daemon.service > /lib/systemd/system/guix-daemon-latest.service
+chmod 664 /lib/systemd/system/guix-daemon-latest.service
+
+# Make systemd recognize the new service
+systemctl daemon-reload
+
+# Make sure that the old guix-daemon.service is stopped and disabled
+systemctl stop guix-daemon
+systemctl disable guix-daemon
+
+# Make sure that the new guix-daemon-latest.service is started and enabled
+systemctl enable guix-daemon-latest
+systemctl start guix-daemon-latest
+```
+
+##### If you installed Guix via lantw44's Arch Linux AUR package
+
+At the time of writing (July 5th, 2021) the systemd unit for "updated Guix" is
+`guix-daemon-latest.service`, therefore, you should do the following:
+
+```sh
+systemctl stop guix-daemon
+systemctl disable guix-daemon
+
+systemctl enable guix-daemon-latest
+systemctl start guix-daemon-latest
+```
+
+##### Otherwise...
+
+Simply do:
+
+```sh
+systemctl restart guix-daemon
+```
+
+### Checking everything
+
+If you followed all the steps above to make your Guix setup "prim and proper,"
+you can check that you did everything properly by running through this
+checklist.
+
+1. `/etc/profile.d/guix.sh` should exist and be sourced at each shell login
+
+2. `guix describe` should not print `guix describe: error: failed to determine
+ origin`, but rather something like:
+
+ ```
+ Generation 38 Feb 22 2021 16:39:31 (current)
+ guix f350df4
+ repository URL: https://git.savannah.gnu.org/git/guix.git
+ branch: version-1.2.0
+ commit: f350df405fbcd5b9e27e6b6aa500da7f101f41e7
+ ```
+
+3. `guix-daemon` should be running from `${localstatedir}/guix/profiles/per-user/root/current-guix`
+
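+A rough way to run through this checklist from a shell (a sketch; paths assume a
+default setup and the process check only inspects how the daemon was invoked):
+
+```sh
+[ -f /etc/profile.d/guix.sh ] && echo "1. /etc/profile.d/guix.sh exists"
+guix describe                                # 2. should print a generation, not an error
+ps -o args= -p "$(pgrep -o guix-daemon)"     # 3. should show .../current-guix/bin/guix-daemon
+```
+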
+# Troubleshooting
+
+## Derivation failed to build
+
+When you see a build failure like below:
+
+```
+building /gnu/store/...-foo-3.6.12.drv...
+/ 'check' phasenote: keeping build directory `/tmp/guix-build-foo-3.6.12.drv-0'
+builder for `/gnu/store/...-foo-3.6.12.drv' failed with exit code 1
+build of /gnu/store/...-foo-3.6.12.drv failed
+View build log at '/var/log/guix/drvs/../...-foo-3.6.12.drv.bz2'.
+cannot build derivation `/gnu/store/...-qux-7.69.1.drv': 1 dependencies couldn't be built
+cannot build derivation `/gnu/store/...-bar-3.16.5.drv': 1 dependencies couldn't be built
+cannot build derivation `/gnu/store/...-baz-2.0.5.drv': 1 dependencies couldn't be built
+guix time-machine: error: build of `/gnu/store/...-baz-2.0.5.drv' failed
+```
+
+It means that `guix` failed to build a package named `foo`, which was a
+dependency of `qux`, `bar`, and `baz`. Importantly, note that the last "failed"
+line is not necessarily the root cause, the first "failed" line is.
+
+Most of the time, the build failure is due to a spurious test failure or the
+package's build system/test suite breaking when running multi-threaded. To
+rebuild _just_ this derivation in a single-threaded fashion (please don't forget
+to add other `guix` flags like `--no-substitutes` as appropriate):
+
+```sh
+$ guix build --cores=1 /gnu/store/...-foo-3.6.12.drv
+```
+
+If the single-threaded rebuild did not succeed, you may need to dig deeper.
+You may view `foo`'s build logs in `less` like so (please replace paths with the
+path you see in the build failure output):
+
+```sh
+$ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less
+```
+
+`foo`'s build directory is also preserved and available at
+`/tmp/guix-build-foo-3.6.12.drv-0`. However, if you fail to build `foo` multiple
+times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build
+failure output for the most accurate, up-to-date information.
+
+### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character
+
+This error occurs when your `$TMPDIR` (default: /tmp) exists on a filesystem
+which rejects characters not present in the UTF-8 character code set. An example
+is ZFS with the utf8only=on option set.
+
+More information: https://bugs.python.org/issue37584
+
+### GnuTLS: test-suite FAIL: status-request-revoked
+
+*The derivation is likely identified by: `/gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv`*
+
+This unfortunate error is most common for non-substitute builders who installed
+Guix v1.2.0. The problem stems from the fact that one of GnuTLS's tests uses a
+hardcoded certificate which expired on 2020-10-24.
+
+What's more unfortunate is that this GnuTLS derivation is somewhat special in
+Guix's dependency graph and is not affected by the package transformation flags
+like `--without-tests=`.
+
+The easiest solution for those encountering this problem is to install a newer
+version of Guix. However, there are ways to work around this issue:
+
+#### Workaround 1: Using substitutes for this single derivation
+
+If you've authorized the official Guix build farm's key (more info
+[here](./README.md#step-1-authorize-the-signing-keys)), then you can use
+substitutes just for this single derivation by invoking the following:
+
+```sh
+guix build --substitute-urls="https://ci.guix.gnu.org" /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
+```
+
+See [this section](./README.md#removing-authorized-keys) for instructions on how
+to remove authorized keys if you don't want to keep the build farm's key
+authorized.
+
+#### Workaround 2: Temporarily setting the system clock back
+
+This workaround was described [here](https://issues.guix.gnu.org/44559#5).
+
+Basically (a sample command sequence for systemd-based systems is sketched after
+this list):
+1. Turn off networking
+2. Turn off NTP
+3. Set system time to 2020-10-01
+4. `guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv`
+5. Set system time back to the accurate current time
+6. Turn NTP back on
+7. Turn networking back on
+
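+A minimal sketch of steps 2-6 for systemd-based systems (the use of
+`timedatectl` is an assumption about your setup; adapt to your init system):
+
+```sh
+# Stop NTP so the clock can be set manually
+sudo timedatectl set-ntp false
+# Set the clock back to before the certificate's expiry date
+sudo timedatectl set-time '2020-10-01 12:00:00'
+# Rebuild the offending GnuTLS derivation without substitutes
+guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
+# Restore automatic time synchronization
+sudo timedatectl set-ntp true
+```
+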
+### coreutils: FAIL: tests/tail-2/inotify-dir-recreate
+
+The `inotify-dir-recreate` test fails on "remote" filesystems such as overlayfs
+(Docker's default filesystem) due to the filesystem being mistakenly recognized
+as non-remote.
+
+A relatively easy workaround to this is to make sure that a somewhat traditional
+filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds). For
+Docker users, this might mean [using a volume][docker/volumes], [binding
+mounting][docker/bind-mnt] from host, or (for those with enough RAM and swap)
+[mounting a tmpfs][docker/tmpfs] using the `--tmpfs` flag.
+
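+For instance, a hedged Docker example using the `--tmpfs` approach (the image
+name is a placeholder and the tmpfs options may need adjusting to your RAM/swap):
+
+```sh
+docker run --rm -it --tmpfs /tmp:exec,size=8g <your-guix-image>
+```
+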
+Please see the following links for more details:
+
+- An upstream coreutils bug has been filed: [debbugs#47940](https://debbugs.gnu.org/cgi/bugreport.cgi?bug=47940)
+- A Guix bug detailing the underlying problem has been filed: [guix-issues#47935](https://issues.guix.gnu.org/47935)
+- A commit to skip this test in Guix has been merged into the core-updates branch:
+[savannah/guix@6ba1058](https://git.savannah.gnu.org/cgit/guix.git/commit/?id=6ba1058df0c4ce5611c2367531ae5c3cdc729ab4)
+
+
+[install-script]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball
+[install-bin-tarball]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball
+[install-fanquake-docker]: #option-3-using-fanquakes-docker-image
+[install-distro-pkg]: #option-4-using-a-distribution-maintained-package
+[install-source]: #option-5-building-from-source
+
+[fix-argv0]: #creating-and-starting-a-guix-daemon-original-service-with-a-fixed-argv0
+[security-model]: ./README.md#choosing-your-security-model
+
+[docker/volumes]: https://docs.docker.com/storage/volumes/
+[docker/bind-mnt]: https://docs.docker.com/storage/bind-mounts/
+[docker/tmpfs]: https://docs.docker.com/storage/tmpfs/
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index e604b370e3..2bb464a40d 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -9,83 +9,171 @@ downloads.
We achieve bootstrappability by using Guix as a functional package manager.
-## Requirements
+# Requirements
-Conservatively, a x86_64 machine with:
+Conservatively, you will need an x86_64 machine with:
- 16GB of free disk space on the partition that /gnu/store will reside in
-- 8GB of free disk space per platform triple you're planning on building (see
- the `HOSTS` environment variable description)
+- 8GB of free disk space **per platform triple** you're planning on building
+ (see the `HOSTS` [environment variable description][env-vars-list])
-## Setup
+# Installation and Setup
-### Installing Guix
+If you don't have Guix installed and set up, please follow the instructions in
+[INSTALL.md](./INSTALL.md).
-If you're just testing this out, you can use the
-[Dockerfile][fanquake/guix-docker] for convenience. It automatically speeds up
-your builds by [using substitutes](#speeding-up-builds-with-substitute-servers).
-If you don't want this behaviour, refer to the [next
-section](#choosing-your-security-model).
+# Usage
-Otherwise, follow the [Guix installation guide][guix/bin-install].
+If you haven't considered your security model yet, please read [the relevant
+section](#choosing-your-security-model) before proceeding to perform a build.
-> Note: For those who like to keep their filesystems clean, Guix is designed to
-> be very standalone and _will not_ conflict with your system's package
-> manager/existing setup. It _only_ touches `/var/guix`, `/gnu`, and
-> `~/.config/guix`.
+## Making the Xcode SDK available for macOS cross-compilation
-### Choosing your security model
+In order to perform a build for macOS (which is included in the default set of
+platform triples to build), you'll need to extract the macOS SDK tarball using
+tools found in the [`macdeploy` directory](../macdeploy/README.md).
-Guix allows us to achieve better binary security by using our CPU time to build
-everything from scratch. However, it doesn't sacrifice user choice in pursuit of
-this: users can decide whether or not to bootstrap and to use substitutes
-(pre-built packages).
+You can then either point to the SDK using the `SDK_PATH` environment variable:
-After installation, you may want to consider [adding substitute
-servers](#speeding-up-builds-with-substitute-servers) from which to download
-pre-built packages to speed up your build if that fits your security model (say,
-if you're just testing that this works). Substitute servers are set up by
-default if you're using the [Dockerfile][fanquake/guix-docker].
+```sh
+# Extract the SDK tarball to /path/to/parent/dir/of/extracted/SDK/Xcode-<foo>-<bar>-extracted-SDK-with-libcxx-headers
+tar -C /path/to/parent/dir/of/extracted/SDK -xaf /path/to/Xcode-<foo>-<bar>-extracted-SDK-with-libcxx-headers.tar.gz
-If you prefer not to use any substitutes, make sure to supply `--no-substitutes`
-like in the following snippet. The first build will take a while, but the
-resulting packages will be cached for future builds.
+# Indicate where to locate the SDK tarball
+export SDK_PATH=/path/to/parent/dir/of/extracted/SDK
+```
+
+or extract it into `depends/SDKs`:
```sh
-export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes'
+mkdir -p depends/SDKs
+tar -C depends/SDKs -xaf /path/to/SDK/tarball
```
-Likewise, to perform a bootstrapped build (takes even longer):
+## Building
+
+*The author highly recommends at least reading over the [common usage patterns
+and examples](#common-guix-build-invocation-patterns-and-examples) section below
+before starting a build. For a full list of customization options, see the
+[recognized environment variables][env-vars-list] section.*
+
+To build Bitcoin Core reproducibly with all default options, invoke the
+following from the top of a clean repository:
```sh
-export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes' ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--bootstrap'
+./contrib/guix/guix-build
```
-### Using a version of Guix with `guix time-machine` capabilities
+## Codesigning build outputs
-> Note: This entire section can be skipped if you are already using a version of
-> Guix that has [the `guix time-machine` command][guix/time-machine].
+The `guix-codesign` command attaches codesignatures (produced by codesigners) to
+existing non-codesigned outputs. Please see the [release process
+documentation](/doc/release-process.md) for more context.
-Once Guix is installed, if it doesn't have the `guix time-machine` command, pull
-the latest `guix`.
+It respects many of the same environment variable flags as `guix-build`, with 2
+crucial differences:
+
+1. Since only Windows and macOS build outputs require codesigning, the `HOSTS`
+ environment variable will have a sane default value of `x86_64-w64-mingw32
+ x86_64-apple-darwin18` instead of all the platforms.
+2. The `guix-codesign` command ***requires*** a `DETACHED_SIGS_REPO` flag.
+ * _**DETACHED_SIGS_REPO**_
+
+ Set the directory where detached codesignatures can be found for the current
+ Bitcoin Core version being built.
+
+ _REQUIRED environment variable_
+
+An invocation with all default options would look like:
+
+```
+env DETACHED_SIGS_REPO=<path/to/bitcoin-detached-sigs> ./contrib/guix/guix-codesign
+```
+
+## Cleaning intermediate work directories
+
+By default, `guix-build` leaves all intermediate files or "work directories"
+(e.g. `depends/work`, `guix-build-*/distsrc-*`) intact at the end of a build so
+that they are available to the user (to aid in debugging, etc.). However, these
+directories usually take up a large amount of disk space. Therefore, a
+`guix-clean` convenience script is provided which cleans the current `git`
+worktree to save disk space:
+
+```
+./contrib/guix/guix-clean
+```
+
+
+## Attesting to build outputs
+
+Much like how Gitian build outputs are attested to in a `gitian.sigs`
+repository, Guix build outputs are attested to in the [`guix.sigs`
+repository](https://github.com/bitcoin-core/guix.sigs).
+
+After you've cloned the `guix.sigs` repository, to attest to the current
+worktree's commit/tag:
+
+```
+env GUIX_SIGS_REPO=<path/to/guix.sigs> SIGNER=<gpg-key-name> ./contrib/guix/guix-attest
+```
+
+See `./contrib/guix/guix-attest --help` for more information on the various ways
+`guix-attest` can be invoked.
+
+## Verifying build output attestations
+
+After at least one other signer has uploaded their signatures to the `guix.sigs`
+repository:
+
+```
+git -C <path/to/guix.sigs> pull
+env GUIX_SIGS_REPO=<path/to/guix.sigs> ./contrib/guix/guix-verify
+```
+
+
+## Common `guix-build` invocation patterns and examples
+
+### Keeping caches and SDKs outside of the worktree
+
+If you perform a lot of builds and have a bunch of worktrees, you may find it
+more efficient to keep the depends tree's download cache, build cache, and SDKs
+outside of the worktrees to avoid duplicate downloads and unnecessary builds. To
+help with this situation, the `guix-build` script honours the `SOURCES_PATH`,
+`BASE_CACHE`, and `SDK_PATH` environment variables and will pass them on to the
+depends tree so that you can do something like:
```sh
-guix pull --max-jobs=4 # change number of jobs accordingly
+env SOURCES_PATH="$HOME/depends-SOURCES_PATH" BASE_CACHE="$HOME/depends-BASE_CACHE" SDK_PATH="$HOME/macOS-SDKs" ./contrib/guix/guix-build
```
-Make sure that you are using your current profile. (You are prompted to do this
-at the end of the `guix pull`)
+Note that the paths that these environment variables point to **must be
+directories**, and **NOT symlinks to directories**.
+
+See the [recognized environment variables][env-vars-list] section for more
+details.
+
+### Building a subset of platform triples
-```bash
-export PATH="${HOME}/.config/guix/current/bin${PATH:+:}$PATH"
+Sometimes you only want to build a subset of the supported platform triples, in
+which case you can override the default list by setting the space-separated
+`HOSTS` environment variable:
+
+```sh
+env HOSTS='x86_64-w64-mingw32 x86_64-apple-darwin18' ./contrib/guix/guix-build
```
+See the [recognized environment variables][env-vars-list] section for more
+details.
+
### Controlling the number of threads used by `guix` build commands
+Depending on your system's RAM capacity, you may want to decrease the number of
+threads used to decrease RAM usage or vice versa.
+
By default, the scripts under `./contrib/guix` will invoke all `guix` build
commands with `--cores="$JOBS"`. Note that `$JOBS` defaults to `$(nproc)` if not
-specified. However, astute manual readers will also notice that there is a
-`--max-jobs=` flag (which defaults to 1 if unspecified).
+specified. However, astute manual readers will also notice that `guix` build
+commands also accept a `--max-jobs=` flag (which defaults to 1 if unspecified).
Here is the difference between `--cores=` and `--max-jobs=`:
@@ -124,30 +212,18 @@ packages when the dependency graph allows for it, you may want to try:
export JOBS=1 ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8'
```
-## Usage
-
-### As a Tool for Deterministic Builds
-
-From the top of a clean Bitcoin Core repository:
-
-```sh
-./contrib/guix/guix-build
-```
-
-After the build finishes successfully (check the status code please), compare
-hashes:
-
-```sh
-find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
-```
+See the [recognized environment variables][env-vars-list] section for more
+details.
-#### Recognized environment variables
+## Recognized environment variables
* _**HOSTS**_
Override the space-separated list of platform triples for which to perform a
- bootstrappable build. _(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf
- aarch64-linux-gnu riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
+ bootstrappable build.
+
+ _(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu
+ riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
x86\_64-w64-mingw32 x86\_64-apple-darwin18")_
* _**SOURCES_PATH**_
@@ -156,18 +232,27 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
depends tree. Setting this to the same directory across multiple builds of the
depends tree can eliminate unnecessary redownloading of package sources.
+ The path that this environment variable points to **must be a directory**, and
+ **NOT a symlink to a directory**.
+
* _**BASE_CACHE**_
Set the depends tree cache for built packages. This is passed through to the
depends tree. Setting this to the same directory across multiple builds of the
depends tree can eliminate unnecessary building of packages.
+ The path that this environment variable points to **must be a directory**, and
+ **NOT a symlink to a directory**.
+
* _**SDK_PATH**_
Set the path where _extracted_ SDKs can be found. This is passed through to
  the depends tree. Note that this should be set to the _parent_ directory of
- the actual SDK (e.g. SDK_PATH=$HOME/Downloads/macOS-SDKs instead of
- $HOME/Downloads/macOS-SDKs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers).
+ the actual SDK (e.g. `SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of
+ `$HOME/Downloads/macOS-SDKs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers`).
+
+ The path that this environment variable points to **must be a directory**, and
+ **NOT a symlink to a directory**.
* _**JOBS**_
@@ -178,13 +263,17 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
- `make` as in `make --jobs="$JOBS"`
- `xargs` as in `xargs -P"$JOBS"`
+ See [here](#controlling-the-number-of-threads-used-by-guix-build-commands) for
+ more details.
+
_(defaults to the value of `nproc` outside the container)_
* _**SOURCE_DATE_EPOCH**_
Override the reference UNIX timestamp used for bit-for-bit reproducibility,
- the variable name conforms to [standard][r12e/source-date-epoch]. _(defaults
- to the output of `$(git log --format=%at -1)`)_
+ the variable name conforms to [standard][r12e/source-date-epoch].
+
+ _(defaults to the output of `$(git log --format=%at -1)`)_
* _**V**_
@@ -200,8 +289,7 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
A whitespace-delimited list of URLs from which to download pre-built packages.
A URL is only used if its signing key is authorized (refer to the [substitute
- servers section](#speeding-up-builds-with-substitute-servers) for more
- details).
+ servers section](#option-1-building-with-substitutes) for more details).
* _**ADDITIONAL_GUIX_COMMON_FLAGS**_
@@ -216,28 +304,65 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
Additional flags to be passed to the invocation of `guix environment` inside
`guix time-machine`.
-## Tips and Tricks
-
-### Speeding up builds with substitute servers
+# Choosing your security model
-_This whole section is automatically done in the convenience
-[Dockerfiles][fanquake/guix-docker]_
+No matter how you installed Guix, you need to decide on your security model for
+building packages with Guix.
-For those who are used to life in the fast _(and trustful)_ lane, you can
-specify [substitute servers][guix/substitutes] from which to download pre-built
-packages.
+Guix allows us to achieve better binary security by using our CPU time to build
+everything from scratch. However, it doesn't sacrifice user choice in pursuit of
+this: users can decide whether or not to use **substitutes** (pre-built
+packages).
+
+## Option 1: Building with substitutes
+
+### Step 1: Authorize the signing keys
+
+Depending on the installation procedure you followed, you may have already
+authorized the Guix build farm key. In particular, the official shell installer
+script asks you if you want the key installed, and the Debian distribution
+package authorizes the key during installation.
+
+You can check the current list of authorized keys at `/etc/guix/acl`.
+
+At the time of writing, a `/etc/guix/acl` with just the Guix build farm key
+authorized looks something like:
+
+```lisp
+(acl
+ (entry
+ (public-key
+ (ecc
+ (curve Ed25519)
+ (q #8D156F295D24B0D9A86FA5741A840FF2D24F60F7B6C4134814AD55625971B394#)
+ )
+ )
+ (tag
+ (guix import)
+ )
+ )
+ )
+```
-> For those who only want to use substitutes from the official Guix build farm
-> and have authorized the build farm's signing key during Guix's installation,
-> you don't need to do anything.
+If you've determined that the official Guix build farm key hasn't been
+authorized, and you would like to authorize it, run the following as root:
-#### Step 1: Authorize the signing keys
+```
+guix archive --authorize < /var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub
+```
-For the official Guix build farm at https://ci.guix.gnu.org, run as root:
+If
+`/var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub`
+doesn't exist, try:
+```sh
+guix archive --authorize < <PREFIX>/share/guix/ci.guix.gnu.org.pub
```
-guix archive --authorize < ~root/.config/guix/current/share/guix/ci.guix.gnu.org.pub
-```
+
+Where `<PREFIX>` is likely:
+- `/usr` if you installed from a distribution package
+- `/usr/local` if you installed Guix from source and didn't supply any
+ prefix-modifying flags to Guix's `./configure`
For dongcarl's substitute server at https://guix.carldong.io, run as root:
@@ -245,90 +370,102 @@ For dongcarl's substitute server at https://guix.carldong.io, run as root:
wget -qO- 'https://guix.carldong.io/signing-key.pub' | guix archive --authorize
```
-#### Step 2: Specify the substitute servers
+#### Removing authorized keys
-The official Guix build farm at https://ci.guix.gnu.org is automatically used
-unless the `--no-substitutes` flag is supplied.
+To remove previously authorized keys, simply edit `/etc/guix/acl` and remove the
+`(entry (public-key ...))` entry.
-This can be overridden for all `guix` invocations by passing the
-`--substitute-urls` option to your invocation of `guix-daemon`. This can also be
-overridden on a call-by-call basis by passing the same `--substitute-urls`
-option to client tools such at `guix environment`.
+### Step 2: Specify the substitute servers
-To use dongcarl's substitute server for Bitcoin Core builds after having
-[authorized his signing key](#authorize-the-signing-keys):
+Once its key is authorized, the official Guix build farm at
+https://ci.guix.gnu.org is automatically used unless the `--no-substitutes` flag
+is supplied. This default list of substitute servers is overridable both on a
+`guix-daemon` level and when you invoke `guix` commands. See examples below for
+the various ways of adding dongcarl's substitute server after having [authorized
+his signing key](#authorize-the-signing-keys).
-```
-export SUBSTITUTE_URLS='https://guix.carldong.io https://ci.guix.gnu.org'
+Change the **default list** of substitute servers by starting `guix-daemon` with
+the `--substitute-urls` option (you will likely need to edit your init script):
+
+```sh
+guix-daemon <cmd> --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org'
```
-## Troubleshooting
+Override the default list of substitute servers by passing the
+`--substitute-urls` option for invocations of `guix` commands:
-### Derivation failed to build
+```sh
+guix <cmd> --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org'
+```
-When you see a build failure like below:
+For scripts under `./contrib/guix`, set the `SUBSTITUTE_URLS` environment
+variable:
-```
-building /gnu/store/...-foo-3.6.12.drv...
-/ 'check' phasenote: keeping build directory `/tmp/guix-build-foo-3.6.12.drv-0'
-builder for `/gnu/store/...-foo-3.6.12.drv' failed with exit code 1
-build of /gnu/store/...-foo-3.6.12.drv failed
-View build log at '/var/log/guix/drvs/../...-foo-3.6.12.drv.bz2'.
-cannot build derivation `/gnu/store/...-qux-7.69.1.drv': 1 dependencies couldn't be built
-cannot build derivation `/gnu/store/...-bar-3.16.5.drv': 1 dependencies couldn't be built
-cannot build derivation `/gnu/store/...-baz-2.0.5.drv': 1 dependencies couldn't be built
-guix time-machine: error: build of `/gnu/store/...-baz-2.0.5.drv' failed
+```sh
+export SUBSTITUTE_URLS='https://guix.carldong.io https://ci.guix.gnu.org'
```
-It means that `guix` failed to build a package named `foo`, which was a
-dependency of `qux`, `bar`, and `baz`. Importantly, note that the last "failed"
-line is not necessarily the root cause, the first "failed" line is.
+## Option 2: Disabling substitutes on an ad-hoc basis
-Most of the time, the build failure is due to a spurious test failure or the
-package's build system/test suite breaking when running multi-threaded. To
-rebuild _just_ this derivation in a single-threaded fashion:
+If you prefer not to use any substitutes, make sure to supply `--no-substitutes`
+like in the following snippet. The first build will take a while, but the
+resulting packages will be cached for future builds.
+For direct invocations of `guix`:
```sh
-$ guix build --cores=1 /gnu/store/...-foo-3.6.12.drv
+guix <cmd> --no-substitutes
```
-If the single-threaded rebuild did not succeed, you may need to dig deeper.
-You may view `foo`'s build logs in `less` like so (please replace paths with the
-path you see in the build failure output):
-
+For the scripts under `./contrib/guix/`:
```sh
-$ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less
+export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes'
```
-`foo`'s build directory is also preserved and available at
-`/tmp/guix-build-foo-3.6.12.drv-0`. However, if you fail to build `foo` multiple
-times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build
-failure output for the most accurate, up-to-date information.
+## Option 3: Disabling substitutes by default
+
+`guix-daemon` accepts a `--no-substitutes` flag, which will make sure that,
+unless otherwise overridden by a command line invocation, no substitutes will be
+used.
-#### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character
+If you start `guix-daemon` using an init script, you can edit said script to
+supply this flag.
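+
+For systemd users, one possible approach is a drop-in override (the `ExecStart`
+line below is only illustrative; copy the original from your installed unit and
+append `--no-substitutes`):
+
+```sh
+sudo systemctl edit guix-daemon
+# In the editor that opens, add for example:
+#   [Service]
+#   ExecStart=
+#   ExecStart=/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon --build-users-group=guixbuild --no-substitutes
+sudo systemctl restart guix-daemon
+```
+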
-This error occurs when your `$TMPDIR` (default: /tmp) exists on a filesystem
-which rejects characters not present in the UTF-8 character code set. An example
-is ZFS with the utf8only=on option set.
-More information: https://bugs.python.org/issue37584
+# Purging/Uninstalling Guix
-## FAQ
+In the extraordinarily rare case where you messed up your Guix installation in
+an irreversible way, you may want to completely purge Guix from your system and
+start over.
-### How can I trust the binary installation?
+1. Uninstall Guix itself according to the way you installed it (e.g. `sudo apt
+ purge guix` for Ubuntu packaging, `sudo make uninstall` for a build from source).
+2. Remove all build users and groups
-As mentioned at the bottom of [this manual page][guix/bin-install]:
+ You may check for relevant users and groups using:
-> The binary installation tarballs can be (re)produced and verified simply by
-> running the following command in the Guix source tree:
->
-> make guix-binary.x86_64-linux.tar.xz
+ ```
+ getent passwd | grep guix
+ getent group | grep guix
+ ```
-### Is Guix packaged in my operating system?
+ Then, you may remove users and groups using:
-Guix is shipped starting with [Debian Bullseye][debian/guix-bullseye] and
-[Ubuntu 21.04 "Hirsute Hippo"][ubuntu/guix-hirsute]. Other operating systems
-are working on packaging Guix as well.
+ ```
+ sudo userdel <user>
+ sudo groupdel <group>
+ ```
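+
+ If the default build-user naming scheme is in use (users named `guixbuilderXX`
+ in a `guixbuild` group — confirm against the `getent` output above), a loop
+ along these lines removes them all at once:
+
+ ```
+ for u in $(getent passwd | cut -d: -f1 | grep '^guixbuilder'); do sudo userdel "$u"; done
+ sudo groupdel guixbuild
+ ```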
+
+3. Remove all possible Guix-related directories
+ - `/var/guix/`
+ - `/var/log/guix/`
+ - `/gnu/`
+ - `/etc/guix/`
+ - `/home/*/.config/guix/`
+ - `/home/*/.cache/guix/`
+ - `/home/*/.guix-profile/`
+ - `/root/.config/guix/`
+ - `/root/.cache/guix/`
+ - `/root/.guix-profile/`
[b17e]: http://bootstrappable.org/
[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/
@@ -343,3 +480,5 @@ are working on packaging Guix as well.
[debian/guix-bullseye]: https://packages.debian.org/bullseye/guix
[ubuntu/guix-hirsute]: https://packages.ubuntu.com/hirsute/guix
[fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix
+
+[env-vars-list]: #recognized-environment-variables
diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest
index c8cf73d400..6e12cbead7 100755
--- a/contrib/guix/guix-attest
+++ b/contrib/guix/guix-attest
@@ -18,7 +18,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
# Required non-builtin commands should be invokable
################
-check_tools cat env basename mkdir xargs find
+check_tools cat env basename mkdir diff sort
if [ -z "$NO_SIGN" ]; then
check_tools gpg
fi
@@ -162,6 +162,18 @@ EOF
echo "Attesting to build outputs for version: '${VERSION}'"
echo ""
+# Given a SHA256SUMS file as stdin that has lines like:
+# 0ba536819b221a91d3d42e978be016aac918f40984754d74058aa0c921cd3ea6 a/b/d/c/d/s/bitcoin-22.0rc2-riscv64-linux-gnu.tar.gz
+# ...
+#
+# Replace each line's file name with its basename:
+# 0ba536819b221a91d3d42e978be016aac918f40984754d74058aa0c921cd3ea6 bitcoin-22.0rc2-riscv64-linux-gnu.tar.gz
+# ...
+#
+basenameify_SHA256SUMS() {
+ sed -E 's@(^[[:xdigit:]]{64}[[:space:]]+).+/([^/]+$)@\1\2@'
+}
+
outsigdir="$GUIX_SIGS_REPO/$VERSION/$signer_name"
mkdir -p "$outsigdir"
(
@@ -174,7 +186,8 @@ mkdir -p "$outsigdir"
cat "${noncodesigned_fragments[@]}" \
| sort -u \
| sort -k2 \
- > "$temp_noncodesigned"
+ | basenameify_SHA256SUMS \
+ > "$temp_noncodesigned"
if [ -e noncodesigned.SHA256SUMS ]; then
# The SHA256SUMS already exists, make sure it's exactly what we
# expect, error out if not
@@ -192,8 +205,8 @@ mkdir -p "$outsigdir"
exit 1
fi
- temp_codesigned="$(mktemp)"
- trap 'rm -rf -- "$temp_codesigned"' EXIT
+ temp_all="$(mktemp)"
+ trap 'rm -rf -- "$temp_all"' EXIT
if (( ${#codesigned_fragments[@]} )); then
# Note: all.SHA256SUMS attests to all of $sha256sum_fragments, but is
@@ -201,18 +214,19 @@ mkdir -p "$outsigdir"
cat "${sha256sum_fragments[@]}" \
| sort -u \
| sort -k2 \
- > "$temp_codesigned"
- if [ -e codesigned.SHA256SUMS ]; then
+ | basenameify_SHA256SUMS \
+ > "$temp_all"
+ if [ -e all.SHA256SUMS ]; then
# The SHA256SUMS already exists, make sure it's exactly what we
# expect, error out if not
- if diff -u all.SHA256SUMS "$temp_codesigned"; then
+ if diff -u all.SHA256SUMS "$temp_all"; then
echo "An all.SHA256SUMS file already exists for '${VERSION}' and is up-to-date."
else
shasum_already_exists all.SHA256SUMS
exit 1
fi
else
- mv "$temp_codesigned" codesigned.SHA256SUMS
+ mv "$temp_all" all.SHA256SUMS
fi
else
# It is fine to have the codesigned outputs be missing (perhaps the
@@ -226,6 +240,7 @@ mkdir -p "$outsigdir"
for i in *.SHA256SUMS; do
if [ ! -e "$i".asc ]; then
gpg --detach-sign \
+ --digest-algo sha256 \
--local-user "$gpg_key_name" \
--armor \
--output "$i".asc "$i"
diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build
index 29d6701b25..dd7229b6fa 100755
--- a/contrib/guix/guix-build
+++ b/contrib/guix/guix-build
@@ -18,7 +18,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
# Required non-builtin commands should be invocable
################
-check_tools cat mkdir make git guix
+check_tools cat mkdir make getent curl git guix
################
# GUIX_BUILD_OPTIONS should be empty
@@ -186,6 +186,29 @@ fi
#
# However, the internal API is likely to change more than the CLI invocation
+################
+# Services database must have basic entries
+################
+
+if ! getent services http https ftp > /dev/null 2>&1; then
+cat << EOF
+ERR: Your system's C library can not find service database entries for at least
+ one of the following services: http, https, ftp.
+
+Hint: Most likely, /etc/services does not exist yet (common for docker images
+ and minimal distros), or you don't have permissions to access it.
+
+ If /etc/services does not exist yet, you may want to install the
+ appropriate package for your distro which provides it.
+
+ On Debian/Ubuntu: netbase
+ On Arch Linux: iana-etc
+
+ For more information, see: getent(1), services(5)
+
+EOF
+
+fi
#########
# SETUP #
@@ -209,14 +232,14 @@ host_to_commonname() {
}
# Determine the reference time used for determinism (overridable by environment)
-SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}"
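+# (log.showSignature is forced off so that GPG signature verification output
+# cannot end up in the captured timestamp)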
+SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}"
# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility
# across time.
time-machine() {
# shellcheck disable=SC2086
- guix time-machine --url=https://github.com/dongcarl/guix.git \
- --commit=490e39ff303f4f6873a04bfb8253755bdae1b29c \
+ guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \
+ --commit=aa34d4d28dfe25ba47d5800d05000fb7221788c0 \
--cores="$JOBS" \
--keep-failed \
--fallback \
diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign
index 62853dbf4d..3f464f89e6 100755
--- a/contrib/guix/guix-codesign
+++ b/contrib/guix/guix-codesign
@@ -220,20 +220,20 @@ fi
JOBS="${JOBS:-$(nproc)}"
# Determine the reference time used for determinism (overridable by environment)
-SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}"
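+# (log.showSignature is forced off so that GPG signature verification output
+# cannot end up in the captured timestamp)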
+SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}"
# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility
# across time.
time-machine() {
# shellcheck disable=SC2086
- guix time-machine --url=https://github.com/dongcarl/guix.git \
- --commit=490e39ff303f4f6873a04bfb8253755bdae1b29c \
- --cores="$JOBS" \
- --keep-failed \
- --fallback \
- ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
- ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \
- -- "$@"
+ guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \
+ --commit=aa34d4d28dfe25ba47d5800d05000fb7221788c0 \
+ --cores="$JOBS" \
+ --keep-failed \
+ --fallback \
+ ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
+ ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \
+ -- "$@"
}
# Make sure an output directory exists for our builds
diff --git a/contrib/guix/guix-verify b/contrib/guix/guix-verify
index a6e2c4065e..02ae022741 100755
--- a/contrib/guix/guix-verify
+++ b/contrib/guix/guix-verify
@@ -28,7 +28,11 @@ cmd_usage() {
cat <<EOF
Synopsis:
- env GUIX_SIGS_REPO=<path/to/guix.sigs> ./contrib/guix/guix-verify
+ env GUIX_SIGS_REPO=<path/to/guix.sigs> [ SIGNER=<signer> ] ./contrib/guix/guix-verify
+
+Example overriding signer's manifest to use as base
+
+ env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs SIGNER=achow101 ./contrib/guix/guix-verify
EOF
}
@@ -73,11 +77,13 @@ verify() {
echo ""
echo "Hint: Either the signature is invalid or the public key is missing"
echo ""
+ failure=1
elif ! diff --report-identical "$compare_manifest" "$current_manifest" 1>&2; then
echo "ERR: The SHA256SUMS attestation in these two directories differ:"
echo " '${compare_manifest}'"
echo " '${current_manifest}'"
echo ""
+ failure=1
else
echo "Verified: '${current_manifest}'"
echo ""
@@ -92,6 +98,17 @@ echo "--------------------"
echo ""
if (( ${#all_noncodesigned[@]} )); then
compare_noncodesigned="${all_noncodesigned[0]}"
+ if [[ -n "$SIGNER" ]]; then
+ signer_noncodesigned="$OUTSIGDIR_BASE/$SIGNER/noncodesigned.SHA256SUMS"
+ if [[ -f "$signer_noncodesigned" ]]; then
+ echo "Using $SIGNER's manifest as the base to compare against"
+ compare_noncodesigned="$signer_noncodesigned"
+ else
+ echo "Unable to find $SIGNER's manifest, using the first one found"
+ fi
+ else
+ echo "No SIGNER provided, using the first manifest found"
+ fi
for current_manifest in "${all_noncodesigned[@]}"; do
verify "$compare_noncodesigned" "$current_manifest"
@@ -112,6 +129,17 @@ echo "--------------------"
echo ""
if (( ${#all_all[@]} )); then
compare_all="${all_all[0]}"
+ if [[ -n "$SIGNER" ]]; then
+ signer_all="$OUTSIGDIR_BASE/$SIGNER/all.SHA256SUMS"
+ if [[ -f "$signer_all" ]]; then
+ echo "Using $SIGNER's manifest as the base to compare against"
+ compare_all="$signer_all"
+ else
+ echo "Unable to find $SIGNER's manifest, using the first one found"
+ fi
+ else
+ echo "No SIGNER provided, using the first manifest found"
+ fi
for current_manifest in "${all_all[@]}"; do
verify "$compare_all" "$current_manifest"
@@ -140,3 +168,7 @@ if (( ${#all_noncodesigned[@]} + ${#all_all[@]} == 0 )); then
echo ""
exit 1
fi
+
+if [ -n "$failure" ]; then
+ exit 1
+fi
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 6741328473..356bd70070 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -214,6 +214,7 @@ make -C depends --jobs="$JOBS" HOST="$HOST" \
x86_64_linux_NM=x86_64-linux-gnu-nm \
x86_64_linux_STRIP=x86_64-linux-gnu-strip \
qt_config_opts_i686_linux='-platform linux-g++ -xplatform bitcoin-linux-g++' \
+ qt_config_opts_x86_64_linux='-platform linux-g++ -xplatform bitcoin-linux-g++' \
FORCE_USE_SYSTEM_CLANG=1
@@ -226,7 +227,6 @@ GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}.tar.gz"
# Create the source tarball if not already there
if [ ! -e "$GIT_ARCHIVE" ]; then
mkdir -p "$(dirname "$GIT_ARCHIVE")"
- touch "${DIST_ARCHIVE_BASE}"/SKIPATTEST.TAG
git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD
fi
@@ -239,7 +239,7 @@ mkdir -p "$OUTDIR"
# CONFIGFLAGS
CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary"
case "$HOST" in
- *linux*) CONFIGFLAGS+=" --enable-glibc-back-compat" ;;
+ *linux*) CONFIGFLAGS+=" --disable-threadlocal" ;;
esac
# CFLAGS
@@ -253,12 +253,23 @@ esac
# CXXFLAGS
HOST_CXXFLAGS="$HOST_CFLAGS"
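+# -Wno-psabi silences GCC's "parameter passing for argument ... changed in GCC 7.1"
+# ABI notes emitted for 32-bit ARM; they are informational only for these builds.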
+case "$HOST" in
+ arm-linux-gnueabihf) HOST_CXXFLAGS="${HOST_CXXFLAGS} -Wno-psabi" ;;
+esac
+
# LDFLAGS
case "$HOST" in
*linux*) HOST_LDFLAGS="-Wl,--as-needed -Wl,--dynamic-linker=$glibc_dynamic_linker -static-libstdc++ -Wl,-O2" ;;
*mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;;
esac
+# Using --no-tls-get-addr-optimize retains compatibility with glibc 2.17, by
+# avoiding a PowerPC64 optimisation available in glibc 2.22 and later.
+# https://sourceware.org/binutils/docs-2.35/ld/PowerPC64-ELF64.html
+case "$HOST" in
+ *powerpc64*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,--no-tls-get-addr-optimize" ;;
+esac
+
case "$HOST" in
powerpc64-linux-*|riscv64-linux-*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,-z,noexecstack" ;;
esac
@@ -291,10 +302,11 @@ mkdir -p "$DISTSRC"
# Build Bitcoin Core
make --jobs="$JOBS" ${V:+V=1}
- # Perform basic ELF security checks on a series of executables.
+ # Check that the symbol/security check tools are sane.
+ make test-security-check ${V:+V=1}
+ # Perform basic security checks on a series of executables.
make -C src --jobs=1 check-security ${V:+V=1}
- # Check that executables only contain allowed gcc, glibc and libstdc++
- # version symbols for Linux distro back-compatibility.
+ # Check that executables only contain allowed version symbols.
make -C src --jobs=1 check-symbols ${V:+V=1}
mkdir -p "$OUTDIR"
@@ -445,5 +457,6 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
find "$ACTUAL_OUTDIR" -type f
} | xargs realpath --relative-base="$PWD" \
| xargs sha256sum \
+ | sort -k2 \
| sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part
)
diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh
index b1eec686ec..f484ac5774 100755
--- a/contrib/guix/libexec/codesign.sh
+++ b/contrib/guix/libexec/codesign.sh
@@ -108,5 +108,6 @@ mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
find "$ACTUAL_OUTDIR" -type f
} | xargs realpath --relative-base="$PWD" \
| xargs sha256sum \
+ | sort -k2 \
| sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part
)
diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash
index 9705607119..40ae4b5208 100644
--- a/contrib/guix/libexec/prelude.bash
+++ b/contrib/guix/libexec/prelude.bash
@@ -49,7 +49,7 @@ fi
# Set common variables
################
-VERSION="${VERSION:-$(git_head_version)}"
+VERSION="${FORCE_VERSION:-$(git_head_version)}"
DISTNAME="${DISTNAME:-bitcoin-${VERSION}}"
version_base_prefix="${PWD}/guix-build-"
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index ba168a2a4a..5805006053 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -80,6 +80,10 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html"
(("-rpath=") "-rpath-link="))
#t))))))))
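+;; Apply a patch that adds "no-"/"disable-" variants of the PE hardening flags
+;; (e.g. --no-dynamicbase) to the mingw-w64 linker, so that intentionally
+;; non-hardened test binaries can be produced (e.g. by the security-check test
+;; suite).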
+(define (make-binutils-with-mingw-w64-disable-flags xbinutils)
+ (package-with-extra-patches xbinutils
+ (search-our-patches "binutils-mingw-w64-disable-flags.patch")))
+
(define (make-cross-toolchain target
base-gcc-for-libc
base-kernel-headers
@@ -135,11 +139,25 @@ chain for " target " development."))
(package-with-extra-patches gcc-8
(search-our-patches "gcc-8-sort-libtool-find-output.patch")))
+;; Building glibc with stack smashing protector first landed in glibc 2.25, use
+;; this function to disable for older glibcs
+;;
+;; From glibc 2.25 changelog:
+;;
+;; * Most of glibc can now be built with the stack smashing protector enabled.
+;; It is recommended to build glibc with --enable-stack-protector=strong.
+;; Implemented by Nick Alcock (Oracle).
+(define (make-glibc-without-ssp xglibc)
+ (package-with-extra-configure-variable
+ (package-with-extra-configure-variable
+ xglibc "libc_cv_ssp" "no")
+ "libc_cv_ssp_strong" "no"))
+
(define* (make-bitcoin-cross-toolchain target
#:key
(base-gcc-for-libc gcc-7)
- (base-kernel-headers linux-libre-headers-5.4)
- (base-libc glibc) ; glibc 2.31
+ (base-kernel-headers linux-libre-headers-4.9)
+ (base-libc (make-glibc-without-ssp glibc-2.24))
(base-gcc (make-gcc-rpath-link base-gcc)))
"Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values
desirable for building Bitcoin Core release binaries."
@@ -154,7 +172,7 @@ desirable for building Bitcoin Core release binaries."
(define (make-mingw-pthreads-cross-toolchain target)
"Create a cross-compilation toolchain package for TARGET"
- (let* ((xbinutils (cross-binutils target))
+ (let* ((xbinutils (make-binutils-with-mingw-w64-disable-flags (cross-binutils target)))
(pthreads-xlibc mingw-w64-x86_64-winpthreads)
(pthreads-xgcc (make-gcc-with-pthreads
(cross-gcc target
@@ -557,6 +575,28 @@ and endian independent.")
inspecting signatures in Mach-O binaries.")
(license license:expat))))
+(define-public glibc-2.24
+ (package
+ (inherit glibc)
+ (version "2.24")
+ (source (origin
+ (method git-fetch)
+ (uri (git-reference
+ (url "https://sourceware.org/git/glibc.git")
+ (commit "0d7f1ed30969886c8dde62fbf7d2c79967d4bace")))
+ (file-name (git-file-name "glibc" "0d7f1ed30969886c8dde62fbf7d2c79967d4bace"))
+ (sha256
+ (base32
+ "0g5hryia5v1k0qx97qffgwzrz4lr4jw3s5kj04yllhswsxyjbic3"))
+ (patches (search-our-patches "glibc-ldd-x86_64.patch"
+ "glibc-versioned-locpath.patch"
+ "glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch"
+ "glibc-2.24-no-build-time-cxx-header-run.patch"))))))
+
+(define glibc-2.27/bitcoin-patched
+ (package-with-extra-patches glibc-2.27
+ (search-our-patches "glibc-2.27-riscv64-Use-__has_include__-to-include-asm-syscalls.h.patch")))
+
(packages->manifest
(append
(list ;; The Basics
@@ -606,7 +646,12 @@ inspecting signatures in Mach-O binaries.")
(make-nsis-with-sde-support nsis-x86_64)
osslsigncode))
((string-contains target "-linux-")
- (list (make-bitcoin-cross-toolchain target)))
+ (list (cond ((string-contains target "riscv64-")
+ (make-bitcoin-cross-toolchain target
+ #:base-libc glibc-2.27/bitcoin-patched
+ #:base-kernel-headers linux-libre-headers-4.19))
+ (else
+ (make-bitcoin-cross-toolchain target)))))
((string-contains target "darwin")
(list clang-toolchain-10 binutils imagemagick libtiff librsvg font-tuffy cmake xorriso python-signapple))
(else '())))))
diff --git a/contrib/guix/patches/binutils-mingw-w64-disable-flags.patch b/contrib/guix/patches/binutils-mingw-w64-disable-flags.patch
new file mode 100644
index 0000000000..8f88eb9dfd
--- /dev/null
+++ b/contrib/guix/patches/binutils-mingw-w64-disable-flags.patch
@@ -0,0 +1,171 @@
+Description: Add disable opposites to the security-related flags
+Author: Stephen Kitt <skitt@debian.org>
+
+This patch adds "no-" variants to disable the various security flags:
+"no-dynamicbase", "no-nxcompat", "no-high-entropy-va", "disable-reloc-section".
+
+--- a/ld/emultempl/pe.em
++++ b/ld/emultempl/pe.em
+@@ -259,9 +261,11 @@
+ (OPTION_ENABLE_LONG_SECTION_NAMES + 1)
+ /* DLLCharacteristics flags. */
+ #define OPTION_DYNAMIC_BASE (OPTION_DISABLE_LONG_SECTION_NAMES + 1)
+-#define OPTION_FORCE_INTEGRITY (OPTION_DYNAMIC_BASE + 1)
++#define OPTION_NO_DYNAMIC_BASE (OPTION_DYNAMIC_BASE + 1)
++#define OPTION_FORCE_INTEGRITY (OPTION_NO_DYNAMIC_BASE + 1)
+ #define OPTION_NX_COMPAT (OPTION_FORCE_INTEGRITY + 1)
+-#define OPTION_NO_ISOLATION (OPTION_NX_COMPAT + 1)
++#define OPTION_NO_NX_COMPAT (OPTION_NX_COMPAT + 1)
++#define OPTION_NO_ISOLATION (OPTION_NO_NX_COMPAT + 1)
+ #define OPTION_NO_SEH (OPTION_NO_ISOLATION + 1)
+ #define OPTION_NO_BIND (OPTION_NO_SEH + 1)
+ #define OPTION_WDM_DRIVER (OPTION_NO_BIND + 1)
+@@ -271,6 +275,7 @@
+ #define OPTION_NO_INSERT_TIMESTAMP (OPTION_INSERT_TIMESTAMP + 1)
+ #define OPTION_BUILD_ID (OPTION_NO_INSERT_TIMESTAMP + 1)
+ #define OPTION_ENABLE_RELOC_SECTION (OPTION_BUILD_ID + 1)
++#define OPTION_DISABLE_RELOC_SECTION (OPTION_ENABLE_RELOC_SECTION + 1)
+
+ static void
+ gld${EMULATION_NAME}_add_options
+@@ -342,8 +347,10 @@
+ {"enable-long-section-names", no_argument, NULL, OPTION_ENABLE_LONG_SECTION_NAMES},
+ {"disable-long-section-names", no_argument, NULL, OPTION_DISABLE_LONG_SECTION_NAMES},
+ {"dynamicbase",no_argument, NULL, OPTION_DYNAMIC_BASE},
++ {"no-dynamicbase", no_argument, NULL, OPTION_NO_DYNAMIC_BASE},
+ {"forceinteg", no_argument, NULL, OPTION_FORCE_INTEGRITY},
+ {"nxcompat", no_argument, NULL, OPTION_NX_COMPAT},
++ {"no-nxcompat", no_argument, NULL, OPTION_NO_NX_COMPAT},
+ {"no-isolation", no_argument, NULL, OPTION_NO_ISOLATION},
+ {"no-seh", no_argument, NULL, OPTION_NO_SEH},
+ {"no-bind", no_argument, NULL, OPTION_NO_BIND},
+@@ -351,6 +358,7 @@
+ {"tsaware", no_argument, NULL, OPTION_TERMINAL_SERVER_AWARE},
+ {"build-id", optional_argument, NULL, OPTION_BUILD_ID},
+ {"enable-reloc-section", no_argument, NULL, OPTION_ENABLE_RELOC_SECTION},
++ {"disable-reloc-section", no_argument, NULL, OPTION_DISABLE_RELOC_SECTION},
+ {NULL, no_argument, NULL, 0}
+ };
+
+@@ -485,9 +494,12 @@
+ in object files\n"));
+ fprintf (file, _(" --dynamicbase Image base address may be relocated using\n\
+ address space layout randomization (ASLR)\n"));
++ fprintf (file, _(" --no-dynamicbase Image base address may not be relocated\n"));
+ fprintf (file, _(" --enable-reloc-section Create the base relocation table\n"));
++ fprintf (file, _(" --disable-reloc-section Disable the base relocation table\n"));
+ fprintf (file, _(" --forceinteg Code integrity checks are enforced\n"));
+ fprintf (file, _(" --nxcompat Image is compatible with data execution prevention\n"));
++ fprintf (file, _(" --no-nxcompat Image is not compatible with data execution prevention\n"));
+ fprintf (file, _(" --no-isolation Image understands isolation but do not isolate the image\n"));
+ fprintf (file, _(" --no-seh Image does not use SEH. No SE handler may\n\
+ be called in this image\n"));
+@@ -862,12 +874,21 @@
+ case OPTION_ENABLE_RELOC_SECTION:
+ pe_dll_enable_reloc_section = 1;
+ break;
++ case OPTION_DISABLE_RELOC_SECTION:
++ pe_dll_enable_reloc_section = 0;
++ /* fall through */
++ case OPTION_NO_DYNAMIC_BASE:
++ pe_dll_characteristics &= ~IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE;
++ break;
+ case OPTION_FORCE_INTEGRITY:
+ pe_dll_characteristics |= IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY;
+ break;
+ case OPTION_NX_COMPAT:
+ pe_dll_characteristics |= IMAGE_DLL_CHARACTERISTICS_NX_COMPAT;
+ break;
++ case OPTION_NO_NX_COMPAT:
++ pe_dll_characteristics &= ~IMAGE_DLL_CHARACTERISTICS_NX_COMPAT;
++ break;
+ case OPTION_NO_ISOLATION:
+ pe_dll_characteristics |= IMAGE_DLLCHARACTERISTICS_NO_ISOLATION;
+ break;
+--- a/ld/emultempl/pep.em
++++ b/ld/emultempl/pep.em
+@@ -237,9 +240,12 @@
+ OPTION_ENABLE_LONG_SECTION_NAMES,
+ OPTION_DISABLE_LONG_SECTION_NAMES,
+ OPTION_HIGH_ENTROPY_VA,
++ OPTION_NO_HIGH_ENTROPY_VA,
+ OPTION_DYNAMIC_BASE,
++ OPTION_NO_DYNAMIC_BASE,
+ OPTION_FORCE_INTEGRITY,
+ OPTION_NX_COMPAT,
++ OPTION_NO_NX_COMPAT,
+ OPTION_NO_ISOLATION,
+ OPTION_NO_SEH,
+ OPTION_NO_BIND,
+@@ -248,7 +254,8 @@
+ OPTION_NO_INSERT_TIMESTAMP,
+ OPTION_TERMINAL_SERVER_AWARE,
+ OPTION_BUILD_ID,
+- OPTION_ENABLE_RELOC_SECTION
++ OPTION_ENABLE_RELOC_SECTION,
++ OPTION_DISABLE_RELOC_SECTION
+ };
+
+ static void
+@@ -315,9 +322,12 @@
+ {"enable-long-section-names", no_argument, NULL, OPTION_ENABLE_LONG_SECTION_NAMES},
+ {"disable-long-section-names", no_argument, NULL, OPTION_DISABLE_LONG_SECTION_NAMES},
+ {"high-entropy-va", no_argument, NULL, OPTION_HIGH_ENTROPY_VA},
++ {"no-high-entropy-va", no_argument, NULL, OPTION_NO_HIGH_ENTROPY_VA},
+ {"dynamicbase",no_argument, NULL, OPTION_DYNAMIC_BASE},
++ {"no-dynamicbase", no_argument, NULL, OPTION_NO_DYNAMIC_BASE},
+ {"forceinteg", no_argument, NULL, OPTION_FORCE_INTEGRITY},
+ {"nxcompat", no_argument, NULL, OPTION_NX_COMPAT},
++ {"no-nxcompat", no_argument, NULL, OPTION_NO_NX_COMPAT},
+ {"no-isolation", no_argument, NULL, OPTION_NO_ISOLATION},
+ {"no-seh", no_argument, NULL, OPTION_NO_SEH},
+ {"no-bind", no_argument, NULL, OPTION_NO_BIND},
+@@ -327,6 +337,7 @@
+ {"no-insert-timestamp", no_argument, NULL, OPTION_NO_INSERT_TIMESTAMP},
+ {"build-id", optional_argument, NULL, OPTION_BUILD_ID},
+ {"enable-reloc-section", no_argument, NULL, OPTION_ENABLE_RELOC_SECTION},
++ {"disable-reloc-section", no_argument, NULL, OPTION_DISABLE_RELOC_SECTION},
+ {NULL, no_argument, NULL, 0}
+ };
+
+@@ -448,11 +461,15 @@
+ in object files\n"));
+ fprintf (file, _(" --high-entropy-va Image is compatible with 64-bit address space\n\
+ layout randomization (ASLR)\n"));
++ fprintf (file, _(" --no-high-entropy-va Image is not compatible with 64-bit ASLR\n"));
+ fprintf (file, _(" --dynamicbase Image base address may be relocated using\n\
+ address space layout randomization (ASLR)\n"));
++ fprintf (file, _(" --no-dynamicbase Image base address may not be relocated\n"));
+ fprintf (file, _(" --enable-reloc-section Create the base relocation table\n"));
++ fprintf (file, _(" --disable-reloc-section Disable the base relocation table\n"));
+ fprintf (file, _(" --forceinteg Code integrity checks are enforced\n"));
+ fprintf (file, _(" --nxcompat Image is compatible with data execution prevention\n"));
++ fprintf (file, _(" --no-nxcompat Image is not compatible with data execution prevention\n"));
+ fprintf (file, _(" --no-isolation Image understands isolation but do not isolate the image\n"));
+ fprintf (file, _(" --no-seh Image does not use SEH; no SE handler may\n\
+ be called in this image\n"));
+@@ -809,12 +826,24 @@
+ case OPTION_ENABLE_RELOC_SECTION:
+ pep_dll_enable_reloc_section = 1;
+ break;
++ case OPTION_DISABLE_RELOC_SECTION:
++ pep_dll_enable_reloc_section = 0;
++ /* fall through */
++ case OPTION_NO_DYNAMIC_BASE:
++ pe_dll_characteristics &= ~IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE;
++ /* fall through */
++ case OPTION_NO_HIGH_ENTROPY_VA:
++ pe_dll_characteristics &= ~IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA;
++ break;
+ case OPTION_FORCE_INTEGRITY:
+ pe_dll_characteristics |= IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY;
+ break;
+ case OPTION_NX_COMPAT:
+ pe_dll_characteristics |= IMAGE_DLL_CHARACTERISTICS_NX_COMPAT;
+ break;
++ case OPTION_NO_NX_COMPAT:
++ pe_dll_characteristics &= ~IMAGE_DLL_CHARACTERISTICS_NX_COMPAT;
++ break;
+ case OPTION_NO_ISOLATION:
+ pe_dll_characteristics |= IMAGE_DLLCHARACTERISTICS_NO_ISOLATION;
+ break;
diff --git a/contrib/guix/patches/gcc-8-sort-libtool-find-output.patch b/contrib/guix/patches/gcc-8-sort-libtool-find-output.patch
index 1dfe3ba132..f327c464f3 100644
--- a/contrib/guix/patches/gcc-8-sort-libtool-find-output.patch
+++ b/contrib/guix/patches/gcc-8-sort-libtool-find-output.patch
@@ -1,3 +1,11 @@
+guix: repro: Sort find output in libtool for gcc-8
+
+Otherwise the resulting .a static libraries (e.g. libstdc++.a) will not
+be reproducible and end up making the Bitcoin binaries non-reproducible
+as well.
+
+See: https://reproducible-builds.org/docs/archives/#gnu-libtool
+
diff --git a/gcc/configure b/gcc/configure
index 97ba7d7d69c..e37a96f0c0c 100755
--- a/gcc/configure
diff --git a/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch
new file mode 100644
index 0000000000..5c4d0c6ebe
--- /dev/null
+++ b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch
@@ -0,0 +1,62 @@
+https://sourceware.org/git/?p=glibc.git;a=commit;h=a68ba2f3cd3cbe32c1f31e13c20ed13487727b32
+
+commit 6b02af31e9a721bb15a11380cd22d53b621711f8
+Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
+Date: Wed Oct 18 17:26:23 2017 +0100
+
+ [AARCH64] Rewrite elf_machine_load_address using _DYNAMIC symbol
+
+ This patch rewrites aarch64 elf_machine_load_address to use special _DYNAMIC
+ symbol instead of _dl_start.
+
+ The static address of _DYNAMIC symbol is stored in the first GOT entry.
+ Here is the change which makes this solution work (part of binutils 2.24):
+ https://sourceware.org/ml/binutils/2013-06/msg00248.html
+
+ i386, x86_64 targets use the same method to do this as well.
+
+ The original implementation relies on a trick that R_AARCH64_ABS32 relocation
+ being resolved at link time and the static address fits in the 32bits.
+ However, in LP64, normally, the address is defined to be 64 bit.
+
+ Here is the C version one which should be portable in all cases.
+
+ * sysdeps/aarch64/dl-machine.h (elf_machine_load_address): Use
+ _DYNAMIC symbol to calculate load address.
+
+diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
+index e86d8b5b63..5a5b8a5de5 100644
+--- a/sysdeps/aarch64/dl-machine.h
++++ b/sysdeps/aarch64/dl-machine.h
+@@ -49,26 +49,11 @@ elf_machine_load_address (void)
+ /* To figure out the load address we use the definition that for any symbol:
+ dynamic_addr(symbol) = static_addr(symbol) + load_addr
+
+- The choice of symbol is arbitrary. The static address we obtain
+- by constructing a non GOT reference to the symbol, the dynamic
+- address of the symbol we compute using adrp/add to compute the
+- symbol's address relative to the PC.
+- This depends on 32bit relocations being resolved at link time
+- and that the static address fits in the 32bits. */
+-
+- ElfW(Addr) static_addr;
+- ElfW(Addr) dynamic_addr;
+-
+- asm (" \n"
+-" adrp %1, _dl_start; \n"
+-" add %1, %1, #:lo12:_dl_start \n"
+-" ldr %w0, 1f \n"
+-" b 2f \n"
+-"1: \n"
+-" .word _dl_start \n"
+-"2: \n"
+- : "=r" (static_addr), "=r" (dynamic_addr));
+- return dynamic_addr - static_addr;
++ _DYNAMIC sysmbol is used here as its link-time address stored in
++ the special unrelocated first GOT entry. */
++
++ extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
++ return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
+ }
+
+ /* Set up the loaded object described by L so its unrelocated PLT
diff --git a/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch
new file mode 100644
index 0000000000..11fe7fdc99
--- /dev/null
+++ b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch
@@ -0,0 +1,100 @@
+https://sourceware.org/git/?p=glibc.git;a=commit;h=fc3e1337be1c6935ab58bd13520f97a535cf70cc
+
+commit dc23a45db566095e83ff0b7a57afc87fb5ca89a1
+Author: Florian Weimer <fweimer@redhat.com>
+Date: Wed Sep 21 10:45:32 2016 +0200
+
+ Avoid running $(CXX) during build to obtain header file paths
+
+ This reduces the build time somewhat and is particularly noticeable
+ during rebuilds with few code changes.
+
+diff --git a/Makerules b/Makerules
+index 7e4077ee50..c338850de5 100644
+--- a/Makerules
++++ b/Makerules
+@@ -121,14 +121,10 @@ ifneq (,$(CXX))
+ # will be used instead of /usr/include/stdlib.h and /usr/include/math.h.
+ before-compile := $(common-objpfx)cstdlib $(common-objpfx)cmath \
+ $(before-compile)
+-cstdlib=$(shell echo "\#include <cstdlib>" | $(CXX) -M -MP -x c++ - \
+- | sed -n "/cstdlib:/{s/:$$//;p}")
+-$(common-objpfx)cstdlib: $(cstdlib)
++$(common-objpfx)cstdlib: $(c++-cstdlib-header)
+ $(INSTALL_DATA) $< $@T
+ $(move-if-change) $@T $@
+-cmath=$(shell echo "\#include <cmath>" | $(CXX) -M -MP -x c++ - \
+- | sed -n "/cmath:/{s/:$$//;p}")
+-$(common-objpfx)cmath: $(cmath)
++$(common-objpfx)cmath: $(c++-cmath-header)
+ $(INSTALL_DATA) $< $@T
+ $(move-if-change) $@T $@
+ endif
+diff --git a/config.make.in b/config.make.in
+index 95c6f36876..04a8b3ed7f 100644
+--- a/config.make.in
++++ b/config.make.in
+@@ -45,6 +45,8 @@ defines = @DEFINES@
+ sysheaders = @sysheaders@
+ sysincludes = @SYSINCLUDES@
+ c++-sysincludes = @CXX_SYSINCLUDES@
++c++-cstdlib-header = @CXX_CSTDLIB_HEADER@
++c++-cmath-header = @CXX_CMATH_HEADER@
+ all-warnings = @all_warnings@
+ enable-werror = @enable_werror@
+
+diff --git a/configure b/configure
+index 17625e1041..6ff252744b 100755
+--- a/configure
++++ b/configure
+@@ -635,6 +635,8 @@ BISON
+ INSTALL_INFO
+ PERL
+ BASH_SHELL
++CXX_CMATH_HEADER
++CXX_CSTDLIB_HEADER
+ CXX_SYSINCLUDES
+ SYSINCLUDES
+ AUTOCONF
+@@ -5054,6 +5056,18 @@ fi
+
+
+
++# Obtain some C++ header file paths. This is used to make a local
++# copy of those headers in Makerules.
++if test -n "$CXX"; then
++ find_cxx_header () {
++ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}"
++ }
++ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)"
++ CXX_CMATH_HEADER="$(find_cxx_header cmath)"
++fi
++
++
++
+ # Test if LD_LIBRARY_PATH contains the notation for the current directory
+ # since this would lead to problems installing/building glibc.
+ # LD_LIBRARY_PATH contains the current directory if one of the following
+diff --git a/configure.ac b/configure.ac
+index 33bcd62180..9938ab0dc2 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1039,6 +1039,18 @@ fi
+ AC_SUBST(SYSINCLUDES)
+ AC_SUBST(CXX_SYSINCLUDES)
+
++# Obtain some C++ header file paths. This is used to make a local
++# copy of those headers in Makerules.
++if test -n "$CXX"; then
++ find_cxx_header () {
++ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}"
++ }
++ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)"
++ CXX_CMATH_HEADER="$(find_cxx_header cmath)"
++fi
++AC_SUBST(CXX_CSTDLIB_HEADER)
++AC_SUBST(CXX_CMATH_HEADER)
++
+ # Test if LD_LIBRARY_PATH contains the notation for the current directory
+ # since this would lead to problems installing/building glibc.
+ # LD_LIBRARY_PATH contains the current directory if one of the following
diff --git a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include__-to-include-asm-syscalls.h.patch b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include__-to-include-asm-syscalls.h.patch
new file mode 100644
index 0000000000..d6217157ee
--- /dev/null
+++ b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include__-to-include-asm-syscalls.h.patch
@@ -0,0 +1,72 @@
+https://sourceware.org/git/?p=glibc.git;a=commit;h=0b9c84906f653978fb8768c7ebd0ee14a47e662e
+
+From 562c52cc81a4e456a62e6455feb32732049e9070 Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" <hjl.tools@gmail.com>
+Date: Mon, 31 Dec 2018 09:26:42 -0800
+Subject: [PATCH] riscv: Use __has_include__ to include <asm/syscalls.h> [BZ
+ #24022]
+
+<asm/syscalls.h> has been removed by
+
+commit 27f8899d6002e11a6e2d995e29b8deab5aa9cc25
+Author: David Abdurachmanov <david.abdurachmanov@gmail.com>
+Date: Thu Nov 8 20:02:39 2018 +0100
+
+ riscv: add asm/unistd.h UAPI header
+
+ Marcin Juszkiewicz reported issues while generating syscall table for riscv
+ using 4.20-rc1. The patch refactors our unistd.h files to match some other
+ architectures.
+
+ - Add asm/unistd.h UAPI header, which has __ARCH_WANT_NEW_STAT only for 64-bit
+ - Remove asm/syscalls.h UAPI header and merge to asm/unistd.h
+ - Adjust kernel asm/unistd.h
+
+ So now asm/unistd.h UAPI header should show all syscalls for riscv.
+
+<asm/syscalls.h> may be restored by
+
+Subject: [PATCH] riscv: restore asm/syscalls.h UAPI header
+Date: Tue, 11 Dec 2018 09:09:35 +0100
+
+UAPI header asm/syscalls.h was merged into UAPI asm/unistd.h header,
+which did resolve issue with missing syscalls macros resulting in
+glibc (2.28) build failure. It also broke glibc in a different way:
+asm/syscalls.h is being used by glibc. I noticed this while doing
+Fedora 30/Rawhide mass rebuild.
+
+The patch returns asm/syscalls.h header and incl. it into asm/unistd.h.
+I plan to send a patch to glibc to use asm/unistd.h instead of
+asm/syscalls.h
+
+In the meantime, we use __has_include__, which was added to GCC 5, to
+check if <asm/syscalls.h> exists before including it. Tested with
+build-many-glibcs.py for riscv against kernel 4.19.12 and 4.20-rc7.
+
+ [BZ #24022]
+ * sysdeps/unix/sysv/linux/riscv/flush-icache.c: Check if
+ <asm/syscalls.h> exists with __has_include__ before including it.
+---
+ sysdeps/unix/sysv/linux/riscv/flush-icache.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/sysdeps/unix/sysv/linux/riscv/flush-icache.c b/sysdeps/unix/sysv/linux/riscv/flush-icache.c
+index d612ef4c6c..0b2042620b 100644
+--- a/sysdeps/unix/sysv/linux/riscv/flush-icache.c
++++ b/sysdeps/unix/sysv/linux/riscv/flush-icache.c
+@@ -21,7 +21,11 @@
+ #include <stdlib.h>
+ #include <atomic.h>
+ #include <sys/cachectl.h>
+-#include <asm/syscalls.h>
++#if __has_include__ (<asm/syscalls.h>)
++# include <asm/syscalls.h>
++#else
++# include <asm/unistd.h>
++#endif
+
+ typedef int (*func_type) (void *, void *, unsigned long int);
+
+--
+2.31.1
+
diff --git a/contrib/guix/patches/glibc-ldd-x86_64.patch b/contrib/guix/patches/glibc-ldd-x86_64.patch
new file mode 100644
index 0000000000..b1b6d5a548
--- /dev/null
+++ b/contrib/guix/patches/glibc-ldd-x86_64.patch
@@ -0,0 +1,10 @@
+By default, 'RTDLLIST' in 'ldd' refers to 'lib64/ld-linux-x86-64.so', whereas
+it's in 'lib/' for us. This patch fixes that.
+
+--- glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2012-12-25 04:02:13.000000000 +0100
++++ glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2013-09-15 23:08:03.000000000 +0200
+@@ -1,3 +1,3 @@
+ /LD_TRACE_LOADED_OBJECTS=1/a\
+ add_env="$add_env LD_LIBRARY_VERSION=\\$verify_out"
+-s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \264\4-x86-64\6 \2x32\4-x32\6"_
++s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \2\4-x86-64\6 \2x32\4-x32\6"_
diff --git a/contrib/guix/patches/glibc-versioned-locpath.patch b/contrib/guix/patches/glibc-versioned-locpath.patch
new file mode 100644
index 0000000000..bc7652127f
--- /dev/null
+++ b/contrib/guix/patches/glibc-versioned-locpath.patch
@@ -0,0 +1,240 @@
+The format of locale data can be incompatible between libc versions, and
+loading incompatible data can lead to 'setlocale' returning EINVAL at best
+or triggering an assertion failure at worst. See
+https://lists.gnu.org/archive/html/guix-devel/2015-09/msg00717.html
+for background information.
+
+To address that, this patch changes libc to honor a new 'GUIX_LOCPATH'
+variable, and to look for locale data in version-specific sub-directories of
+that variable. So, if GUIX_LOCPATH=/foo:/bar, locale data is searched for in
+/foo/X.Y and /bar/X.Y, where X.Y is the libc version number.
+
+That way, a single 'GUIX_LOCPATH' setting can work even if different libc
+versions coexist on the system.
+
+--- a/locale/newlocale.c
++++ b/locale/newlocale.c
+@@ -30,6 +30,7 @@
+ /* Lock for protecting global data. */
+ __libc_rwlock_define (extern , __libc_setlocale_lock attribute_hidden)
+
++extern error_t compute_locale_search_path (char **, size_t *);
+
+ /* Use this when we come along an error. */
+ #define ERROR_RETURN \
+@@ -48,7 +49,6 @@ __newlocale (int category_mask, const char *locale, __locale_t base)
+ __locale_t result_ptr;
+ char *locale_path;
+ size_t locale_path_len;
+- const char *locpath_var;
+ int cnt;
+ size_t names_len;
+
+@@ -102,17 +102,8 @@ __newlocale (int category_mask, const char *locale, __locale_t base)
+ locale_path = NULL;
+ locale_path_len = 0;
+
+- locpath_var = getenv ("LOCPATH");
+- if (locpath_var != NULL && locpath_var[0] != '\0')
+- {
+- if (__argz_create_sep (locpath_var, ':',
+- &locale_path, &locale_path_len) != 0)
+- return NULL;
+-
+- if (__argz_add_sep (&locale_path, &locale_path_len,
+- _nl_default_locale_path, ':') != 0)
+- return NULL;
+- }
++ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0)
++ return NULL;
+
+ /* Get the names for the locales we are interested in. We either
+ allow a composite name or a single name. */
+diff --git a/locale/setlocale.c b/locale/setlocale.c
+index ead030d..0c0e314 100644
+--- a/locale/setlocale.c
++++ b/locale/setlocale.c
+@@ -215,12 +215,65 @@ setdata (int category, struct __locale_data *data)
+ }
+ }
+
++/* Return in *LOCALE_PATH and *LOCALE_PATH_LEN the locale data search path as
++ a colon-separated list. Return ENOMEN on error, zero otherwise. */
++error_t
++compute_locale_search_path (char **locale_path, size_t *locale_path_len)
++{
++ char* guix_locpath_var = getenv ("GUIX_LOCPATH");
++ char *locpath_var = getenv ("LOCPATH");
++
++ if (guix_locpath_var != NULL && guix_locpath_var[0] != '\0')
++ {
++ /* Entries in 'GUIX_LOCPATH' take precedence over 'LOCPATH'. These
++ entries are systematically prefixed with "/X.Y" where "X.Y" is the
++ libc version. */
++ if (__argz_create_sep (guix_locpath_var, ':',
++ locale_path, locale_path_len) != 0
++ || __argz_suffix_entries (locale_path, locale_path_len,
++ "/" VERSION) != 0)
++ goto bail_out;
++ }
++
++ if (locpath_var != NULL && locpath_var[0] != '\0')
++ {
++ char *reg_locale_path = NULL;
++ size_t reg_locale_path_len = 0;
++
++ if (__argz_create_sep (locpath_var, ':',
++ &reg_locale_path, &reg_locale_path_len) != 0)
++ goto bail_out;
++
++ if (__argz_append (locale_path, locale_path_len,
++ reg_locale_path, reg_locale_path_len) != 0)
++ goto bail_out;
++
++ free (reg_locale_path);
++ }
++
++ if (*locale_path != NULL)
++ {
++ /* Append the system default locale directory. */
++ if (__argz_add_sep (locale_path, locale_path_len,
++ _nl_default_locale_path, ':') != 0)
++ goto bail_out;
++ }
++
++ return 0;
++
++ bail_out:
++ free (*locale_path);
++ *locale_path = NULL;
++ *locale_path_len = 0;
++
++ return ENOMEM;
++}
++
+ char *
+ setlocale (int category, const char *locale)
+ {
+ char *locale_path;
+ size_t locale_path_len;
+- const char *locpath_var;
+ char *composite;
+
+ /* Sanity check for CATEGORY argument. */
+@@ -251,17 +304,10 @@ setlocale (int category, const char *locale)
+ locale_path = NULL;
+ locale_path_len = 0;
+
+- locpath_var = getenv ("LOCPATH");
+- if (locpath_var != NULL && locpath_var[0] != '\0')
++ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0)
+ {
+- if (__argz_create_sep (locpath_var, ':',
+- &locale_path, &locale_path_len) != 0
+- || __argz_add_sep (&locale_path, &locale_path_len,
+- _nl_default_locale_path, ':') != 0)
+- {
+- __libc_rwlock_unlock (__libc_setlocale_lock);
+- return NULL;
+- }
++ __libc_rwlock_unlock (__libc_setlocale_lock);
++ return NULL;
+ }
+
+ if (category == LC_ALL)
+diff --git a/string/Makefile b/string/Makefile
+index 8424a61..f925503 100644
+--- a/string/Makefile
++++ b/string/Makefile
+@@ -38,7 +38,7 @@ routines := strcat strchr strcmp strcoll strcpy strcspn \
+ swab strfry memfrob memmem rawmemchr strchrnul \
+ $(addprefix argz-,append count create ctsep next \
+ delete extract insert stringify \
+- addsep replace) \
++ addsep replace suffix) \
+ envz basename \
+ strcoll_l strxfrm_l string-inlines memrchr \
+ xpg-strerror strerror_l
+diff --git a/string/argz-suffix.c b/string/argz-suffix.c
+new file mode 100644
+index 0000000..505b0f2
+--- /dev/null
++++ b/string/argz-suffix.c
+@@ -0,0 +1,56 @@
++/* Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Ludovic Courtès <ludo@gnu.org>.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <argz.h>
++#include <errno.h>
++#include <stdlib.h>
++#include <string.h>
++
++
++error_t
++__argz_suffix_entries (char **argz, size_t *argz_len, const char *suffix)
++
++{
++ size_t suffix_len = strlen (suffix);
++ size_t count = __argz_count (*argz, *argz_len);
++ size_t new_argz_len = *argz_len + count * suffix_len;
++ char *new_argz = malloc (new_argz_len);
++
++ if (new_argz)
++ {
++ char *p = new_argz, *entry;
++
++ for (entry = *argz;
++ entry != NULL;
++ entry = argz_next (*argz, *argz_len, entry))
++ {
++ p = stpcpy (p, entry);
++ p = stpcpy (p, suffix);
++ p++;
++ }
++
++ free (*argz);
++ *argz = new_argz;
++ *argz_len = new_argz_len;
++
++ return 0;
++ }
++ else
++ return ENOMEM;
++}
++weak_alias (__argz_suffix_entries, argz_suffix_entries)
+diff --git a/string/argz.h b/string/argz.h
+index bb62a31..d276a35 100644
+--- a/string/argz.h
++++ b/string/argz.h
+@@ -134,6 +134,16 @@ extern error_t argz_replace (char **__restrict __argz,
+ const char *__restrict __str,
+ const char *__restrict __with,
+ unsigned int *__restrict __replace_count);
++
++/* Suffix each entry of ARGZ & ARGZ_LEN with SUFFIX. Return 0 on success,
++ and ENOMEN if memory cannot be allocated. */
++extern error_t __argz_suffix_entries (char **__restrict __argz,
++ size_t *__restrict __argz_len,
++ const char *__restrict __suffix);
++extern error_t argz_suffix_entries (char **__restrict __argz,
++ size_t *__restrict __argz_len,
++ const char *__restrict __suffix);
++
+
+ /* Returns the next entry in ARGZ & ARGZ_LEN after ENTRY, or NULL if there
+ are no more. If entry is NULL, then the first entry is returned. This
diff --git a/contrib/guix/patches/nsis-SConstruct-sde-support.patch b/contrib/guix/patches/nsis-SConstruct-sde-support.patch
index 5edf1b7c8e..f58406a7a0 100644
--- a/contrib/guix/patches/nsis-SConstruct-sde-support.patch
+++ b/contrib/guix/patches/nsis-SConstruct-sde-support.patch
@@ -1,3 +1,6 @@
+https://github.com/kichik/nsis/pull/13
+https://sourceforge.net/p/nsis/code/7248/
+
diff --git a/SConstruct b/SConstruct
index e8252c9..41786f2 100755
--- a/SConstruct
diff --git a/contrib/install_db4.sh b/contrib/install_db4.sh
index 4037936404..dd4d862dee 100755
--- a/contrib/install_db4.sh
+++ b/contrib/install_db4.sh
@@ -221,10 +221,10 @@ EOF
# The packaged config.guess and config.sub are ancient (2009) and can cause build issues.
# Replace them with modern versions.
# See https://github.com/bitcoin/bitcoin/issues/16064
-CONFIG_GUESS_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=55eaf3e779455c4e5cc9f82efb5278be8f8f900b'
-CONFIG_GUESS_HASH='2d1ff7bca773d2ec3c6217118129220fa72d8adda67c7d2bf79994b3129232c1'
-CONFIG_SUB_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=55eaf3e779455c4e5cc9f82efb5278be8f8f900b'
-CONFIG_SUB_HASH='3a4befde9bcdf0fdb2763fc1bfa74e8696df94e1ad7aac8042d133c8ff1d2e32'
+CONFIG_GUESS_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f'
+CONFIG_GUESS_HASH='c8f530e01840719871748a8071113435bdfdf75b74c57e78e47898edea8754ae'
+CONFIG_SUB_URL='https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=4550d2f15b3a7ce2451c1f29500b9339430c877f'
+CONFIG_SUB_HASH='3969f7d5f6967ccc6f792401b8ef3916a1d1b1d0f0de5a4e354c95addb8b800e'
rm -f "dist/config.guess"
rm -f "dist/config.sub"
diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py
index 73f54cd488..9a8bcc57a5 100755
--- a/contrib/linearize/linearize-data.py
+++ b/contrib/linearize/linearize-data.py
@@ -17,7 +17,6 @@ import datetime
import time
import glob
from collections import namedtuple
-from binascii import unhexlify
settings = {}
@@ -332,7 +331,7 @@ if __name__ == '__main__':
settings['max_out_sz'] = int(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
- settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
+ settings['netmagic'] = bytes.fromhex(settings['netmagic'])
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
settings['debug_output'] = settings['debug_output'].lower()
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index 21f6ba2eb3..a685aac1c0 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -16,7 +16,10 @@ Our current macOS SDK
(`Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`) can be
extracted from
[Xcode_12.1.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
-An Apple ID is needed to download this.
+Alternatively, after logging in to your account, go to 'Downloads', then 'More',
+and look for [`Xcode_12.1`](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
+An Apple ID and cookies enabled for the hostname are needed to download this.
+The `sha256sum` of the archive should be `612443b1894b39368a596ea1607f30cbb0481ad44d5e29c75edb71a6d2cf050f`.
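+
+If you want to double-check the download before extracting it (assuming the
+checksum above refers to the `Xcode_12.1.xip` file saved under its default
+name), something like the following can be used:
+
+```sh
+echo "612443b1894b39368a596ea1607f30cbb0481ad44d5e29c75edb71a6d2cf050f  Xcode_12.1.xip" | sha256sum -c
+```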
After Xcode version 7.x, Apple started shipping the `Xcode.app` in a `.xip`
archive. This makes the SDK less-trivial to extract on non-macOS machines. One
@@ -76,7 +79,7 @@ and its `libLTO.so` rather than those from `llvmgcc`, as it was originally done
To complicate things further, all builds must target an Apple SDK. These SDKs are free to
download, but not redistributable. To obtain it, register for an Apple Developer Account,
-then download [Xcode_11.3.1](https://download.developer.apple.com/Developer_Tools/Xcode_11.3.1/Xcode_11.3.1.xip).
+then download [Xcode_12.1](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
This file is many gigabytes in size, but most (but not all) of what we need is
contained only in a single directory:
@@ -87,9 +90,9 @@ Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
See the SDK Extraction notes above for how to obtain it.
-The Gitian descriptors build 2 sets of files: Linux tools, then Apple binaries which are
+The Guix process builds 2 sets of files: Linux tools, then Apple binaries which are
created using these tools. The build process has been designed to avoid including the
-SDK's files in Gitian's outputs. All interim tarballs are fully deterministic and may be freely
+SDK's files in Guix's outputs. All interim tarballs are fully deterministic and may be freely
redistributed.
[`xorrisofs`](https://www.gnu.org/software/xorriso/) is used to create the DMG.
@@ -110,11 +113,11 @@ order to satisfy the new Gatekeeper requirements. Because this private key canno
shared, we'll have to be a bit creative in order for the build process to remain somewhat
deterministic. Here's how it works:
-- Builders use Gitian to create an unsigned release. This outputs an unsigned DMG which
+- Builders use Guix to create an unsigned release. This outputs an unsigned DMG which
users may choose to bless and run. It also outputs an unsigned app structure in the form
of a tarball, which also contains all of the tools that have been previously (deterministically)
built in order to create a final DMG.
- The Apple keyholder uses this unsigned app to create a detached signature, using the
script that is also included there. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs).
-- Builders feed the unsigned app + detached signature back into Gitian. It uses the
+- Builders feed the unsigned app + detached signature back into Guix. It uses the
pre-built tools to recombine the pieces into a deterministic DMG.
diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus
index 9bf3305288..055a932eee 100755
--- a/contrib/macdeploy/macdeployqtplus
+++ b/contrib/macdeploy/macdeployqtplus
@@ -16,7 +16,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
-import plistlib
import sys, re, os, shutil, stat, os.path
from argparse import ArgumentParser
from ds_store import DSStore
@@ -53,7 +52,7 @@ class FrameworkInfo(object):
return False
def __str__(self):
- return f""" Framework name: {frameworkName}
+ return f""" Framework name: {self.frameworkName}
Framework directory: {self.frameworkDirectory}
Framework path: {self.frameworkPath}
Binary name: {self.binaryName}
@@ -85,8 +84,8 @@ class FrameworkInfo(object):
if line == "":
return None
- # Don't deploy system libraries (exception for libQtuitools and libQtlucene).
- if line.startswith("/System/Library/") or line.startswith("@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line):
+ # Don't deploy system libraries
+ if line.startswith("/System/Library/") or line.startswith("@executable_path") or line.startswith("/usr/lib/"):
return None
m = cls.reOLine.match(line)
@@ -287,14 +286,6 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional
if verbose:
print("Copied Contents:", fromContentsDir)
print(" to:", toContentsDir)
- elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout)
- qtMenuNibSourcePath = os.path.join(framework.frameworkDirectory, "Resources", "qt_menu.nib")
- qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib")
- if os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath):
- shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True)
- if verbose:
- print("Copied for libQtGui:", qtMenuNibSourcePath)
- print(" to:", qtMenuNibDestinationPath)
return toPath
@@ -351,115 +342,20 @@ def deployFrameworksForAppBundle(applicationBundle: ApplicationBundleInfo, strip
return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose)
def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: DeploymentInfo, strip: bool, verbose: int):
- # Lookup available plugins, exclude unneeded
plugins = []
if deploymentInfo.pluginPath is None:
return
for dirpath, dirnames, filenames in os.walk(deploymentInfo.pluginPath):
pluginDirectory = os.path.relpath(dirpath, deploymentInfo.pluginPath)
- if pluginDirectory == "designer":
- # Skip designer plugins
- continue
- elif pluginDirectory == "printsupport":
- # Skip printsupport plugins
- continue
- elif pluginDirectory == "imageformats":
- # Skip imageformats plugins
+
+ if pluginDirectory not in ['styles', 'platforms']:
continue
- elif pluginDirectory == "sqldrivers":
- # Deploy the sql plugins only if QtSql is in use
- if not deploymentInfo.usesFramework("QtSql"):
- continue
- elif pluginDirectory == "script":
- # Deploy the script plugins only if QtScript is in use
- if not deploymentInfo.usesFramework("QtScript"):
- continue
- elif pluginDirectory == "qmltooling" or pluginDirectory == "qml1tooling":
- # Deploy the qml plugins only if QtDeclarative is in use
- if not deploymentInfo.usesFramework("QtDeclarative"):
- continue
- elif pluginDirectory == "bearer":
- # Deploy the bearer plugins only if QtNetwork is in use
- if not deploymentInfo.usesFramework("QtNetwork"):
- continue
- elif pluginDirectory == "position":
- # Deploy the position plugins only if QtPositioning is in use
- if not deploymentInfo.usesFramework("QtPositioning"):
- continue
- elif pluginDirectory == "sensors" or pluginDirectory == "sensorgestures":
- # Deploy the sensor plugins only if QtSensors is in use
- if not deploymentInfo.usesFramework("QtSensors"):
- continue
- elif pluginDirectory == "audio" or pluginDirectory == "playlistformats":
- # Deploy the audio plugins only if QtMultimedia is in use
- if not deploymentInfo.usesFramework("QtMultimedia"):
- continue
- elif pluginDirectory == "mediaservice":
- # Deploy the mediaservice plugins only if QtMultimediaWidgets is in use
- if not deploymentInfo.usesFramework("QtMultimediaWidgets"):
- continue
- elif pluginDirectory == "canbus":
- # Deploy the canbus plugins only if QtSerialBus is in use
- if not deploymentInfo.usesFramework("QtSerialBus"):
- continue
- elif pluginDirectory == "webview":
- # Deploy the webview plugins only if QtWebView is in use
- if not deploymentInfo.usesFramework("QtWebView"):
- continue
- elif pluginDirectory == "gamepads":
- # Deploy the webview plugins only if QtGamepad is in use
- if not deploymentInfo.usesFramework("QtGamepad"):
- continue
- elif pluginDirectory == "geoservices":
- # Deploy the webview plugins only if QtLocation is in use
- if not deploymentInfo.usesFramework("QtLocation"):
- continue
- elif pluginDirectory == "texttospeech":
- # Deploy the texttospeech plugins only if QtTextToSpeech is in use
- if not deploymentInfo.usesFramework("QtTextToSpeech"):
- continue
- elif pluginDirectory == "virtualkeyboard":
- # Deploy the virtualkeyboard plugins only if QtVirtualKeyboard is in use
- if not deploymentInfo.usesFramework("QtVirtualKeyboard"):
- continue
- elif pluginDirectory == "sceneparsers":
- # Deploy the virtualkeyboard plugins only if Qt3DCore is in use
- if not deploymentInfo.usesFramework("Qt3DCore"):
- continue
- elif pluginDirectory == "renderplugins":
- # Deploy the renderplugins plugins only if Qt3DCore is in use
- if not deploymentInfo.usesFramework("Qt3DCore"):
- continue
- elif pluginDirectory == "geometryloaders":
- # Deploy the geometryloaders plugins only if Qt3DCore is in use
- if not deploymentInfo.usesFramework("Qt3DCore"):
- continue
for pluginName in filenames:
pluginPath = os.path.join(pluginDirectory, pluginName)
- if pluginName.endswith("_debug.dylib"):
- # Skip debug plugins
+
+ if pluginName.split('.')[0] not in ['libqminimal', 'libqcocoa', 'libqmacstyle']:
continue
- elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib":
- # Deploy the svg plugins only if QtSvg is in use
- if not deploymentInfo.usesFramework("QtSvg"):
- continue
- elif pluginPath == "accessible/libqtaccessiblecompatwidgets.dylib":
- # Deploy accessibility for Qt3Support only if the Qt3Support is in use
- if not deploymentInfo.usesFramework("Qt3Support"):
- continue
- elif pluginPath == "graphicssystems/libqglgraphicssystem.dylib":
- # Deploy the opengl graphicssystem plugin only if QtOpenGL is in use
- if not deploymentInfo.usesFramework("QtOpenGL"):
- continue
- elif pluginPath == "accessible/libqtaccessiblequick.dylib":
- # Deploy the accessible qtquick plugin only if QtQuick is in use
- if not deploymentInfo.usesFramework("QtQuick"):
- continue
- elif pluginPath == "platforminputcontexts/libqtvirtualkeyboardplugin.dylib":
- # Deploy the virtualkeyboardplugin plugin only if QtVirtualKeyboard is in use
- if not deploymentInfo.usesFramework("QtVirtualKeyboard"):
- continue
plugins.append((pluginDirectory, pluginName))
@@ -527,6 +423,9 @@ if os.path.exists(appname + ".dmg"):
print("+ Removing existing DMG +")
os.unlink(appname + ".dmg")
+if os.path.exists(appname + ".temp.dmg"):
+ os.unlink(appname + ".temp.dmg")
+
# ------------------------------------------------
target = os.path.join("dist", "Bitcoin-Qt.app")
diff --git a/contrib/seeds/nodes_main.txt b/contrib/seeds/nodes_main.txt
index a62150a930..b9dfdb4b0a 100644
--- a/contrib/seeds/nodes_main.txt
+++ b/contrib/seeds/nodes_main.txt
@@ -677,12 +677,20 @@ tddeij4qigtjr6jfnrmq6btnirmq5msgwcsdpcdjr7atftm7cxlqztid.onion:8333
vi5bnbxkleeqi6hfccjochnn65lcxlfqs4uwgmhudph554zibiusqnad.onion:8333
xqt25cobm5zqucac3634zfght72he6u3eagfyej5ellbhcdgos7t2had.onion:8333
-# manually added 2021-05 for minimal i2p bootstrap support
-72l3ucjkuscrbiiepoehuwqgknyzgo7zuix5ty4puwrkyhtmnsga.b32.i2p:8333
-c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:8333
-gehtac45oaghz54ypyopim64mql7oad2bqclla74l6tfeolzmodq.b32.i2p:8333
-h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:8333
-hnbbyjpxx54623l555sta7pocy3se4sdgmuebi5k6reesz5rjp6q.b32.i2p:8333
-pjs7or2ctvteeo5tu4bwyrtydeuhqhvdprtujn4daxr75jpebjxa.b32.i2p:8333
-wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:8333
-zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:8333
+# manually added 2021-08 for minimal i2p bootstrap support
+4hllr6w55mbtemb3ebvlzl4zj6qke4si7zcob5qdyg63mjgq624a.b32.i2p:0
+6s33jtpvwzkiej3nff5qm72slgqljxhxn62hdt6m7nvynqsxqdda.b32.i2p:0
+a5qsnv3maw77mlmmzlcglu6twje6ttctd3fhpbfwcbpmewx6fczq.b32.i2p:0
+bitcornrd36coazsbzsz4pdebyzvaplmsalq4kpoljmn6cg6x5zq.b32.i2p:0
+c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:0
+dhtq2p76tyhi442aidb3vd2bv7yxxjuddpb2jydnnrl2ons5bhha.b32.i2p:0
+gehtac45oaghz54ypyopim64mql7oad2bqclla74l6tfeolzmodq.b32.i2p:0
+h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:0
+hnbbyjpxx54623l555sta7pocy3se4sdgmuebi5k6reesz5rjp6q.b32.i2p:0
+i3hcdakiz2tyvggkwefvdjoi7444kgvd2mbdfizjvv43q7zukezq.b32.i2p:0
+jz3s4eurm5vzjresf4mwo7oni4bk36daolwxh4iqtewakylgkxmq.b32.i2p:0
+kokkmpquqlkptu5hkmzqlttsmtwxicldr4so7wqsufk6bwf32nma.b32.i2p:0
+kvrde7mcgjhz3xzeltwy4gs2rxdfbnbs2wc67mh2pt43wjmjnmbq.b32.i2p:0
+shh2ewyegnuwnmdse5kl5toybdvzkvk2yj4zcowz6iwhhh3ykdfa.b32.i2p:0
+wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:0
+zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:0
diff --git a/contrib/signet/getcoins.py b/contrib/signet/getcoins.py
index 691f0bb1b6..dc203f1254 100755
--- a/contrib/signet/getcoins.py
+++ b/contrib/signet/getcoins.py
@@ -5,32 +5,64 @@
import argparse
import subprocess
-import requests
import sys
+import requests
+
+DEFAULT_GLOBAL_FAUCET = 'https://signetfaucet.com/claim'
+GLOBAL_FIRST_BLOCK_HASH = '00000086d6b2636cb2a392d45edc4ec544a10024d30141c9adf4bfd9de533b53'
parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing bitcoin-cli arguments.')
parser.add_argument('-c', '--cmd', dest='cmd', default='bitcoin-cli', help='bitcoin-cli command to use')
-parser.add_argument('-f', '--faucet', dest='faucet', default='https://signetfaucet.com/claim', help='URL of the faucet')
+parser.add_argument('-f', '--faucet', dest='faucet', default=DEFAULT_GLOBAL_FAUCET, help='URL of the faucet')
parser.add_argument('-a', '--addr', dest='addr', default='', help='Bitcoin address to which the faucet should send')
parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any')
parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to bitcoin-cli (default: -signet)')
args = parser.parse_args()
-if args.addr == '':
- if args.bitcoin_cli_args == []:
- args.bitcoin_cli_args = ['-signet']
- # get address for receiving coins
+if args.bitcoin_cli_args == []:
+ args.bitcoin_cli_args = ['-signet']
+
+
+def bitcoin_cli(rpc_command_and_params):
+ argv = [args.cmd] + args.bitcoin_cli_args + rpc_command_and_params
try:
- args.addr = subprocess.check_output([args.cmd] + args.bitcoin_cli_args + ['getnewaddress', 'faucet', 'bech32']).strip()
+ return subprocess.check_output(argv).strip().decode()
except FileNotFoundError:
print('The binary', args.cmd, 'could not be found.')
- exit()
+ exit(1)
+ except subprocess.CalledProcessError:
+ cmdline = ' '.join(argv)
+ print(f'-----\nError while calling "{cmdline}" (see output above).')
+ exit(1)
+
+
+if args.faucet.lower() == DEFAULT_GLOBAL_FAUCET:
+ # Get the hash of the block at height 1 of the currently active signet chain
+ curr_signet_hash = bitcoin_cli(['getblockhash', '1'])
+ if curr_signet_hash != GLOBAL_FIRST_BLOCK_HASH:
+        print('The global faucet cannot be used with a custom Signet network. Please use the global signet or set up your custom faucet to use this functionality.\n')
+ exit(1)
+
+if args.addr == '':
+ # get address for receiving coins
+ args.addr = bitcoin_cli(['getnewaddress', 'faucet', 'bech32'])
data = {'address': args.addr, 'password': args.password}
try:
res = requests.post(args.faucet, data=data)
except:
print('Unexpected error when contacting faucet:', sys.exc_info()[0])
- exit()
-print(res.text)
+ exit(1)
+
+# Display the output as per the returned status code
+if res:
+    # When the status code is between 200 and 400, i.e. successful
+    print(res.text)
+elif res.status_code == 404:
+    print('The specified faucet URL does not exist. Please check the URL for typos or server issues.')
+elif res.status_code == 429:
+    print('The script does not allow for repeated transactions as the global faucet is rate-limited to 1 request/IP/day. You can access the faucet website to get more coins manually.')
+else:
+    print(f'Returned Error Code {res.status_code}\n{res.text}\n')
+    print('Please check the provided arguments for validity and any possible typos.')
diff --git a/contrib/signet/miner b/contrib/signet/miner
index 78e1fa5ecd..012bd6cc31 100755
--- a/contrib/signet/miner
+++ b/contrib/signet/miner
@@ -15,7 +15,6 @@ import sys
import time
import subprocess
-from binascii import unhexlify
from io import BytesIO
PATH_BASE_CONTRIB_SIGNET = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
@@ -202,7 +201,7 @@ def finish_block(block, signet_solution, grind_cmd):
def generate_psbt(tmpl, reward_spk, *, blocktime=None):
signet_spk = tmpl["signet_challenge"]
- signet_spk_bin = unhexlify(signet_spk)
+ signet_spk_bin = bytes.fromhex(signet_spk)
cbtx = create_coinbase(height=tmpl["height"], value=tmpl["coinbasevalue"], spk=reward_spk)
cbtx.vin[0].nSequence = 2**32-2
@@ -258,7 +257,7 @@ def get_reward_addr_spk(args, height):
return args.address, args.reward_spk
reward_addr = get_reward_address(args, height)
- reward_spk = unhexlify(json.loads(args.bcli("getaddressinfo", reward_addr))["scriptPubKey"])
+ reward_spk = bytes.fromhex(json.loads(args.bcli("getaddressinfo", reward_addr))["scriptPubKey"])
if args.address is not None:
# will always be the same, so cache
args.reward_spk = reward_spk
diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md
new file mode 100644
index 0000000000..047354cda1
--- /dev/null
+++ b/contrib/tracing/README.md
@@ -0,0 +1,241 @@
+Example scripts for User-space, Statically Defined Tracing (USDT)
+=================================================================
+
+This directory contains scripts showcasing User-space, Statically Defined
+Tracing (USDT) support for Bitcoin Core on Linux. For more information on USDT
+support in Bitcoin Core, see the [USDT documentation].
+
+[USDT documentation]: ../../doc/tracing.md
+
+
+Examples for the two main eBPF front-ends with USDT support, [bpftrace] and the
+[BPF Compiler Collection (BCC)], are listed below. BCC is used for the more
+complex tools and daemons, while `bpftrace` is preferred for one-liners and
+shorter scripts.
+
+[bpftrace]: https://github.com/iovisor/bpftrace
+[BPF Compiler Collection (BCC)]: https://github.com/iovisor/bcc
+
+
+To develop and run bpftrace and BCC scripts you need to install the
+corresponding packages. See [installing bpftrace] and [installing BCC] for more
+information. For development there exist a [bpftrace Reference Guide], a
+[BCC Reference Guide], and a [bcc Python Developer Tutorial].
+
+[installing bpftrace]: https://github.com/iovisor/bpftrace/blob/master/INSTALL.md
+[installing BCC]: https://github.com/iovisor/bcc/blob/master/INSTALL.md
+[bpftrace Reference Guide]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md
+[BCC Reference Guide]: https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md
+[bcc Python Developer Tutorial]: https://github.com/iovisor/bcc/blob/master/docs/tutorial_bcc_python_developer.md
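+
+As a taste of the BCC workflow used by the Python examples in this directory,
+the following condensed sketch (not one of the scripts here; it borrows the
+pattern from `log_raw_p2p_msgs.py` below and assumes a self-compiled `bitcoind`
+in `./src/`) prints the type and size of inbound P2P messages:
+
+```
+from bcc import BPF, USDT
+
+program = """
+#include <uapi/linux/ptrace.h>
+#define MAX_MSG_TYPE_LENGTH 20
+
+struct event_t { u64 peer_id; char msg_type[MAX_MSG_TYPE_LENGTH]; u64 msg_size; };
+BPF_PERF_OUTPUT(inbound);
+
+int trace_inbound_message(struct pt_regs *ctx) {
+    struct event_t e = {};
+    // net:inbound_message args: 1=peer id, 4=message type, 5=message size
+    bpf_usdt_readarg(1, ctx, &e.peer_id);
+    bpf_usdt_readarg_p(4, ctx, &e.msg_type, MAX_MSG_TYPE_LENGTH);
+    bpf_usdt_readarg(5, ctx, &e.msg_size);
+    inbound.perf_submit(ctx, &e, sizeof(e));
+    return 0;
+}
+"""
+
+usdt = USDT(path="./src/bitcoind")  # adjust if your binary lives elsewhere
+usdt.enable_probe(probe="inbound_message", fn_name="trace_inbound_message")
+bpf = BPF(text=program, usdt_contexts=[usdt])
+
+def handle(_, data, size):
+    event = bpf["inbound"].event(data)
+    print(f"peer {event.peer_id}: {event.msg_type.decode('utf-8')} ({event.msg_size} bytes)")
+
+bpf["inbound"].open_perf_buffer(handle)
+while True:
+    try:
+        bpf.perf_buffer_poll()
+    except KeyboardInterrupt:
+        break
+```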
+
+## Examples
+
+The bpftrace examples contain a relative path to the `bitcoind` binary. By
+default, the scripts should be run from the repository root and assume a
+self-compiled `bitcoind` binary. The paths in the examples can be changed, for
+example, to point to release builds if needed. See the
+[Bitcoin Core USDT documentation] for how to list available tracepoints in your
+`bitcoind` binary.
+
+[Bitcoin Core USDT documentation]: ../../doc/tracing.md#listing-available-tracepoints
+
+**WARNING: eBPF programs require root privileges to be loaded into a Linux
+kernel VM. This means the bpftrace and BCC examples must be executed with root
+privileges. Make sure to carefully review any scripts that you run with root
+privileges first!**
+
+### log_p2p_traffic.bt
+
+A bpftrace script logging information about inbound and outbound P2P network
+messages. Based on the `net:inbound_message` and `net:outbound_message`
+tracepoints.
+
+By default, `bpftrace` limits strings to 64 bytes due to the limited stack size
+in the eBPF VM. For example, Tor v3 addresses exceed the string size limit,
+which results in the port being cut off during logging. The string size limit
+can be increased with the `BPFTRACE_STRLEN` environment variable
+(`BPFTRACE_STRLEN=70` works fine).
+
+```
+$ bpftrace contrib/tracing/log_p2p_traffic.bt
+```
+
+Output
+```
+outbound 'ping' msg to peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes
+inbound 'pong' msg from peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes
+inbound 'inv' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes
+outbound 'getdata' msg to peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes
+inbound 'tx' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 222 bytes
+outbound 'inv' msg to peer 9 (outbound-full-relay, faketorv3addressa2ufa6odvoi3s77j4uegey0xb10csyfyve2t33curbyd.onion:8333) with 37 bytes
+outbound 'inv' msg to peer 7 (outbound-full-relay, XX.XX.XXX.242:8333) with 37 bytes
+…
+```
+
+### p2p_monitor.py
+
+A BCC Python script using curses for an interactive P2P message monitor. Based
+on the `net:inbound_message` and `net:outbound_message` tracepoints.
+
+Inbound and outbound traffic is listed for each peer together with information
+about the connection. Peers can be selected individually to view recent P2P
+messages.
+
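+Per peer, the monitor keeps running totals and the 25 most recent messages.
+Condensed, the bookkeeping in the script's `Peer` class looks roughly like
+this:
+
+```
+class Peer:
+    def __init__(self, id, address, connection_type):
+        self.id = id
+        self.address = address
+        self.connection_type = connection_type
+        self.last_messages = []
+        self.total_inbound_msgs = self.total_inbound_bytes = 0
+        self.total_outbound_msgs = self.total_outbound_bytes = 0
+
+    def add_message(self, message):
+        # Keep only the 25 most recent messages per peer.
+        self.last_messages.append(message)
+        if len(self.last_messages) > 25:
+            self.last_messages.pop(0)
+        if message.inbound:
+            self.total_inbound_msgs += 1
+            self.total_inbound_bytes += message.size
+        else:
+            self.total_outbound_msgs += 1
+            self.total_outbound_bytes += message.size
+```
+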
+```
+$ python3 contrib/tracing/p2p_monitor.py ./src/bitcoind
+```
+
+Lists selectable peers together with traffic and connection information.
+```
+ P2P Message Monitor
+ Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages
+
+ PEER OUTBOUND INBOUND TYPE ADDR
+ 0 46 398 byte 61 1407590 byte block-relay-only XX.XX.XXX.196:8333
+ 11 1156 253570 byte 3431 2394924 byte outbound-full-relay XXX.X.XX.179:8333
+ 13 3425 1809620 byte 1236 305458 byte inbound XXX.X.X.X:60380
+ 16 1046 241633 byte 1589 1199220 byte outbound-full-relay 4faketorv2pbfu7x.onion:8333
+ 19 577 181679 byte 390 148951 byte outbound-full-relay kfake4vctorjv2o2.onion:8333
+ 20 11 1248 byte 13 1283 byte block-relay-only [2600:fake:64d9:b10c:4436:aaaa:fe:bb]:8333
+ 21 11 1248 byte 13 1299 byte block-relay-only XX.XXX.X.155:8333
+ 22 5 103 byte 1 102 byte feeler XX.XX.XXX.173:8333
+ 23 11 1248 byte 12 1255 byte block-relay-only XX.XXX.XXX.220:8333
+ 24 3 103 byte 1 102 byte feeler XXX.XXX.XXX.64:8333
+…
+```
+
+Showing recent P2P messages between our node and a selected peer.
+
+```
+ ----------------------------------------------------------------------
+ | PEER 16 (4faketorv2pbfu7x.onion:8333) |
+ | OUR NODE outbound-full-relay PEER |
+ | <--- sendcmpct (9 bytes) |
+ | inv (37 byte) ---> |
+ | <--- ping (8 bytes) |
+ | pong (8 byte) ---> |
+ | inv (37 byte) ---> |
+ | <--- addr (31 bytes) |
+ | inv (37 byte) ---> |
+ | <--- getheaders (1029 bytes) |
+ | headers (1 byte) ---> |
+ | <--- feefilter (8 bytes) |
+ | <--- pong (8 bytes) |
+ | <--- headers (82 bytes) |
+ | <--- addr (30003 bytes) |
+ | inv (1261 byte) ---> |
+ | … |
+
+```
+
+### log_raw_p2p_msgs.py
+
+A BCC Python script showcasing eBPF and USDT limitations when passing data
+larger than about 32kb. Based on the `net:inbound_message` and
+`net:outbound_message` tracepoints.
+
+Bitcoin P2P messages can be larger than 32kb (e.g. `tx`, `block`, ...). The
+eBPF VM's stack is limited to 512 bytes, and we can't allocate more than about
+32kb for a P2P message in the eBPF VM. The **message data is cut off** when the
+message is larger than MAX_MSG_DATA_LENGTH (see script). This can be detected
+in user-space by comparing the data length to the message length variable. The
+message is cut off when the data length is smaller than the message length.
+A warning is included with the printed message data.
+
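+In user space, the check boils down to comparing the two lengths; a minimal
+sketch using the `event.msg` and `event.msg_size` fields defined by the
+script's `p2p_message` struct:
+
+```
+def is_truncated(event):
+    # The eBPF program could only copy MAX_MSG_DATA_LENGTH bytes of the payload.
+    return len(event.msg) < event.msg_size
+```
+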
+Data is submitted to user-space (i.e. to this script) via a ring buffer. The
+throughput of the ring buffer is limited. Each p2p_message is about 32kb in
+size. In- or outbound messages submitted to the ring buffer in rapid
+succession fill the ring buffer faster than it can be read. Some messages are
+lost. BCC prints: `Possibly lost 2 samples` on lost messages.
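+
+If lost samples become a problem, one possible mitigation (not used by this
+script) is to enlarge the perf buffers when opening them. BCC's
+`open_perf_buffer()` accepts a `page_cnt` argument (a power of two); for
+example:
+
+```
+# Larger buffers trade memory for fewer "Possibly lost N samples" messages.
+bpf["inbound_messages"].open_perf_buffer(handle_inbound, page_cnt=64)
+bpf["outbound_messages"].open_perf_buffer(handle_outbound, page_cnt=64)
+```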
+
+
+```
+$ python3 contrib/tracing/log_raw_p2p_msgs.py ./src/bitcoind
+```
+
+```
+Logging raw P2P messages.
+Messages larger than about 32kb will be cut off!
+Some messages might be lost!
+ outbound msg 'inv' from peer 4 (outbound-full-relay, XX.XXX.XX.4:8333) with 253 bytes: 0705000000be2245c8f844c9f763748e1a7…
+…
+Warning: incomplete message (only 32568 out of 53552 bytes)! inbound msg 'tx' from peer 32 (outbound-full-relay, XX.XXX.XXX.43:8333) with 53552 bytes: 020000000001fd3c01939c85ad6756ed9fc…
+…
+Possibly lost 2 samples
+```
+
+### connectblock_benchmark.bt
+
+A `bpftrace` script to benchmark the `ConnectBlock()` function during, for
+example, a blockchain re-index. Based on the `validation:block_connected` USDT
+tracepoint.
+
+The script takes three positional arguments. The first two arguments, the start
+and end height, indicate between which blocks the benchmark should be run. The
+third acts as a duration threshold in milliseconds. When the `ConnectBlock()`
+function takes longer than the threshold, information about the block is
+printed. For more details, see the header comment in the script.
+
+By default, `bpftrace` limits strings to 64 bytes due to the limited stack size
+in the kernel VM. Block hashes as zero-terminated hex strings are 65 bytes which
+exceed the string limit. The string size limit can be set to 65 bytes with the
+environment variable `BPFTRACE_STRLEN`.
+
+The following command can be used to benchmark, for example, `ConnectBlock()`
+between height 20000 and 38000 on SigNet while logging all blocks that take
+longer than 25ms to connect.
+
+```
+$ BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25
+```
+
+In a different terminal, start Bitcoin Core in SigNet mode with re-indexing
+enabled.
+
+```
+$ ./src/bitcoind -signet -reindex
+```
+
+This produces the following output.
+```
+Attaching 5 probes...
+ConnectBlock Benchmark between height 20000 and 38000 inclusive
+Logging blocks taking longer than 25 ms to connect.
+Starting Connect Block Benchmark between height 20000 and 38000.
+BENCH 39 blk/s 59 tx/s 59 inputs/s 20 sigops/s (height 20038)
+Block 20492 (000000f555653bb05e2f3c6e79925e01a20dd57033f4dc7c354b46e34735d32b) 20 tx 2319 ins 2318 sigops took 38 ms
+BENCH 1840 blk/s 2117 tx/s 4478 inputs/s 2471 sigops/s (height 21879)
+BENCH 1816 blk/s 4972 tx/s 4982 inputs/s 125 sigops/s (height 23695)
+BENCH 2095 blk/s 2890 tx/s 2910 inputs/s 152 sigops/s (height 25790)
+BENCH 1684 blk/s 3979 tx/s 4053 inputs/s 288 sigops/s (height 27474)
+BENCH 1155 blk/s 3216 tx/s 3252 inputs/s 115 sigops/s (height 28629)
+BENCH 1797 blk/s 2488 tx/s 2503 inputs/s 111 sigops/s (height 30426)
+BENCH 1849 blk/s 6318 tx/s 6569 inputs/s 12189 sigops/s (height 32275)
+BENCH 946 blk/s 20209 tx/s 20775 inputs/s 83809 sigops/s (height 33221)
+Block 33406 (0000002adfe4a15cfcd53bd890a89bbae836e5bb7f38bac566f61ad4548c87f6) 25 tx 2045 ins 2090 sigops took 29 ms
+Block 33687 (00000073231307a9828e5607ceb8156b402efe56747271a4442e75eb5b77cd36) 52 tx 1797 ins 1826 sigops took 26 ms
+BENCH 582 blk/s 21581 tx/s 27673 inputs/s 60345 sigops/s (height 33803)
+BENCH 1035 blk/s 19735 tx/s 19776 inputs/s 51355 sigops/s (height 34838)
+Block 35625 (0000006b00b347390c4768ea9df2655e9ff4b120f29d78594a2a702f8a02c997) 20 tx 3374 ins 3371 sigops took 49 ms
+BENCH 887 blk/s 17857 tx/s 22191 inputs/s 24404 sigops/s (height 35725)
+Block 35937 (000000d816d13d6e39b471cd4368db60463a764ba1f29168606b04a22b81ea57) 75 tx 3943 ins 3940 sigops took 61 ms
+BENCH 823 blk/s 16298 tx/s 21031 inputs/s 18440 sigops/s (height 36548)
+Block 36583 (000000c3e260556dbf42968aae3f904dba8b8c1ff96a6f6e3aa5365d2e3ad317) 24 tx 2198 ins 2194 sigops took 34 ms
+Block 36700 (000000b3b173de9e65a3cfa738d976af6347aaf83fa17ab3f2a4d2ede3ddfac4) 73 tx 1615 ins 1611 sigops took 31 ms
+Block 36832 (0000007859578c02c1ac37dabd1b9ec19b98f350b56935f5dd3a41e9f79f836e) 34 tx 1440 ins 1436 sigops took 26 ms
+BENCH 613 blk/s 16718 tx/s 25074 inputs/s 23022 sigops/s (height 37161)
+Block 37870 (000000f5c1086291ba2d943fb0c3bc82e71c5ee341ee117681d1456fbf6c6c38) 25 tx 1517 ins 1514 sigops took 29 ms
+BENCH 811 blk/s 16031 tx/s 20921 inputs/s 18696 sigops/s (height 37972)
+
+Took 14055 ms to connect the blocks between height 20000 and 38000.
+
+Histogram of block connection times in milliseconds (ms).
+@durations:
+[0] 16838 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
+[1] 882 |@@ |
+[2, 4) 236 | |
+[4, 8) 23 | |
+[8, 16) 9 | |
+[16, 32) 9 | |
+[32, 64) 4 | |
+```
diff --git a/contrib/tracing/connectblock_benchmark.bt b/contrib/tracing/connectblock_benchmark.bt
new file mode 100755
index 0000000000..d268eff7f8
--- /dev/null
+++ b/contrib/tracing/connectblock_benchmark.bt
@@ -0,0 +1,150 @@
+#!/usr/bin/env bpftrace
+
+/*
+
+ USAGE:
+
+ BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt <start height> <end height> <logging threshold in ms>
+
+ - The environment variable BPFTRACE_STRLEN needs to be set to 65 chars as
+ strings are limited to 64 chars by default. Hex strings with Bitcoin block
+ hashes are 64 hex chars + 1 null-termination char.
+ - <start height> sets the height at which the benchmark should start. Setting
+ the start height to 0 starts the benchmark immediately, even before the
+ first block is connected.
+  - <end height> sets the height after which the benchmark should end. Setting
+    the end height to 0 disables the benchmark; the script then only logs
+    blocks over <logging threshold in ms>.
+  - <logging threshold in ms> sets the duration threshold. Blocks that take
+    longer than this to connect are logged individually.
+
+ This script requires a 'bitcoind' binary compiled with eBPF support and the
+ 'validation:block_connected' USDT. By default, it's assumed that 'bitcoind' is
+ located in './src/bitcoind'. This can be modified in the script below.
+
+ EXAMPLES:
+
+ BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000
+
+  When run together with 'bitcoind -reindex', this benchmarks the time it takes
+  to connect the blocks between height 300,000 and 680,000 (inclusive) and
+  prints details about all blocks that take longer than 1000ms to connect. A
+  histogram with block connection times is printed when the benchmark is
+  finished.
+
+
+ BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500
+
+  When run together with 'bitcoind', all newly connected blocks that
+ take longer than 500ms to connect are logged. A histogram with block
+ connection times is shown when the script is terminated.
+
+*/
+
+BEGIN
+{
+ $start_height = $1;
+ $end_height = $2;
+ $logging_threshold_ms = $3;
+
+ if ($end_height < $start_height) {
+ printf("Error: start height (%d) larger than end height (%d)!\n", $start_height, $end_height);
+ exit();
+ }
+
+ if ($end_height > 0) {
+ printf("ConnectBlock benchmark between height %d and %d inclusive\n", $start_height, $end_height);
+ } else {
+ printf("ConnectBlock logging starting at height %d\n", $start_height);
+ }
+
+ if ($logging_threshold_ms > 0) {
+ printf("Logging blocks taking longer than %d ms to connect.\n", $3);
+ }
+
+ if ($start_height == 0) {
+ @start = nsecs;
+ }
+}
+
+/*
+ Attaches to the 'validation:block_connected' USDT and collects stats when the
+ connected block is between the start and end height (or the end height is
+ unset).
+*/
+usdt:./src/bitcoind:validation:block_connected /arg1 >= $1 && (arg1 <= $2 || $2 == 0 )/
+{
+ $height = arg1;
+ $transactions = arg2;
+ $inputs = arg3;
+ $sigops = arg4;
+ $duration = (uint64) arg5;
+
+ @height = $height;
+
+ @blocks = @blocks + 1;
+ @transactions = @transactions + $transactions;
+ @inputs = @inputs + $inputs;
+ @sigops = @sigops + $sigops;
+
+ @durations = hist($duration / 1000);
+
+ if ($height == $1 && $height != 0) {
+ @start = nsecs;
+ printf("Starting Connect Block Benchmark between height %d and %d.\n", $1, $2);
+ }
+
+ if ($2 > 0 && $height >= $2) {
+ @end = nsecs;
+ $duration = @end - @start;
+ printf("\nTook %d ms to connect the blocks between height %d and %d.\n", $duration / 1000000, $1, $2);
+ exit();
+ }
+}
+
+/*
+ Attaches to the 'validation:block_connected' USDT and logs information about
+ blocks where the time it took to connect the block is above the
+ <logging threshold in ms>.
+*/
+usdt:./src/bitcoind:validation:block_connected / (uint64) arg5 / 1000 > $3 /
+{
+ $hash_str = str(arg0);
+ $height = (int32) arg1;
+ $transactions = (uint64) arg2;
+ $inputs = (int32) arg3;
+ $sigops = (int64) arg4;
+ $duration = (int64) arg5;
+
+ printf("Block %d (%s) %4d tx %5d ins %5d sigops took %4d ms\n", $height, $hash_str, $transactions, $inputs, $sigops, (uint64) $duration / 1000);
+}
+
+
+/*
+ Prints stats about the blocks, transactions, inputs, and sigops processed in
+ the last second (if any).
+*/
+interval:s:1 {
+ if (@blocks > 0) {
+ printf("BENCH %4d blk/s %6d tx/s %7d inputs/s %8d sigops/s (height %d)\n", @blocks, @transactions, @inputs, @sigops, @height);
+
+ zero(@blocks);
+ zero(@transactions);
+ zero(@inputs);
+ zero(@sigops);
+ }
+}
+
+END
+{
+ printf("\nHistogram of block connection times in milliseconds (ms).\n");
+ print(@durations);
+
+ clear(@durations);
+ clear(@blocks);
+ clear(@transactions);
+ clear(@inputs);
+ clear(@sigops);
+ clear(@height);
+ clear(@start);
+ clear(@end);
+}
+
diff --git a/contrib/tracing/log_p2p_traffic.bt b/contrib/tracing/log_p2p_traffic.bt
new file mode 100755
index 0000000000..f62956aa5e
--- /dev/null
+++ b/contrib/tracing/log_p2p_traffic.bt
@@ -0,0 +1,28 @@
+#!/usr/bin/env bpftrace
+
+BEGIN
+{
+ printf("Logging P2P traffic\n")
+}
+
+usdt:./src/bitcoind:net:inbound_message
+{
+ $peer_id = (int64) arg0;
+ $peer_addr = str(arg1);
+ $peer_type = str(arg2);
+ $msg_type = str(arg3);
+ $msg_len = arg4;
+ printf("inbound '%s' msg from peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len);
+}
+
+usdt:./src/bitcoind:net:outbound_message
+{
+ $peer_id = (int64) arg0;
+ $peer_addr = str(arg1);
+ $peer_type = str(arg2);
+ $msg_type = str(arg3);
+ $msg_len = arg4;
+
+ printf("outbound '%s' msg to peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len);
+}
+
diff --git a/contrib/tracing/log_raw_p2p_msgs.py b/contrib/tracing/log_raw_p2p_msgs.py
new file mode 100755
index 0000000000..b5b5755632
--- /dev/null
+++ b/contrib/tracing/log_raw_p2p_msgs.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python3
+
+""" Demonstration of eBPF limitations and the effect on USDT with the
+ net:inbound_message and net:outbound_message tracepoints. """
+
+# This script shows a limitation of eBPF when data larger than 32kb is passed to
+# user-space. It uses BCC (https://github.com/iovisor/bcc) to load a sandboxed
+# eBPF program into the Linux kernel (root privileges are required). The eBPF
+# program attaches to two statically defined tracepoints. The tracepoint
+# 'net:inbound_message' is called when a new P2P message is received, and
+# 'net:outbound_message' is called on outbound P2P messages. The eBPF program
+# submits the P2P messages to this script via a BPF ring buffer. The submitted
+# messages are printed.
+
+# eBPF Limitations:
+#
+# Bitcoin P2P messages can be larger than 32kb (e.g. tx, block, ...). The eBPF
+# VM's stack is limited to 512 bytes, and we can't allocate more than about 32kb
+# for a P2P message in the eBPF VM. The message data is cut off when the message
+# is larger than MAX_MSG_DATA_LENGTH (see definition below). This can be detected
+# in user-space by comparing the data length to the message length variable. The
+# message is cut off when the data length is smaller than the message length.
+# A warning is included with the printed message data.
+#
+# Data is submitted to user-space (i.e. to this script) via a ring buffer. The
+# throughput of the ring buffer is limited. Each p2p_message is about 32kb in
+# size. In- or outbound messages submitted to the ring buffer in rapid
+# succession fill the ring buffer faster than it can be read. Some messages are
+# lost.
+#
+# BCC prints: "Possibly lost 2 samples" on lost messages.
+
+import sys
+from bcc import BPF, USDT
+
+# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into
+# a sandboxed Linux kernel VM.
+program = """
+#include <uapi/linux/ptrace.h>
+
+#define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; })
+
+// Maximum possible allocation size
+// from include/linux/percpu.h in the Linux kernel
+#define PCPU_MIN_UNIT_SIZE (32 << 10)
+
+// Tor v3 addresses are 62 chars + 6 chars for the port (':12345').
+#define MAX_PEER_ADDR_LENGTH 62 + 6
+#define MAX_PEER_CONN_TYPE_LENGTH 20
+#define MAX_MSG_TYPE_LENGTH 20
+#define MAX_MSG_DATA_LENGTH PCPU_MIN_UNIT_SIZE - 200
+
+struct p2p_message
+{
+ u64 peer_id;
+ char peer_addr[MAX_PEER_ADDR_LENGTH];
+ char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH];
+ char msg_type[MAX_MSG_TYPE_LENGTH];
+ u64 msg_size;
+ u8 msg[MAX_MSG_DATA_LENGTH];
+};
+
+// We can't store the p2p_message struct on the eBPF stack as it is limited to
+// 512 bytes and a P2P message can be bigger than 512 bytes. However, we can use
+// a BPF array with a length of 1 to allocate up to 32768 bytes (this is
+// defined by PCPU_MIN_UNIT_SIZE in include/linux/percpu.h in the Linux kernel).
+// Also see https://github.com/iovisor/bcc/issues/2306
+BPF_ARRAY(msg_arr, struct p2p_message, 1);
+
+// Two BPF perf buffers for pushing data (here P2P messages) to user-space.
+BPF_PERF_OUTPUT(inbound_messages);
+BPF_PERF_OUTPUT(outbound_messages);
+
+int trace_inbound_message(struct pt_regs *ctx) {
+ int idx = 0;
+ struct p2p_message *msg = msg_arr.lookup(&idx);
+
+ // lookup() does not return a NULL pointer. However, the BPF verifier
+  // requires an explicit check that the `msg` pointer isn't a NULL
+ // pointer. See https://github.com/iovisor/bcc/issues/2595
+ if (msg == NULL) return 1;
+
+ bpf_usdt_readarg(1, ctx, &msg->peer_id);
+ bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH);
+ bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
+ bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH);
+ bpf_usdt_readarg(5, ctx, &msg->msg_size);
+ bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH));
+
+ inbound_messages.perf_submit(ctx, msg, sizeof(*msg));
+ return 0;
+};
+
+int trace_outbound_message(struct pt_regs *ctx) {
+ int idx = 0;
+ struct p2p_message *msg = msg_arr.lookup(&idx);
+
+ // lookup() does not return a NULL pointer. However, the BPF verifier
+  // requires an explicit check that the `msg` pointer isn't a NULL
+ // pointer. See https://github.com/iovisor/bcc/issues/2595
+ if (msg == NULL) return 1;
+
+ bpf_usdt_readarg(1, ctx, &msg->peer_id);
+ bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH);
+ bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
+ bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH);
+ bpf_usdt_readarg(5, ctx, &msg->msg_size);
+ bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH));
+
+ outbound_messages.perf_submit(ctx, msg, sizeof(*msg));
+ return 0;
+};
+"""
+
+
+def print_message(event, inbound):
+ print(f"%s %s msg '%s' from peer %d (%s, %s) with %d bytes: %s" %
+ (
+ f"Warning: incomplete message (only %d out of %d bytes)!" % (
+ len(event.msg), event.msg_size) if len(event.msg) < event.msg_size else "",
+ "inbound" if inbound else "outbound",
+ event.msg_type.decode("utf-8"),
+ event.peer_id,
+ event.peer_conn_type.decode("utf-8"),
+ event.peer_addr.decode("utf-8"),
+ event.msg_size,
+ bytes(event.msg[:event.msg_size]).hex(),
+ )
+ )
+
+
+def main(bitcoind_path):
+ bitcoind_with_usdts = USDT(path=str(bitcoind_path))
+
+ # attaching the trace functions defined in the BPF program to the tracepoints
+ bitcoind_with_usdts.enable_probe(
+ probe="inbound_message", fn_name="trace_inbound_message")
+ bitcoind_with_usdts.enable_probe(
+ probe="outbound_message", fn_name="trace_outbound_message")
+ bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts])
+
+ # BCC: perf buffer handle function for inbound_messages
+ def handle_inbound(_, data, size):
+ """ Inbound message handler.
+
+ Called each time a message is submitted to the inbound_messages BPF table."""
+
+ event = bpf["inbound_messages"].event(data)
+ print_message(event, True)
+
+ # BCC: perf buffer handle function for outbound_messages
+
+ def handle_outbound(_, data, size):
+ """ Outbound message handler.
+
+ Called each time a message is submitted to the outbound_messages BPF table."""
+
+ event = bpf["outbound_messages"].event(data)
+ print_message(event, False)
+
+ # BCC: add handlers to the inbound and outbound perf buffers
+ bpf["inbound_messages"].open_perf_buffer(handle_inbound)
+ bpf["outbound_messages"].open_perf_buffer(handle_outbound)
+
+ print("Logging raw P2P messages.")
+ print("Messages larger that about 32kb will be cut off!")
+ print("Some messages might be lost!")
+ while True:
+ try:
+ bpf.perf_buffer_poll()
+ except KeyboardInterrupt:
+ exit()
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 2:
+ print("USAGE:", sys.argv[0], "path/to/bitcoind")
+ exit()
+ path = sys.argv[1]
+ main(path)
diff --git a/contrib/tracing/p2p_monitor.py b/contrib/tracing/p2p_monitor.py
new file mode 100755
index 0000000000..14e3e3a801
--- /dev/null
+++ b/contrib/tracing/p2p_monitor.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python3
+
+""" Interactive bitcoind P2P network traffic monitor utilizing USDT and the
+ net:inbound_message and net:outbound_message tracepoints. """
+
+# This script demonstrates what USDT for Bitcoin Core can enable. It uses BCC
+# (https://github.com/iovisor/bcc) to load a sandboxed eBPF program into the
+# Linux kernel (root privileges are required). The eBPF program attaches to two
+# statically defined tracepoints. The tracepoint 'net:inbound_message' is called
+# when a new P2P message is received, and 'net:outbound_message' is called on
+# outbound P2P messages. The eBPF program submits the P2P messages to
+# this script via a BPF ring buffer.
+
+import sys
+import curses
+from curses import wrapper, panel
+from bcc import BPF, USDT
+
+# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into
+# a sandboxed Linux kernel VM.
+program = """
+#include <uapi/linux/ptrace.h>
+
+// Tor v3 addresses are 62 chars + 6 chars for the port (':12345').
+// I2P addresses are 60 chars + 6 chars for the port (':12345').
+#define MAX_PEER_ADDR_LENGTH 62 + 6
+#define MAX_PEER_CONN_TYPE_LENGTH 20
+#define MAX_MSG_TYPE_LENGTH 20
+
+struct p2p_message
+{
+ u64 peer_id;
+ char peer_addr[MAX_PEER_ADDR_LENGTH];
+ char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH];
+ char msg_type[MAX_MSG_TYPE_LENGTH];
+ u64 msg_size;
+};
+
+
+// Two BPF perf buffers for pushing data (here P2P messages) to user space.
+BPF_PERF_OUTPUT(inbound_messages);
+BPF_PERF_OUTPUT(outbound_messages);
+
+int trace_inbound_message(struct pt_regs *ctx) {
+ struct p2p_message msg = {};
+
+ bpf_usdt_readarg(1, ctx, &msg.peer_id);
+ bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH);
+ bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
+ bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH);
+ bpf_usdt_readarg(5, ctx, &msg.msg_size);
+
+ inbound_messages.perf_submit(ctx, &msg, sizeof(msg));
+ return 0;
+};
+
+int trace_outbound_message(struct pt_regs *ctx) {
+ struct p2p_message msg = {};
+
+ bpf_usdt_readarg(1, ctx, &msg.peer_id);
+ bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH);
+ bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH);
+ bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH);
+ bpf_usdt_readarg(5, ctx, &msg.msg_size);
+
+ outbound_messages.perf_submit(ctx, &msg, sizeof(msg));
+ return 0;
+};
+"""
+
+
+class Message:
+ """ A P2P network message. """
+ msg_type = ""
+ size = 0
+ data = bytes()
+ inbound = False
+
+ def __init__(self, msg_type, size, inbound):
+ self.msg_type = msg_type
+ self.size = size
+ self.inbound = inbound
+
+
+class Peer:
+ """ A P2P network peer. """
+ id = 0
+ address = ""
+ connection_type = ""
+ last_messages = list()
+
+ total_inbound_msgs = 0
+ total_inbound_bytes = 0
+ total_outbound_msgs = 0
+ total_outbound_bytes = 0
+
+ def __init__(self, id, address, connection_type):
+ self.id = id
+ self.address = address
+ self.connection_type = connection_type
+ self.last_messages = list()
+
+ def add_message(self, message):
+ self.last_messages.append(message)
+ if len(self.last_messages) > 25:
+ self.last_messages.pop(0)
+ if message.inbound:
+ self.total_inbound_bytes += message.size
+ self.total_inbound_msgs += 1
+ else:
+ self.total_outbound_bytes += message.size
+ self.total_outbound_msgs += 1
+
+
+def main(bitcoind_path):
+ peers = dict()
+
+ bitcoind_with_usdts = USDT(path=str(bitcoind_path))
+
+ # attaching the trace functions defined in the BPF program to the tracepoints
+ bitcoind_with_usdts.enable_probe(
+ probe="inbound_message", fn_name="trace_inbound_message")
+ bitcoind_with_usdts.enable_probe(
+ probe="outbound_message", fn_name="trace_outbound_message")
+ bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts])
+
+ # BCC: perf buffer handle function for inbound_messages
+ def handle_inbound(_, data, size):
+ """ Inbound message handler.
+
+ Called each time a message is submitted to the inbound_messages BPF table."""
+ event = bpf["inbound_messages"].event(data)
+ if event.peer_id not in peers:
+ peer = Peer(event.peer_id, event.peer_addr.decode(
+ "utf-8"), event.peer_conn_type.decode("utf-8"))
+ peers[peer.id] = peer
+ peers[event.peer_id].add_message(
+ Message(event.msg_type.decode("utf-8"), event.msg_size, True))
+
+ # BCC: perf buffer handle function for outbound_messages
+ def handle_outbound(_, data, size):
+ """ Outbound message handler.
+
+ Called each time a message is submitted to the outbound_messages BPF table."""
+ event = bpf["outbound_messages"].event(data)
+ if event.peer_id not in peers:
+ peer = Peer(event.peer_id, event.peer_addr.decode(
+ "utf-8"), event.peer_conn_type.decode("utf-8"))
+ peers[peer.id] = peer
+ peers[event.peer_id].add_message(
+ Message(event.msg_type.decode("utf-8"), event.msg_size, False))
+
+ # BCC: add handlers to the inbound and outbound perf buffers
+ bpf["inbound_messages"].open_perf_buffer(handle_inbound)
+ bpf["outbound_messages"].open_perf_buffer(handle_outbound)
+
+ wrapper(loop, bpf, peers)
+
+
+def loop(screen, bpf, peers):
+ screen.nodelay(1)
+ cur_list_pos = 0
+ win = curses.newwin(30, 70, 2, 7)
+ win.erase()
+ win.border(ord("|"), ord("|"), ord("-"), ord("-"),
+ ord("-"), ord("-"), ord("-"), ord("-"))
+ info_panel = panel.new_panel(win)
+ info_panel.hide()
+
+ ROWS_AVALIABLE_FOR_LIST = curses.LINES - 5
+ scroll = 0
+
+ while True:
+ try:
+ # BCC: poll the perf buffers for new events or timeout after 50ms
+ bpf.perf_buffer_poll(timeout=50)
+
+ ch = screen.getch()
+ if (ch == curses.KEY_DOWN or ch == ord("j")) and cur_list_pos < len(
+ peers.keys()) -1 and info_panel.hidden():
+ cur_list_pos += 1
+ if cur_list_pos >= ROWS_AVALIABLE_FOR_LIST:
+ scroll += 1
+ if (ch == curses.KEY_UP or ch == ord("k")) and cur_list_pos > 0 and info_panel.hidden():
+ cur_list_pos -= 1
+ if scroll > 0:
+ scroll -= 1
+ if ch == ord('\n') or ch == ord(' '):
+ if info_panel.hidden():
+ info_panel.show()
+ else:
+ info_panel.hide()
+ screen.erase()
+ render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel)
+ curses.panel.update_panels()
+ screen.refresh()
+ except KeyboardInterrupt:
+ exit()
+
+
+def render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel):
+ """ renders the list of peers and details panel
+
+ This code is unrelated to USDT, BCC and BPF.
+ """
+ header_format = "%6s %-20s %-20s %-22s %-67s"
+ row_format = "%6s %-5d %9d byte %-5d %9d byte %-22s %-67s"
+
+ screen.addstr(0, 1, (" P2P Message Monitor "), curses.A_REVERSE)
+ screen.addstr(
+ 1, 0, (" Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages"), curses.A_NORMAL)
+ screen.addstr(3, 0,
+ header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), curses.A_BOLD | curses.A_UNDERLINE)
+ peer_list = sorted(peers.keys())[scroll:ROWS_AVALIABLE_FOR_LIST+scroll]
+ for i, peer_id in enumerate(peer_list):
+ peer = peers[peer_id]
+ screen.addstr(i + 4, 0,
+ row_format % (peer.id, peer.total_outbound_msgs, peer.total_outbound_bytes,
+ peer.total_inbound_msgs, peer.total_inbound_bytes,
+ peer.connection_type, peer.address),
+ curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL)
+ if i + scroll == cur_list_pos:
+ info_window = info_panel.window()
+ info_window.erase()
+ info_window.border(
+ ord("|"), ord("|"), ord("-"), ord("-"),
+ ord("-"), ord("-"), ord("-"), ord("-"))
+
+ info_window.addstr(
+ 1, 1, f"PEER {peer.id} ({peer.address})".center(68), curses.A_REVERSE | curses.A_BOLD)
+ info_window.addstr(
+ 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ",
+ curses.A_BOLD)
+ for i, msg in enumerate(peer.last_messages):
+ if msg.inbound:
+ info_window.addstr(
+ i + 3, 1, "%68s" %
+ (f"<--- {msg.msg_type} ({msg.size} bytes) "), curses.A_NORMAL)
+ else:
+ info_window.addstr(
+ i + 3, 1, " %s (%d byte) --->" %
+ (msg.msg_type, msg.size), curses.A_NORMAL)
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 2:
+ print("USAGE:", sys.argv[0], "path/to/bitcoind")
+ exit()
+ path = sys.argv[1]
+ main(path)
diff --git a/contrib/verify-commits/README.md b/contrib/verify-commits/README.md
index e95a57586f..b8b15280ba 100644
--- a/contrib/verify-commits/README.md
+++ b/contrib/verify-commits/README.md
@@ -40,7 +40,7 @@ Import trusted keys
In order to check the commit signatures, you must add the trusted PGP keys to your machine. [GnuPG](https://gnupg.org/) may be used to import the trusted keys by running the following command:
```sh
-gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys $(<contrib/verify-commits/trusted-keys)
+gpg --keyserver hkps://keys.openpgp.org --recv-keys $(<contrib/verify-commits/trusted-keys)
```
Key expiry/revocation
diff --git a/contrib/verifybinaries/verify.py b/contrib/verifybinaries/verify.py
index 6cbaf2dc0c..51c151add8 100755
--- a/contrib/verifybinaries/verify.py
+++ b/contrib/verifybinaries/verify.py
@@ -2,7 +2,7 @@
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Script for verifying Bitoin Core release binaries
+"""Script for verifying Bitcoin Core release binaries
This script attempts to download the signature file SHA256SUMS.asc from
bitcoincore.org and bitcoin.org and compares them.
diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py
index 9cb887e2dc..6269269d37 100755
--- a/contrib/zmq/zmq_sub.py
+++ b/contrib/zmq/zmq_sub.py
@@ -23,7 +23,6 @@
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
-import binascii
import asyncio
import zmq
import zmq.asyncio
@@ -58,18 +57,18 @@ class ZMQHandler():
sequence = str(struct.unpack('<I', seq)[-1])
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
- print(binascii.hexlify(body))
+ print(body.hex())
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
- print(binascii.hexlify(body))
+ print(body.hex())
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
- print(binascii.hexlify(body[:80]))
+ print(body[:80].hex())
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
- print(binascii.hexlify(body))
+ print(body.hex())
elif topic == b"sequence":
- hash = binascii.hexlify(body[:32])
+ hash = body[:32].hex()
label = chr(body[32])
mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0]
print('- SEQUENCE ('+sequence+') -')
diff --git a/depends/Makefile b/depends/Makefile
index ac12e91e49..a3b9cd2099 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -1,7 +1,7 @@
.NOTPARALLEL :
# Pattern rule to print variables, e.g. make print-top_srcdir
-print-%:
+print-%: FORCE
@echo '$*'='$($*)'
# When invoking a sub-make, keep only the command line variable definitions
@@ -284,3 +284,4 @@ download: download-osx download-linux download-win
$(foreach package,$(all_packages),$(eval $(call ext_add_stages,$(package))))
.PHONY: install cached clean clean-all download-one download-osx download-linux download-win download check-packages check-sources
+.PHONY: FORCE
diff --git a/depends/README.md b/depends/README.md
index 50e1a32c70..4f3b6df487 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -87,6 +87,14 @@ For linux S390X cross compilation:
sudo apt-get install g++-s390x-linux-gnu binutils-s390x-linux-gnu
+### Install the required dependencies: M1-based macOS
+
+To be able to build the `qt` package, ensure that Rosetta 2 is installed:
+
+```
+softwareupdate --install-rosetta
+```
+
### Dependency Options
The following can be set when running make: `make FOO=bar`
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk
index d45ac3d03f..8a3116bb3b 100644
--- a/depends/packages/bdb.mk
+++ b/depends/packages/bdb.mk
@@ -12,7 +12,7 @@ $(package)_config_opts_mingw32=--enable-mingw
$(package)_config_opts_linux=--with-pic
$(package)_config_opts_android=--with-pic
$(package)_cflags+=-Wno-error=implicit-function-declaration
-$(package)_cxxflags=-std=c++17
+$(package)_cxxflags+=-std=c++17
$(package)_cppflags_mingw32=-DUNICODE -D_UNICODE
endef
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index f879d176f5..21df50b040 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -23,7 +23,7 @@ else
$(package)_toolset_$(host_os)=gcc
endif
$(package)_config_libraries=filesystem,system,test
-$(package)_cxxflags=-std=c++17 -fvisibility=hidden
+$(package)_cxxflags+=-std=c++17 -fvisibility=hidden
$(package)_cxxflags_linux=-fPIC
$(package)_cxxflags_android=-fPIC
$(package)_cxxflags_x86_64_darwin=-fcf-protection=full
diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk
index dad317193c..0af5412d94 100644
--- a/depends/packages/libevent.mk
+++ b/depends/packages/libevent.mk
@@ -16,6 +16,10 @@ define $(package)_set_vars
$(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601
endef
+define $(package)_preprocess_cmds
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub build-aux
+endef
+
define $(package)_config_cmds
$($(package)_autoconf)
endef
diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk
index 885207fce9..d169eb6723 100644
--- a/depends/packages/native_cctools.mk
+++ b/depends/packages/native_cctools.mk
@@ -16,6 +16,10 @@ define $(package)_set_vars
$(package)_cxx=$(clangxx_prog)
endef
+define $(package)_preprocess_cmds
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub cctools
+endef
+
define $(package)_config_cmds
$($(package)_autoconf)
endef
diff --git a/depends/packages/native_clang.mk b/depends/packages/native_clang.mk
index 36adeb196d..25ac77c1a3 100644
--- a/depends/packages/native_clang.mk
+++ b/depends/packages/native_clang.mk
@@ -1,9 +1,15 @@
package=native_clang
$(package)_version=10.0.1
$(package)_download_path=https://github.com/llvm/llvm-project/releases/download/llvmorg-$($(package)_version)
+ifneq (,$(findstring aarch64,$(BUILD)))
+$(package)_download_file=clang+llvm-$($(package)_version)-aarch64-linux-gnu.tar.xz
+$(package)_file_name=clang+llvm-$($(package)_version)-aarch64-linux-gnu.tar.xz
+$(package)_sha256_hash=90dc69a4758ca15cd0ffa45d07fbf5bf4309d47d2c7745a9f0735ecffde9c31f
+else
$(package)_download_file=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-16.04.tar.xz
$(package)_file_name=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-16.04.tar.xz
$(package)_sha256_hash=48b83ef827ac2c213d5b64f5ad7ed082c8bcb712b46644e0dc5045c6f462c231
+endif
define $(package)_preprocess_cmds
rm -f $($(package)_extract_dir)/lib/libc++abi.so*
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 165c27f5e3..9004b064d6 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -6,9 +6,11 @@ $(package)_file_name=qtbase-$($(package)_suffix)
$(package)_sha256_hash=1c1b4e33137ca77881074c140d54c3c9747e845a31338cfe8680f171f0bc3a39
$(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon
$(package)_qt_libs=corelib network widgets gui plugins testlib
-$(package)_patches=fix_qt_pkgconfig.patch mac-qmake.conf fix_no_printer.patch no-xlib.patch
-$(package)_patches+= fix_android_qmake_conf.patch fix_android_jni_static.patch dont_hardcode_pwd.patch
-$(package)_patches+= drop_lrelease_dependency.patch no_sdk_version_check.patch
+$(package)_linguist_tools = lrelease lupdate lconvert
+$(package)_patches = qt.pro qttools_src.pro
+$(package)_patches += fix_qt_pkgconfig.patch mac-qmake.conf fix_no_printer.patch no-xlib.patch
+$(package)_patches += support_new_android_ndks.patch fix_android_jni_static.patch dont_hardcode_pwd.patch
+$(package)_patches+= no_sdk_version_check.patch
$(package)_patches+= fix_lib_paths.patch fix_android_pch.patch
$(package)_patches+= qtbase-moc-ignore-gcc-macro.patch fix_limits_header.patch
@@ -64,6 +66,7 @@ $(package)_config_opts += -no-system-proxies
$(package)_config_opts += -no-use-gold-linker
$(package)_config_opts += -nomake examples
$(package)_config_opts += -nomake tests
+$(package)_config_opts += -nomake tools
$(package)_config_opts += -opensource
$(package)_config_opts += -pkg-config
$(package)_config_opts += -prefix $(host_prefix)
@@ -113,14 +116,13 @@ $(package)_config_opts_darwin = -no-dbus
$(package)_config_opts_darwin += -no-opengl
$(package)_config_opts_darwin += -pch
$(package)_config_opts_darwin += -no-feature-corewlan
-$(package)_config_opts_darwin += -device-option QMAKE_MACOSX_DEPLOYMENT_TARGET=$(OSX_MIN_VERSION)
+$(package)_config_opts_darwin += QMAKE_MACOSX_DEPLOYMENT_TARGET=$(OSX_MIN_VERSION)
ifneq ($(build_os),darwin)
$(package)_config_opts_darwin += -xplatform macx-clang-linux
$(package)_config_opts_darwin += -device-option MAC_SDK_PATH=$(OSX_SDK)
$(package)_config_opts_darwin += -device-option MAC_SDK_VERSION=$(OSX_SDK_VERSION)
$(package)_config_opts_darwin += -device-option CROSS_COMPILE="$(host)-"
-$(package)_config_opts_darwin += -device-option MAC_MIN_VERSION=$(OSX_MIN_VERSION)
$(package)_config_opts_darwin += -device-option MAC_TARGET=$(host)
$(package)_config_opts_darwin += -device-option XCODE_VERSION=$(XCODE_VERSION)
endif
@@ -201,30 +203,28 @@ endef
#
# 1. Apply our patches to the extracted source. See each patch for more info.
#
-# 2. Point to lrelease in qttools/bin/lrelease; otherwise Qt will look for it in
-# $(host)/native/bin/lrelease and not find it.
+# 2. Create a macOS-Clang-Linux mkspec using our mac-qmake.conf.
#
-# 3. Create a macOS-Clang-Linux mkspec using our mac-qmake.conf.
-#
-# 4. After making a copy of the mkspec for the linux-arm-gnueabi host, named
+# 3. After making a copy of the mkspec for the linux-arm-gnueabi host, named
# bitcoin-linux-g++, replace instances of linux-arm-gnueabi with $(host). This
# way we can generically support hosts like riscv64-linux-gnu, which Qt doesn't
# ship a mkspec for. See it's usage in config_opts_* above.
#
-# 5. Put our C, CXX and LD FLAGS into gcc-base.conf. Only used for non-host builds.
+# 4. Put our C, CXX and LD FLAGS into gcc-base.conf. Only used for non-host builds.
#
-# 6. Do similar for the win32-g++ mkspec.
+# 5. Do similar for the win32-g++ mkspec.
#
-# 7. In clang.conf, swap out clang & clang++, for our compiler + flags. See #17466.
+# 6. In clang.conf, swap out clang & clang++, for our compiler + flags. See #17466.
#
-# 8. Adjust a regex in toolchain.prf, to accommodate Guix's usage of
+# 7. Adjust a regex in toolchain.prf, to accommodate Guix's usage of
# CROSS_LIBRARY_PATH. See #15277.
define $(package)_preprocess_cmds
- patch -p1 -i $($(package)_patch_dir)/drop_lrelease_dependency.patch && \
+ cp $($(package)_patch_dir)/qt.pro qt.pro && \
+ cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \
patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_no_printer.patch && \
- patch -p1 -i $($(package)_patch_dir)/fix_android_qmake_conf.patch && \
+ patch -p1 -i $($(package)_patch_dir)/support_new_android_ndks.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_android_pch.patch && \
patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \
@@ -232,7 +232,6 @@ define $(package)_preprocess_cmds
patch -p1 -i $($(package)_patch_dir)/fix_lib_paths.patch && \
patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_limits_header.patch && \
- sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \
mkdir -p qtbase/mkspecs/macx-clang-linux &&\
cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\
cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \
@@ -249,35 +248,22 @@ endef
define $(package)_config_cmds
export PKG_CONFIG_SYSROOT_DIR=/ && \
export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \
- export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \
+ export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \
cd qtbase && \
- ./configure $($(package)_config_opts) && \
- cd .. && \
- $(MAKE) -C qtbase sub-src-clean && \
- qtbase/bin/qmake -o qttranslations/Makefile qttranslations/qttranslations.pro && \
- qtbase/bin/qmake -o qttranslations/translations/Makefile qttranslations/translations/translations.pro && \
- qtbase/bin/qmake -o qttools/src/linguist/lrelease/Makefile qttools/src/linguist/lrelease/lrelease.pro && \
- qtbase/bin/qmake -o qttools/src/linguist/lupdate/Makefile qttools/src/linguist/lupdate/lupdate.pro && \
- qtbase/bin/qmake -o qttools/src/linguist/lconvert/Makefile qttools/src/linguist/lconvert/lconvert.pro
+ ./configure -top-level $($(package)_config_opts)
endef
define $(package)_build_cmds
- $(MAKE) -C qtbase/src $(addprefix sub-,$($(package)_qt_libs)) && \
- $(MAKE) -C qttools/src/linguist/lrelease && \
- $(MAKE) -C qttools/src/linguist/lupdate && \
- $(MAKE) -C qttools/src/linguist/lconvert && \
- $(MAKE) -C qttranslations
+ $(MAKE)
endef
define $(package)_stage_cmds
$(MAKE) -C qtbase/src INSTALL_ROOT=$($(package)_staging_dir) $(addsuffix -install_subtargets,$(addprefix sub-,$($(package)_qt_libs))) && \
- $(MAKE) -C qttools/src/linguist/lrelease INSTALL_ROOT=$($(package)_staging_dir) install_target && \
- $(MAKE) -C qttools/src/linguist/lupdate INSTALL_ROOT=$($(package)_staging_dir) install_target && \
- $(MAKE) -C qttools/src/linguist/lconvert INSTALL_ROOT=$($(package)_staging_dir) install_target && \
+ $(MAKE) -C qttools/src/linguist INSTALL_ROOT=$($(package)_staging_dir) $(addsuffix -install_subtargets,$(addprefix sub-,$($(package)_linguist_tools))) && \
$(MAKE) -C qttranslations INSTALL_ROOT=$($(package)_staging_dir) install_subtargets
endef
define $(package)_postprocess_cmds
rm -rf native/mkspecs/ native/lib/ lib/cmake/ && \
- rm -f lib/lib*.la lib/*.prl plugins/*/*.prl
+ rm -f lib/lib*.la
endef
diff --git a/depends/packages/sqlite.mk b/depends/packages/sqlite.mk
index 5b3a61b239..af5e0d09c9 100644
--- a/depends/packages/sqlite.mk
+++ b/depends/packages/sqlite.mk
@@ -9,6 +9,10 @@ $(package)_config_opts=--disable-shared --disable-readline --disable-dynamic-ext
$(package)_config_opts_linux=--with-pic
endef
+define $(package)_preprocess_cmds
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub .
+endef
+
define $(package)_config_cmds
$($(package)_autoconf)
endef
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index 3b7f3690a4..9798248c61 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -12,7 +12,7 @@ define $(package)_set_vars
$(package)_config_opts += --disable-Werror --disable-drafts --enable-option-checking
$(package)_config_opts_linux=--with-pic
$(package)_config_opts_android=--with-pic
- $(package)_cxxflags=-std=c++17
+ $(package)_cxxflags+=-std=c++17
endef
define $(package)_preprocess_cmds
diff --git a/depends/patches/qt/drop_lrelease_dependency.patch b/depends/patches/qt/drop_lrelease_dependency.patch
deleted file mode 100644
index 9b918af77c..0000000000
--- a/depends/patches/qt/drop_lrelease_dependency.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-commit 67b3ed7406e1d0762188dbad2c44a06824ba0778
-Author: fanquake <fanquake@gmail.com>
-Date: Tue Aug 18 15:24:01 2020 +0800
-
- Drop dependency on lrelease
-
- Qts buildsystem insists on using the installed lrelease, but gets
- confused about how to find it. Since we manually control the build
- order, just drop the dependency.
-
- See #9469
-
-diff --git a/qttranslations/translations/translations.pro b/qttranslations/translations/translations.pro
-index 694544c..eff339d 100644
---- a/qttranslations/translations/translations.pro
-+++ b/qttranslations/translations/translations.pro
-@@ -107,3 +107,2 @@ updateqm.commands = $$LRELEASE ${QMAKE_FILE_IN} -qm ${QMAKE_FILE_OUT}
- silent:updateqm.commands = @echo lrelease ${QMAKE_FILE_IN} && $$updateqm.commands
--updateqm.depends = $$LRELEASE_EXE
- updateqm.name = LRELEASE ${QMAKE_FILE_IN}
diff --git a/depends/patches/qt/fix_android_pch.patch b/depends/patches/qt/fix_android_pch.patch
index bed6e4bb63..195e1c5e59 100644
--- a/depends/patches/qt/fix_android_pch.patch
+++ b/depends/patches/qt/fix_android_pch.patch
@@ -1,6 +1,6 @@
--- old/qtbase/mkspecs/common/android-base-head.conf
+++ new/qtbase/mkspecs/common/android-base-head.conf
-@@ -73,6 +73,6 @@ CROSS_COMPILE = $$NDK_TOOLCHAIN_PATH/bin/$$NDK_TOOLS_PREFIX-
+@@ -72,6 +72,6 @@ CROSS_COMPILE = $$NDK_TOOLCHAIN_PATH/bin/$$NDK_TOOLS_PREFIX-
QMAKE_PCH_OUTPUT_EXT = .gch
QMAKE_CFLAGS_PRECOMPILE = -x c-header -c ${QMAKE_PCH_INPUT} -o ${QMAKE_PCH_OUTPUT}
diff --git a/depends/patches/qt/fix_android_qmake_conf.patch b/depends/patches/qt/fix_android_qmake_conf.patch
deleted file mode 100644
index 3a8753fd1d..0000000000
--- a/depends/patches/qt/fix_android_qmake_conf.patch
+++ /dev/null
@@ -1,10 +0,0 @@
---- old/qtbase/mkspecs/android-clang/qmake.conf
-+++ new/qtbase/mkspecs/android-clang/qmake.conf
-@@ -47,7 +47,7 @@ ANDROID_STDCPP_PATH = $$ANDROID_SOURCES_CXX_STL_LIBDIR/libc++_shared.so
- ANDROID_USE_LLVM = true
-
- exists($$ANDROID_SOURCES_CXX_STL_LIBDIR/libc++.so): \
-- ANDROID_CXX_STL_LIBS = -lc++
-+ ANDROID_CXX_STL_LIBS = -lc++_shared
- else: \
- ANDROID_CXX_STL_LIBS = $$ANDROID_SOURCES_CXX_STL_LIBDIR/libc++.so.$$replace(ANDROID_PLATFORM, "android-", "")
diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf
index 0142667547..190ab7a160 100644
--- a/depends/patches/qt/mac-qmake.conf
+++ b/depends/patches/qt/mac-qmake.conf
@@ -8,7 +8,6 @@ include(../common/clang-mac.conf)
QMAKE_MAC_SDK_PATH=$${MAC_SDK_PATH}
QMAKE_XCODE_VERSION = $${XCODE_VERSION}
QMAKE_XCODE_DEVELOPER_PATH=/Developer
-QMAKE_MACOSX_DEPLOYMENT_TARGET = $${MAC_MIN_VERSION}
QMAKE_MAC_SDK=macosx
QMAKE_MAC_SDK.macosx.Path = $${MAC_SDK_PATH}
QMAKE_MAC_SDK.macosx.platform_name = macosx
diff --git a/depends/patches/qt/qt.pro b/depends/patches/qt/qt.pro
new file mode 100644
index 0000000000..8f2e900a84
--- /dev/null
+++ b/depends/patches/qt/qt.pro
@@ -0,0 +1,16 @@
+# Create the super cache so modules will add themselves to it.
+cache(, super)
+
+!QTDIR_build: cache(CONFIG, add, $$list(QTDIR_build))
+
+prl = no_install_prl
+CONFIG += $$prl
+cache(CONFIG, add stash, prl)
+
+TEMPLATE = subdirs
+SUBDIRS = qtbase qttools qttranslations
+
+qttools.depends = qtbase
+qttranslations.depends = qttools
+
+load(qt_configure)
diff --git a/depends/patches/qt/qttools_src.pro b/depends/patches/qt/qttools_src.pro
new file mode 100644
index 0000000000..6ef71a0942
--- /dev/null
+++ b/depends/patches/qt/qttools_src.pro
@@ -0,0 +1,6 @@
+TEMPLATE = subdirs
+SUBDIRS = linguist
+
+fb = force_bootstrap
+CONFIG += $$fb
+cache(CONFIG, add, fb)
diff --git a/depends/patches/qt/support_new_android_ndks.patch b/depends/patches/qt/support_new_android_ndks.patch
new file mode 100644
index 0000000000..85c8ae2132
--- /dev/null
+++ b/depends/patches/qt/support_new_android_ndks.patch
@@ -0,0 +1,122 @@
+Follow Google's BuildSystemMaintainers doc to support future NDK releases.
+
+Upstream commit:
+ - Qt 5.14: 9b14950ff600a4ce5a8698b67ab38907c50417f1
+
+--- old/qtbase/mkspecs/android-clang/qmake.conf
++++ new/qtbase/mkspecs/android-clang/qmake.conf
+@@ -14,43 +14,29 @@
+ QMAKE_CC = $$NDK_LLVM_PATH/bin/clang
+ QMAKE_CXX = $$NDK_LLVM_PATH/bin/clang++
+
++# Follow https://android.googlesource.com/platform/ndk/+/ndk-release-r20/docs/BuildSystemMaintainers.md
++
+ equals(ANDROID_TARGET_ARCH, armeabi-v7a): \
+- QMAKE_CFLAGS += -target armv7-none-linux-androideabi
+-else: equals(ANDROID_TARGET_ARCH, armeabi): \
+- QMAKE_CFLAGS += -target armv5te-none-linux-androideabi
++ QMAKE_CFLAGS = -target armv7a-linux-androideabi$$replace(ANDROID_PLATFORM, "android-", "")
+ else: equals(ANDROID_TARGET_ARCH, arm64-v8a): \
+- QMAKE_CFLAGS += -target aarch64-none-linux-android
++ QMAKE_CFLAGS = -target aarch64-linux-android$$replace(ANDROID_PLATFORM, "android-", "")
+ else: equals(ANDROID_TARGET_ARCH, x86): \
+- QMAKE_CFLAGS += -target i686-none-linux-android -mstackrealign
++ QMAKE_CFLAGS = -target i686-linux-android$$replace(ANDROID_PLATFORM, "android-", "") -mstackrealign
+ else: equals(ANDROID_TARGET_ARCH, x86_64): \
+- QMAKE_CFLAGS += -target x86_64-none-linux-android
+-else: equals(ANDROID_TARGET_ARCH, mips): \
+- QMAKE_CFLAGS += -target mipsel-none-linux-android
+-else: equals(ANDROID_TARGET_ARCH, mips64): \
+- QMAKE_CFLAGS += -target mips64el-none-linux-android
+-
+-QMAKE_CFLAGS += -gcc-toolchain $$NDK_TOOLCHAIN_PATH -fno-limit-debug-info
+-
+-QMAKE_LINK = $$QMAKE_CXX $$QMAKE_CFLAGS -Wl,--exclude-libs,libgcc.a -Wl,--exclude-libs,libatomic.a -nostdlib++
+-equals(ANDROID_TARGET_ARCH, armeabi-v7a): QMAKE_LINK += -Wl,--exclude-libs,libunwind.a
+-
+-QMAKE_CFLAGS += -DANDROID_HAS_WSTRING --sysroot=$$NDK_ROOT/sysroot \
+- -isystem $$NDK_ROOT/sysroot/usr/include/$$NDK_TOOLS_PREFIX \
+- -isystem $$NDK_ROOT/sources/cxx-stl/llvm-libc++/include \
+- -isystem $$NDK_ROOT/sources/android/support/include \
+- -isystem $$NDK_ROOT/sources/cxx-stl/llvm-libc++abi/include
++ QMAKE_CFLAGS = -target x86_64-linux-android$$replace(ANDROID_PLATFORM, "android-", "")
+
+-ANDROID_SOURCES_CXX_STL_LIBDIR = $$NDK_ROOT/sources/cxx-stl/llvm-libc++/libs/$$ANDROID_TARGET_ARCH
++QMAKE_CFLAGS += -fno-limit-debug-info
+
+-ANDROID_STDCPP_PATH = $$ANDROID_SOURCES_CXX_STL_LIBDIR/libc++_shared.so
++QMAKE_LINK = $$QMAKE_CXX $$QMAKE_CFLAGS
+
+-ANDROID_USE_LLVM = true
++ANDROID_STDCPP_PATH = $$NDK_LLVM_PATH/sysroot/usr/lib/$$NDK_TOOLS_PREFIX/libc++_shared.so
+
+-exists($$ANDROID_SOURCES_CXX_STL_LIBDIR/libc++.so): \
+- ANDROID_CXX_STL_LIBS = -lc++
+-else: \
+- ANDROID_CXX_STL_LIBS = $$ANDROID_SOURCES_CXX_STL_LIBDIR/libc++.so.$$replace(ANDROID_PLATFORM, "android-", "")
++ANDROID_USE_LLVM = true
+
+-QMAKE_CFLAGS_OPTIMIZE_SIZE = -Oz
++QMAKE_CFLAGS_OPTIMIZE_SIZE = -Oz
++QMAKE_LIBDIR_POST =
++QMAKE_LFLAGS =
++QMAKE_LIBS_PRIVATE =
++ANDROID_CXX_STL_LIBS =
+
+ include(../common/android-base-tail.conf)
+
+--- old/qtbase/mkspecs/common/android-base-head.conf
++++ new/qtbase/mkspecs/common/android-base-head.conf
+@@ -64,7 +58,6 @@
+ }
+
+ CONFIG += $$ANDROID_PLATFORM
+-QMAKE_CFLAGS = -D__ANDROID_API__=$$replace(ANDROID_PLATFORM, "android-", "")
+
+ ANDROID_PLATFORM_ROOT_PATH = $$NDK_ROOT/platforms/$$ANDROID_PLATFORM/arch-$$ANDROID_ARCHITECTURE/
+
+--- old/qtbase/mkspecs/common/android-base-tail.conf
++++ new/qtbase/mkspecs/common/android-base-tail.conf
+@@ -6,22 +6,17 @@
+ QMAKE_CFLAGS += -fstack-protector-strong -DANDROID
+
+ equals(ANDROID_TARGET_ARCH, armeabi-v7a): \
+- QMAKE_CFLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=vfp -fno-builtin-memmove
++ QMAKE_CFLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=vfp
+ else: equals(ANDROID_TARGET_ARCH, armeabi): \
+- QMAKE_CFLAGS += -march=armv5te -mtune=xscale -msoft-float -fno-builtin-memmove
+-# -fno-builtin-memmove is used to workaround https://code.google.com/p/android/issues/detail?id=81692
++ QMAKE_CFLAGS += -march=armv5te -mtune=xscale -msoft-float
+
+ QMAKE_CFLAGS_WARN_ON = -Wall -W
+ QMAKE_CFLAGS_WARN_OFF =
+ equals(ANDROID_TARGET_ARCH, armeabi-v7a) | equals(ANDROID_TARGET_ARCH, armeabi) {
+ CONFIG += optimize_size
+ QMAKE_CFLAGS_DEBUG = -g -marm -O0
+- equals(ANDROID_TARGET_ARCH, armeabi):if(equals(NDK_TOOLCHAIN_VERSION, 4.8)|equals(NDK_TOOLCHAIN_VERSION, 4.9)) {
+- DEFINES += QT_OS_ANDROID_GCC_48_WORKAROUND
+- } else {
+- QMAKE_CFLAGS_RELEASE += -mthumb
+- QMAKE_CFLAGS_RELEASE_WITH_DEBUGINFO += -mthumb
+- }
++ QMAKE_CFLAGS_RELEASE += -mthumb
++ QMAKE_CFLAGS_RELEASE_WITH_DEBUGINFO += -mthumb
+ }
+
+ QMAKE_CFLAGS_SHLIB = -fPIC
+@@ -61,15 +56,12 @@
+ QMAKE_RANLIB = $${CROSS_COMPILE}ranlib
+
+ QMAKE_INCDIR_POST =
+-QMAKE_LIBDIR_POST = $$ANDROID_SOURCES_CXX_STL_LIBDIR
+ QMAKE_INCDIR_X11 =
+ QMAKE_LIBDIR_X11 =
+ QMAKE_INCDIR_OPENGL =
+ QMAKE_LIBDIR_OPENGL =
+
+ QMAKE_LINK_SHLIB = $$QMAKE_LINK
+-QMAKE_LFLAGS = --sysroot=$$ANDROID_PLATFORM_ROOT_PATH
+-equals(ANDROID_TARGET_ARCH, x86_64) QMAKE_LFLAGS += -L$$ANDROID_PLATFORM_ROOT_PATH/usr/lib64
+ QMAKE_LFLAGS_APP = -Wl,--no-undefined -Wl,-z,noexecstack -shared
+ QMAKE_LFLAGS_SHLIB = -Wl,--no-undefined -Wl,-z,noexecstack -shared
+ QMAKE_LFLAGS_PLUGIN = $$QMAKE_LFLAGS_SHLIB
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 21bf587eaf..d8fd46d1c7 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -863,9 +863,7 @@ RECURSIVE = YES
EXCLUDE = src/crc32c \
src/leveldb \
- src/json \
- src/test \
- src/qt/test
+ src/json
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
diff --git a/doc/README.md b/doc/README.md
index 38f6b1d327..aabfe220bc 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -46,7 +46,6 @@ The following are developer notes on how to build Bitcoin Core on your native pl
- [OpenBSD Build Notes](build-openbsd.md)
- [NetBSD Build Notes](build-netbsd.md)
- [Android Build Notes](build-android.md)
-- [Gitian Building Guide (External Link)](https://github.com/bitcoin-core/docs/blob/master/gitian-building.md)
Development
---------------------
@@ -77,6 +76,7 @@ The Bitcoin repo's [root README](/README.md) contains relevant information on th
- [Fuzz-testing](fuzzing.md)
- [I2P Support](i2p.md)
- [Init Scripts (systemd/upstart/openrc)](init.md)
+- [Managing Wallets](managing-wallets.md)
- [PSBT support](psbt.md)
- [Reduce Memory](reduce-memory.md)
- [Reduce Traffic](reduce-traffic.md)
diff --git a/doc/benchmarking.md b/doc/benchmarking.md
index b6cd86eafe..84d5f2c444 100644
--- a/doc/benchmarking.md
+++ b/doc/benchmarking.md
@@ -8,8 +8,10 @@ thread queue, wallet balance.
Running
---------------------
-For benchmarks purposes you only need to compile `bitcoin_bench`. Beware of configuring without `--enable-debug` as this would impact
-benchmarking by unlatching log printers and lock analysis.
+For benchmarking, you only need to compile `bitcoin_bench`. The bench runner
+warns if you configure with `--enable-debug`, but consider if building without
+it will impact the benchmark(s) you are interested in by unlatching log printers
+and lock analysis.
make -C src bitcoin_bench
@@ -19,19 +21,28 @@ After compiling bitcoin-core, the benchmarks can be run with:
The output will look similar to:
```
-| ns/byte | byte/s | error % | benchmark
-|--------------------:|--------------------:|--------:|:----------------------------------------------
-| 64.13 | 15,592,356.01 | 0.1% | `Base58CheckEncode`
-| 24.56 | 40,722,672.68 | 0.2% | `Base58Decode`
+| ns/op | op/s | err% | total | benchmark
+|--------------------:|--------------------:|--------:|----------:|:----------
+| 57,927,463.00 | 17.26 | 3.6% | 0.66 | `AddrManAdd`
+| 677,816.00 | 1,475.33 | 4.9% | 0.01 | `AddrManGetAddr`
+
+...
+
+| ns/byte | byte/s | err% | total | benchmark
+|--------------------:|--------------------:|--------:|----------:|:----------
+| 127.32 | 7,854,302.69 | 0.3% | 0.00 | `Base58CheckEncode`
+| 31.95 | 31,303,226.99 | 0.2% | 0.00 | `Base58Decode`
+
...
```
Help
---------------------
- src/bench/bench_bitcoin --help
+ src/bench/bench_bitcoin -?
-To print options like scaling factor or per-benchmark filter.
+To print the various options, like listing the benchmarks without running them
+or using a regex filter to only run certain benchmarks.
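+
+For example, assuming your build's bench runner supports the `-list` and
+`-filter=<regex>` options, listing the benchmarks and then running only the
+base58 ones might look like:
+
+ src/bench/bench_bitcoin -list
+ src/bench/bench_bitcoin -filter=Base58.*
+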
Notes
---------------------
diff --git a/doc/bitcoin-conf.md b/doc/bitcoin-conf.md
index 9a312bc33c..8c9035c45b 100644
--- a/doc/bitcoin-conf.md
+++ b/doc/bitcoin-conf.md
@@ -4,6 +4,8 @@ The configuration file is used by `bitcoind`, `bitcoin-qt` and `bitcoin-cli`.
All command-line options (except for `-?`, `-help`, `-version` and `-conf`) may be specified in a configuration file, and all configuration file options (except for `includeconf`) may also be specified on the command line. Command-line options override values set in the configuration file and configuration file options override values set in the GUI.
+Changes to the configuration file while `bitcoind` or `bitcoin-qt` is running only take effect after restarting.
+
## Configuration File Format
The configuration file is a plain text file and consists of `option=value` entries, one per line. Leading and trailing whitespaces are removed.
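+
+As a purely illustrative sketch (the specific options shown are examples, not
+recommendations), a small configuration file could look like this:
+
+```
+# Accept JSON-RPC commands so that bitcoin-cli can talk to the node.
+server=1
+# Enable extra logging for a single debug category.
+debug=net
+```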
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index 613aea438f..6e54f67edc 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -1,6 +1,6 @@
OpenBSD build guide
======================
-(updated for OpenBSD 6.7)
+(updated for OpenBSD 6.9)
This guide describes how to build bitcoind, bitcoin-qt, and command-line utilities on OpenBSD.
@@ -67,9 +67,16 @@ export AUTOMAKE_VERSION=1.16
```
Make sure `BDB_PREFIX` is set to the appropriate path from the above steps.
+Note that building with external signer support currently fails on OpenBSD,
+so you must explicitly disable it by passing `--disable-external-signer` to the
+configure script.
+(Background: the feature requires the header-only library boost::process, which
+is available on OpenBSD 6.9 via Boost 1.72.0, but it uses certain system calls
+and preprocessor defines, like `waitid()` and `WEXITED`, that are not available
+on OpenBSD.)
+
To configure with wallet:
```bash
-./configure --with-gui=no CC=cc CXX=c++ \
+./configure --with-gui=no --disable-external-signer CC=cc CXX=c++ \
BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
BDB_CFLAGS="-I${BDB_PREFIX}/include" \
MAKE=gmake
@@ -77,12 +84,12 @@ To configure with wallet:
To configure without wallet:
```bash
-./configure --disable-wallet --with-gui=no CC=cc CC_FOR_BUILD=cc CXX=c++ MAKE=gmake
+./configure --disable-wallet --with-gui=no --disable-external-signer CC=cc CC_FOR_BUILD=cc CXX=c++ MAKE=gmake
```
To configure with GUI:
```bash
-./configure --with-gui=yes CC=cc CXX=c++ \
+./configure --with-gui=yes --disable-external-signer CC=cc CXX=c++ \
BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
BDB_CFLAGS="-I${BDB_PREFIX}/include" \
MAKE=gmake
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 73c0bf8779..44b6ad5968 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -48,6 +48,7 @@ Optional dependencies:
univalue | Utility | JSON parsing and encoding (bundled version will be used unless --with-system-univalue passed to configure)
libzmq3 | ZMQ notification | Optional, allows generating ZMQ notifications (requires ZMQ version >= 4.0.0)
sqlite3 | SQLite DB | Optional, wallet storage (only needed when wallet enabled)
+ systemtap | Tracing (USDT) | Optional, statically defined tracepoints
For the versions used, see [dependencies.md](dependencies.md)
@@ -107,6 +108,10 @@ ZMQ dependencies (provides ZMQ API):
sudo apt-get install libzmq3-dev
+User-Space, Statically Defined Tracing (USDT) dependencies:
+
+ sudo apt install systemtap-sdt-dev
+
GUI dependencies:
If you want to build bitcoin-qt, make sure that the required packages for Qt development
@@ -117,6 +122,10 @@ To build with Qt 5 you need the following:
sudo apt-get install libqt5gui5 libqt5core5a libqt5dbus5 qttools5-dev qttools5-dev-tools
+Additionally, to support Wayland protocol for modern desktop environments:
+
+ sudo apt install qtwayland5
+
libqrencode (optional) can be installed with:
sudo apt-get install libqrencode-dev
@@ -162,6 +171,10 @@ ZMQ dependencies (provides ZMQ API):
sudo dnf install zeromq-devel
+User-Space, Statically Defined Tracing (USDT) dependencies:
+
+ sudo dnf install systemtap
+
GUI dependencies:
If you want to build bitcoin-qt, make sure that the required packages for Qt development
@@ -172,6 +185,10 @@ To build with Qt 5 you need the following:
sudo dnf install qt5-qttools-devel qt5-qtbase-devel
+Additionally, to support Wayland protocol for modern desktop environments:
+
+ sudo dnf install qt5-qtwayland
+
libqrencode (optional) can be installed with:
sudo dnf install qrencode-devel
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 66c5a76b3b..b7634718e8 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -24,6 +24,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| Qt | [5.12.11](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | |
| SQLite | [3.32.1](https://sqlite.org/download.html) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | | | |
| XCB | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) |
+| systemtap ([tracing](tracing.md)) | | | | | |
| xkbcommon | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) |
| ZeroMQ | [4.3.1](https://github.com/zeromq/libzmq/releases) | 4.0.0 | No | | |
| zlib | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) |
@@ -41,6 +42,7 @@ Some dependencies are not needed in all configurations. The following are some f
* SQLite is not needed with `--disable-wallet` or `--without-sqlite`.
* Qt is not needed with `--without-gui`.
* If the qrencode dependency is absent, QR support won't be added. To force an error when that happens, pass `--with-qrencode`.
+* If the systemtap dependency is absent, USDT support won't be compiled in.
* ZeroMQ is needed only with the `--with-zmq` option.
#### Other
diff --git a/doc/descriptors.md b/doc/descriptors.md
index e27ff87546..3bbb626a42 100644
--- a/doc/descriptors.md
+++ b/doc/descriptors.md
@@ -99,7 +99,7 @@ Descriptors consist of several types of expressions. The top level expression is
`ADDR` expressions are any type of supported address:
- P2PKH addresses (base58, of the form `1...` for mainnet or `[nm]...` for testnet). Note that P2PKH addresses in descriptors cannot be used for P2PK outputs (use the `pk` function instead).
- P2SH addresses (base58, of the form `3...` for mainnet or `2...` for testnet, defined in [BIP 13](https://github.com/bitcoin/bips/blob/master/bip-0013.mediawiki)).
-- Segwit addresses (bech32, of the form `bc1...` for mainnet or `tb1...` for testnet, defined in [BIP 173](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki)).
+- Segwit addresses (bech32 and bech32m, of the form `bc1...` for mainnet or `tb1...` for testnet, defined in [BIP 173](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) and [BIP 350](https://github.com/bitcoin/bips/blob/master/bip-0350.mediawiki)).
## Explanation
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 583c50a763..3e13adeec0 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -89,6 +89,10 @@ code.
- Class member variables have a `m_` prefix.
- Global variables have a `g_` prefix.
- Constant names are all uppercase, and use `_` to separate words.
+ - Enumerator constants may be `snake_case`, `PascalCase` or `ALL_CAPS`.
+ This is a more tolerant policy than the [C++ Core
+ Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#Renum-caps),
+ which recommend using `snake_case`. Please use what seems appropriate.
- Class names, function names, and method names are UpperCamelCase
(PascalCase). Do not prefix class names with `C`.
- Test suite naming convention: The Boost test suite in file
@@ -669,19 +673,19 @@ Foo(vec);
```cpp
enum class Tabs {
- INFO,
- CONSOLE,
- GRAPH,
- PEERS
+ info,
+ console,
+ network_graph,
+ peers
};
int GetInt(Tabs tab)
{
switch (tab) {
- case Tabs::INFO: return 0;
- case Tabs::CONSOLE: return 1;
- case Tabs::GRAPH: return 2;
- case Tabs::PEERS: return 3;
+ case Tabs::info: return 0;
+ case Tabs::console: return 1;
+ case Tabs::network_graph: return 2;
+ case Tabs::peers: return 3;
} // no default case, so the compiler can warn about missing cases
assert(false);
}
diff --git a/doc/files.md b/doc/files.md
index e670d77ae5..f88d3f91a1 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -56,7 +56,6 @@ Subdirectory | File(s) | Description
`indexes/coinstats/db/` | LevelDB database | Coinstats index; *optional*, used if `-coinstatsindex=1`
`wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, wallets reside in the [data directory](#data-directory-location)
`./` | `anchors.dat` | Anchor IP address database, created on shutdown and deleted at startup. Anchors are last known outgoing block-relay-only peers that are tried to re-connect to on startup
-`./` | `banlist.dat` | Stores the addresses/subnets of banned nodes (deprecated). `bitcoind` or `bitcoin-qt` no longer save the banlist to this file, but read it on startup if `banlist.json` is not present.
`./` | `banlist.json` | Stores the addresses/subnets of banned nodes.
`./` | `bitcoin.conf` | User-defined [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`. File is not written to by the software and must be created manually. Path can be specified by `-conf` option
`./` | `bitcoind.pid` | Stores the process ID (PID) of `bitcoind` or `bitcoin-qt` while running; created at start and deleted on shutdown; can be specified by `-pid` option
@@ -114,6 +113,7 @@ These subdirectories and files are no longer used by Bitcoin Core:
Path | Description | Repository notes
---------------|-------------|-----------------
+`banlist.dat` | Stores the addresses/subnets of banned nodes; superseded by `banlist.json` in 22.0 and completely ignored in 23.0 | [PR #20966](https://github.com/bitcoin/bitcoin/pull/20966), [PR #22570](https://github.com/bitcoin/bitcoin/pull/22570)
`blktree/` | Blockchain index; replaced by `blocks/index/` in [0.8.0](https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-0.8.0.md#improvements) | [PR #2231](https://github.com/bitcoin/bitcoin/pull/2231), [`8fdc94cc`](https://github.com/bitcoin/bitcoin/commit/8fdc94cc8f0341e96b1edb3a5b56811c0b20bd15)
`coins/` | Unspent transaction output database; replaced by `chainstate/` in 0.8.0 | [PR #2231](https://github.com/bitcoin/bitcoin/pull/2231), [`8fdc94cc`](https://github.com/bitcoin/bitcoin/commit/8fdc94cc8f0341e96b1edb3a5b56811c0b20bd15)
`blkindex.dat` | Blockchain index BDB database; replaced by {`chainstate/`, `blocks/index/`, `blocks/revNNNNN.dat`<sup>[\[2\]](#note2)</sup>} in 0.8.0 | [PR #1677](https://github.com/bitcoin/bitcoin/pull/1677)
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index 6fc9077e4c..ee9c65d4d4 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -16,6 +16,9 @@ $ FUZZ=process_message src/test/fuzz/fuzz
# abort fuzzing using ctrl-c
```
+There is also a runner script to execute all fuzz targets. Refer to
+`./test/fuzz/test_runner.py --help` for more details.
+
## Fuzzing harnesses and output
[`process_message`](https://github.com/bitcoin/bitcoin/blob/master/src/test/fuzz/process_message.cpp) is a fuzzing harness for the [`ProcessMessage(...)` function (`net_processing`)](https://github.com/bitcoin/bitcoin/blob/master/src/net_processing.cpp). The available fuzzing harnesses are found in [`src/test/fuzz/`](https://github.com/bitcoin/bitcoin/tree/master/src/test/fuzz).
@@ -83,6 +86,10 @@ INFO: seed corpus: files: 991 min: 1b max: 1858b total: 288291b rss: 150Mb
```
+## Run without sanitizers for increased throughput
+
+Fuzzing on a harness compiled with `--with-sanitizers=address,fuzzer,undefined` is good for finding bugs. However, the very slow execution even under libFuzzer will limit the ability to find new coverage. A good approach is to perform occasional long runs without the additional bug-detectors (configure `--with-sanitizers=fuzzer`) and then merge new inputs into a corpus as described in the qa-assets repo (https://github.com/bitcoin-core/qa-assets/blob/main/.github/PULL_REQUEST_TEMPLATE.md). Patience is useful; even with improved throughput, libFuzzer may need days and 10s of millions of executions to reach deep/hard targets.
+
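+A rough sketch of such a throughput-oriented run, assuming the clang-based fuzz
+build described above with only the `fuzzer` sanitizer enabled and a local
+corpus directory of your choosing:
+
+```sh
+$ CC=clang CXX=clang++ ./configure --enable-fuzz --with-sanitizers=fuzzer
+$ make
+$ FUZZ=process_message src/test/fuzz/fuzz path/to/local/corpus
+```
+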
## Reproduce a fuzzer crash reported by the CI
- `cd` into the `qa-assets` directory and update it with `git pull qa-assets`
diff --git a/doc/gitian-building.md b/doc/gitian-building.md
deleted file mode 100644
index 3a48f4a0b3..0000000000
--- a/doc/gitian-building.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Gitian building
-================
-
-This file was moved to [the Bitcoin Core documentation repository](https://github.com/bitcoin-core/docs/blob/master/gitian-building.md) at [https://github.com/bitcoin-core/docs](https://github.com/bitcoin-core/docs).
diff --git a/doc/i2p.md b/doc/i2p.md
index 8b4607208a..5f631c11ca 100644
--- a/doc/i2p.md
+++ b/doc/i2p.md
@@ -10,11 +10,22 @@ started with I2P terminology.
## Run Bitcoin Core with an I2P router (proxy)
A running I2P router (proxy) with [SAM](https://geti2p.net/en/docs/api/samv3)
-enabled is required (there is an [official one](https://geti2p.net) and
-[a few alternatives](https://en.wikipedia.org/wiki/I2P#Routers)). Notice the IP
-address and port the SAM proxy is listening to; usually, it is
-`127.0.0.1:7656`. Once it is up and running with SAM enabled, use the following
-Bitcoin Core options:
+enabled is required. Options include:
+
+- [i2prouter (I2P Router)](https://geti2p.net), the official implementation in
+ Java
+- [i2pd (I2P Daemon)](https://github.com/PurpleI2P/i2pd)
+ ([documentation](https://i2pd.readthedocs.io/en/latest)), a lighter
+ alternative in C++ (successfully tested with version 2.23 and up; version 2.36
+ or later recommended)
+- [i2p-zero](https://github.com/i2p-zero/i2p-zero)
+- [other alternatives](https://en.wikipedia.org/wiki/I2P#Routers)
+
+Note the IP address and port the SAM proxy is listening on; usually, it is
+`127.0.0.1:7656`.
+
+Once an I2P router with SAM enabled is up and running, use the following Bitcoin
+Core configuration options:
```
-i2psam=<ip:port>
@@ -42,18 +53,41 @@ named `i2p_private_key` in the Bitcoin Core data directory.
## Additional configuration options related to I2P
-You may set the `debug=i2p` config logging option to have additional
-information in the debug log about your I2P configuration and connections. Run
-`bitcoin-cli help logging` for more information.
+```
+-debug=i2p
+```
+
+Set the `debug=i2p` config logging option to see additional information in the
+debug log about your I2P configuration and connections. Run `bitcoin-cli help
+logging` for more information.
+
+```
+-onlynet=i2p
+```
+
+Make outgoing connections only to I2P addresses. Incoming connections are not
+affected by this option. It can be specified multiple times to allow multiple
+network types, e.g. onlynet=ipv4, onlynet=ipv6, onlynet=onion, onlynet=i2p.
-It is possible to restrict outgoing connections in the usual way with
-`onlynet=i2p`. I2P support was added to Bitcoin Core in version 22.0 (mid 2021)
-and there may be fewer I2P peers than Tor or IP ones. Therefore, using
-`onlynet=i2p` alone (without other `onlynet=`) may make a node more susceptible
-to [Sybil attacks](https://en.bitcoin.it/wiki/Weaknesses#Sybil_attack). Use
+Warning: if you use -onlynet with values other than onion, and the -onion or
+-proxy option is set, then outgoing onion connections will still be made; use
+-noonion or -onion=0 to disable outbound onion connections in this case.
+
+I2P support was added to Bitcoin Core in version 22.0 and there may be fewer I2P
+peers than Tor or IP ones. Therefore, using I2P alone without other networks may
+make a node more susceptible to [Sybil
+attacks](https://en.bitcoin.it/wiki/Weaknesses#Sybil_attack). You can use
`bitcoin-cli -addrinfo` to see the number of I2P addresses known to your node.
-## I2P related information in Bitcoin Core
+Another consideration with `onlynet=i2p` is that the initial blocks download
+phase when syncing up a new node can be very slow. This phase can be sped up by
+using other networks, for instance `onlynet=onion`, at the same time.
+
+In general, a node can be run with both onion and I2P hidden services (or
+any/all of IPv4/IPv6/onion/I2P), which can provide a potential fallback if one
+of the networks has issues.
+
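+As an illustrative example (not a recommendation), a node that makes outgoing
+connections over both I2P and Tor could combine options such as:
+
+```
+-i2psam=127.0.0.1:7656
+-onion=127.0.0.1:9050
+-onlynet=i2p
+-onlynet=onion
+```
+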
+## I2P-related information in Bitcoin Core
There are several ways to see your I2P address in Bitcoin Core:
- in the debug log (grep for `AddLocal`, the I2P address ends in `.b32.i2p`)
@@ -70,3 +104,18 @@ RPC.
Bitcoin Core uses the [SAM v3.1](https://geti2p.net/en/docs/api/samv3) protocol
to connect to the I2P network. Any I2P router that supports it can be used.
+
+## Ports in I2P and Bitcoin Core
+
+Bitcoin Core uses the [SAM v3.1](https://geti2p.net/en/docs/api/samv3)
+protocol. One particularity of SAM v3.1 is that it does not support ports,
+unlike newer versions of SAM (v3.2 and up) that do support them and default the
+port numbers to 0. From the point of view of peers that use newer versions of
+SAM or other protocols that support ports, a SAM v3.1 peer is connecting to them
+on port 0, from source port 0.
+
+To allow future upgrades to newer versions of SAM, Bitcoin Core sets its
+listening port to 0 when listening for incoming I2P connections and advertises
+its own I2P address with port 0. Furthermore, it will not attempt to connect to
+I2P addresses with a non-zero port number because with SAM v3.1 the destination
+port (`TO_PORT`) is always set to 0 and is not in the control of Bitcoin Core.
diff --git a/doc/managing-wallets.md b/doc/managing-wallets.md
new file mode 100644
index 0000000000..aab6d131bd
--- /dev/null
+++ b/doc/managing-wallets.md
@@ -0,0 +1,125 @@
+# Managing the Wallet
+
+## 1. Backing Up and Restoring The Wallet
+
+### 1.1 Creating the Wallet
+
+Since version 0.21, Bitcoin Core no longer has a default wallet.
+Wallets can be created with the `createwallet` RPC or with the `Create wallet` GUI menu item.
+
+In the GUI, the `Create a new wallet` button is displayed on the main screen when there is no wallet loaded. Alternatively, there is the option `File` -> `Create wallet`.
+
+The following command, for example, creates a descriptor wallet. More information about this command may be found by running `bitcoin-cli help createwallet`.
+
+```
+$ bitcoin-cli -named createwallet wallet_name="wallet-01" descriptors=true
+```
+
+The `descriptors` parameter can be omitted if the intention is to create a legacy wallet. For now, the default type is the legacy wallet, but that is expected to change in a future release.
+
+By default, wallets are created in the `wallets` folder of the data directory, which varies by operating system, as shown below. The user can change the default by using the `-datadir` or `-walletdir` initialization parameters.
+
+| Operating System | Default wallet directory |
+| -----------------|:------------------------------------------------------------|
+| Linux | `/home/<user>/.bitcoin/wallets` |
+| Windows | `C:\Users\<user>\AppData\Roaming\Bitcoin\wallets` |
+| macOS | `/Users/<user>/Library/Application Support/Bitcoin/wallets` |
+
+### 1.2 Encrypting the Wallet
+
+The `wallet.dat` file is not encrypted by default and is, therefore, vulnerable if an attacker gains access to the device where the wallet or the backups are stored.
+
+Wallet encryption may prevent unauthorized access. However, this significantly increases the risk of losing coins due to forgotten passphrases. There is no way to recover a passphrase. This tradeoff should be well thought out by the user.
+
+Wallet encryption may also not protect against more sophisticated attacks. An attacker can, for example, obtain the password by installing a keylogger on the user's machine.
+
+After encrypting the wallet or changing the passphrase, a new backup needs to be created immediately. The reason is that the keypool is flushed and a new HD seed is generated after encryption. Any bitcoins received by the new seed cannot be recovered from the previous backups.
+
+The wallet's private key may be encrypted with the following command:
+
+```
+$ bitcoin-cli -rpcwallet="wallet-01" encryptwallet "passphrase"
+```
+
+Once encrypted, the passphrase can be changed with the `walletpassphrasechange` command.
+
+```
+$ bitcoin-cli -rpcwallet="wallet-01" walletpassphrasechange "oldpassphrase" "newpassphrase"
+```
+
+The argument passed to `-rpcwallet` is the name of the wallet to be encrypted.
+
+Only the wallet's private key is encrypted. All other wallet information, such as transactions, is still visible.
+
+The wallet's private key can also be encrypted in the `createwallet` command via the `passphrase` argument:
+
+```
+$ bitcoin-cli -named createwallet wallet_name="wallet-01" descriptors=true passphrase="passphrase"
+```
+
+Note that if the passphrase is lost, all the coins in the wallet will also be lost forever.
+
+### 1.3 Unlocking the Wallet
+
+If the wallet is encrypted and the user tries any operation related to private keys, such as sending bitcoins, an error message will be displayed.
+
+```
+$ bitcoin-cli -rpcwallet="wallet-01" sendtoaddress "tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx" 0.01
+error code: -13
+error message:
+Error: Please enter the wallet passphrase with walletpassphrase first.
+```
+
+To unlock the wallet and allow it to run these operations, the `walletpassphrase` RPC is required.
+
+This command takes the passphrase and an argument called `timeout`, which specifies the time in seconds that the wallet decryption key is stored in memory. After this period expires, the user needs to execute this RPC again.
+
+```
+$ bitcoin-cli -rpcwallet="wallet-01" walletpassphrase "passphrase" 120
+```
+
+In the GUI, there is no specific menu item to unlock the wallet. When the user sends bitcoins, the passphrase will be prompted automatically.
+
+### 1.4 Backing Up the Wallet
+
+To back up the wallet, the `backupwallet` RPC or the `Backup Wallet` GUI menu item must be used to ensure the file is in a safe state when the copy is made.
+
+In the RPC, the destination parameter must include the name of the file. Otherwise, the command will return an error message like "Error: Wallet backup failed!" for descriptor wallets. If it is a legacy wallet, it will be copied and a file will be created with the default file name `wallet.dat`.
+
+```
+$ bitcoin-cli -rpcwallet="wallet-01" backupwallet /home/node01/Backups/backup-01.dat
+```
+
+In the GUI, the wallet is selected in the `Wallet` drop-down list in the upper right corner. If this list is not present, the wallet can be loaded in `File` -> `Open wallet` if necessary. Then, the backup can be done in `File` -> `Backup Wallet...`.
+
+This backup file can be stored on one or multiple offline devices, which must be reliable enough to work in an emergency and be malware free. Backup files can be regularly tested to avoid problems in the future.
+
+If the computer has malware, it can compromise the wallet when recovering the backup file. One way to minimize this is to not connect the backup to an online device.
+
+If both the wallet and all backups are lost for any reason, the bitcoins related to this wallet will become permanently inaccessible.
+
+### 1.5 Backup Frequency
+
+The original Bitcoin Core wallet was a collection of unrelated private keys. If a non-HD wallet had received funds to an address and then was restored from a backup made before the address was generated, then any funds sent to that address would have been lost because there was no deterministic mechanism to derive the address again.
+
+Bitcoin Core [version 0.13](https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-0.13.0.md) introduced HD wallets with deterministic key derivation. With HD wallets, users no longer lose funds when restoring old backups because all addresses are derived from the HD wallet seed.
+
+This means that a single backup is enough to recover the coins at any time. It is still recommended to make regular backups (once a week) or after a significant number of new transactions to maintain the metadata, such as labels. Metadata cannot be retrieved from a blockchain rescan, so if the backup is too old, the metadata will be lost forever.
+
+Wallets created before version 0.13 are not HD and must be backed up every 100 keys used since the previous backup, or even more often to maintain the metadata.
+
+### 1.6 Restoring the Wallet From a Backup
+
+To restore a wallet, the `restorewallet` RPC must be used.
+
+```
+$ bitcoin-cli restorewallet "restored-wallet" /home/node01/Backups/backup-01.dat
+```
+
+After that, `getwalletinfo` can be used to check if the wallet has been fully restored.
+
+```
+$ bitcoin-cli -rpcwallet="restored-wallet" getwalletinfo
+```
+
+The restored wallet can also be loaded in the GUI via `File` -> `Open wallet`. \ No newline at end of file
diff --git a/doc/release-notes.md b/doc/release-notes.md
index dc28ccb9ed..6815408ca7 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,5 +1,3 @@
-# Release notes now being edited on https://github.com/bitcoin-core/bitcoin-devwiki/wiki/22.0-Release-Notes-draft
-
*After branching off for a major version release of Bitcoin Core, use this
template to create the initial release notes draft.*
@@ -8,7 +6,7 @@ template to create the initial release notes draft.*
for the process.*
*Create the draft, named* "*version* Release Notes Draft"
-*(e.g. "0.20.0 Release Notes Draft"), as a collaborative wiki in:*
+*(e.g. "22.0 Release Notes Draft"), as a collaborative wiki in:*
https://github.com/bitcoin-core/bitcoin-devwiki/wiki/
@@ -53,141 +51,49 @@ Core should also work on most other Unix-like systems but is not as
frequently tested on them. It is not recommended to use Bitcoin Core on
unsupported systems.
-From Bitcoin Core 22.0 onwards, macOS versions earlier than 10.14 are no longer supported.
-
Notable changes
===============
P2P and network changes
-----------------------
-- This release removes support for Tor version 2 hidden services in favor of Tor
- v3 only, as the Tor network [dropped support for Tor
- v2](https://blog.torproject.org/v2-deprecation-timeline) with the release of
- Tor version 0.4.6. Henceforth, Bitcoin Core ignores Tor v2 addresses; it
- neither rumors them over the network to other peers, nor stores them in memory
- or to `peers.dat`. (#22050)
-
-- Added NAT-PMP port mapping support via
- [`libnatpmp`](https://miniupnp.tuxfamily.org/libnatpmp.html). (#18077)
+- A bitcoind node will no longer rumour addresses to inbound peers by default.
+ They will become eligible for address gossip after sending an ADDR, ADDRV2,
+ or GETADDR message. (#21528)
Updated RPCs
------------
-- Due to [BIP 350](https://github.com/bitcoin/bips/blob/master/bip-0350.mediawiki)
- being implemented, behavior for all RPCs that accept addresses is changed when
- a native witness version 1 (or higher) is passed. These now require a Bech32m
- encoding instead of a Bech32 one, and Bech32m encoding will be used for such
- addresses in RPC output as well. No version 1 addresses should be created
- for mainnet until consensus rules are adopted that give them meaning
- (e.g. through [BIP 341](https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki)).
- Once that happens, Bech32m is expected to be used for them, so this shouldn't
- affect any production systems, but may be observed on other networks where such
- addresses already have meaning (like signet). (#20861)
-
-- The `getpeerinfo` RPC returns two new boolean fields, `bip152_hb_to` and
- `bip152_hb_from`, that respectively indicate whether we selected a peer to be
- in compact blocks high-bandwidth mode or whether a peer selected us as a
- compact blocks high-bandwidth peer. High-bandwidth peers send new block
- announcements via a `cmpctblock` message rather than the usual inv/headers
- announcements. See BIP 152 for more details. (#19776)
-
-- `getpeerinfo` no longer returns the following fields: `addnode`, `banscore`,
- and `whitelisted`, which were previously deprecated in 0.21. Instead of
- `addnode`, the `connection_type` field returns manual. Instead of
- `whitelisted`, the `permissions` field indicates if the peer has special
- privileges. The `banscore` field has simply been removed. (#20755)
-
-- The following RPCs: `gettxout`, `getrawtransaction`, `decoderawtransaction`,
- `decodescript`, `gettransaction`, and REST endpoints: `/rest/tx`,
- `/rest/getutxos`, `/rest/block` deprecated the following fields (which are no
- longer returned in the responses by default): `addresses`, `reqSigs`.
- The `-deprecatedrpc=addresses` flag must be passed for these fields to be
- included in the RPC response. This flag/option will be available only for this major release, after which
- the deprecation will be removed entirely. Note that these fields are attributes of
- the `scriptPubKey` object returned in the RPC response. However, in the response
- of `decodescript` these fields are top-level attributes, and included again as attributes
- of the `scriptPubKey` object. (#20286)
-
-- When creating a hex-encoded bitcoin transaction using the `bitcoin-tx` utility
- with the `-json` option set, the following fields: `addresses`, `reqSigs` are no longer
- returned in the tx output of the response. (#20286)
-
-- The `listbanned` RPC now returns two new numeric fields: `ban_duration` and `time_remaining`.
- Respectively, these new fields indicate the duration of a ban and the time remaining until a ban expires,
- both in seconds. Additionally, the `ban_created` field is repositioned to come before `banned_until`. (#21602)
-
-- The `getnodeaddresses` RPC now returns a "network" field indicating the
- network type (ipv4, ipv6, onion, or i2p) for each address. (#21594)
-
-- `getnodeaddresses` now also accepts a "network" argument (ipv4, ipv6, onion,
- or i2p) to return only addresses of the specified network. (#21843)
-
-- The `testmempoolaccept` RPC now accepts multiple transactions (still experimental at the moment,
- API may be unstable). This is intended for testing transaction packages with dependency
- relationships; it is not recommended for batch-validating independent transactions. In addition to
- mempool policy, package policies apply: the list cannot contain more than 25 transactions or have a
- total size exceeding 101K virtual bytes, and cannot conflict with (spend the same inputs as) each other or
- the mempool, even if it would be a valid BIP125 replace-by-fee. There are some known limitations to
- the accuracy of the test accept: it's possible for `testmempoolaccept` to return "allowed"=True for a
- group of transactions, but "too-long-mempool-chain" if they are actually submitted. (#20833)
-
-- `addmultisigaddress` and `createmultisig` now support up to 20 keys for
- Segwit addresses. (#20867)
-
-Changes to Wallet or GUI related RPCs can be found in the GUI or Wallet section below.
-
New RPCs
--------
Build System
------------
+Files
+-----
+
+* On startup, the list of banned hosts and networks (via `setban` RPC) in
+ `banlist.dat` is ignored and only `banlist.json` is considered. Bitcoin Core
+ version 22.x is the only version that can read `banlist.dat` and also write
+ it to `banlist.json`. If `banlist.json` already exists, version 22.x will not
+ try to translate the `banlist.dat` into json. After an upgrade, `listbanned`
+ can be used to double check the parsed entries. (#22570)
+
New settings
------------
-- The `-natpmp` option has been added to use NAT-PMP to map the listening port.
- If both UPnP and NAT-PMP are enabled, a successful allocation from UPnP
- prevails over one from NAT-PMP. (#18077)
-
Updated settings
----------------
-Changes to Wallet or GUI related settings can be found in the GUI or Wallet section below.
-
-- Passing an invalid `-rpcauth` argument now cause bitcoind to fail to start. (#20461)
-
Tools and Utilities
-------------------
-- A new CLI `-addrinfo` command returns the number of addresses known to the
- node per network type (including Tor v2 versus v3) and total. This can be
- useful to see if the node knows enough addresses in a network to use options
- like `-onlynet=<network>` or to upgrade to this release of Bitcoin Core 22.0
- that supports Tor v3 only. (#21595)
-
-- A new `-rpcwaittimeout` argument to `bitcoin-cli` sets the timeout
- in seconds to use with `-rpcwait`. If the timeout expires,
- `bitcoin-cli` will report a failure. (#21056)
+- Update `-getinfo` to return data in a user-friendly format that also reduces vertical space. (#21832)
Wallet
------
-- A new `listdescriptors` RPC is available to inspect the contents of descriptor-enabled wallets.
- The RPC returns public versions of all imported descriptors, including their timestamp and flags.
- For ranged descriptors, it also returns the range boundaries and the next index to generate addresses from. (#20226)
-
-- The `bumpfee` RPC is not available with wallets that have private keys
- disabled. `psbtbumpfee` can be used instead. (#20891)
-
-- The `fundrawtransaction`, `send` and `walletcreatefundedpsbt` RPCs now support an `include_unsafe` option
- that when `true` allows using unsafe inputs to fund the transaction.
- Note that the resulting transaction may become invalid if one of the unsafe inputs disappears.
- If that happens, the transaction must be funded with different inputs and republished. (#21359)
-
-- We now support up to 20 keys in `multi()` and `sortedmulti()` descriptors
- under `wsh()`. (#20867)
-
GUI changes
-----------
@@ -197,22 +103,17 @@ Low-level changes
RPC
---
-- The RPC server can process a limited number of simultaneous RPC requests.
- Previously, if this limit was exceeded, the RPC server would respond with
- [status code 500 (`HTTP_INTERNAL_SERVER_ERROR`)](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes#5xx_server_errors).
- Now it returns status code 503 (`HTTP_SERVICE_UNAVAILABLE`). (#18335)
-
-- Error codes have been updated to be more accurate for the following error cases (#18466):
- - `signmessage` now returns RPC_INVALID_ADDRESS_OR_KEY (-5) if the
- passed address is invalid. Previously returned RPC_TYPE_ERROR (-3).
- - `verifymessage` now returns RPC_INVALID_ADDRESS_OR_KEY (-5) if the
- passed address is invalid. Previously returned RPC_TYPE_ERROR (-3).
- - `verifymessage` now returns RPC_TYPE_ERROR (-3) if the passed signature
- is malformed. Previously returned RPC_INVALID_ADDRESS_OR_KEY (-5).
+- `getblockchaininfo` now returns a new `time` field, that provides the chain tip time. (#22407)
Tests
-----
+- For the `regtest` network the activation heights of several softforks were
+ changed.
+ * BIP 34 (blockheight in coinbase) from 500 to 2 (#16333)
+ * BIP 66 (DERSIG) from 1251 to 102 (#22632)
+ * BIP 65 (CLTV) from 1351 to 111 (#21862)
+
Credits
=======
diff --git a/doc/release-process.md b/doc/release-process.md
index 3ead1181b9..3e4748b742 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -37,15 +37,19 @@ Release Process
- This update should be reviewed with a reindex-chainstate with assumevalid=0 to catch any defect
that causes rejection of blocks in the past history.
- Clear the release notes and move them to the wiki (see "Write the release notes" below).
-
-#### After branch-off (on master)
-
-- Update the version of `contrib/gitian-descriptors/*.yml`.
+- Translations on Transifex
+ - Create [a new resource](https://www.transifex.com/bitcoin/bitcoin/content/) named after the major version with the slug `[bitcoin.qt-translation-<RRR>x]`, where `RRR` is the major branch number padded with zeros. Use `src/qt/locale/bitcoin_en.xlf` to create it.
+ - In the project workflow settings, ensure that [Translation Memory Fill-up](https://docs.transifex.com/translation-memory/enabling-autofill) is enabled and that [Translation Memory Context Matching](https://docs.transifex.com/translation-memory/translation-memory-with-context) is disabled.
+ - Update the Transifex slug in [`.tx/config`](/.tx/config) to the slug of the resource created in the first step. This identifies which resource the translations will be synchronized from.
+ - Make an announcement that translators can start translating for the new version. You can use one of the [previous announcements](https://www.transifex.com/bitcoin/bitcoin/announcements/) as a template.
+ - Change the auto-update URL for the resource to `master`, e.g. `https://raw.githubusercontent.com/bitcoin/bitcoin/master/src/qt/locale/bitcoin_en.xlf`. (Do this only after the previous steps, to prevent an auto-update from interfering.)
#### After branch-off (on the major release branch)
- Update the versions.
- Create a pinned meta-issue for testing the release candidate (see [this issue](https://github.com/bitcoin/bitcoin/issues/17079) for an example) and provide a link to it in the release announcements where useful.
+- Translations on Transifex
+ - Change the auto-update URL for the new major version's resource away from `master` and to the branch, e.g. `https://raw.githubusercontent.com/bitcoin/bitcoin/<branch>/src/qt/locale/bitcoin_en.xlf`. Do not forget this or it will keep tracking the translations on master instead, drifting away from the specific major release.
#### Before final release
@@ -64,14 +68,14 @@ This will perform a few last-minute consistency checks in the build system files
### First time / New builders
-If you're using the automated script (found in [contrib/gitian-build.py](/contrib/gitian-build.py)), then at this point you should run it with the "--setup" command. Otherwise ignore this.
+Install Guix using one of the installation methods detailed in
+[contrib/guix/INSTALL.md](/contrib/guix/INSTALL.md).
Check out the source code in the following directory hierarchy.
cd /path/to/your/toplevel/build
- git clone https://github.com/bitcoin-core/gitian.sigs.git
+ git clone https://github.com/bitcoin-core/guix.sigs.git
git clone https://github.com/bitcoin-core/bitcoin-detached-sigs.git
- git clone https://github.com/devrandom/gitian-builder.git
git clone https://github.com/bitcoin/bitcoin.git
### Write the release notes
@@ -86,110 +90,56 @@ Generate list of authors:
git log --format='- %aN' v(current version, e.g. 0.20.0)..v(new version, e.g. 0.20.1) | sort -fiu
-### Setup and perform Gitian builds
-
-If you're using the automated script (found in [contrib/gitian-build.py](/contrib/gitian-build.py)), then at this point you should run it with the "--build" command. Otherwise ignore this.
-
-Setup Gitian descriptors:
-
- pushd ./bitcoin
- export SIGNER="(your Gitian key, ie bluematt, sipa, etc)"
- export VERSION=(new version, e.g. 0.20.0)
- git fetch
- git checkout v${VERSION}
- popd
-
-Ensure your gitian.sigs are up-to-date if you wish to gverify your builds against other Gitian signatures.
-
- pushd ./gitian.sigs
- git pull
- popd
-
-Ensure gitian-builder is up-to-date:
-
- pushd ./gitian-builder
- git pull
- popd
-
-### Fetch and create inputs: (first time, or when dependency versions change)
-
- pushd ./gitian-builder
- mkdir -p inputs
- wget -O inputs/osslsigncode-2.0.tar.gz https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz
- echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c
- popd
-
-Create the macOS SDK tarball, see the [macdeploy instructions](/contrib/macdeploy/README.md#deterministic-macos-dmg-notes) for details, and copy it into the inputs directory.
-
-### Optional: Seed the Gitian sources cache and offline git repositories
-
-NOTE: Gitian is sometimes unable to download files. If you have errors, try the step below.
-
-By default, Gitian will fetch source files as needed. To cache them ahead of time, make sure you have checked out the tag you want to build in bitcoin, then:
-
- pushd ./gitian-builder
- make -C ../bitcoin/depends download SOURCES_PATH=`pwd`/cache/common
- popd
-
-Only missing files will be fetched, so this is safe to re-run for each build.
-
-NOTE: Offline builds must use the --url flag to ensure Gitian fetches only from local URLs. For example:
+### Setup and perform Guix builds
- pushd ./gitian-builder
- ./bin/gbuild --url bitcoin=/path/to/bitcoin,signature=/path/to/sigs {rest of arguments}
- popd
+Check out the Bitcoin Core version you'd like to build:
-The gbuild invocations below <b>DO NOT DO THIS</b> by default.
-
-### Build and sign Bitcoin Core for Linux, Windows, and macOS:
+```sh
+pushd ./bitcoin
+SIGNER='(your builder key, ie bluematt, sipa, etc)'
+VERSION='(new version without v-prefix, e.g. 0.20.0)'
+git fetch "v${VERSION}"
+git checkout "v${VERSION}"
+popd
+```
- pushd ./gitian-builder
- ./bin/gbuild --num-make 2 --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
- ./bin/gsign --signer "$SIGNER" --release ${VERSION}-linux --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
- mv build/out/bitcoin-*.tar.gz build/out/src/bitcoin-*.tar.gz ../
+Ensure your guix.sigs are up-to-date if you wish to `guix-verify` your builds
+against other `guix-attest` signatures.
- ./bin/gbuild --num-make 2 --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-win.yml
- ./bin/gsign --signer "$SIGNER" --release ${VERSION}-win-unsigned --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-win.yml
- mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz
- mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe ../
+```sh
+git -C ./guix.sigs pull
+```
- ./bin/gbuild --num-make 2 --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-osx.yml
- ./bin/gsign --signer "$SIGNER" --release ${VERSION}-osx-unsigned --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-osx.yml
- mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz
- mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg ../
- popd
+### Create the macOS SDK tarball: (first time, or when SDK version changes)
-Build output expected:
+Create the macOS SDK tarball, see the [macdeploy
+instructions](/contrib/macdeploy/README.md#deterministic-macos-dmg-notes) for
+details.
- 1. source tarball (`bitcoin-${VERSION}.tar.gz`)
- 2. linux 32-bit and 64-bit dist tarballs (`bitcoin-${VERSION}-linux[32|64].tar.gz`)
- 3. windows 32-bit and 64-bit unsigned installers and dist zips (`bitcoin-${VERSION}-win[32|64]-setup-unsigned.exe`, `bitcoin-${VERSION}-win[32|64].zip`)
- 4. macOS unsigned installer and dist tarball (`bitcoin-${VERSION}-osx-unsigned.dmg`, `bitcoin-${VERSION}-osx64.tar.gz`)
- 5. Gitian signatures (in `gitian.sigs/${VERSION}-<linux|{win,osx}-unsigned>/(your Gitian key)/`)
+### Build and attest to build outputs:
-### Verify other gitian builders signatures to your own. (Optional)
+Follow the relevant Guix README.md sections (an illustrative invocation is sketched after this list):
+- [Performing a build](/contrib/guix/README.md#performing-a-build)
+- [Attesting to build outputs](/contrib/guix/README.md#attesting-to-build-outputs)
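+
+A minimal sketch, assuming Guix is installed as above and using the environment
+variables described in the Guix README (`HOSTS` to restrict the platforms built,
+`SIGNER` and `GUIX_SIGS_REPO` for attestation):
+
+```sh
+pushd ./bitcoin
+# Build for a single host first to validate the setup; drop HOSTS to build all platforms.
+env HOSTS='x86_64-linux-gnu' ./contrib/guix/guix-build
+# Attest to the non-codesigned build outputs.
+env GUIX_SIGS_REPO=../guix.sigs SIGNER="$SIGNER" ./contrib/guix/guix-attest
+popd
+```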
-Add other gitian builders keys to your gpg keyring, and/or refresh keys: See `../bitcoin/contrib/gitian-keys/README.md`.
+### Verify other builders' signatures to your own. (Optional)
-Verify the signatures
+Add other builders keys to your gpg keyring, and/or refresh keys: See `../bitcoin/contrib/builder-keys/README.md`.
- pushd ./gitian-builder
- ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-linux ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
- ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-win-unsigned ../bitcoin/contrib/gitian-descriptors/gitian-win.yml
- ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-osx-unsigned ../bitcoin/contrib/gitian-descriptors/gitian-osx.yml
- popd
+Follow the relevant Guix README.md sections:
+- [Verifying build output attestations](/contrib/guix/README.md#verifying-build-output-attestations)
### Next steps:
-Commit your signature to gitian.sigs:
+Commit your signature to guix.sigs:
- pushd gitian.sigs
- git add ${VERSION}-linux/"${SIGNER}"
- git add ${VERSION}-win-unsigned/"${SIGNER}"
- git add ${VERSION}-osx-unsigned/"${SIGNER}"
- git commit -m "Add ${VERSION} unsigned sigs for ${SIGNER}"
- git push # Assuming you can push to the gitian.sigs tree
- popd
+```sh
+pushd ./guix.sigs
+git add "${VERSION}/${SIGNER}"/noncodesigned.SHA256SUMS{,.asc}
+git commit -m "Add attestations by ${SIGNER} for ${VERSION} non-codesigned"
+git push # Assuming you can push to the guix.sigs tree
+popd
+```
Codesigner only: Create Windows/macOS detached signatures:
- Only one person handles codesigning. Everyone else should skip to the next step.
@@ -201,7 +151,7 @@ Codesigner only: Sign the macOS binary:
tar xf bitcoin-osx-unsigned.tar.gz
./detached-sig-create.sh -s "Key ID"
Enter the keychain password and authorize the signature
- Move signature-osx.tar.gz back to the gitian host
+ Move signature-osx.tar.gz back to the guix-build host
Codesigner only: Sign the windows binaries:
@@ -210,110 +160,91 @@ Codesigner only: Sign the windows binaries:
Enter the passphrase for the key when prompted
signature-win.tar.gz will be created
+Codesigner only: It is advised to test that the code signature attaches properly prior to tagging by performing the `guix-codesign` step.
+However, if this is done, once the release has been tagged in the bitcoin-detached-sigs repo, the `guix-codesign` step must be performed again in order for the guix attestation to be valid when compared against the attestations of non-codesigner builds.
+
Codesigner only: Commit the detached codesign payloads:
- cd ~/bitcoin-detached-sigs
- checkout the appropriate branch for this release series
- rm -rf *
- tar xf signature-osx.tar.gz
- tar xf signature-win.tar.gz
- git add -A
- git commit -m "point to ${VERSION}"
- git tag -s v${VERSION} HEAD
- git push the current branch and new tag
+```sh
+pushd ./bitcoin-detached-sigs
+# checkout the appropriate branch for this release series
+rm -rf ./*
+tar xf signature-osx.tar.gz
+tar xf signature-win.tar.gz
+git add -A
+git commit -m "point to ${VERSION}"
+git tag -s "v${VERSION}" HEAD
+# push the current branch and the new tag
+popd
+```
Non-codesigners: wait for Windows/macOS detached signatures:
- Once the Windows/macOS builds each have 3 matching signatures, they will be signed with their respective release keys.
- Detached signatures will then be committed to the [bitcoin-detached-sigs](https://github.com/bitcoin-core/bitcoin-detached-sigs) repository, which can be combined with the unsigned apps to create signed binaries.
-Create (and optionally verify) the signed macOS binary:
-
- pushd ./gitian-builder
- ./bin/gbuild -i --commit signature=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml
- ./bin/gsign --signer "$SIGNER" --release ${VERSION}-osx-signed --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml
- ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-osx-signed ../bitcoin/contrib/gitian-descriptors/gitian-osx-signer.yml
- mv build/out/bitcoin-osx-signed.dmg ../bitcoin-${VERSION}-osx.dmg
- popd
+Create (and optionally verify) the codesigned outputs:
-Create (and optionally verify) the signed Windows binaries:
-
- pushd ./gitian-builder
- ./bin/gbuild -i --commit signature=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml
- ./bin/gsign --signer "$SIGNER" --release ${VERSION}-win-signed --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml
- ./bin/gverify -v -d ../gitian.sigs/ -r ${VERSION}-win-signed ../bitcoin/contrib/gitian-descriptors/gitian-win-signer.yml
- mv build/out/bitcoin-*win64-setup.exe ../bitcoin-${VERSION}-win64-setup.exe
- popd
+- [Codesigning](/contrib/guix/README.md#codesigning)
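+
+A minimal sketch of this step (the `DETACHED_SIGS_REPO` environment variable is an assumption; see the README section above for the authoritative invocation):
+
+```sh
+# Attach the detached signatures and produce the codesigned outputs.
+env DETACHED_SIGS_REPO=../bitcoin-detached-sigs ./contrib/guix/guix-codesign
+```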
Commit your signature for the signed macOS/Windows binaries:
- pushd gitian.sigs
- git add ${VERSION}-osx-signed/"${SIGNER}"
- git add ${VERSION}-win-signed/"${SIGNER}"
- git commit -m "Add ${SIGNER} ${VERSION} signed binaries signatures"
- git push # Assuming you can push to the gitian.sigs tree
- popd
+```sh
+pushd ./guix.sigs
+git add "${VERSION}/${SIGNER}"/all.SHA256SUMS{,.asc}
+git commit -m "Add attestations by ${SIGNER} for ${VERSION} codesigned"
+git push # Assuming you can push to the guix.sigs tree
+popd
+```
-### After 3 or more people have gitian-built and their results match:
+### After 3 or more people have guix-built and their results match:
-- Create `SHA256SUMS.asc` for the builds, and GPG-sign it:
+Combine the `all.SHA256SUMS.asc` files from all signers into `SHA256SUMS.asc`:
```bash
-sha256sum * > SHA256SUMS
+cat "$VERSION"/*/all.SHA256SUMS.asc > SHA256SUMS.asc
```
-The list of files should be:
-```
-bitcoin-${VERSION}-aarch64-linux-gnu.tar.gz
-bitcoin-${VERSION}-arm-linux-gnueabihf.tar.gz
-bitcoin-${VERSION}-riscv64-linux-gnu.tar.gz
-bitcoin-${VERSION}-x86_64-linux-gnu.tar.gz
-bitcoin-${VERSION}-osx64.tar.gz
-bitcoin-${VERSION}-osx.dmg
-bitcoin-${VERSION}.tar.gz
-bitcoin-${VERSION}-win64-setup.exe
-bitcoin-${VERSION}-win64.zip
-```
-The `*-debug*` files generated by the gitian build contain debug symbols
-for troubleshooting by developers. It is assumed that anyone that is interested
-in debugging can run gitian to generate the files for themselves. To avoid
-end-user confusion about which file to pick, as well as save storage
-space *do not upload these to the bitcoin.org server, nor put them in the torrent*.
-- GPG-sign it, delete the unsigned file:
-```
-gpg --digest-algo sha256 --clearsign SHA256SUMS # outputs SHA256SUMS.asc
-rm SHA256SUMS
-```
-(the digest algorithm is forced to sha256 to avoid confusion of the `Hash:` header that GPG adds with the SHA256 used for the files)
-Note: check that SHA256SUMS itself doesn't end up in SHA256SUMS, which is a spurious/nonsensical entry.
+- Upload to the bitcoincore.org server (`/var/www/bin/bitcoin-core-${VERSION}/`):
+ 1. The contents of each `./bitcoin/guix-build-${VERSION}/output/${HOST}/` directory, except for
+ `*-debug*` files.
-- Upload zips and installers, as well as `SHA256SUMS.asc` from last step, to the bitcoin.org server
- into `/var/www/bin/bitcoin-core-${VERSION}`
+ Guix will output all of the results into host subdirectories, but the SHA256SUMS
+ file does not include these subdirectories. In order for downloads via torrent
+ to verify without directory structure modification, all of the uploaded files
+ need to be in the same directory as the SHA256SUMS file.
-- A `.torrent` will appear in the directory after a few minutes. Optionally help seed this torrent. To get the `magnet:` URI use:
-```bash
-transmission-show -m <torrent file>
-```
-Insert the magnet URI into the announcement sent to mailing lists. This permits
-people without access to `bitcoin.org` to download the binary distribution.
-Also put it into the `optional_magnetlink:` slot in the YAML file for
-bitcoin.org (see below for bitcoin.org update instructions).
+ The `*-debug*` files generated by the guix build contain debug symbols
+ for troubleshooting by developers. It is assumed that anyone that is
+ interested in debugging can run guix to generate the files for
+ themselves. To avoid end-user confusion about which file to pick, as well
+ as save storage space *do not upload these to the bitcoincore.org server,
+ nor put them in the torrent*.
+
+ ```sh
+ find guix-build-${VERSION}/output/ -maxdepth 2 -type f -not -name "SHA256SUMS.part" -and -not -name "*debug*" -exec scp {} user@bitcoincore.org:/var/www/bin/bitcoin-core-${VERSION} \;
+ ```
+
+ 2. The `SHA256SUMS` file
-- Update bitcoin.org version
+ 3. The `SHA256SUMS.asc` combined signature file you just created
- - First, check to see if the Bitcoin.org maintainers have prepared a
- release: https://github.com/bitcoin-dot-org/bitcoin.org/labels/Core
+- Create a torrent of the `/var/www/bin/bitcoin-core-${VERSION}` directory such
+ that at the top level there is only one file: the `bitcoin-core-${VERSION}`
+ directory containing everything else. Name the torrent
+ `bitcoin-${VERSION}.torrent` (note that there is no `-core-` in this name).
- - If they have, it will have previously failed their CI
- checks because the final release files weren't uploaded.
- Trigger a CI rebuild---if it passes, merge.
+ Optionally help seed this torrent. To get the `magnet:` URI use:
- - If they have not prepared a release, follow the Bitcoin.org release
- instructions: https://github.com/bitcoin-dot-org/bitcoin.org/blob/master/docs/adding-events-release-notes-and-alerts.md#release-notes
+ ```sh
+ transmission-show -m <torrent file>
+ ```
- - After the pull request is merged, the website will automatically show the newest version within 15 minutes, as well
- as update the OS download links.
+ Insert the magnet URI into the announcement sent to mailing lists. This permits
+ people without access to `bitcoincore.org` to download the binary distribution.
+ Also put it into the `optional_magnetlink:` slot in the YAML file for
+ bitcoincore.org.
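+
+  One possible way to create the torrent described above (a sketch only; the
+  `transmission-create` tool from transmission-cli is assumed, and trackers can
+  be added as needed):
+
+  ```sh
+  transmission-create -o bitcoin-${VERSION}.torrent /var/www/bin/bitcoin-core-${VERSION}
+  ```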
- Update other repositories and websites for new version
@@ -351,14 +282,14 @@ bitcoin.org (see below for bitcoin.org update instructions).
- https://code.launchpad.net/~bitcoin-core/bitcoin-core-snap/+git/packaging/+ref/0.xx (Click "Create snap package")
- Name it "bitcoin-core-snap-0.xx"
- Leave owner and series as-is
- - Select architectures that are compiled via gitian
+ - Select architectures that are compiled via guix
- Leave "automatically build when branch changes" unticked
- Tick "automatically upload to store"
- Put "bitcoin-core" in the registered store package name field
- Tick the "edge" box
- Put "0.xx" in the track field
- Click "create snap package"
- - Click "Request builds" for every new release on this branch (after updating the snapcraft.yml in the branch to reflect the latest gitian results)
+ - Click "Request builds" for every new release on this branch (after updating the snapcraft.yml in the branch to reflect the latest guix results)
- Promote release on https://snapcraft.io/bitcoin-core/releases if it passes sanity checks
- This repo
diff --git a/doc/tor.md b/doc/tor.md
index 7d134b64e0..a3ec1987aa 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -57,11 +57,11 @@ outgoing connections, but more is possible.
-onlynet=onion Make outgoing connections only to .onion addresses. Incoming
connections are not affected by this option. This option can be
specified multiple times to allow multiple network types, e.g.
- ipv4, ipv6 or onion. If you use this option with values other
- than onion you *cannot* disable onion connections; outgoing onion
- connections will be enabled when you use -proxy or -onion. Use
- -noonion or -onion=0 if you want to be sure there are no outbound
- onion connections over the default proxy or your defined -proxy.
+ onlynet=ipv4, onlynet=ipv6, onlynet=onion, onlynet=i2p.
+ Warning: if you use -onlynet with values other than onion, and
+ the -onion or -proxy option is set, then outgoing onion
+ connections will still be made; use -noonion or -onion=0 to
+ disable outbound onion connections in this case.
In a typical situation, this suffices to run behind a Tor proxy:
diff --git a/doc/tracing.md b/doc/tracing.md
new file mode 100644
index 0000000000..87fc9603fe
--- /dev/null
+++ b/doc/tracing.md
@@ -0,0 +1,266 @@
+# User-space, Statically Defined Tracing (USDT) for Bitcoin Core
+
+Bitcoin Core includes statically defined tracepoints to allow for more
+observability during development, debugging, code review, and production usage.
+These tracepoints make it possible to keep track of custom statistics and
+enable detailed monitoring of otherwise hidden internals. They have
+little to no performance impact when unused.
+
+```
+eBPF and USDT Overview
+======================
+
+ ┌──────────────────┐ ┌──────────────┐
+ │ tracing script │ │ bitcoind │
+ │==================│ 2. │==============│
+ │ eBPF │ tracing │ hooks │ │
+ │ code │ logic │ into┌─┤►tracepoint 1─┼───┐ 3.
+ └────┬───┴──▲──────┘ ├─┤►tracepoint 2 │ │ pass args
+ 1. │ │ 4. │ │ ... │ │ to eBPF
+ User compiles │ │ pass data to │ └──────────────┘ │ program
+ Space & loads │ │ tracing script │ │
+ ─────────────────┼──────┼─────────────────┼────────────────────┼───
+ Kernel │ │ │ │
+ Space ┌──┬─▼──────┴─────────────────┴────────────┐ │
+ │ │ eBPF program │◄──────┘
+ │ └───────────────────────────────────────┤
+ │ eBPF kernel Virtual Machine (sandboxed) │
+ └──────────────────────────────────────────┘
+
+1. The tracing script compiles the eBPF code and loads the eBPF program into a kernel VM
+2. The eBPF program hooks into one or more tracepoints
+3. When the tracepoint is called, the arguments are passed to the eBPF program
+4. The eBPF program processes the arguments and returns data to the tracing script
+```
+
+The Linux kernel can hook into the tracepoints during runtime and pass data to
+sandboxed [eBPF] programs running in the kernel. These eBPF programs can, for
+example, collect statistics or pass data back to user-space scripts for further
+processing.
+
+[eBPF]: https://ebpf.io/
+
+The two main eBPF front-ends with support for USDT are [bpftrace] and
+[BPF Compiler Collection (BCC)]. BCC is used for complex tools and daemons and
+`bpftrace` is preferred for one-liners and shorter scripts. Examples for both can
+be found in [contrib/tracing].
+
+[bpftrace]: https://github.com/iovisor/bpftrace
+[BPF Compiler Collection (BCC)]: https://github.com/iovisor/bcc
+[contrib/tracing]: ../contrib/tracing/
+
+## Tracepoint documentation
+
+The currently available tracepoints are listed here.
+
+### Context `net`
+
+#### Tracepoint `net:inbound_message`
+
+Is called when a message is received from a peer over the P2P network. Passes
+information about our peer, the connection and the message as arguments.
+
+Arguments passed:
+1. Peer ID as `int64`
+2. Peer Address and Port (IPv4, IPv6, Tor v3, I2P, ...) as `pointer to C-style String` (max. length 68 characters)
+3. Connection Type (inbound, feeler, outbound-full-relay, ...) as `pointer to C-style String` (max. length 20 characters)
+4. Message Type (inv, ping, getdata, addrv2, ...) as `pointer to C-style String` (max. length 20 characters)
+5. Message Size in bytes as `uint64`
+6. Message Bytes as `pointer to unsigned chars` (i.e. bytes)
+
+Note: The message is passed to the tracepoint in full; however, due to space
+limitations in the eBPF kernel VM, it might not be possible to pass the message
+to user-space in full. Messages longer than 32 kB might be cut off. This can
+be detected in tracing scripts by comparing the message size to the length of
+the passed message.
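+
+As an illustration, a bpftrace one-liner along these lines (a sketch; the binary
+path and the root privileges usually required by bpftrace are assumptions) can log
+every inbound message:
+
+```sh
+# Print peer id, message type and message size for each inbound P2P message.
+bpftrace -e 'usdt:./src/bitcoind:net:inbound_message { printf("peer=%d type=%s size=%d\n", arg0, str(arg3), arg4); }'
+```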
+
+#### Tracepoint `net:outbound_message`
+
+Is called when a message is sent to a peer over the P2P network. Passes
+information about our peer, the connection and the message as arguments.
+
+Arguments passed:
+1. Peer ID as `int64`
+2. Peer Address and Port (IPv4, IPv6, Tor v3, I2P, ...) as `pointer to C-style String` (max. length 68 characters)
+3. Connection Type (inbound, feeler, outbound-full-relay, ...) as `pointer to C-style String` (max. length 20 characters)
+4. Message Type (inv, ping, getdata, addrv2, ...) as `pointer to C-style String` (max. length 20 characters)
+5. Message Size in bytes as `uint64`
+6. Message Bytes as `pointer to unsigned chars` (i.e. bytes)
+
+Note: The message is passed to the tracepoint in full; however, due to space
+limitations in the eBPF kernel VM, it might not be possible to pass the message
+to user-space in full. Messages longer than 32 kB might be cut off. This can
+be detected in tracing scripts by comparing the message size to the length of
+the passed message.
+
+### Context `validation`
+
+#### Tracepoint `validation:block_connected`
+
+Is called *after* a block is connected to the chain. Can, for example, be used
+to benchmark block connections together with `-reindex`.
+
+Arguments passed:
+1. Block Header Hash as `pointer to C-style String` (64 characters)
+2. Block Height as `int32`
+3. Transactions in the Block as `uint64`
+4. Inputs spent in the Block as `int32`
+5. SigOps in the Block (excluding coinbase SigOps) as `uint64`
+6. Time it took to connect the Block in microseconds (µs) as `uint64`
+7. Block Header Hash as `pointer to unsigned chars` (i.e. 32 bytes in little-endian)
+
+Note: The 7th argument can't be accessed by bpftrace and is purposefully chosen
+to be the block header hash as bytes. See [bpftrace argument limit] for more
+details.
+
+[bpftrace argument limit]: #bpftrace-argument-limit
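+
+For example, a minimal bpftrace sketch (again assuming the binary path and root
+privileges) that prints the height, transaction count, and connection time of each
+connected block:
+
+```sh
+# Print block height, transaction count and connection time per connected block.
+bpftrace -e 'usdt:./src/bitcoind:validation:block_connected { printf("height=%d txs=%d duration_us=%d\n", arg1, arg2, arg5); }'
+```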
+
+## Adding tracepoints to Bitcoin Core
+
+To add a new tracepoint, `#include <util/trace.h>` in the compilation unit where
+the tracepoint is inserted. Use one of the `TRACEx` macros listed below
+depending on the number of arguments passed to the tracepoint. Up to 12
+arguments can be provided. The `context` and `event` specify the names by which
+the tracepoint is referred to. Please use `snake_case` and try to make sure that
+the tracepoint names make sense even without detailed knowledge of the
+implementation. Do not forget to update the tracepoint list in this
+document.
+
+```c
+#define TRACE(context, event)
+#define TRACE1(context, event, a)
+#define TRACE2(context, event, a, b)
+#define TRACE3(context, event, a, b, c)
+#define TRACE4(context, event, a, b, c, d)
+#define TRACE5(context, event, a, b, c, d, e)
+#define TRACE6(context, event, a, b, c, d, e, f)
+#define TRACE7(context, event, a, b, c, d, e, f, g)
+#define TRACE8(context, event, a, b, c, d, e, f, g, h)
+#define TRACE9(context, event, a, b, c, d, e, f, g, h, i)
+#define TRACE10(context, event, a, b, c, d, e, f, g, h, i, j)
+#define TRACE11(context, event, a, b, c, d, e, f, g, h, i, j, k)
+#define TRACE12(context, event, a, b, c, d, e, f, g, h, i, j, k, l)
+```
+
+For example:
+
+```C++
+TRACE6(net, inbound_message,
+ pnode->GetId(),
+ pnode->m_addr_name.c_str(),
+ pnode->ConnectionTypeAsString().c_str(),
+ sanitizedType.c_str(),
+ msg.data.size(),
+ msg.data.data()
+);
+```
+
+### Guidelines and best practices
+
+#### Clear motivation and use-case
+Tracepoints need a clear motivation and use-case. The motivation should
+outweigh the impact on, for example, code readability. There is no point in
+adding tracepoints that don't end up being used.
+
+#### Provide an example
+When adding a new tracepoint, provide an example. Examples can show the use case
+and help reviewers test that the tracepoint works as intended. The examples
+can be kept simple but should give others a starting point when working with
+the tracepoint. See existing examples in [contrib/tracing/].
+
+[contrib/tracing/]: ../contrib/tracing/
+
+#### No expensive computations for tracepoints
+Data passed to the tracepoint should be inexpensive to compute. Although the
+tracepoint itself only has overhead when enabled, the code to compute arguments
+is always run - even if the tracepoint is not used. For example, avoid
+serialization and parsing.
+
+#### Semi-stable API
+Tracepoints should have a semi-stable API. Users should be able to rely on the
+tracepoints for scripting. This means tracepoints need to be documented, and the
+argument order ideally should not change. If there is an important reason to
+change argument order, make sure to document the change and update the examples
+using the tracepoint.
+
+#### eBPF Virtual Machine limits
+Keep the eBPF Virtual Machine limits in mind. eBPF programs receiving data from
+the tracepoints run in a sandboxed Linux kernel VM. This VM has a limited stack
+size of 512 bytes. Before passing larger amounts of data, check that it can
+actually be handled, for example, with a tracing script that consumes the passed data.
+
+#### `bpftrace` argument limit
+While tracepoints can have up to 12 arguments, bpftrace scripts currently only
+support reading from the first six arguments (`arg0` to `arg5`) on `x86_64`.
+bpftrace currently lacks real support for handling and printing binary data,
+like block header hashes and txids. When a tracepoint passes more than six
+arguments, string and integer arguments should preferably be placed in the
+first six argument fields. Binary data can be placed in later arguments. BCC
+supports reading all 12 arguments.
+
+#### Strings as C-style String
+Generally, strings should be passed into the `TRACEx` macros as pointers to
+C-style strings (a null-terminated sequence of characters). For C++
+`std::strings`, [`c_str()`] can be used. It's recommended to document the
+maximum expected string size if known.
+
+
+[`c_str()`]: https://www.cplusplus.com/reference/string/string/c_str/
+
+
+## Listing available tracepoints
+
+Multiple tools can list the available tracepoints in a `bitcoind` binary with
+USDT support.
+
+### GDB - GNU Project Debugger
+
+To list probes in Bitcoin Core, use `info probes` in `gdb`:
+
+```
+$ gdb ./src/bitcoind
+…
+(gdb) info probes
+Type Provider Name Where Semaphore Object
+stap net inbound_message 0x000000000014419e /src/bitcoind
+stap net outbound_message 0x0000000000107c05 /src/bitcoind
+stap validation block_connected 0x00000000002fb10c /src/bitcoind
+…
+```
+
+### With `readelf`
+
+The `readelf` tool can be used to display the USDT tracepoints in Bitcoin Core.
+Look for the notes with the description `NT_STAPSDT`.
+
+```
+$ readelf -n ./src/bitcoind | grep NT_STAPSDT -A 4 -B 2
+Displaying notes found in: .note.stapsdt
+ Owner Data size Description
+ stapsdt 0x0000005d NT_STAPSDT (SystemTap probe descriptors)
+ Provider: net
+ Name: outbound_message
+ Location: 0x0000000000107c05, Base: 0x0000000000579c90, Semaphore: 0x0000000000000000
+ Arguments: -8@%r12 8@%rbx 8@%rdi 8@192(%rsp) 8@%rax 8@%rdx
+…
+```
+
+### With `tplist`
+
+The `tplist` tool is provided by BCC (see [Installing BCC]). It displays kernel
+tracepoints or USDT probes and their formats (for more information, see the
+[`tplist` usage demonstration]). There are slight binary naming differences
+between distributions. For example, on
+[Ubuntu the binary is called `tplist-bpfcc`][ubuntu binary].
+
+[Installing BCC]: https://github.com/iovisor/bcc/blob/master/INSTALL.md
+[`tplist` usage demonstration]: https://github.com/iovisor/bcc/blob/master/tools/tplist_example.txt
+[ubuntu binary]: https://github.com/iovisor/bcc/blob/master/INSTALL.md#ubuntu---binary
+
+```
+$ tplist -l ./src/bitcoind -v
+b'net':b'outbound_message' [sema 0x0]
+ 1 location(s)
+ 6 argument(s)
+…
+```
diff --git a/doc/translation_process.md b/doc/translation_process.md
index f132693264..97a8fbfff2 100644
--- a/doc/translation_process.md
+++ b/doc/translation_process.md
@@ -63,17 +63,12 @@ username = USERNAME
The Transifex Bitcoin project config file is included as part of the repo. It can be found at `.tx/config`, however you shouldn’t need to change anything.
### Synchronising translations
-To assist in updating translations, a helper script is available in the [maintainer-tools repo](https://github.com/bitcoin-core/bitcoin-maintainer-tools).
-1. `python3 ../bitcoin-maintainer-tools/update-translations.py`
-2. `git add` new translations from `src/qt/locale/`
-3. Update `src/qt/bitcoin_locale.qrc` manually or via
-```bash
-git ls-files src/qt/locale/*ts|xargs -n1 basename|sed 's/\(bitcoin_\(.*\)\).ts/ <file alias="\2">locale\/\1.qm<\/file>/'
+To assist in updating translations, a helper script is available in the [maintainer-tools repo](https://github.com/bitcoin-core/bitcoin-maintainer-tools). To use it and commit the result, simply do:
+
```
-4. Update `src/Makefile.qt_locale.include` manually or via
-```bash
-git ls-files src/qt/locale/*ts|xargs -n1 basename|sed 's/\(bitcoin_\(.*\)\).ts/ qt\/locale\/\1.ts \\/'
+python3 ../bitcoin-maintainer-tools/update-translations.py
+git commit -a
```
**Do not directly download translations** one by one from the Transifex website, as we do a few post-processing steps before committing the translations.
diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py
index b14c80171e..c6d9b652b8 100755
--- a/share/rpcauth/rpcauth.py
+++ b/share/rpcauth/rpcauth.py
@@ -5,7 +5,6 @@
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
-from binascii import hexlify
from getpass import getpass
from os import urandom
@@ -13,7 +12,7 @@ import hmac
def generate_salt(size):
"""Create size byte hex salt"""
- return hexlify(urandom(size)).decode()
+ return urandom(size).hex()
def generate_password():
"""Create 32 byte b64 password"""
diff --git a/src/.clang-tidy b/src/.clang-tidy
new file mode 100644
index 0000000000..27616ad072
--- /dev/null
+++ b/src/.clang-tidy
@@ -0,0 +1,2 @@
+Checks: '-*,bugprone-argument-comment'
+WarningsAsErrors: bugprone-argument-comment
diff --git a/src/Makefile.am b/src/Makefile.am
index 37ba5ad75b..6f8245de8a 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -3,7 +3,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Pattern rule to print variables, e.g. make print-top_srcdir
-print-%:
+print-%: FORCE
@echo '$*'='$($*)'
DIST_SUBDIRS = secp256k1 univalue
@@ -548,6 +548,7 @@ libbitcoin_common_a_SOURCES = \
key.cpp \
key_io.cpp \
merkleblock.cpp \
+ net_types.cpp \
netaddress.cpp \
netbase.cpp \
net_permissions.cpp \
@@ -814,23 +815,23 @@ clean-local:
check-symbols: $(bin_PROGRAMS)
if TARGET_DARWIN
@echo "Checking macOS dynamic libraries..."
- $(AM_V_at) OTOOL=$(OTOOL) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
+ $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
endif
if TARGET_WINDOWS
@echo "Checking Windows dynamic libraries..."
- $(AM_V_at) OBJDUMP=$(OBJDUMP) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
+ $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
endif
-if GLIBC_BACK_COMPAT
+if TARGET_LINUX
@echo "Checking glibc back compat..."
- $(AM_V_at) CPPFILT=$(CPPFILT) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
+ $(AM_V_at) CPPFILT='$(CPPFILT)' $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
endif
check-security: $(bin_PROGRAMS)
if HARDEN
@echo "Checking binary security..."
- $(AM_V_at) OBJDUMP=$(OBJDUMP) OTOOL=$(OTOOL) $(PYTHON) $(top_srcdir)/contrib/devtools/security-check.py $(bin_PROGRAMS)
+ $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/security-check.py $(bin_PROGRAMS)
endif
libbitcoin_ipc_mpgen_input = \
@@ -842,9 +843,11 @@ EXTRA_DIST += $(libbitcoin_ipc_mpgen_input)
if BUILD_MULTIPROCESS
LIBBITCOIN_IPC=libbitcoin_ipc.a
libbitcoin_ipc_a_SOURCES = \
+ ipc/capnp/context.h \
ipc/capnp/init-types.h \
ipc/capnp/protocol.cpp \
ipc/capnp/protocol.h \
+ ipc/context.h \
ipc/exception.h \
ipc/interfaces.cpp \
ipc/process.cpp \
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 56b8ca8ce6..2a8e4a0aac 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -35,6 +35,7 @@ bench_bench_bitcoin_SOURCES = \
bench/mempool_stress.cpp \
bench/nanobench.h \
bench/nanobench.cpp \
+ bench/peer_eviction.cpp \
bench/rpc_blockchain.cpp \
bench/rpc_mempool.cpp \
bench/util_time.cpp \
diff --git a/src/Makefile.leveldb.include b/src/Makefile.leveldb.include
index 8a28f4f249..ce1f93f11f 100644
--- a/src/Makefile.leveldb.include
+++ b/src/Makefile.leveldb.include
@@ -22,6 +22,7 @@ LEVELDB_CPPFLAGS_INT += -DHAVE_SNAPPY=0 -DHAVE_CRC32C=1
LEVELDB_CPPFLAGS_INT += -DHAVE_FDATASYNC=@HAVE_FDATASYNC@
LEVELDB_CPPFLAGS_INT += -DHAVE_FULLFSYNC=@HAVE_FULLFSYNC@
LEVELDB_CPPFLAGS_INT += -DHAVE_O_CLOEXEC=@HAVE_O_CLOEXEC@
+LEVELDB_CPPFLAGS_INT += -DFALLTHROUGH_INTENDED=[[fallthrough]]
if WORDS_BIGENDIAN
LEVELDB_CPPFLAGS_INT += -DLEVELDB_IS_BIG_ENDIAN=1
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index a1821cafe3..6f450bbc74 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -40,9 +40,9 @@ QT_MOC_CPP = \
qt/moc_askpassphrasedialog.cpp \
qt/moc_createwalletdialog.cpp \
qt/moc_bantablemodel.cpp \
+ qt/moc_bitcoin.cpp \
qt/moc_bitcoinaddressvalidator.cpp \
qt/moc_bitcoinamountfield.cpp \
- qt/moc_bitcoin.cpp \
qt/moc_bitcoingui.cpp \
qt/moc_bitcoinunits.cpp \
qt/moc_clientmodel.cpp \
@@ -51,6 +51,7 @@ QT_MOC_CPP = \
qt/moc_csvmodelwriter.cpp \
qt/moc_editaddressdialog.cpp \
qt/moc_guiutil.cpp \
+ qt/moc_initexecutor.cpp \
qt/moc_intro.cpp \
qt/moc_macdockiconhandler.cpp \
qt/moc_macnotificationhandler.cpp \
@@ -109,9 +110,9 @@ BITCOIN_QT_H = \
qt/addresstablemodel.h \
qt/askpassphrasedialog.h \
qt/bantablemodel.h \
+ qt/bitcoin.h \
qt/bitcoinaddressvalidator.h \
qt/bitcoinamountfield.h \
- qt/bitcoin.h \
qt/bitcoingui.h \
qt/bitcoinunits.h \
qt/clientmodel.h \
@@ -122,6 +123,7 @@ BITCOIN_QT_H = \
qt/editaddressdialog.h \
qt/guiconstants.h \
qt/guiutil.h \
+ qt/initexecutor.h \
qt/intro.h \
qt/macdockiconhandler.h \
qt/macnotificationhandler.h \
@@ -227,6 +229,7 @@ BITCOIN_QT_BASE_CPP = \
qt/clientmodel.cpp \
qt/csvmodelwriter.cpp \
qt/guiutil.cpp \
+ qt/initexecutor.cpp \
qt/intro.cpp \
qt/modaloverlay.cpp \
qt/networkstyle.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index fc2fd80166..a85a359960 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -152,6 +152,7 @@ BITCOIN_TESTS =\
if ENABLE_WALLET
BITCOIN_TESTS += \
wallet/test/psbt_wallet_tests.cpp \
+ wallet/test/spend_tests.cpp \
wallet/test/wallet_tests.cpp \
wallet/test/walletdb_tests.cpp \
wallet/test/wallet_crypto_tests.cpp \
@@ -170,6 +171,8 @@ endif
BITCOIN_TEST_SUITE += \
+ wallet/test/util.cpp \
+ wallet/test/util.h \
wallet/test/wallet_test_fixture.cpp \
wallet/test/wallet_test_fixture.h \
wallet/test/init_test_fixture.cpp \
@@ -201,10 +204,9 @@ if ENABLE_FUZZ_BINARY
test_fuzz_fuzz_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_fuzz_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_fuzz_LDADD = $(FUZZ_SUITE_LD_COMMON)
-test_fuzz_fuzz_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON)
+test_fuzz_fuzz_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) $(RUNTIME_LDFLAGS)
test_fuzz_fuzz_SOURCES = \
test/fuzz/addition_overflow.cpp \
- test/fuzz/addrdb.cpp \
test/fuzz/addrman.cpp \
test/fuzz/asmap.cpp \
test/fuzz/asmap_direct.cpp \
@@ -335,8 +337,8 @@ bitcoin_test_clean : FORCE
check-local: $(BITCOIN_TESTS:.cpp=.cpp.test)
if BUILD_BITCOIN_TX
- @echo "Running test/util/bitcoin-util-test.py..."
- $(PYTHON) $(top_builddir)/test/util/bitcoin-util-test.py
+ @echo "Running test/util/test_runner.py..."
+ $(PYTHON) $(top_builddir)/test/util/test_runner.py
endif
@echo "Running test/util/rpcauth-test.py..."
$(PYTHON) $(top_builddir)/test/util/rpcauth-test.py
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index b8fd019bab..345dbdfb16 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -19,64 +19,7 @@
#include <util/settings.h>
#include <util/system.h>
-CBanEntry::CBanEntry(const UniValue& json)
- : nVersion(json["version"].get_int()), nCreateTime(json["ban_created"].get_int64()),
- nBanUntil(json["banned_until"].get_int64())
-{
-}
-
-UniValue CBanEntry::ToJson() const
-{
- UniValue json(UniValue::VOBJ);
- json.pushKV("version", nVersion);
- json.pushKV("ban_created", nCreateTime);
- json.pushKV("banned_until", nBanUntil);
- return json;
-}
-
namespace {
-
-static const char* BANMAN_JSON_ADDR_KEY = "address";
-
-/**
- * Convert a `banmap_t` object to a JSON array.
- * @param[in] bans Bans list to convert.
- * @return a JSON array, similar to the one returned by the `listbanned` RPC. Suitable for
- * passing to `BanMapFromJson()`.
- */
-UniValue BanMapToJson(const banmap_t& bans)
-{
- UniValue bans_json(UniValue::VARR);
- for (const auto& it : bans) {
- const auto& address = it.first;
- const auto& ban_entry = it.second;
- UniValue j = ban_entry.ToJson();
- j.pushKV(BANMAN_JSON_ADDR_KEY, address.ToString());
- bans_json.push_back(j);
- }
- return bans_json;
-}
-
-/**
- * Convert a JSON array to a `banmap_t` object.
- * @param[in] bans_json JSON to convert, must be as returned by `BanMapToJson()`.
- * @param[out] bans Bans list to create from the JSON.
- * @throws std::runtime_error if the JSON does not have the expected fields or they contain
- * unparsable values.
- */
-void BanMapFromJson(const UniValue& bans_json, banmap_t& bans)
-{
- for (const auto& ban_entry_json : bans_json.getValues()) {
- CSubNet subnet;
- const auto& subnet_str = ban_entry_json[BANMAN_JSON_ADDR_KEY].get_str();
- if (!LookupSubNet(subnet_str, subnet)) {
- throw std::runtime_error(
- strprintf("Cannot parse banned address or subnet: %s", subnet_str));
- }
- bans.insert_or_assign(subnet, CBanEntry{ban_entry_json});
- }
-}
-
template <typename Stream, typename Data>
bool SerializeDB(Stream& stream, const Data& data)
{
@@ -197,17 +140,16 @@ bool CBanDB::Write(const banmap_t& banSet)
return false;
}
-bool CBanDB::Read(banmap_t& banSet, bool& dirty)
+bool CBanDB::Read(banmap_t& banSet)
{
- // If the JSON banlist does not exist, then try to read the non-upgraded banlist.dat.
+ if (fs::exists(m_banlist_dat)) {
+ LogPrintf("banlist.dat ignored because it can only be read by " PACKAGE_NAME " version 22.x. Remove %s to silence this warning.\n", m_banlist_dat);
+ }
+ // If the JSON banlist does not exist, then recreate it
if (!fs::exists(m_banlist_json)) {
- // If this succeeds then we need to flush to disk in order to create the JSON banlist.
- dirty = true;
- return DeserializeFileDB(m_banlist_dat, banSet, CLIENT_VERSION);
+ return false;
}
- dirty = false;
-
std::map<std::string, util::SettingsValue> settings;
std::vector<std::string> errors;
@@ -245,12 +187,7 @@ bool CAddrDB::Read(CAddrMan& addr)
bool CAddrDB::Read(CAddrMan& addr, CDataStream& ssPeers)
{
- bool ret = DeserializeDB(ssPeers, addr, false);
- if (!ret) {
- // Ensure addrman is left in a clean state
- addr.Clear();
- }
- return ret;
+ return DeserializeDB(ssPeers, addr, false);
}
void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors)
diff --git a/src/addrdb.h b/src/addrdb.h
index 399103c991..26b1c5880f 100644
--- a/src/addrdb.h
+++ b/src/addrdb.h
@@ -8,62 +8,14 @@
#include <fs.h>
#include <net_types.h> // For banmap_t
-#include <serialize.h>
#include <univalue.h>
-#include <string>
#include <vector>
class CAddress;
class CAddrMan;
class CDataStream;
-class CBanEntry
-{
-public:
- static const int CURRENT_VERSION=1;
- int nVersion;
- int64_t nCreateTime;
- int64_t nBanUntil;
-
- CBanEntry()
- {
- SetNull();
- }
-
- explicit CBanEntry(int64_t nCreateTimeIn)
- {
- SetNull();
- nCreateTime = nCreateTimeIn;
- }
-
- /**
- * Create a ban entry from JSON.
- * @param[in] json A JSON representation of a ban entry, as created by `ToJson()`.
- * @throw std::runtime_error if the JSON does not have the expected fields.
- */
- explicit CBanEntry(const UniValue& json);
-
- SERIALIZE_METHODS(CBanEntry, obj)
- {
- uint8_t ban_reason = 2; //! For backward compatibility
- READWRITE(obj.nVersion, obj.nCreateTime, obj.nBanUntil, ban_reason);
- }
-
- void SetNull()
- {
- nVersion = CBanEntry::CURRENT_VERSION;
- nCreateTime = 0;
- nBanUntil = 0;
- }
-
- /**
- * Generate a JSON representation of this ban entry.
- * @return JSON suitable for passing to the `CBanEntry(const UniValue&)` constructor.
- */
- UniValue ToJson() const;
-};
-
/** Access to the (IP) address database (peers.dat) */
class CAddrDB
{
@@ -76,7 +28,7 @@ public:
static bool Read(CAddrMan& addr, CDataStream& ssPeers);
};
-/** Access to the banlist databases (banlist.json and banlist.dat) */
+/** Access to the banlist database (banlist.json) */
class CBanDB
{
private:
@@ -95,11 +47,9 @@ public:
* Read the banlist from disk.
* @param[out] banSet The loaded list. Set if `true` is returned, otherwise it is left
* in an undefined state.
- * @param[out] dirty Indicates whether the loaded list needs flushing to disk. Set if
- * `true` is returned, otherwise it is left in an undefined state.
* @return true on success
*/
- bool Read(banmap_t& banSet, bool& dirty);
+ bool Read(banmap_t& banSet);
};
/**
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 8f702b5a8c..717cadaedf 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -15,6 +15,27 @@
#include <unordered_map>
#include <unordered_set>
+/** Over how many buckets entries with tried addresses from a single group (/16 for IPv4) are spread */
+static constexpr uint32_t ADDRMAN_TRIED_BUCKETS_PER_GROUP{8};
+/** Over how many buckets entries with new addresses originating from a single group are spread */
+static constexpr uint32_t ADDRMAN_NEW_BUCKETS_PER_SOURCE_GROUP{64};
+/** Maximum number of times an address can be added to the new table */
+static constexpr int32_t ADDRMAN_NEW_BUCKETS_PER_ADDRESS{8};
+/** How old addresses can maximally be */
+static constexpr int64_t ADDRMAN_HORIZON_DAYS{30};
+/** After how many failed attempts we give up on a new node */
+static constexpr int32_t ADDRMAN_RETRIES{3};
+/** How many successive failures are allowed ... */
+static constexpr int32_t ADDRMAN_MAX_FAILURES{10};
+/** ... in at least this many days */
+static constexpr int64_t ADDRMAN_MIN_FAIL_DAYS{7};
+/** How recent a successful connection should be before we allow an address to be evicted from tried */
+static constexpr int64_t ADDRMAN_REPLACEMENT_HOURS{4};
+/** The maximum number of tried addr collisions to store */
+static constexpr size_t ADDRMAN_SET_TRIED_COLLISION_SIZE{10};
+/** The maximum time we'll spend trying to resolve a tried table collision, in seconds */
+static constexpr int64_t ADDRMAN_TEST_WINDOW{40*60}; // 40 minutes
+
int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asmap) const
{
uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << GetKey()).GetCheapHash();
@@ -77,6 +98,303 @@ double CAddrInfo::GetChance(int64_t nNow) const
return fChance;
}
+CAddrMan::CAddrMan(std::vector<bool> asmap, bool deterministic, int32_t consistency_check_ratio)
+ : insecure_rand{deterministic}
+ , nKey{deterministic ? uint256{1} : insecure_rand.rand256()}
+ , m_consistency_check_ratio{consistency_check_ratio}
+ , m_asmap{std::move(asmap)}
+{
+ for (auto& bucket : vvNew) {
+ for (auto& entry : bucket) {
+ entry = -1;
+ }
+ }
+ for (auto& bucket : vvTried) {
+ for (auto& entry : bucket) {
+ entry = -1;
+ }
+ }
+}
+
+template <typename Stream>
+void CAddrMan::Serialize(Stream& s_) const
+{
+ LOCK(cs);
+
+ /**
+ * Serialized format.
+ * * format version byte (@see `Format`)
+ * * lowest compatible format version byte. This is used to help old software decide
+ * whether to parse the file. For example:
+ * * Bitcoin Core version N knows how to parse up to format=3. If a new format=4 is
+ * introduced in version N+1 that is compatible with format=3 and it is known that
+ * version N will be able to parse it, then version N+1 will write
+ * (format=4, lowest_compatible=3) in the first two bytes of the file, and so
+ * version N will still try to parse it.
+ * * Bitcoin Core version N+2 introduces a new incompatible format=5. It will write
+ * (format=5, lowest_compatible=5) and so any versions that do not know how to parse
+ * format=5 will not try to read the file.
+ * * nKey
+ * * nNew
+ * * nTried
+ * * number of "new" buckets XOR 2**30
+ * * all new addresses (total count: nNew)
+ * * all tried addresses (total count: nTried)
+ * * for each new bucket:
+ * * number of elements
+ * * for each element: index in the serialized "all new addresses"
+ * * asmap checksum
+ *
+ * 2**30 is xorred with the number of buckets to make addrman deserializer v0 detect it
+ * as incompatible. This is necessary because it did not check the version number on
+ * deserialization.
+ *
+ * vvNew, vvTried, mapInfo, mapAddr and vRandom are never encoded explicitly;
+ * they are instead reconstructed from the other information.
+ *
+ * This format is more complex, but significantly smaller (at most 1.5 MiB), and supports
+ * changes to the ADDRMAN_ parameters without breaking the on-disk structure.
+ *
+ * We don't use SERIALIZE_METHODS since the serialization and deserialization code has
+ * very little in common.
+ */
+
+ // Always serialize in the latest version (FILE_FORMAT).
+
+ OverrideStream<Stream> s(&s_, s_.GetType(), s_.GetVersion() | ADDRV2_FORMAT);
+
+ s << static_cast<uint8_t>(FILE_FORMAT);
+
+ // Increment `lowest_compatible` iff a newly introduced format is incompatible with
+ // the previous one.
+ static constexpr uint8_t lowest_compatible = Format::V3_BIP155;
+ s << static_cast<uint8_t>(INCOMPATIBILITY_BASE + lowest_compatible);
+
+ s << nKey;
+ s << nNew;
+ s << nTried;
+
+ int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
+ s << nUBuckets;
+ std::unordered_map<int, int> mapUnkIds;
+ int nIds = 0;
+ for (const auto& entry : mapInfo) {
+ mapUnkIds[entry.first] = nIds;
+ const CAddrInfo &info = entry.second;
+ if (info.nRefCount) {
+ assert(nIds != nNew); // this means nNew was wrong, oh ow
+ s << info;
+ nIds++;
+ }
+ }
+ nIds = 0;
+ for (const auto& entry : mapInfo) {
+ const CAddrInfo &info = entry.second;
+ if (info.fInTried) {
+ assert(nIds != nTried); // this means nTried was wrong, oh ow
+ s << info;
+ nIds++;
+ }
+ }
+ for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++) {
+ int nSize = 0;
+ for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
+ if (vvNew[bucket][i] != -1)
+ nSize++;
+ }
+ s << nSize;
+ for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
+ if (vvNew[bucket][i] != -1) {
+ int nIndex = mapUnkIds[vvNew[bucket][i]];
+ s << nIndex;
+ }
+ }
+ }
+ // Store asmap checksum after bucket entries so that it
+ // can be ignored by older clients for backward compatibility.
+ uint256 asmap_checksum;
+ if (m_asmap.size() != 0) {
+ asmap_checksum = SerializeHash(m_asmap);
+ }
+ s << asmap_checksum;
+}
+
+template <typename Stream>
+void CAddrMan::Unserialize(Stream& s_)
+{
+ LOCK(cs);
+
+ assert(vRandom.empty());
+
+ Format format;
+ s_ >> Using<CustomUintFormatter<1>>(format);
+
+ int stream_version = s_.GetVersion();
+ if (format >= Format::V3_BIP155) {
+ // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress
+ // unserialize methods know that an address in addrv2 format is coming.
+ stream_version |= ADDRV2_FORMAT;
+ }
+
+ OverrideStream<Stream> s(&s_, s_.GetType(), stream_version);
+
+ uint8_t compat;
+ s >> compat;
+ const uint8_t lowest_compatible = compat - INCOMPATIBILITY_BASE;
+ if (lowest_compatible > FILE_FORMAT) {
+ throw std::ios_base::failure(strprintf(
+ "Unsupported format of addrman database: %u. It is compatible with formats >=%u, "
+ "but the maximum supported by this version of %s is %u.",
+ uint8_t{format}, uint8_t{lowest_compatible}, PACKAGE_NAME, uint8_t{FILE_FORMAT}));
+ }
+
+ s >> nKey;
+ s >> nNew;
+ s >> nTried;
+ int nUBuckets = 0;
+ s >> nUBuckets;
+ if (format >= Format::V1_DETERMINISTIC) {
+ nUBuckets ^= (1 << 30);
+ }
+
+ if (nNew > ADDRMAN_NEW_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE || nNew < 0) {
+ throw std::ios_base::failure(
+ strprintf("Corrupt CAddrMan serialization: nNew=%d, should be in [0, %d]",
+ nNew,
+ ADDRMAN_NEW_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE));
+ }
+
+ if (nTried > ADDRMAN_TRIED_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE || nTried < 0) {
+ throw std::ios_base::failure(
+ strprintf("Corrupt CAddrMan serialization: nTried=%d, should be in [0, %d]",
+ nTried,
+ ADDRMAN_TRIED_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE));
+ }
+
+ // Deserialize entries from the new table.
+ for (int n = 0; n < nNew; n++) {
+ CAddrInfo &info = mapInfo[n];
+ s >> info;
+ mapAddr[info] = n;
+ info.nRandomPos = vRandom.size();
+ vRandom.push_back(n);
+ }
+ nIdCount = nNew;
+
+ // Deserialize entries from the tried table.
+ int nLost = 0;
+ for (int n = 0; n < nTried; n++) {
+ CAddrInfo info;
+ s >> info;
+ int nKBucket = info.GetTriedBucket(nKey, m_asmap);
+ int nKBucketPos = info.GetBucketPosition(nKey, false, nKBucket);
+ if (info.IsValid()
+ && vvTried[nKBucket][nKBucketPos] == -1) {
+ info.nRandomPos = vRandom.size();
+ info.fInTried = true;
+ vRandom.push_back(nIdCount);
+ mapInfo[nIdCount] = info;
+ mapAddr[info] = nIdCount;
+ vvTried[nKBucket][nKBucketPos] = nIdCount;
+ nIdCount++;
+ } else {
+ nLost++;
+ }
+ }
+ nTried -= nLost;
+
+ // Store positions in the new table buckets to apply later (if possible).
+ // An entry may appear in up to ADDRMAN_NEW_BUCKETS_PER_ADDRESS buckets,
+ // so we store all bucket-entry_index pairs to iterate through later.
+ std::vector<std::pair<int, int>> bucket_entries;
+
+ for (int bucket = 0; bucket < nUBuckets; ++bucket) {
+ int num_entries{0};
+ s >> num_entries;
+ for (int n = 0; n < num_entries; ++n) {
+ int entry_index{0};
+ s >> entry_index;
+ if (entry_index >= 0 && entry_index < nNew) {
+ bucket_entries.emplace_back(bucket, entry_index);
+ }
+ }
+ }
+
+ // If the bucket count and asmap checksum haven't changed, then attempt
+ // to restore the entries to the buckets/positions they were in before
+ // serialization.
+ uint256 supplied_asmap_checksum;
+ if (m_asmap.size() != 0) {
+ supplied_asmap_checksum = SerializeHash(m_asmap);
+ }
+ uint256 serialized_asmap_checksum;
+ if (format >= Format::V2_ASMAP) {
+ s >> serialized_asmap_checksum;
+ }
+ const bool restore_bucketing{nUBuckets == ADDRMAN_NEW_BUCKET_COUNT &&
+ serialized_asmap_checksum == supplied_asmap_checksum};
+
+ if (!restore_bucketing) {
+ LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n");
+ }
+
+ for (auto bucket_entry : bucket_entries) {
+ int bucket{bucket_entry.first};
+ const int entry_index{bucket_entry.second};
+ CAddrInfo& info = mapInfo[entry_index];
+
+ // Don't store the entry in the new bucket if it's not a valid address for our addrman
+ if (!info.IsValid()) continue;
+
+ // The entry shouldn't appear in more than
+ // ADDRMAN_NEW_BUCKETS_PER_ADDRESS. If it has already, just skip
+ // this bucket_entry.
+ if (info.nRefCount >= ADDRMAN_NEW_BUCKETS_PER_ADDRESS) continue;
+
+ int bucket_position = info.GetBucketPosition(nKey, true, bucket);
+ if (restore_bucketing && vvNew[bucket][bucket_position] == -1) {
+ // Bucketing has not changed, using existing bucket positions for the new table
+ vvNew[bucket][bucket_position] = entry_index;
+ ++info.nRefCount;
+ } else {
+ // In case the new table data cannot be used (bucket count wrong or new asmap),
+ // try to give them a reference based on their primary source address.
+ bucket = info.GetNewBucket(nKey, m_asmap);
+ bucket_position = info.GetBucketPosition(nKey, true, bucket);
+ if (vvNew[bucket][bucket_position] == -1) {
+ vvNew[bucket][bucket_position] = entry_index;
+ ++info.nRefCount;
+ }
+ }
+ }
+
+ // Prune new entries with refcount 0 (as a result of collisions or invalid address).
+ int nLostUnk = 0;
+ for (auto it = mapInfo.cbegin(); it != mapInfo.cend(); ) {
+ if (it->second.fInTried == false && it->second.nRefCount == 0) {
+ const auto itCopy = it++;
+ Delete(itCopy->first);
+ ++nLostUnk;
+ } else {
+ ++it;
+ }
+ }
+ if (nLost + nLostUnk > 0) {
+ LogPrint(BCLog::ADDRMAN, "addrman lost %i new and %i tried addresses due to collisions or invalid addresses\n", nLostUnk, nLost);
+ }
+
+ Check();
+}
+
+// explicit instantiation
+template void CAddrMan::Serialize(CHashWriter& s) const;
+template void CAddrMan::Serialize(CAutoFile& s) const;
+template void CAddrMan::Serialize(CDataStream& s) const;
+template void CAddrMan::Unserialize(CAutoFile& s);
+template void CAddrMan::Unserialize(CHashVerifier<CAutoFile>& s);
+template void CAddrMan::Unserialize(CDataStream& s);
+template void CAddrMan::Unserialize(CHashVerifier<CDataStream>& s);
+
CAddrInfo* CAddrMan::Find(const CNetAddr& addr, int* pnId)
{
AssertLockHeld(cs);
@@ -106,7 +424,7 @@ CAddrInfo* CAddrMan::Create(const CAddress& addr, const CNetAddr& addrSource, in
return &mapInfo[nId];
}
-void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2)
+void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) const
{
AssertLockHeld(cs);
@@ -118,11 +436,13 @@ void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2)
int nId1 = vRandom[nRndPos1];
int nId2 = vRandom[nRndPos2];
- assert(mapInfo.count(nId1) == 1);
- assert(mapInfo.count(nId2) == 1);
+ const auto it_1{mapInfo.find(nId1)};
+ const auto it_2{mapInfo.find(nId2)};
+ assert(it_1 != mapInfo.end());
+ assert(it_2 != mapInfo.end());
- mapInfo[nId1].nRandomPos = nRndPos2;
- mapInfo[nId2].nRandomPos = nRndPos1;
+ it_1->second.nRandomPos = nRndPos2;
+ it_2->second.nRandomPos = nRndPos1;
vRandom[nRndPos1] = nId2;
vRandom[nRndPos2] = nId1;
@@ -378,7 +698,7 @@ void CAddrMan::Attempt_(const CService& addr, bool fCountFailure, int64_t nTime)
}
}
-CAddrInfo CAddrMan::Select_(bool newOnly)
+CAddrInfo CAddrMan::Select_(bool newOnly) const
{
AssertLockHeld(cs);
@@ -401,8 +721,9 @@ CAddrInfo CAddrMan::Select_(bool newOnly)
nKBucketPos = (nKBucketPos + insecure_rand.randbits(ADDRMAN_BUCKET_SIZE_LOG2)) % ADDRMAN_BUCKET_SIZE;
}
int nId = vvTried[nKBucket][nKBucketPos];
- assert(mapInfo.count(nId) == 1);
- CAddrInfo& info = mapInfo[nId];
+ const auto it_found{mapInfo.find(nId)};
+ assert(it_found != mapInfo.end());
+ const CAddrInfo& info{it_found->second};
if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30))
return info;
fChanceFactor *= 1.2;
@@ -418,8 +739,9 @@ CAddrInfo CAddrMan::Select_(bool newOnly)
nUBucketPos = (nUBucketPos + insecure_rand.randbits(ADDRMAN_BUCKET_SIZE_LOG2)) % ADDRMAN_BUCKET_SIZE;
}
int nId = vvNew[nUBucket][nUBucketPos];
- assert(mapInfo.count(nId) == 1);
- CAddrInfo& info = mapInfo[nId];
+ const auto it_found{mapInfo.find(nId)};
+ assert(it_found != mapInfo.end());
+ const CAddrInfo& info{it_found->second};
if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30))
return info;
fChanceFactor *= 1.2;
@@ -427,11 +749,16 @@ CAddrInfo CAddrMan::Select_(bool newOnly)
}
}
-#ifdef DEBUG_ADDRMAN
-int CAddrMan::Check_()
+int CAddrMan::Check_() const
{
AssertLockHeld(cs);
+ // Run consistency checks 1 in m_consistency_check_ratio times if enabled
+ if (m_consistency_check_ratio == 0) return 0;
+ if (insecure_rand.randrange(m_consistency_check_ratio) >= 1) return 0;
+
+ LogPrint(BCLog::ADDRMAN, "Addrman checks started: new %i, tried %i, total %u\n", nNew, nTried, vRandom.size());
+
std::unordered_set<int> setTried;
std::unordered_map<int, int> mapNew;
@@ -454,8 +781,10 @@ int CAddrMan::Check_()
return -4;
mapNew[n] = info.nRefCount;
}
- if (mapAddr[info] != n)
+ const auto it{mapAddr.find(info)};
+ if (it == mapAddr.end() || it->second != n) {
return -5;
+ }
if (info.nRandomPos < 0 || (size_t)info.nRandomPos >= vRandom.size() || vRandom[info.nRandomPos] != n)
return -14;
if (info.nLastTry < 0)
@@ -471,15 +800,18 @@ int CAddrMan::Check_()
for (int n = 0; n < ADDRMAN_TRIED_BUCKET_COUNT; n++) {
for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
- if (vvTried[n][i] != -1) {
- if (!setTried.count(vvTried[n][i]))
- return -11;
- if (mapInfo[vvTried[n][i]].GetTriedBucket(nKey, m_asmap) != n)
- return -17;
- if (mapInfo[vvTried[n][i]].GetBucketPosition(nKey, false, n) != i)
- return -18;
- setTried.erase(vvTried[n][i]);
- }
+ if (vvTried[n][i] != -1) {
+ if (!setTried.count(vvTried[n][i]))
+ return -11;
+ const auto it{mapInfo.find(vvTried[n][i])};
+ if (it == mapInfo.end() || it->second.GetTriedBucket(nKey, m_asmap) != n) {
+ return -17;
+ }
+ if (it->second.GetBucketPosition(nKey, false, n) != i) {
+ return -18;
+ }
+ setTried.erase(vvTried[n][i]);
+ }
}
}
@@ -488,8 +820,10 @@ int CAddrMan::Check_()
if (vvNew[n][i] != -1) {
if (!mapNew.count(vvNew[n][i]))
return -12;
- if (mapInfo[vvNew[n][i]].GetBucketPosition(nKey, true, n) != i)
+ const auto it{mapInfo.find(vvNew[n][i])};
+ if (it == mapInfo.end() || it->second.GetBucketPosition(nKey, true, n) != i) {
return -19;
+ }
if (--mapNew[vvNew[n][i]] == 0)
mapNew.erase(vvNew[n][i]);
}
@@ -503,11 +837,11 @@ int CAddrMan::Check_()
if (nKey.IsNull())
return -16;
+ LogPrint(BCLog::ADDRMAN, "Addrman checks completed successfully\n");
return 0;
}
-#endif
-void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network)
+void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network) const
{
AssertLockHeld(cs);
@@ -527,9 +861,10 @@ void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size
int nRndPos = insecure_rand.randrange(vRandom.size() - n) + n;
SwapRandom(n, nRndPos);
- assert(mapInfo.count(vRandom[n]) == 1);
+ const auto it{mapInfo.find(vRandom[n])};
+ assert(it != mapInfo.end());
- const CAddrInfo& ai = mapInfo[vRandom[n]];
+ const CAddrInfo& ai{it->second};
// Filter by network (optional)
if (network != std::nullopt && ai.GetNetClass() != network) continue;
diff --git a/src/addrman.h b/src/addrman.h
index 665e253192..74bfe9748b 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -26,6 +26,9 @@
#include <unordered_map>
#include <vector>
+/** Default for -checkaddrman */
+static constexpr int32_t DEFAULT_ADDRMAN_CONSISTENCY_CHECKS{0};
+
/**
* Extended statistics about a CAddress
*/
@@ -55,9 +58,10 @@ private:
bool fInTried{false};
//! position in vRandom
- int nRandomPos{-1};
+ mutable int nRandomPos{-1};
friend class CAddrMan;
+ friend class CAddrManDeterministic;
public:
@@ -104,68 +108,40 @@ public:
* * Make sure no (localized) attacker can fill the entire table with his nodes/addresses.
*
* To that end:
- * * Addresses are organized into buckets.
- * * Addresses that have not yet been tried go into 1024 "new" buckets.
- * * Based on the address range (/16 for IPv4) of the source of information, 64 buckets are selected at random.
+ * * Addresses are organized into buckets that can each store up to 64 entries.
+ * * Addresses to which our node has not successfully connected go into 1024 "new" buckets.
+ * * Based on the address range (/16 for IPv4) of the source of information, or if an asmap is provided,
+ * the AS it belongs to (for IPv4/IPv6), 64 buckets are selected at random.
* * The actual bucket is chosen from one of these, based on the range in which the address itself is located.
+ * * The position in the bucket is chosen based on the full address.
* * One single address can occur in up to 8 different buckets to increase selection chances for addresses that
* are seen frequently. The chance for increasing this multiplicity decreases exponentially.
- * * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen
- * ones) is removed from it first.
+ * * When adding a new address to an occupied position of a bucket, it will not replace the existing entry
+ * unless that address is also stored in another bucket or it doesn't meet one of several quality criteria
+ * (see IsTerrible for exact criteria).
* * Addresses of nodes that are known to be accessible go into 256 "tried" buckets.
* * Each address range selects at random 8 of these buckets.
* * The actual bucket is chosen from one of these, based on the full address.
- * * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently
- * tried ones) is evicted from it, back to the "new" buckets.
+ * * When adding a new good address to an occupied position of a bucket, a FEELER connection to the
+ * old address is attempted. The old entry is only replaced and moved back to the "new" buckets if this
+ * attempt was unsuccessful.
* * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not
* be observable by adversaries.
- * * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive)
- * consistency checks for the entire data structure.
+ * * Several indexes are kept for high performance. Setting m_consistency_check_ratio with the -checkaddrman
+ * configuration option will introduce (expensive) consistency checks for the entire data structure.
*/
-//! total number of buckets for tried addresses
-#define ADDRMAN_TRIED_BUCKET_COUNT_LOG2 8
-
-//! total number of buckets for new addresses
-#define ADDRMAN_NEW_BUCKET_COUNT_LOG2 10
-
-//! maximum allowed number of entries in buckets for new and tried addresses
-#define ADDRMAN_BUCKET_SIZE_LOG2 6
-
-//! over how many buckets entries with tried addresses from a single group (/16 for IPv4) are spread
-#define ADDRMAN_TRIED_BUCKETS_PER_GROUP 8
-
-//! over how many buckets entries with new addresses originating from a single group are spread
-#define ADDRMAN_NEW_BUCKETS_PER_SOURCE_GROUP 64
-
-//! in how many buckets for entries with new addresses a single address may occur
-#define ADDRMAN_NEW_BUCKETS_PER_ADDRESS 8
-
-//! how old addresses can maximally be
-#define ADDRMAN_HORIZON_DAYS 30
-
-//! after how many failed attempts we give up on a new node
-#define ADDRMAN_RETRIES 3
+/** Total number of buckets for tried addresses */
+static constexpr int32_t ADDRMAN_TRIED_BUCKET_COUNT_LOG2{8};
+static constexpr int ADDRMAN_TRIED_BUCKET_COUNT{1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2};
-//! how many successive failures are allowed ...
-#define ADDRMAN_MAX_FAILURES 10
+/** Total number of buckets for new addresses */
+static constexpr int32_t ADDRMAN_NEW_BUCKET_COUNT_LOG2{10};
+static constexpr int ADDRMAN_NEW_BUCKET_COUNT{1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2};
-//! ... in at least this many days
-#define ADDRMAN_MIN_FAIL_DAYS 7
-
-//! how recent a successful connection should be before we allow an address to be evicted from tried
-#define ADDRMAN_REPLACEMENT_HOURS 4
-
-//! Convenience
-#define ADDRMAN_TRIED_BUCKET_COUNT (1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2)
-#define ADDRMAN_NEW_BUCKET_COUNT (1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2)
-#define ADDRMAN_BUCKET_SIZE (1 << ADDRMAN_BUCKET_SIZE_LOG2)
-
-//! the maximum number of tried addr collisions to store
-#define ADDRMAN_SET_TRIED_COLLISION_SIZE 10
-
-//! the maximum time we'll spend trying to resolve a tried table collision, in seconds
-static const int64_t ADDRMAN_TEST_WINDOW = 40*60; // 40 minutes
+/** Maximum allowed number of entries in buckets for new and tried addresses */
+static constexpr int32_t ADDRMAN_BUCKET_SIZE_LOG2{6};
+static constexpr int ADDRMAN_BUCKET_SIZE{1 << ADDRMAN_BUCKET_SIZE_LOG2};
/**
* Stochastical (IP) address manager
@@ -173,315 +149,16 @@ static const int64_t ADDRMAN_TEST_WINDOW = 40*60; // 40 minutes
class CAddrMan
{
public:
- // Compressed IP->ASN mapping, loaded from a file when a node starts.
- // Should be always empty if no file was provided.
- // This mapping is then used for bucketing nodes in Addrman.
- //
- // If asmap is provided, nodes will be bucketed by
- // AS they belong to, in order to make impossible for a node
- // to connect to several nodes hosted in a single AS.
- // This is done in response to Erebus attack, but also to generally
- // diversify the connections every node creates,
- // especially useful when a large fraction of nodes
- // operate under a couple of cloud providers.
- //
- // If a new asmap was provided, the existing records
- // would be re-bucketed accordingly.
- std::vector<bool> m_asmap;
-
// Read asmap from provided binary file
static std::vector<bool> DecodeAsmap(fs::path path);
- /**
- * Serialized format.
- * * format version byte (@see `Format`)
- * * lowest compatible format version byte. This is used to help old software decide
- * whether to parse the file. For example:
- * * Bitcoin Core version N knows how to parse up to format=3. If a new format=4 is
- * introduced in version N+1 that is compatible with format=3 and it is known that
- * version N will be able to parse it, then version N+1 will write
- * (format=4, lowest_compatible=3) in the first two bytes of the file, and so
- * version N will still try to parse it.
- * * Bitcoin Core version N+2 introduces a new incompatible format=5. It will write
- * (format=5, lowest_compatible=5) and so any versions that do not know how to parse
- * format=5 will not try to read the file.
- * * nKey
- * * nNew
- * * nTried
- * * number of "new" buckets XOR 2**30
- * * all new addresses (total count: nNew)
- * * all tried addresses (total count: nTried)
- * * for each new bucket:
- * * number of elements
- * * for each element: index in the serialized "all new addresses"
- * * asmap checksum
- *
- * 2**30 is xorred with the number of buckets to make addrman deserializer v0 detect it
- * as incompatible. This is necessary because it did not check the version number on
- * deserialization.
- *
- * vvNew, vvTried, mapInfo, mapAddr and vRandom are never encoded explicitly;
- * they are instead reconstructed from the other information.
- *
- * This format is more complex, but significantly smaller (at most 1.5 MiB), and supports
- * changes to the ADDRMAN_ parameters without breaking the on-disk structure.
- *
- * We don't use SERIALIZE_METHODS since the serialization and deserialization code has
- * very little in common.
- */
template <typename Stream>
- void Serialize(Stream& s_) const
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
-
- // Always serialize in the latest version (FILE_FORMAT).
-
- OverrideStream<Stream> s(&s_, s_.GetType(), s_.GetVersion() | ADDRV2_FORMAT);
-
- s << static_cast<uint8_t>(FILE_FORMAT);
-
- // Increment `lowest_compatible` iff a newly introduced format is incompatible with
- // the previous one.
- static constexpr uint8_t lowest_compatible = Format::V3_BIP155;
- s << static_cast<uint8_t>(INCOMPATIBILITY_BASE + lowest_compatible);
-
- s << nKey;
- s << nNew;
- s << nTried;
-
- int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
- s << nUBuckets;
- std::unordered_map<int, int> mapUnkIds;
- int nIds = 0;
- for (const auto& entry : mapInfo) {
- mapUnkIds[entry.first] = nIds;
- const CAddrInfo &info = entry.second;
- if (info.nRefCount) {
- assert(nIds != nNew); // this means nNew was wrong, oh ow
- s << info;
- nIds++;
- }
- }
- nIds = 0;
- for (const auto& entry : mapInfo) {
- const CAddrInfo &info = entry.second;
- if (info.fInTried) {
- assert(nIds != nTried); // this means nTried was wrong, oh ow
- s << info;
- nIds++;
- }
- }
- for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++) {
- int nSize = 0;
- for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
- if (vvNew[bucket][i] != -1)
- nSize++;
- }
- s << nSize;
- for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
- if (vvNew[bucket][i] != -1) {
- int nIndex = mapUnkIds[vvNew[bucket][i]];
- s << nIndex;
- }
- }
- }
- // Store asmap checksum after bucket entries so that it
- // can be ignored by older clients for backward compatibility.
- uint256 asmap_checksum;
- if (m_asmap.size() != 0) {
- asmap_checksum = SerializeHash(m_asmap);
- }
- s << asmap_checksum;
- }
+ void Serialize(Stream& s_) const EXCLUSIVE_LOCKS_REQUIRED(!cs);
template <typename Stream>
- void Unserialize(Stream& s_)
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
+ void Unserialize(Stream& s_) EXCLUSIVE_LOCKS_REQUIRED(!cs);
- assert(vRandom.empty());
-
- Format format;
- s_ >> Using<CustomUintFormatter<1>>(format);
-
- int stream_version = s_.GetVersion();
- if (format >= Format::V3_BIP155) {
- // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress
- // unserialize methods know that an address in addrv2 format is coming.
- stream_version |= ADDRV2_FORMAT;
- }
-
- OverrideStream<Stream> s(&s_, s_.GetType(), stream_version);
-
- uint8_t compat;
- s >> compat;
- const uint8_t lowest_compatible = compat - INCOMPATIBILITY_BASE;
- if (lowest_compatible > FILE_FORMAT) {
- throw std::ios_base::failure(strprintf(
- "Unsupported format of addrman database: %u. It is compatible with formats >=%u, "
- "but the maximum supported by this version of %s is %u.",
- format, lowest_compatible, PACKAGE_NAME, static_cast<uint8_t>(FILE_FORMAT)));
- }
-
- s >> nKey;
- s >> nNew;
- s >> nTried;
- int nUBuckets = 0;
- s >> nUBuckets;
- if (format >= Format::V1_DETERMINISTIC) {
- nUBuckets ^= (1 << 30);
- }
-
- if (nNew > ADDRMAN_NEW_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE) {
- throw std::ios_base::failure("Corrupt CAddrMan serialization, nNew exceeds limit.");
- }
-
- if (nTried > ADDRMAN_TRIED_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE) {
- throw std::ios_base::failure("Corrupt CAddrMan serialization, nTried exceeds limit.");
- }
-
- // Deserialize entries from the new table.
- for (int n = 0; n < nNew; n++) {
- CAddrInfo &info = mapInfo[n];
- s >> info;
- mapAddr[info] = n;
- info.nRandomPos = vRandom.size();
- vRandom.push_back(n);
- }
- nIdCount = nNew;
-
- // Deserialize entries from the tried table.
- int nLost = 0;
- for (int n = 0; n < nTried; n++) {
- CAddrInfo info;
- s >> info;
- int nKBucket = info.GetTriedBucket(nKey, m_asmap);
- int nKBucketPos = info.GetBucketPosition(nKey, false, nKBucket);
- if (vvTried[nKBucket][nKBucketPos] == -1) {
- info.nRandomPos = vRandom.size();
- info.fInTried = true;
- vRandom.push_back(nIdCount);
- mapInfo[nIdCount] = info;
- mapAddr[info] = nIdCount;
- vvTried[nKBucket][nKBucketPos] = nIdCount;
- nIdCount++;
- } else {
- nLost++;
- }
- }
- nTried -= nLost;
-
- // Store positions in the new table buckets to apply later (if possible).
- // An entry may appear in up to ADDRMAN_NEW_BUCKETS_PER_ADDRESS buckets,
- // so we store all bucket-entry_index pairs to iterate through later.
- std::vector<std::pair<int, int>> bucket_entries;
-
- for (int bucket = 0; bucket < nUBuckets; ++bucket) {
- int num_entries{0};
- s >> num_entries;
- for (int n = 0; n < num_entries; ++n) {
- int entry_index{0};
- s >> entry_index;
- if (entry_index >= 0 && entry_index < nNew) {
- bucket_entries.emplace_back(bucket, entry_index);
- }
- }
- }
-
- // If the bucket count and asmap checksum haven't changed, then attempt
- // to restore the entries to the buckets/positions they were in before
- // serialization.
- uint256 supplied_asmap_checksum;
- if (m_asmap.size() != 0) {
- supplied_asmap_checksum = SerializeHash(m_asmap);
- }
- uint256 serialized_asmap_checksum;
- if (format >= Format::V2_ASMAP) {
- s >> serialized_asmap_checksum;
- }
- const bool restore_bucketing{nUBuckets == ADDRMAN_NEW_BUCKET_COUNT &&
- serialized_asmap_checksum == supplied_asmap_checksum};
-
- if (!restore_bucketing) {
- LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n");
- }
-
- for (auto bucket_entry : bucket_entries) {
- int bucket{bucket_entry.first};
- const int entry_index{bucket_entry.second};
- CAddrInfo& info = mapInfo[entry_index];
-
- // The entry shouldn't appear in more than
- // ADDRMAN_NEW_BUCKETS_PER_ADDRESS. If it has already, just skip
- // this bucket_entry.
- if (info.nRefCount >= ADDRMAN_NEW_BUCKETS_PER_ADDRESS) continue;
-
- int bucket_position = info.GetBucketPosition(nKey, true, bucket);
- if (restore_bucketing && vvNew[bucket][bucket_position] == -1) {
- // Bucketing has not changed, using existing bucket positions for the new table
- vvNew[bucket][bucket_position] = entry_index;
- ++info.nRefCount;
- } else {
- // In case the new table data cannot be used (bucket count wrong or new asmap),
- // try to give them a reference based on their primary source address.
- bucket = info.GetNewBucket(nKey, m_asmap);
- bucket_position = info.GetBucketPosition(nKey, true, bucket);
- if (vvNew[bucket][bucket_position] == -1) {
- vvNew[bucket][bucket_position] = entry_index;
- ++info.nRefCount;
- }
- }
- }
-
- // Prune new entries with refcount 0 (as a result of collisions).
- int nLostUnk = 0;
- for (auto it = mapInfo.cbegin(); it != mapInfo.cend(); ) {
- if (it->second.fInTried == false && it->second.nRefCount == 0) {
- const auto itCopy = it++;
- Delete(itCopy->first);
- ++nLostUnk;
- } else {
- ++it;
- }
- }
- if (nLost + nLostUnk > 0) {
- LogPrint(BCLog::ADDRMAN, "addrman lost %i new and %i tried addresses due to collisions\n", nLostUnk, nLost);
- }
-
- Check();
- }
-
- void Clear()
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- std::vector<int>().swap(vRandom);
- nKey = insecure_rand.rand256();
- for (size_t bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++) {
- for (size_t entry = 0; entry < ADDRMAN_BUCKET_SIZE; entry++) {
- vvNew[bucket][entry] = -1;
- }
- }
- for (size_t bucket = 0; bucket < ADDRMAN_TRIED_BUCKET_COUNT; bucket++) {
- for (size_t entry = 0; entry < ADDRMAN_BUCKET_SIZE; entry++) {
- vvTried[bucket][entry] = -1;
- }
- }
-
- nIdCount = 0;
- nTried = 0;
- nNew = 0;
- nLastGood = 1; //Initially at 1 so that "never" is strictly worse.
- mapInfo.clear();
- mapAddr.clear();
- }
-
- CAddrMan()
- {
- Clear();
- }
+ explicit CAddrMan(std::vector<bool> asmap, bool deterministic, int32_t consistency_check_ratio);
~CAddrMan()
{
@@ -496,22 +173,7 @@ public:
return vRandom.size();
}
- //! Add a single address.
- bool Add(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty = 0)
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- bool fRet = false;
- Check();
- fRet |= Add_(addr, source, nTimePenalty);
- Check();
- if (fRet) {
- LogPrint(BCLog::ADDRMAN, "Added %s from %s: %i tried, %i new\n", addr.ToStringIPPort(), source.ToString(), nTried, nNew);
- }
- return fRet;
- }
-
- //! Add multiple addresses.
+ //! Add addresses to addrman's new table.
bool Add(const std::vector<CAddress> &vAddr, const CNetAddr& source, int64_t nTimePenalty = 0)
EXCLUSIVE_LOCKS_REQUIRED(!cs)
{
@@ -528,12 +190,12 @@ public:
}
//! Mark an entry as accessible.
- void Good(const CService &addr, bool test_before_evict = true, int64_t nTime = GetAdjustedTime())
+ void Good(const CService &addr, int64_t nTime = GetAdjustedTime())
EXCLUSIVE_LOCKS_REQUIRED(!cs)
{
LOCK(cs);
Check();
- Good_(addr, test_before_evict, nTime);
+ Good_(addr, /* test_before_evict */ true, nTime);
Check();
}
@@ -571,7 +233,7 @@ public:
/**
* Choose an address to connect to.
*/
- CAddrInfo Select(bool newOnly = false)
+ CAddrInfo Select(bool newOnly = false) const
EXCLUSIVE_LOCKS_REQUIRED(!cs)
{
LOCK(cs);
@@ -588,7 +250,7 @@ public:
* @param[in] max_pct Maximum percentage of addresses to return (0 = all).
* @param[in] network Select only addresses of this network (nullopt = all).
*/
- std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network)
+ std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network) const
EXCLUSIVE_LOCKS_REQUIRED(!cs)
{
LOCK(cs);
@@ -618,17 +280,18 @@ public:
Check();
}
-protected:
- //! secret key to randomize bucket select with
- uint256 nKey;
-
- //! Source of random numbers for randomization in inner loops
- FastRandomContext insecure_rand;
+ const std::vector<bool>& GetAsmap() const { return m_asmap; }
private:
//! A mutex to protect the inner data structures.
mutable Mutex cs;
+ //! Source of random numbers for randomization in inner loops
+ mutable FastRandomContext insecure_rand GUARDED_BY(cs);
+
+ //! secret key to randomize bucket select with
+ uint256 nKey;
+
//! Serialization versions.
enum Format : uint8_t {
V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88
@@ -652,7 +315,7 @@ private:
static constexpr uint8_t INCOMPATIBILITY_BASE = 32;
//! last used nId
- int nIdCount GUARDED_BY(cs);
+ int nIdCount GUARDED_BY(cs){0};
//! table with information about all nIds
std::unordered_map<int, CAddrInfo> mapInfo GUARDED_BY(cs);
@@ -661,35 +324,55 @@ private:
std::unordered_map<CNetAddr, int, CNetAddrHash> mapAddr GUARDED_BY(cs);
//! randomly-ordered vector of all nIds
- std::vector<int> vRandom GUARDED_BY(cs);
+ //! This is mutable because it is unobservable outside the class, so any
+ //! changes to it (even in const methods) are also unobservable.
+ mutable std::vector<int> vRandom GUARDED_BY(cs);
// number of "tried" entries
- int nTried GUARDED_BY(cs);
+ int nTried GUARDED_BY(cs){0};
//! list of "tried" buckets
int vvTried[ADDRMAN_TRIED_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
//! number of (unique) "new" entries
- int nNew GUARDED_BY(cs);
+ int nNew GUARDED_BY(cs){0};
//! list of "new" buckets
int vvNew[ADDRMAN_NEW_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
- //! last time Good was called (memory only)
- int64_t nLastGood GUARDED_BY(cs);
+ //! last time Good was called (memory only). Initially set to 1 so that "never" is strictly worse.
+ int64_t nLastGood GUARDED_BY(cs){1};
//! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discipline used to resolve these collisions.
std::set<int> m_tried_collisions;
+ /** Perform consistency checks every m_consistency_check_ratio operations (if non-zero). */
+ const int32_t m_consistency_check_ratio;
+
+ // Compressed IP->ASN mapping, loaded from a file when a node starts.
+ // Should always be empty if no file was provided.
+ // This mapping is then used for bucketing nodes in Addrman.
+ //
+ // If asmap is provided, nodes will be bucketed by
+ // the AS they belong to, in order to make it impossible for a node
+ // to connect to several nodes hosted in a single AS.
+ // This is done in response to the Erebus attack, but also to generally
+ // diversify the connections every node creates, which is
+ // especially useful when a large fraction of nodes
+ // operate under a couple of cloud providers.
+ //
+ // If a new asmap is provided, the existing records
+ // are re-bucketed accordingly.
+ const std::vector<bool> m_asmap;
+
//! Find an entry.
CAddrInfo* Find(const CNetAddr& addr, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
- //! find an entry, creating it if necessary.
- //! nTime and nServices of the found node are updated, if necessary.
+ //! Create a new entry and add it to the internal data structures mapInfo, mapAddr and vRandom.
CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Swap two elements in vRandom.
- void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) const EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Move an entry from the "new" table(s) to the "tried" table
void MakeTried(CAddrInfo& info, int nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -710,7 +393,7 @@ private:
void Attempt_(const CService &addr, bool fCountFailure, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Select an address to connect to, if newOnly is set to true, only the new table is selected from.
- CAddrInfo Select_(bool newOnly) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ CAddrInfo Select_(bool newOnly) const EXCLUSIVE_LOCKS_REQUIRED(cs);
//! See if any to-be-evicted tried table entries have been tested and if so resolve the collisions.
void ResolveCollisions_() EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -719,22 +402,19 @@ private:
CAddrInfo SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Consistency check
- void Check()
- EXCLUSIVE_LOCKS_REQUIRED(cs)
+ void Check() const EXCLUSIVE_LOCKS_REQUIRED(cs)
{
-#ifdef DEBUG_ADDRMAN
AssertLockHeld(cs);
+
const int err = Check_();
if (err) {
LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err);
+ assert(false);
}
-#endif
}
-#ifdef DEBUG_ADDRMAN
//! Perform consistency check. Returns an error code or zero.
- int Check_() EXCLUSIVE_LOCKS_REQUIRED(cs);
-#endif
+ int Check_() const EXCLUSIVE_LOCKS_REQUIRED(cs);
/**
* Return all or many randomly selected addresses, optionally by network.
@@ -744,7 +424,7 @@ private:
* @param[in] max_pct Maximum percentage of addresses to return (0 = all).
* @param[in] network Select only addresses of this network (nullopt = all).
*/
- void GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network) const EXCLUSIVE_LOCKS_REQUIRED(cs);
/** We have successfully connected to this peer. Calling this function
* updates the CAddress's nTime, which is used in our IsTerrible()
@@ -763,6 +443,7 @@ private:
void SetServices_(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs);
friend class CAddrManTest;
+ friend class CAddrManDeterministic;
};
#endif // BITCOIN_ADDRMAN_H
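The hunks above drop the long serialization comment and the inline Serialize/Unserialize bodies from the header while keeping the declarations. The compatibility gate they describe, a format byte followed by a byte encoding INCOMPATIBILITY_BASE + lowest_compatible, can be summarized in a small standalone sketch. The V1/V2/V3 numeric values and the FILE_FORMAT value are not shown in these hunks, so they are assumptions here, and CanParse is a hypothetical helper used only for illustration:

```cpp
#include <cstdint>
#include <cstdio>

// Copied or inferred from the addrman.h hunks above; the V1..V3 values and
// FILE_FORMAT = V3_BIP155 are assumptions for this sketch.
enum Format : uint8_t {
    V0_HISTORICAL = 0,
    V1_DETERMINISTIC = 1,
    V2_ASMAP = 2,
    V3_BIP155 = 3,
};
static constexpr uint8_t INCOMPATIBILITY_BASE = 32;
static constexpr uint8_t FILE_FORMAT = V3_BIP155;

// Hypothetical helper mirroring the check in Unserialize(): a reader that knows
// formats up to `reader_format` accepts the file only if the writer's declared
// lowest compatible format is not newer than that.
bool CanParse(uint8_t reader_format, uint8_t second_byte)
{
    const uint8_t lowest_compatible = second_byte - INCOMPATIBILITY_BASE;
    return lowest_compatible <= reader_format;
}

int main()
{
    // A file written as (format=3, lowest_compatible=3) is rejected by a reader
    // that only knows format 2...
    std::printf("%d\n", CanParse(/*reader_format=*/2, INCOMPATIBILITY_BASE + 3)); // 0
    // ...but accepted by a reader that knows format 3.
    std::printf("%d\n", CanParse(/*reader_format=*/3, INCOMPATIBILITY_BASE + 3)); // 1
    return 0;
}
```

This is exactly the `lowest_compatible > FILE_FORMAT` rejection in the Unserialize body being moved out of the header.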
diff --git a/src/banman.cpp b/src/banman.cpp
index d2437e6733..c64a48a05a 100644
--- a/src/banman.cpp
+++ b/src/banman.cpp
@@ -18,7 +18,7 @@ BanMan::BanMan(fs::path ban_file, CClientUIInterface* client_interface, int64_t
if (m_client_interface) m_client_interface->InitMessage(_("Loading banlist…").translated);
int64_t n_start = GetTimeMillis();
- if (m_ban_db.Read(m_banned, m_is_dirty)) {
+ if (m_ban_db.Read(m_banned)) {
SweepBanned(); // sweep out unused entries
LogPrint(BCLog::NET, "Loaded %d banned node addresses/subnets %dms\n", m_banned.size(),
diff --git a/src/banman.h b/src/banman.h
index 8c75d4037e..8a03a9e3fc 100644
--- a/src/banman.h
+++ b/src/banman.h
@@ -88,7 +88,7 @@ private:
RecursiveMutex m_cs_banned;
banmap_t m_banned GUARDED_BY(m_cs_banned);
- bool m_is_dirty GUARDED_BY(m_cs_banned);
+ bool m_is_dirty GUARDED_BY(m_cs_banned){false};
CClientUIInterface* m_client_interface = nullptr;
CBanDB m_ban_db;
const int64_t m_default_ban_time;
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
index b7bd8a3261..8fbb68c04c 100644
--- a/src/bench/addrman.cpp
+++ b/src/bench/addrman.cpp
@@ -72,17 +72,15 @@ static void AddrManAdd(benchmark::Bench& bench)
{
CreateAddresses();
- CAddrMan addrman;
-
bench.run([&] {
+ CAddrMan addrman{/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0};
AddAddressesToAddrMan(addrman);
- addrman.Clear();
});
}
static void AddrManSelect(benchmark::Bench& bench)
{
- CAddrMan addrman;
+ CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
FillAddrMan(addrman);
@@ -94,7 +92,7 @@ static void AddrManSelect(benchmark::Bench& bench)
static void AddrManGetAddr(benchmark::Bench& bench)
{
- CAddrMan addrman;
+ CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
FillAddrMan(addrman);
@@ -112,10 +110,12 @@ static void AddrManGood(benchmark::Bench& bench)
* we want to do the same amount of work in every loop iteration. */
bench.epochs(5).epochIterations(1);
+ const size_t addrman_count{bench.epochs() * bench.epochIterations()};
- std::vector<CAddrMan> addrmans(bench.epochs() * bench.epochIterations());
- for (auto& addrman : addrmans) {
- FillAddrMan(addrman);
+ std::vector<std::unique_ptr<CAddrMan>> addrmans(addrman_count);
+ for (size_t i{0}; i < addrman_count; ++i) {
+ addrmans[i] = std::make_unique<CAddrMan>(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ FillAddrMan(*addrmans[i]);
}
auto markSomeAsGood = [](CAddrMan& addrman) {
@@ -130,7 +130,7 @@ static void AddrManGood(benchmark::Bench& bench)
uint64_t i = 0;
bench.run([&] {
- markSomeAsGood(addrmans.at(i));
+ markSomeAsGood(*addrmans.at(i));
++i;
});
}
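Since CAddrMan no longer has a default constructor or a Clear() method, the benchmarks above construct instances explicitly and hold them in unique_ptrs where one instance per epoch is needed. A sketch of that construction pattern, assuming it is compiled inside the Bitcoin Core tree after this change; it is not part of the patch:

```cpp
#include <addrman.h>

#include <memory>
#include <vector>

int main()
{
    // The three constructor arguments introduced by this diff: an (empty) asmap,
    // non-deterministic randomness, and consistency checks disabled.
    CAddrMan addrman{/* asmap */ std::vector<bool>(), /* deterministic */ false,
                     /* consistency_check_ratio */ 0};

    // With CAddrMan::Clear() removed, code that previously reset an instance now
    // constructs a fresh one instead (the benchmarks above keep them in unique_ptrs).
    auto fresh = std::make_unique<CAddrMan>(/* asmap */ std::vector<bool>(),
                                            /* deterministic */ false,
                                            /* consistency_check_ratio */ 0);
    (void)addrman;
    (void)fresh;
    return 0;
}
```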
diff --git a/src/bench/bench.h b/src/bench/bench.h
index 22f06d8cb8..c4fcd80e33 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -18,16 +18,19 @@
/*
* Usage:
-static void CODE_TO_TIME(benchmark::Bench& bench)
+static void NameOfYourBenchmarkFunction(benchmark::Bench& bench)
{
- ... do any setup needed...
- nanobench::Config().run([&] {
- ... do stuff you want to time...
+ ...do any setup needed...
+
+ bench.run([&] {
+ ...do stuff you want to time; refer to src/bench/nanobench.h
+ for more information and the options that can be passed here...
});
- ... do any cleanup needed...
+
+ ...do any cleanup needed...
}
-BENCHMARK(CODE_TO_TIME);
+BENCHMARK(NameOfYourBenchmarkFunction);
*/
@@ -55,7 +58,8 @@ public:
static void RunAll(const Args& args);
};
-}
+} // namespace benchmark
+
// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo);
#define BENCHMARK(n) \
benchmark::BenchRunner PASTE2(bench_, PASTE2(__LINE__, n))(STRINGIZE(n), n);
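The rewritten usage comment above maps directly onto a concrete benchmark. A minimal instance of that pattern, assuming it lives in src/bench/ inside the tree; the work being timed is purely illustrative:

```cpp
#include <bench/bench.h>

#include <numeric>
#include <vector>

static void SumVector(benchmark::Bench& bench)
{
    // ...setup...
    std::vector<int> v(1000, 1);

    bench.run([&] {
        // ...work being timed; the volatile write keeps it from being optimized away...
        volatile int sum = std::accumulate(v.begin(), v.end(), 0);
        (void)sum;
    });
}

BENCHMARK(SumVector);
```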
diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp
index 135659f87f..aab777cac1 100644
--- a/src/bench/bench_bitcoin.cpp
+++ b/src/bench/bench_bitcoin.cpp
@@ -16,11 +16,11 @@ static void SetupBenchArgs(ArgsManager& argsman)
{
SetupHelpOptions(argsman);
- argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-asymptote=n1,n2,n3,...", "Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-filter=<regex>", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-asymptote=n1,n2,n3,...", strprintf("Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-output_csv=<output.csv>", "Generate CSV file with the most important benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-output_json=<output.json>", "Generate JSON file with all benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-output_csv=<output.csv>", "Generate CSV file with the most important benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-output_json=<output.json>", "Generate JSON file with all benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
}
// parses a comma separated list like "10,20,30,50"
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index 5beb833b48..934b574f8b 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -6,6 +6,7 @@
#include <interfaces/chain.h>
#include <node/context.h>
#include <wallet/coinselection.h>
+#include <wallet/spend.h>
#include <wallet/wallet.h>
#include <set>
@@ -17,7 +18,7 @@ static void addCoin(const CAmount& nValue, const CWallet& wallet, std::vector<st
tx.nLockTime = nextLockTime++; // so all transactions get different hashes
tx.vout.resize(1);
tx.vout[0].nValue = nValue;
- wtxs.push_back(std::make_unique<CWalletTx>(&wallet, MakeTransactionRef(std::move(tx))));
+ wtxs.push_back(std::make_unique<CWalletTx>(MakeTransactionRef(std::move(tx))));
}
// Simple benchmark for wallet coin selection. Note that it maybe be necessary
@@ -45,18 +46,18 @@ static void CoinSelection(benchmark::Bench& bench)
// Create coins
std::vector<COutput> coins;
for (const auto& wtx : wtxs) {
- coins.emplace_back(wtx.get(), 0 /* iIn */, 6 * 24 /* nDepthIn */, true /* spendable */, true /* solvable */, true /* safe */);
+ coins.emplace_back(wallet, *wtx, 0 /* iIn */, 6 * 24 /* nDepthIn */, true /* spendable */, true /* solvable */, true /* safe */);
}
const CoinEligibilityFilter filter_standard(1, 6, 0);
const CoinSelectionParams coin_selection_params(/* change_output_size= */ 34,
/* change_spend_size= */ 148, /* effective_feerate= */ CFeeRate(0),
/* long_term_feerate= */ CFeeRate(0), /* discard_feerate= */ CFeeRate(0),
- /* tx_no_inputs_size= */ 0, /* avoid_partial= */ false);
+ /* tx_noinputs_size= */ 0, /* avoid_partial= */ false);
bench.run([&] {
std::set<CInputCoin> setCoinsRet;
CAmount nValueRet;
- bool success = wallet.AttemptSelection(1003 * COIN, filter_standard, coins, setCoinsRet, nValueRet, coin_selection_params);
+ bool success = AttemptSelection(wallet, 1003 * COIN, filter_standard, coins, setCoinsRet, nValueRet, coin_selection_params);
assert(success);
assert(nValueRet == 1003 * COIN);
assert(setCoinsRet.size() == 2);
@@ -75,9 +76,9 @@ static void add_coin(const CAmount& nValue, int nInput, std::vector<OutputGroup>
CMutableTransaction tx;
tx.vout.resize(nInput + 1);
tx.vout[nInput].nValue = nValue;
- std::unique_ptr<CWalletTx> wtx = std::make_unique<CWalletTx>(&testWallet, MakeTransactionRef(std::move(tx)));
+ std::unique_ptr<CWalletTx> wtx = std::make_unique<CWalletTx>(MakeTransactionRef(std::move(tx)));
set.emplace_back();
- set.back().Insert(COutput(wtx.get(), nInput, 0, true, true, true).GetInputCoin(), 0, true, 0, 0, false);
+ set.back().Insert(COutput(testWallet, *wtx, nInput, 0, true, true, true).GetInputCoin(), 0, true, 0, 0, false);
wtxn.emplace_back(std::move(wtx));
}
// Copied from src/wallet/test/coinselector_tests.cpp
diff --git a/src/bench/peer_eviction.cpp b/src/bench/peer_eviction.cpp
new file mode 100644
index 0000000000..46fd9d999e
--- /dev/null
+++ b/src/bench/peer_eviction.cpp
@@ -0,0 +1,157 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bench/bench.h>
+#include <net.h>
+#include <netaddress.h>
+#include <random.h>
+#include <test/util/net.h>
+#include <test/util/setup_common.h>
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+static void EvictionProtectionCommon(
+ benchmark::Bench& bench,
+ int num_candidates,
+ std::function<void(NodeEvictionCandidate&)> candidate_setup_fn)
+{
+ using Candidates = std::vector<NodeEvictionCandidate>;
+ FastRandomContext random_context{true};
+ bench.warmup(100).epochIterations(1100);
+
+ Candidates candidates{GetRandomNodeEvictionCandidates(num_candidates, random_context)};
+ for (auto& c : candidates) {
+ candidate_setup_fn(c);
+ }
+
+ std::vector<Candidates> copies{
+ static_cast<size_t>(bench.epochs() * bench.epochIterations()), candidates};
+ size_t i{0};
+ bench.run([&] {
+ ProtectEvictionCandidatesByRatio(copies.at(i));
+ ++i;
+ });
+}
+
+/* Benchmarks */
+
+static void EvictionProtection0Networks250Candidates(benchmark::Bench& bench)
+{
+ EvictionProtectionCommon(
+ bench,
+ 250 /* num_candidates */,
+ [](NodeEvictionCandidate& c) {
+ c.nTimeConnected = c.id;
+ c.m_network = NET_IPV4;
+ });
+}
+
+static void EvictionProtection1Networks250Candidates(benchmark::Bench& bench)
+{
+ EvictionProtectionCommon(
+ bench,
+ 250 /* num_candidates */,
+ [](NodeEvictionCandidate& c) {
+ c.nTimeConnected = c.id;
+ c.m_is_local = false;
+ if (c.id >= 130 && c.id < 240) { // 110 Tor
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection2Networks250Candidates(benchmark::Bench& bench)
+{
+ EvictionProtectionCommon(
+ bench,
+ 250 /* num_candidates */,
+ [](NodeEvictionCandidate& c) {
+ c.nTimeConnected = c.id;
+ c.m_is_local = false;
+ if (c.id >= 90 && c.id < 160) { // 70 Tor
+ c.m_network = NET_ONION;
+ } else if (c.id >= 170 && c.id < 250) { // 80 I2P
+ c.m_network = NET_I2P;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection3Networks050Candidates(benchmark::Bench& bench)
+{
+ EvictionProtectionCommon(
+ bench,
+ 50 /* num_candidates */,
+ [](NodeEvictionCandidate& c) {
+ c.nTimeConnected = c.id;
+ c.m_is_local = (c.id == 28 || c.id == 47); // 2 localhost
+ if (c.id >= 30 && c.id < 47) { // 17 I2P
+ c.m_network = NET_I2P;
+ } else if (c.id >= 24 && c.id < 28) { // 4 Tor
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection3Networks100Candidates(benchmark::Bench& bench)
+{
+ EvictionProtectionCommon(
+ bench,
+ 100 /* num_candidates */,
+ [](NodeEvictionCandidate& c) {
+ c.nTimeConnected = c.id;
+ c.m_is_local = (c.id >= 55 && c.id < 60); // 5 localhost
+ if (c.id >= 70 && c.id < 80) { // 10 I2P
+ c.m_network = NET_I2P;
+ } else if (c.id >= 80 && c.id < 96) { // 16 Tor
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+static void EvictionProtection3Networks250Candidates(benchmark::Bench& bench)
+{
+ EvictionProtectionCommon(
+ bench,
+ 250 /* num_candidates */,
+ [](NodeEvictionCandidate& c) {
+ c.nTimeConnected = c.id;
+ c.m_is_local = (c.id >= 140 && c.id < 160); // 20 localhost
+ if (c.id >= 170 && c.id < 180) { // 10 I2P
+ c.m_network = NET_I2P;
+ } else if (c.id >= 190 && c.id < 240) { // 50 Tor
+ c.m_network = NET_ONION;
+ } else {
+ c.m_network = NET_IPV4;
+ }
+ });
+}
+
+// Candidate numbers used for the benchmarks:
+// - 50 candidates simulates a possible use of -maxconnections
+// - 100 candidates approximates an average node with default settings
+// - 250 candidates is the number of peers reported by operators of busy nodes
+
+// No disadvantaged networks, with 250 eviction candidates.
+BENCHMARK(EvictionProtection0Networks250Candidates);
+
+// 1 disadvantaged network (Tor) with 250 eviction candidates.
+BENCHMARK(EvictionProtection1Networks250Candidates);
+
+// 2 disadvantaged networks (I2P, Tor) with 250 eviction candidates.
+BENCHMARK(EvictionProtection2Networks250Candidates);
+
+// 3 disadvantaged networks (I2P/localhost/Tor) with 50/100/250 eviction candidates.
+BENCHMARK(EvictionProtection3Networks050Candidates);
+BENCHMARK(EvictionProtection3Networks100Candidates);
+BENCHMARK(EvictionProtection3Networks250Candidates);
diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp
index 39e74b9b2b..928aa7573c 100644
--- a/src/bench/verify_script.cpp
+++ b/src/bench/verify_script.cpp
@@ -21,7 +21,7 @@ static void VerifyScriptBench(benchmark::Bench& bench)
const ECCVerifyHandle verify_handle;
ECC_Start();
- const int flags = SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH;
+ const uint32_t flags{SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH};
const int witnessversion = 0;
// Key pair.
diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp
index 362b7c1e15..a205d8b6e7 100644
--- a/src/bench/wallet_balance.cpp
+++ b/src/bench/wallet_balance.cpp
@@ -9,6 +9,7 @@
#include <test/util/setup_common.h>
#include <test/util/wallet.h>
#include <validationinterface.h>
+#include <wallet/receive.h>
#include <wallet/wallet.h>
#include <optional>
@@ -35,11 +36,11 @@ static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const b
}
SyncWithValidationInterfaceQueue();
- auto bal = wallet.GetBalance(); // Cache
+ auto bal = GetBalance(wallet); // Cache
bench.run([&] {
if (set_dirty) wallet.MarkDirty();
- bal = wallet.GetBalance();
+ bal = GetBalance(wallet);
if (add_mine) assert(bal.m_mine_trusted > 0);
if (add_watchonly) assert(bal.m_watchonly_trusted > 0);
});
diff --git a/src/bitcoin-cli-res.rc b/src/bitcoin-cli-res.rc
index 405a302261..d9e5dcf7fd 100644
--- a/src/bitcoin-cli-res.rc
+++ b/src/bitcoin-cli-res.rc
@@ -2,9 +2,7 @@
#include "clientversion.h" // holds the needed client version information
#define VER_PRODUCTVERSION CLIENT_VERSION_MAJOR,CLIENT_VERSION_MINOR,CLIENT_VERSION_BUILD
-#define VER_PRODUCTVERSION_STR STRINGIZE(CLIENT_VERSION_MAJOR) "." STRINGIZE(CLIENT_VERSION_MINOR) "." STRINGIZE(CLIENT_VERSION_BUILD)
#define VER_FILEVERSION VER_PRODUCTVERSION
-#define VER_FILEVERSION_STR VER_PRODUCTVERSION_STR
VS_VERSION_INFO VERSIONINFO
FILEVERSION VER_FILEVERSION
@@ -18,13 +16,13 @@ BEGIN
BEGIN
VALUE "CompanyName", "Bitcoin"
VALUE "FileDescription", "bitcoin-cli (JSON-RPC client for " PACKAGE_NAME ")"
- VALUE "FileVersion", VER_FILEVERSION_STR
+ VALUE "FileVersion", PACKAGE_VERSION
VALUE "InternalName", "bitcoin-cli"
VALUE "LegalCopyright", COPYRIGHT_STR
VALUE "LegalTrademarks1", "Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php."
VALUE "OriginalFilename", "bitcoin-cli.exe"
VALUE "ProductName", "bitcoin-cli"
- VALUE "ProductVersion", VER_PRODUCTVERSION_STR
+ VALUE "ProductVersion", PACKAGE_VERSION
END
END
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 7a5f945511..297f3066ff 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -9,6 +9,7 @@
#include <chainparamsbase.h>
#include <clientversion.h>
+#include <policy/feerate.h>
#include <rpc/client.h>
#include <rpc/mining.h>
#include <rpc/protocol.h>
@@ -28,6 +29,10 @@
#include <string>
#include <tuple>
+#ifndef WIN32
+#include <unistd.h>
+#endif
+
#include <event2/buffer.h>
#include <event2/keyvalq_struct.h>
#include <support/events.h>
@@ -48,6 +53,9 @@ static constexpr int8_t UNKNOWN_NETWORK{-1};
/** Default number of blocks to generate for RPC generatetoaddress. */
static const std::string DEFAULT_NBLOCKS = "1";
+/** Default -color setting. */
+static const std::string DEFAULT_COLOR_SETTING{"auto"};
+
static void SetupCliArgs(ArgsManager& argsman)
{
SetupHelpOptions(argsman);
@@ -66,6 +74,7 @@ static void SetupCliArgs(ArgsManager& argsman)
argsman.AddArg("-netinfo", "Get network peer connection information from the remote server. An optional integer argument from 0 to 4 can be passed for different peers listings (default: 0). Pass \"help\" for detailed help documentation.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
SetupChainParamsBaseOptions(argsman);
+ argsman.AddArg("-color=<when>", strprintf("Color setting for CLI output (default: %s). Valid values: always, auto (add color codes when standard output is connected to a terminal and OS is not WIN32), never.", DEFAULT_COLOR_SETTING), ArgsManager::ALLOW_STRING, OptionsCategory::OPTIONS);
argsman.AddArg("-named", strprintf("Pass named instead of positional arguments (default: %s)", DEFAULT_NAMED), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-rpcclienttimeout=<n>", strprintf("Timeout in seconds during HTTP requests, or 0 for no timeout. (default: %d)", DEFAULT_HTTP_CLIENT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-rpcconnect=<ip>", strprintf("Send commands to node running on <ip> (default: %s)", DEFAULT_RPCCONNECT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -338,7 +347,9 @@ public:
result.pushKV("difficulty", batch[ID_BLOCKCHAININFO]["result"]["difficulty"]);
result.pushKV("chain", UniValue(batch[ID_BLOCKCHAININFO]["result"]["chain"]));
if (!batch[ID_WALLETINFO]["result"].isNull()) {
+ result.pushKV("has_wallet", true);
result.pushKV("keypoolsize", batch[ID_WALLETINFO]["result"]["keypoolsize"]);
+ result.pushKV("walletname", batch[ID_WALLETINFO]["result"]["walletname"]);
if (!batch[ID_WALLETINFO]["result"]["unlocked_until"].isNull()) {
result.pushKV("unlocked_until", batch[ID_WALLETINFO]["result"]["unlocked_until"]);
}
@@ -375,7 +386,9 @@ private:
bool IsVersionSelected() const { return m_details_level == 3 || m_details_level == 4; }
bool m_is_asmap_on{false};
size_t m_max_addr_length{0};
- size_t m_max_age_length{3};
+ size_t m_max_addr_processed_length{5};
+ size_t m_max_addr_rate_limited_length{6};
+ size_t m_max_age_length{5};
size_t m_max_id_length{2};
struct Peer {
std::string addr;
@@ -385,6 +398,8 @@ private:
std::string age;
double min_ping;
double ping;
+ int64_t addr_processed;
+ int64_t addr_rate_limited;
int64_t last_blck;
int64_t last_recv;
int64_t last_send;
@@ -392,6 +407,7 @@ private:
int id;
int mapped_as;
int version;
+ bool is_addr_relay_enabled;
bool is_bip152_hb_from;
bool is_bip152_hb_to;
bool is_block_relay;
@@ -472,6 +488,8 @@ public:
const int peer_id{peer["id"].get_int()};
const int mapped_as{peer["mapped_as"].isNull() ? 0 : peer["mapped_as"].get_int()};
const int version{peer["version"].get_int()};
+ const int64_t addr_processed{peer["addr_processed"].isNull() ? 0 : peer["addr_processed"].get_int64()};
+ const int64_t addr_rate_limited{peer["addr_rate_limited"].isNull() ? 0 : peer["addr_rate_limited"].get_int64()};
const int64_t conn_time{peer["conntime"].get_int64()};
const int64_t last_blck{peer["last_block"].get_int64()};
const int64_t last_recv{peer["lastrecv"].get_int64()};
@@ -482,10 +500,13 @@ public:
const std::string addr{peer["addr"].get_str()};
const std::string age{conn_time == 0 ? "" : ToString((m_time_now - conn_time) / 60)};
const std::string sub_version{peer["subver"].get_str()};
+ const bool is_addr_relay_enabled{peer["addr_relay_enabled"].isNull() ? false : peer["addr_relay_enabled"].get_bool()};
const bool is_bip152_hb_from{peer["bip152_hb_from"].get_bool()};
const bool is_bip152_hb_to{peer["bip152_hb_to"].get_bool()};
- m_peers.push_back({addr, sub_version, conn_type, network, age, min_ping, ping, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_bip152_hb_from, is_bip152_hb_to, is_block_relay, is_outbound});
+ m_peers.push_back({addr, sub_version, conn_type, network, age, min_ping, ping, addr_processed, addr_rate_limited, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_addr_relay_enabled, is_bip152_hb_from, is_bip152_hb_to, is_block_relay, is_outbound});
m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length);
+ m_max_addr_processed_length = std::max(ToString(addr_processed).length(), m_max_addr_processed_length);
+ m_max_addr_rate_limited_length = std::max(ToString(addr_rate_limited).length(), m_max_addr_rate_limited_length);
m_max_age_length = std::max(age.length(), m_max_age_length);
m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length);
m_is_asmap_on |= (mapped_as != 0);
@@ -493,39 +514,46 @@ public:
}
// Generate report header.
- std::string result{strprintf("%s %s%s - %i%s\n\n", PACKAGE_NAME, FormatFullVersion(), ChainToString(), networkinfo["protocolversion"].get_int(), networkinfo["subversion"].get_str())};
+ std::string result{strprintf("%s client %s%s - server %i%s\n\n", PACKAGE_NAME, FormatFullVersion(), ChainToString(), networkinfo["protocolversion"].get_int(), networkinfo["subversion"].get_str())};
// Report detailed peer connections list sorted by direction and minimum ping time.
if (DetailsRequested() && !m_peers.empty()) {
std::sort(m_peers.begin(), m_peers.end());
- result += strprintf("<-> type net mping ping send recv txn blk hb %*s ", m_max_age_length, "age");
+ result += strprintf("<-> type net mping ping send recv txn blk hb %*s%*s%*s ",
+ m_max_addr_processed_length, "addrp",
+ m_max_addr_rate_limited_length, "addrl",
+ m_max_age_length, "age");
if (m_is_asmap_on) result += " asmap ";
result += strprintf("%*s %-*s%s\n", m_max_id_length, "id", IsAddressSelected() ? m_max_addr_length : 0, IsAddressSelected() ? "address" : "", IsVersionSelected() ? "version" : "");
for (const Peer& peer : m_peers) {
std::string version{ToString(peer.version) + peer.sub_version};
result += strprintf(
- "%3s %6s %5s%7s%7s%5s%5s%5s%5s %2s %*s%*i %*s %-*s%s\n",
+ "%3s %6s %5s%7s%7s%5s%5s%5s%5s %2s %*s%*s%*s%*i %*s %-*s%s\n",
peer.is_outbound ? "out" : "in",
ConnectionTypeForNetinfo(peer.conn_type),
peer.network,
PingTimeToString(peer.min_ping),
PingTimeToString(peer.ping),
- peer.last_send == 0 ? "" : ToString(m_time_now - peer.last_send),
- peer.last_recv == 0 ? "" : ToString(m_time_now - peer.last_recv),
- peer.last_trxn == 0 ? "" : ToString((m_time_now - peer.last_trxn) / 60),
- peer.last_blck == 0 ? "" : ToString((m_time_now - peer.last_blck) / 60),
+ peer.last_send ? ToString(m_time_now - peer.last_send) : "",
+ peer.last_recv ? ToString(m_time_now - peer.last_recv) : "",
+ peer.last_trxn ? ToString((m_time_now - peer.last_trxn) / 60) : peer.is_block_relay ? "*" : "",
+ peer.last_blck ? ToString((m_time_now - peer.last_blck) / 60) : "",
strprintf("%s%s", peer.is_bip152_hb_to ? "." : " ", peer.is_bip152_hb_from ? "*" : " "),
+ m_max_addr_processed_length, // variable spacing
+ peer.addr_processed ? ToString(peer.addr_processed) : peer.is_addr_relay_enabled ? "" : ".",
+ m_max_addr_rate_limited_length, // variable spacing
+ peer.addr_rate_limited ? ToString(peer.addr_rate_limited) : "",
m_max_age_length, // variable spacing
peer.age,
m_is_asmap_on ? 7 : 0, // variable spacing
- m_is_asmap_on && peer.mapped_as != 0 ? ToString(peer.mapped_as) : "",
+ m_is_asmap_on && peer.mapped_as ? ToString(peer.mapped_as) : "",
m_max_id_length, // variable spacing
peer.id,
IsAddressSelected() ? m_max_addr_length : 0, // variable spacing
IsAddressSelected() ? peer.addr : "",
IsVersionSelected() && version != "0" ? version : "");
}
- result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min");
+ result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min");
}
// Report peer connection totals by type.
@@ -599,10 +627,14 @@ public:
" send Time since last message sent to the peer, in seconds\n"
" recv Time since last message received from the peer, in seconds\n"
" txn Time since last novel transaction received from the peer and accepted into our mempool, in minutes\n"
+ " \"*\" - the peer requested we not relay transactions to it (relaytxes is false)\n"
" blk Time since last novel block passing initial validity checks received from the peer, in minutes\n"
" hb High-bandwidth BIP152 compact block relay\n"
" \".\" (to) - we selected the peer as a high-bandwidth peer\n"
" \"*\" (from) - the peer selected us as a high-bandwidth peer\n"
+ " addrp Total number of addresses processed, excluding those dropped due to rate limiting\n"
+ " \".\" - we do not relay addresses to this peer (addr_relay_enabled is false)\n"
+ " addrl Total number of addresses dropped due to rate limiting\n"
" age Duration of connection to the peer, in minutes\n"
" asmap Mapped AS (Autonomous System) number in the BGP route to the peer, used for diversifying\n"
" peer selection (only displayed if the -asmap config option is set)\n"
@@ -874,6 +906,133 @@ static void GetWalletBalances(UniValue& result)
}
/**
+ * GetProgressBar constructs a progress bar with 5% intervals.
+ *
+ * @param[in] progress The proportion of the progress bar to be filled between 0 and 1.
+ * @param[out] progress_bar String representation of the progress bar.
+ */
+static void GetProgressBar(double progress, std::string& progress_bar)
+{
+ if (progress < 0 || progress > 1) return;
+
+ static constexpr double INCREMENT{0.05};
+ static const std::string COMPLETE_BAR{"\u2592"};
+ static const std::string INCOMPLETE_BAR{"\u2591"};
+
+ for (int i = 0; i < progress / INCREMENT; ++i) {
+ progress_bar += COMPLETE_BAR;
+ }
+
+ for (int i = 0; i < (1 - progress) / INCREMENT; ++i) {
+ progress_bar += INCOMPLETE_BAR;
+ }
+}
+
+/**
+ * ParseGetInfoResult takes in the -getinfo result as a UniValue object and parses it
+ * into a user-friendly UniValue string to be printed on the console.
+ * @param[out] result  Reference to the UniValue result containing the -getinfo output.
+ */
+static void ParseGetInfoResult(UniValue& result)
+{
+ if (!find_value(result, "error").isNull()) return;
+
+ std::string RESET, GREEN, BLUE, YELLOW, MAGENTA, CYAN;
+ bool should_colorize = false;
+
+#ifndef WIN32
+ if (isatty(fileno(stdout))) {
+ // By default, only print colored text if OS is not WIN32 and stdout is connected to a terminal.
+ should_colorize = true;
+ }
+#endif
+
+ if (gArgs.IsArgSet("-color")) {
+ const std::string color{gArgs.GetArg("-color", DEFAULT_COLOR_SETTING)};
+ if (color == "always") {
+ should_colorize = true;
+ } else if (color == "never") {
+ should_colorize = false;
+ } else if (color != "auto") {
+ throw std::runtime_error("Invalid value for -color option. Valid values: always, auto, never.");
+ }
+ }
+
+ if (should_colorize) {
+ RESET = "\x1B[0m";
+ GREEN = "\x1B[32m";
+ BLUE = "\x1B[34m";
+ YELLOW = "\x1B[33m";
+ MAGENTA = "\x1B[35m";
+ CYAN = "\x1B[36m";
+ }
+
+ std::string result_string = strprintf("%sChain: %s%s\n", BLUE, result["chain"].getValStr(), RESET);
+ result_string += strprintf("Blocks: %s\n", result["blocks"].getValStr());
+ result_string += strprintf("Headers: %s\n", result["headers"].getValStr());
+
+ const double ibd_progress{result["verificationprogress"].get_real()};
+ std::string ibd_progress_bar;
+ // Display the progress bar only if IBD progress is less than 99%
+ if (ibd_progress < 0.99) {
+ GetProgressBar(ibd_progress, ibd_progress_bar);
+ // Add padding between progress bar and IBD progress
+ ibd_progress_bar += " ";
+ }
+
+ result_string += strprintf("Verification progress: %s%.4f%%\n", ibd_progress_bar, ibd_progress * 100);
+ result_string += strprintf("Difficulty: %s\n\n", result["difficulty"].getValStr());
+
+ result_string += strprintf(
+ "%sNetwork: in %s, out %s, total %s%s\n",
+ GREEN,
+ result["connections"]["in"].getValStr(),
+ result["connections"]["out"].getValStr(),
+ result["connections"]["total"].getValStr(),
+ RESET);
+ result_string += strprintf("Version: %s\n", result["version"].getValStr());
+ result_string += strprintf("Time offset (s): %s\n", result["timeoffset"].getValStr());
+ const std::string proxy = result["proxy"].getValStr();
+ result_string += strprintf("Proxy: %s\n", proxy.empty() ? "N/A" : proxy);
+ result_string += strprintf("Min tx relay fee rate (%s/kvB): %s\n\n", CURRENCY_UNIT, result["relayfee"].getValStr());
+
+ if (!result["has_wallet"].isNull()) {
+ const std::string walletname = result["walletname"].getValStr();
+ result_string += strprintf("%sWallet: %s%s\n", MAGENTA, walletname.empty() ? "\"\"" : walletname, RESET);
+
+ result_string += strprintf("Keypool size: %s\n", result["keypoolsize"].getValStr());
+ if (!result["unlocked_until"].isNull()) {
+ result_string += strprintf("Unlocked until: %s\n", result["unlocked_until"].getValStr());
+ }
+ result_string += strprintf("Transaction fee rate (-paytxfee) (%s/kvB): %s\n\n", CURRENCY_UNIT, result["paytxfee"].getValStr());
+ }
+ if (!result["balance"].isNull()) {
+ result_string += strprintf("%sBalance:%s %s\n\n", CYAN, RESET, result["balance"].getValStr());
+ }
+
+ if (!result["balances"].isNull()) {
+ result_string += strprintf("%sBalances%s\n", CYAN, RESET);
+
+ size_t max_balance_length{10};
+
+ for (const std::string& wallet : result["balances"].getKeys()) {
+ max_balance_length = std::max(result["balances"][wallet].getValStr().length(), max_balance_length);
+ }
+
+ for (const std::string& wallet : result["balances"].getKeys()) {
+ result_string += strprintf("%*s %s\n",
+ max_balance_length,
+ result["balances"][wallet].getValStr(),
+ wallet.empty() ? "\"\"" : wallet);
+ }
+ result_string += "\n";
+ }
+
+ result_string += strprintf("%sWarnings:%s %s", YELLOW, RESET, result["warnings"].getValStr());
+ result.setStr(result_string);
+}
+
+/**
* Call RPC getnewaddress.
* @returns getnewaddress response as a UniValue object.
*/
@@ -994,9 +1153,13 @@ static int CommandLineRPC(int argc, char *argv[])
UniValue result = find_value(reply, "result");
const UniValue& error = find_value(reply, "error");
if (error.isNull()) {
- if (gArgs.IsArgSet("-getinfo") && !gArgs.IsArgSet("-rpcwallet")) {
- GetWalletBalances(result); // fetch multiwallet balances and append to result
+ if (gArgs.GetBoolArg("-getinfo", false)) {
+ if (!gArgs.IsArgSet("-rpcwallet")) {
+ GetWalletBalances(result); // fetch multiwallet balances and append to result
+ }
+ ParseGetInfoResult(result);
}
+
ParseResult(result, strPrint);
} else {
ParseError(error, strPrint, nRet);
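Most of the new -getinfo output logic above is plain string formatting, but the -color handling is worth isolating: auto colorizes only when stdout is a terminal on a non-WIN32 OS, while always and never override that, and any other value is rejected. A standalone sketch of that decision, with the ArgsManager lookup replaced by a plain string parameter for illustration; it is not the patch's code verbatim:

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

#ifndef WIN32
#include <unistd.h>
#endif

// Returns whether output should be colorized for a given -color value
// ("always", "never" or "auto"); mirrors the logic added to ParseGetInfoResult().
bool ShouldColorize(const std::string& color)
{
    bool should_colorize = false;
#ifndef WIN32
    // By default, only colorize when stdout is connected to a terminal.
    if (isatty(fileno(stdout))) should_colorize = true;
#endif
    if (color == "always") {
        should_colorize = true;
    } else if (color == "never") {
        should_colorize = false;
    } else if (color != "auto") {
        throw std::runtime_error("Invalid value for -color option. Valid values: always, auto, never.");
    }
    return should_colorize;
}

int main()
{
    std::printf("always -> %d\n", ShouldColorize("always")); // 1
    std::printf("never  -> %d\n", ShouldColorize("never"));  // 0
    return 0;
}
```

With the real binary this corresponds to invocations such as `bitcoin-cli -getinfo -color=always`.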
diff --git a/src/bitcoin-tx-res.rc b/src/bitcoin-tx-res.rc
index b545ce9dbe..46e4fc9274 100644
--- a/src/bitcoin-tx-res.rc
+++ b/src/bitcoin-tx-res.rc
@@ -2,9 +2,7 @@
#include "clientversion.h" // holds the needed client version information
#define VER_PRODUCTVERSION CLIENT_VERSION_MAJOR,CLIENT_VERSION_MINOR,CLIENT_VERSION_BUILD
-#define VER_PRODUCTVERSION_STR STRINGIZE(CLIENT_VERSION_MAJOR) "." STRINGIZE(CLIENT_VERSION_MINOR) "." STRINGIZE(CLIENT_VERSION_BUILD)
#define VER_FILEVERSION VER_PRODUCTVERSION
-#define VER_FILEVERSION_STR VER_PRODUCTVERSION_STR
VS_VERSION_INFO VERSIONINFO
FILEVERSION VER_FILEVERSION
@@ -18,13 +16,13 @@ BEGIN
BEGIN
VALUE "CompanyName", "Bitcoin"
VALUE "FileDescription", "bitcoin-tx (CLI Bitcoin transaction editor utility)"
- VALUE "FileVersion", VER_FILEVERSION_STR
+ VALUE "FileVersion", PACKAGE_VERSION
VALUE "InternalName", "bitcoin-tx"
VALUE "LegalCopyright", COPYRIGHT_STR
VALUE "LegalTrademarks1", "Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php."
VALUE "OriginalFilename", "bitcoin-tx.exe"
VALUE "ProductName", "bitcoin-tx"
- VALUE "ProductVersion", VER_PRODUCTVERSION_STR
+ VALUE "ProductVersion", PACKAGE_VERSION
END
END
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 3fc87ae1ff..58c51bd8e0 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -188,10 +188,11 @@ static void RegisterLoad(const std::string& strInput)
static CAmount ExtractAndValidateValue(const std::string& strValue)
{
- CAmount value;
- if (!ParseMoney(strValue, value))
+ if (std::optional<CAmount> parsed = ParseMoney(strValue)) {
+ return parsed.value();
+ } else {
throw std::runtime_error("invalid TX output value");
- return value;
+ }
}
static void MutateTxVersion(CMutableTransaction& tx, const std::string& cmdVal)
@@ -771,9 +772,7 @@ static std::string readStdin()
if (ferror(stdin))
throw std::runtime_error("error reading stdin");
- boost::algorithm::trim_right(ret);
-
- return ret;
+ return TrimString(ret);
}
static int CommandLineRawTx(int argc, char* argv[])
diff --git a/src/bitcoin-util-res.rc b/src/bitcoin-util-res.rc
index 3f0fa8ab6d..0de8c5befa 100644
--- a/src/bitcoin-util-res.rc
+++ b/src/bitcoin-util-res.rc
@@ -2,9 +2,7 @@
#include "clientversion.h" // holds the needed client version information
#define VER_PRODUCTVERSION CLIENT_VERSION_MAJOR,CLIENT_VERSION_MINOR,CLIENT_VERSION_BUILD
-#define VER_PRODUCTVERSION_STR STRINGIZE(CLIENT_VERSION_MAJOR) "." STRINGIZE(CLIENT_VERSION_MINOR) "." STRINGIZE(CLIENT_VERSION_BUILD)
#define VER_FILEVERSION VER_PRODUCTVERSION
-#define VER_FILEVERSION_STR VER_PRODUCTVERSION_STR
VS_VERSION_INFO VERSIONINFO
FILEVERSION VER_FILEVERSION
@@ -18,13 +16,13 @@ BEGIN
BEGIN
VALUE "CompanyName", "Bitcoin"
VALUE "FileDescription", "bitcoin-util (CLI Bitcoin utility)"
- VALUE "FileVersion", VER_FILEVERSION_STR
+ VALUE "FileVersion", PACKAGE_VERSION
VALUE "InternalName", "bitcoin-util"
VALUE "LegalCopyright", COPYRIGHT_STR
VALUE "LegalTrademarks1", "Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php."
VALUE "OriginalFilename", "bitcoin-util.exe"
VALUE "ProductName", "bitcoin-util"
- VALUE "ProductVersion", VER_PRODUCTVERSION_STR
+ VALUE "ProductVersion", PACKAGE_VERSION
END
END
diff --git a/src/bitcoin-wallet-res.rc b/src/bitcoin-wallet-res.rc
index 59346ab8f6..d86ffbd9f1 100644
--- a/src/bitcoin-wallet-res.rc
+++ b/src/bitcoin-wallet-res.rc
@@ -2,9 +2,7 @@
#include "clientversion.h" // holds the needed client version information
#define VER_PRODUCTVERSION CLIENT_VERSION_MAJOR,CLIENT_VERSION_MINOR,CLIENT_VERSION_BUILD
-#define VER_PRODUCTVERSION_STR STRINGIZE(CLIENT_VERSION_MAJOR) "." STRINGIZE(CLIENT_VERSION_MINOR) "." STRINGIZE(CLIENT_VERSION_BUILD)
#define VER_FILEVERSION VER_PRODUCTVERSION
-#define VER_FILEVERSION_STR VER_PRODUCTVERSION_STR
VS_VERSION_INFO VERSIONINFO
FILEVERSION VER_FILEVERSION
@@ -18,13 +16,13 @@ BEGIN
BEGIN
VALUE "CompanyName", "Bitcoin"
VALUE "FileDescription", "bitcoin-wallet (CLI tool for " PACKAGE_NAME " wallets)"
- VALUE "FileVersion", VER_FILEVERSION_STR
+ VALUE "FileVersion", PACKAGE_VERSION
VALUE "InternalName", "bitcoin-wallet"
VALUE "LegalCopyright", COPYRIGHT_STR
VALUE "LegalTrademarks1", "Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php."
VALUE "OriginalFilename", "bitcoin-wallet.exe"
VALUE "ProductName", "bitcoin-wallet"
- VALUE "ProductVersion", VER_PRODUCTVERSION_STR
+ VALUE "ProductVersion", PACKAGE_VERSION
END
END
diff --git a/src/bitcoind-res.rc b/src/bitcoind-res.rc
index a98b50c899..353761dfa7 100644
--- a/src/bitcoind-res.rc
+++ b/src/bitcoind-res.rc
@@ -2,9 +2,7 @@
#include "clientversion.h" // holds the needed client version information
#define VER_PRODUCTVERSION CLIENT_VERSION_MAJOR,CLIENT_VERSION_MINOR,CLIENT_VERSION_BUILD
-#define VER_PRODUCTVERSION_STR STRINGIZE(CLIENT_VERSION_MAJOR) "." STRINGIZE(CLIENT_VERSION_MINOR) "." STRINGIZE(CLIENT_VERSION_BUILD)
#define VER_FILEVERSION VER_PRODUCTVERSION
-#define VER_FILEVERSION_STR VER_PRODUCTVERSION_STR
VS_VERSION_INFO VERSIONINFO
FILEVERSION VER_FILEVERSION
@@ -18,13 +16,13 @@ BEGIN
BEGIN
VALUE "CompanyName", "Bitcoin"
VALUE "FileDescription", "bitcoind (Bitcoin node with a JSON-RPC server)"
- VALUE "FileVersion", VER_FILEVERSION_STR
+ VALUE "FileVersion", PACKAGE_VERSION
VALUE "InternalName", "bitcoind"
VALUE "LegalCopyright", COPYRIGHT_STR
VALUE "LegalTrademarks1", "Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php."
VALUE "OriginalFilename", "bitcoind.exe"
VALUE "ProductName", "bitcoind"
- VALUE "ProductVersion", VER_PRODUCTVERSION_STR
+ VALUE "ProductVersion", PACKAGE_VERSION
END
END
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 58a27e053b..4cc37560a3 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -91,8 +91,8 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 709632; // Approximately November 12th, 2021
- consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000001533efd8d716a517fe2c5008");
- consensus.defaultAssumeValid = uint256S("0x0000000000000000000b9d2ec5a352ecba0592946514a92f14319dc2b367fc72"); // 654683
+ consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000001fa4663bbbe19f82de910280");
+ consensus.defaultAssumeValid = uint256S("0x00000000000000000008a89e854d57e5667df88f1cdef6fde2fbca1de5b639ad"); // 691719
/**
* The message start string is designed to be unlikely to occur in normal data.
@@ -105,7 +105,7 @@ public:
pchMessageStart[3] = 0xd9;
nDefaultPort = 8333;
nPruneAfterHeight = 100000;
- m_assumed_blockchain_size = 350;
+ m_assumed_blockchain_size = 420;
m_assumed_chain_state_size = 6;
genesis = CreateGenesisBlock(1231006505, 2083236893, 0x1d00ffff, 1, 50 * COIN);
@@ -166,10 +166,10 @@ public:
};
chainTxData = ChainTxData{
- // Data from RPC: getchaintxstats 4096 0000000000000000000b9d2ec5a352ecba0592946514a92f14319dc2b367fc72
- /* nTime */ 1603995752,
- /* nTxCount */ 582083445,
- /* dTxRate */ 3.508976121410527,
+ // Data from RPC: getchaintxstats 4096 00000000000000000008a89e854d57e5667df88f1cdef6fde2fbca1de5b639ad
+ /* nTime */ 1626697539,
+ /* nTxCount */ 656509474,
+ /* dTxRate */ 2.424920418708139,
};
}
};
@@ -210,8 +210,8 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay
- consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000001db6ec4ac88cf2272c6");
- consensus.defaultAssumeValid = uint256S("0x000000000000006433d1efec504c53ca332b64963c425395515b01977bd7b3b0"); // 1864000
+ consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000005180c3bd8290da33a1a");
+ consensus.defaultAssumeValid = uint256S("0x0000000000004ae2f3896ca8ecd41c460a35bf6184e145d91558cece1c688a76"); // 2010000
pchMessageStart[0] = 0x0b;
pchMessageStart[1] = 0x11;
@@ -261,10 +261,10 @@ public:
};
chainTxData = ChainTxData{
- // Data from RPC: getchaintxstats 4096 000000000000006433d1efec504c53ca332b64963c425395515b01977bd7b3b0
- /* nTime */ 1603359686,
- /* nTxCount */ 58090238,
- /* dTxRate */ 0.1232886622799463,
+ // Data from RPC: getchaintxstats 4096 0000000000004ae2f3896ca8ecd41c460a35bf6184e145d91558cece1c688a76
+ /* nTime */ 1625727096,
+ /* nTxCount */ 60408943,
+ /* dTxRate */ 0.08379062270367649,
};
}
};
@@ -284,15 +284,15 @@ public:
vSeeds.emplace_back("2a01:7c8:d005:390::5");
vSeeds.emplace_back("v7ajjeirttkbnt32wpy3c6w3emwnfr3fkla7hpxcfokr3ysd3kqtzmqd.onion:38333");
- consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000000000000000019fd16269a");
- consensus.defaultAssumeValid = uint256S("0x0000002a1de0f46379358c1fd09906f7ac59adf3712323ed90eb59e4c183c020"); // 9434
+ consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000000000000008546553c03");
+ consensus.defaultAssumeValid = uint256S("0x000000187d4440e5bff91488b700a140441e089a8aaea707414982460edbfe54"); // 47200
m_assumed_blockchain_size = 1;
m_assumed_chain_state_size = 0;
chainTxData = ChainTxData{
- // Data from RPC: getchaintxstats 4096 0000002a1de0f46379358c1fd09906f7ac59adf3712323ed90eb59e4c183c020
- /* nTime */ 1603986000,
- /* nTxCount */ 9582,
- /* dTxRate */ 0.00159272030651341,
+ // Data from RPC: getchaintxstats 4096 000000187d4440e5bff91488b700a140441e089a8aaea707414982460edbfe54
+ /* nTime */ 1626696658,
+ /* nTxCount */ 387761,
+ /* dTxRate */ 0.04035946932424404,
};
} else {
const auto signet_challenge = args.GetArgs("-signetchallenge");
@@ -390,10 +390,10 @@ public:
consensus.signet_challenge.clear();
consensus.nSubsidyHalvingInterval = 150;
consensus.BIP16Exception = uint256();
- consensus.BIP34Height = 500; // BIP34 activated on regtest (Used in functional tests)
+ consensus.BIP34Height = 2; // BIP34 activated on regtest (Block at height 1 not enforced for testing purposes)
consensus.BIP34Hash = uint256();
- consensus.BIP65Height = 1351; // BIP65 activated on regtest (Used in functional tests)
- consensus.BIP66Height = 1251; // BIP66 activated on regtest (Used in functional tests)
+ consensus.BIP65Height = 111; // BIP65 activated on regtest (Block at height 110 and earlier not enforced for testing purposes)
+ consensus.BIP66Height = 102; // BIP66 activated on regtest (Block at height 101 and earlier not enforced for testing purposes)
consensus.CSVHeight = 432; // CSV activated on regtest (Used in rpc activation tests)
consensus.SegwitHeight = 0; // SEGWIT is always activated on regtest unless overridden
consensus.MinBIP9WarningHeight = 0;
@@ -435,7 +435,8 @@ public:
assert(genesis.hashMerkleRoot == uint256S("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"));
vFixedSeeds.clear(); //!< Regtest mode doesn't have any fixed seeds.
- vSeeds.clear(); //!< Regtest mode doesn't have any DNS seeds.
+ vSeeds.clear();
+ vSeeds.emplace_back("dummySeed.invalid.");
fDefaultConsistencyChecks = true;
fRequireStandard = true;
@@ -490,11 +491,8 @@ void CRegTestParams::UpdateActivationParametersFromArgs(const ArgsManager& args)
{
if (args.IsArgSet("-segwitheight")) {
int64_t height = args.GetArg("-segwitheight", consensus.SegwitHeight);
- if (height < -1 || height >= std::numeric_limits<int>::max()) {
- throw std::runtime_error(strprintf("Activation height %ld for segwit is out of valid range. Use -1 to disable segwit.", height));
- } else if (height == -1) {
- LogPrintf("Segwit disabled for testing\n");
- height = std::numeric_limits<int>::max();
+ if (height < 0 || height >= std::numeric_limits<int>::max()) {
+ throw std::runtime_error(strprintf("Activation height %ld for segwit is out of valid range.", height));
}
consensus.SegwitHeight = static_cast<int>(height);
}
diff --git a/src/chainparams.h b/src/chainparams.h
index 5c2351eea6..4faa6f8d06 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -8,11 +8,13 @@
#include <chainparamsbase.h>
#include <consensus/params.h>
+#include <netaddress.h>
#include <primitives/block.h>
#include <protocol.h>
#include <util/hash_type.h>
#include <memory>
+#include <string>
#include <vector>
typedef std::map<int, uint256> MapCheckpoints;
@@ -80,6 +82,15 @@ public:
const Consensus::Params& GetConsensus() const { return consensus; }
const CMessageHeader::MessageStartChars& MessageStart() const { return pchMessageStart; }
uint16_t GetDefaultPort() const { return nDefaultPort; }
+ uint16_t GetDefaultPort(Network net) const
+ {
+ return net == NET_I2P ? I2P_SAM31_PORT : GetDefaultPort();
+ }
+ uint16_t GetDefaultPort(const std::string& addr) const
+ {
+ CNetAddr a;
+ return a.SetSpecial(addr) ? GetDefaultPort(a.GetNetwork()) : GetDefaultPort();
+ }
const CBlock& GenesisBlock() const { return genesis; }
/** Default value for -checkmempool and -checkblockindex argument */
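The GetDefaultPort() overloads added above let a caller derive the right default port from either a network enum or a bare address string, with I2P always mapping to the fixed SAM 3.1 port. A minimal, self-contained sketch of that dispatch follows; the .b32.i2p suffix check and the port constants are simplified stand-ins for CNetAddr::SetSpecial() and the real constants, not Bitcoin Core's implementation.

```cpp
#include <cstdint>
#include <iostream>
#include <string>

enum class Net { IPV4, I2P };

constexpr uint16_t SAM31_PORT_SKETCH = 0;      // assumed fixed I2P port for this sketch
constexpr uint16_t MAINNET_PORT_SKETCH = 8333; // stand-in for the chain's default port

// Crude stand-in for CNetAddr::SetSpecial(): treat *.b32.i2p as an I2P destination.
Net ClassifyAddress(const std::string& addr)
{
    const std::string suffix{".b32.i2p"};
    return addr.size() > suffix.size() &&
           addr.compare(addr.size() - suffix.size(), suffix.size(), suffix) == 0
        ? Net::I2P : Net::IPV4;
}

uint16_t DefaultPortFor(Net net)
{
    return net == Net::I2P ? SAM31_PORT_SKETCH : MAINNET_PORT_SKETCH;
}

uint16_t DefaultPortFor(const std::string& addr)
{
    return DefaultPortFor(ClassifyAddress(addr));
}

int main()
{
    std::cout << DefaultPortFor("example.b32.i2p") << '\n'; // prints 0
    std::cout << DefaultPortFor("203.0.113.7") << '\n';     // prints 8333
}
```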
diff --git a/src/chainparamsbase.cpp b/src/chainparamsbase.cpp
index e71b4bc859..79c1bc25bc 100644
--- a/src/chainparamsbase.cpp
+++ b/src/chainparamsbase.cpp
@@ -20,7 +20,7 @@ void SetupChainParamsBaseOptions(ArgsManager& argsman)
argsman.AddArg("-chain=<chain>", "Use the chain <chain> (default: main). Allowed values: main, test, signet, regtest", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
argsman.AddArg("-regtest", "Enter regression test mode, which uses a special chain in which blocks can be solved instantly. "
"This is intended for regression testing tools and app development. Equivalent to -chain=regtest.", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
- argsman.AddArg("-segwitheight=<n>", "Set the activation height of segwit. -1 to disable. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-segwitheight=<n>", "Set the activation height of segwit. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-testnet", "Use the test chain. Equivalent to -chain=test.", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
argsman.AddArg("-vbparams=deployment:start:end[:min_activation_height]", "Use given start/end times and min_activation_height for specified version bits deployment (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
argsman.AddArg("-signet", "Use the signet chain. Equivalent to -chain=signet. Note that the network is defined by the -signetchallenge parameter", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
diff --git a/src/chainparamsseeds.h b/src/chainparamsseeds.h
index 08587e2b37..953a09d5e7 100644
--- a/src/chainparamsseeds.h
+++ b/src/chainparamsseeds.h
@@ -683,14 +683,22 @@ static const uint8_t chainparams_seed_main[] = {
0x04,0x20,0x98,0xc6,0x44,0x27,0x90,0x41,0xa6,0x98,0xf9,0x25,0x6c,0x59,0x0f,0x06,0x6d,0x44,0x59,0x0e,0xb2,0x46,0xb0,0xa4,0x37,0x88,0x69,0x8f,0xc1,0x32,0xcd,0x9f,0x15,0xd7,0x20,0x8d,
0x04,0x20,0xaa,0x3a,0x16,0x86,0xea,0x59,0x09,0x04,0x78,0xe5,0x10,0x92,0xe1,0x1d,0xad,0xf7,0x56,0x2b,0xac,0xb0,0x97,0x29,0x63,0x30,0xf4,0x1b,0xcf,0xde,0xf3,0x28,0x0a,0x29,0x20,0x8d,
0x04,0x20,0xbc,0x27,0xae,0x89,0xc1,0x67,0x73,0x0a,0x08,0x02,0xdf,0xb7,0xcc,0x94,0xc7,0x9f,0xf4,0x72,0x7a,0x9b,0x20,0x0c,0x5c,0x11,0x3d,0x22,0xd6,0x13,0x88,0x66,0x74,0xbf,0x20,0x8d,
- 0x05,0x20,0xfe,0x97,0xba,0x09,0x2a,0xa4,0x85,0x10,0xa1,0x04,0x7b,0x88,0x7a,0x5a,0x06,0x53,0x71,0x93,0x3b,0xf9,0xa2,0x2f,0xd9,0xe3,0x8f,0xa5,0xa2,0xac,0x1e,0x6c,0x6c,0x8c,0x20,0x8d,
- 0x05,0x20,0x17,0x0c,0x56,0xce,0x72,0xa5,0xa0,0xe6,0x23,0x06,0xa3,0xc7,0x08,0x43,0x18,0xee,0x3a,0x46,0x35,0x5d,0x17,0xf6,0x78,0x96,0xa0,0x9c,0x51,0xef,0xbe,0x23,0xfd,0x71,0x20,0x8d,
- 0x05,0x20,0x31,0x0f,0x30,0x0b,0x9d,0x70,0x0c,0x7c,0xf7,0x98,0x7e,0x1c,0xf4,0x33,0xdc,0x64,0x17,0xf7,0x00,0x7a,0x0c,0x04,0xb5,0x83,0xfc,0x5f,0xa6,0x52,0x39,0x79,0x63,0x87,0x20,0x8d,
- 0x05,0x20,0x3e,0xe3,0xe0,0xa9,0xbc,0xf4,0x2e,0x59,0xd9,0x20,0xee,0xdf,0x74,0x61,0x4d,0x99,0x0c,0x5c,0x15,0x30,0x9b,0x72,0x16,0x79,0x15,0xf4,0x7a,0xca,0x34,0xcc,0x81,0x99,0x20,0x8d,
- 0x05,0x20,0x3b,0x42,0x1c,0x25,0xf7,0xbf,0x79,0xed,0x6d,0x7d,0xef,0x65,0x30,0x7d,0xee,0x16,0x37,0x22,0x72,0x43,0x33,0x28,0x40,0xa3,0xaa,0xf4,0x48,0x49,0x67,0xb1,0x4b,0xfd,0x20,0x8d,
- 0x05,0x20,0x7a,0x65,0xf7,0x47,0x42,0x9d,0x66,0x42,0x3b,0xb3,0xa7,0x03,0x6c,0x46,0x78,0x19,0x28,0x78,0x1e,0xa3,0x7c,0x67,0x44,0xb7,0x83,0x05,0xe3,0xfe,0xa5,0xe4,0x0a,0x6e,0x20,0x8d,
- 0x05,0x20,0xb5,0x83,0x6f,0xb6,0x11,0xd8,0x0e,0xa8,0x57,0xda,0x15,0x20,0x5b,0x1a,0x6d,0x21,0x15,0x5a,0xbd,0xb4,0x17,0x11,0xc2,0xfb,0x0e,0xfc,0xde,0xe8,0x26,0x56,0xa8,0xac,0x20,0x8d,
- 0x05,0x20,0xcc,0xaf,0x6c,0x3b,0xd0,0x13,0x76,0x23,0xc3,0x36,0xbb,0x64,0x4a,0x4a,0x06,0x93,0x69,0x6d,0xb0,0x10,0x6e,0x66,0xa4,0x61,0xf8,0x2d,0xe7,0x80,0x72,0x4d,0x53,0x94,0x20,0x8d,
+ 0x05,0x20,0xe1,0xd6,0xb8,0xfa,0xdd,0xeb,0x03,0x32,0x30,0x3b,0x20,0x6a,0xbc,0xaf,0x99,0x4f,0xa0,0xa2,0x72,0x48,0xfe,0x44,0xe0,0xf6,0x03,0xc1,0xbd,0xb6,0x24,0xd0,0xf6,0xb8,0x00,0x00,
+ 0x05,0x20,0xf4,0xb7,0xb4,0xcd,0xf5,0xb6,0x54,0x82,0x27,0x6d,0x29,0x7b,0x06,0x7f,0x52,0x59,0xa0,0xb4,0xdc,0xf7,0x6f,0xb4,0x71,0xcf,0xcc,0xfb,0x6b,0x86,0xc2,0x57,0x80,0xc6,0x00,0x00,
+ 0x05,0x20,0x07,0x61,0x26,0xd7,0x6c,0x05,0xbf,0xf6,0x2d,0x8c,0xca,0xc4,0x65,0xd3,0xd3,0xb2,0x49,0xe9,0xcc,0x53,0x1e,0xca,0x77,0x84,0xb6,0x10,0x5e,0xc2,0x5a,0xfe,0x28,0xb3,0x00,0x00,
+ 0x05,0x20,0x0a,0x26,0x27,0x45,0xb1,0x1e,0xfc,0x27,0x03,0x32,0x0e,0x65,0x9e,0x3c,0x64,0x0e,0x33,0x50,0x3d,0x6c,0x90,0x17,0x0e,0x29,0xee,0x5a,0x58,0xdf,0x08,0xde,0xbf,0x73,0x00,0x00,
+ 0x05,0x20,0x17,0x0c,0x56,0xce,0x72,0xa5,0xa0,0xe6,0x23,0x06,0xa3,0xc7,0x08,0x43,0x18,0xee,0x3a,0x46,0x35,0x5d,0x17,0xf6,0x78,0x96,0xa0,0x9c,0x51,0xef,0xbe,0x23,0xfd,0x71,0x00,0x00,
+ 0x05,0x20,0x19,0xe7,0x0d,0x3f,0xfe,0x9e,0x0e,0x8e,0x73,0x40,0x40,0xc3,0xba,0x8f,0x41,0xaf,0xf1,0x7b,0xa6,0x83,0x1b,0xc3,0xa4,0xe0,0x6d,0x6c,0x57,0xa7,0x36,0x5d,0x09,0xce,0x00,0x00,
+ 0x05,0x20,0x31,0x0f,0x30,0x0b,0x9d,0x70,0x0c,0x7c,0xf7,0x98,0x7e,0x1c,0xf4,0x33,0xdc,0x64,0x17,0xf7,0x00,0x7a,0x0c,0x04,0xb5,0x83,0xfc,0x5f,0xa6,0x52,0x39,0x79,0x63,0x87,0x00,0x00,
+ 0x05,0x20,0x3e,0xe3,0xe0,0xa9,0xbc,0xf4,0x2e,0x59,0xd9,0x20,0xee,0xdf,0x74,0x61,0x4d,0x99,0x0c,0x5c,0x15,0x30,0x9b,0x72,0x16,0x79,0x15,0xf4,0x7a,0xca,0x34,0xcc,0x81,0x99,0x00,0x00,
+ 0x05,0x20,0x3b,0x42,0x1c,0x25,0xf7,0xbf,0x79,0xed,0x6d,0x7d,0xef,0x65,0x30,0x7d,0xee,0x16,0x37,0x22,0x72,0x43,0x33,0x28,0x40,0xa3,0xaa,0xf4,0x48,0x49,0x67,0xb1,0x4b,0xfd,0x00,0x00,
+ 0x05,0x20,0x46,0xce,0x21,0x81,0x48,0xce,0xa7,0x8a,0x98,0xca,0xb1,0x0b,0x51,0xa5,0xc8,0xff,0x39,0xc5,0x1a,0xa3,0xd3,0x02,0x32,0xa3,0x29,0xad,0x79,0xb8,0x7f,0x34,0x51,0x33,0x00,0x00,
+ 0x05,0x20,0x4e,0x77,0x2e,0x12,0x91,0x67,0x6b,0x94,0xc4,0x92,0x2f,0x19,0x67,0x7d,0xcd,0x47,0x02,0xad,0xf8,0x60,0x72,0xed,0x73,0xf1,0x10,0x99,0x2c,0x05,0x61,0x66,0x55,0xd9,0x00,0x00,
+ 0x05,0x20,0x53,0x94,0xa6,0x3e,0x14,0x82,0xd4,0xf9,0xd3,0xa7,0x53,0x33,0x05,0xce,0x72,0x64,0xed,0x74,0x09,0x63,0x8f,0x24,0xef,0xda,0x12,0xa1,0x55,0xe0,0xd8,0xbb,0xd3,0x58,0x00,0x00,
+ 0x05,0x20,0x55,0x62,0x32,0x7d,0x82,0x32,0x4f,0x9d,0xdf,0x24,0x5c,0xed,0x8e,0x1a,0x5a,0x8d,0xc6,0x50,0xb4,0x32,0xd5,0x85,0xef,0xb0,0xfa,0x7c,0xf9,0xbb,0x25,0x89,0x6b,0x03,0x00,0x00,
+ 0x05,0x20,0x91,0xcf,0xa2,0x5b,0x04,0x33,0x69,0x66,0xb0,0x72,0x27,0x54,0xbe,0xcd,0xd8,0x08,0xeb,0x95,0x55,0x5a,0xc2,0x79,0x91,0x3a,0xd9,0xf2,0x2c,0x73,0x9f,0x78,0x50,0xca,0x00,0x00,
+ 0x05,0x20,0xb5,0x83,0x6f,0xb6,0x11,0xd8,0x0e,0xa8,0x57,0xda,0x15,0x20,0x5b,0x1a,0x6d,0x21,0x15,0x5a,0xbd,0xb4,0x17,0x11,0xc2,0xfb,0x0e,0xfc,0xde,0xe8,0x26,0x56,0xa8,0xac,0x00,0x00,
+ 0x05,0x20,0xcc,0xaf,0x6c,0x3b,0xd0,0x13,0x76,0x23,0xc3,0x36,0xbb,0x64,0x4a,0x4a,0x06,0x93,0x69,0x6d,0xb0,0x10,0x6e,0x66,0xa4,0x61,0xf8,0x2d,0xe7,0x80,0x72,0x4d,0x53,0x94,0x00,0x00,
};
static const uint8_t chainparams_seed_test[] = {
diff --git a/src/clientversion.cpp b/src/clientversion.cpp
index 29c38e2d3b..f97e4097e8 100644
--- a/src/clientversion.cpp
+++ b/src/clientversion.cpp
@@ -30,8 +30,10 @@ const std::string CLIENT_NAME("Satoshi");
#define BUILD_DESC BUILD_GIT_TAG
#define BUILD_SUFFIX ""
#else
- #define BUILD_DESC "v" STRINGIZE(CLIENT_VERSION_MAJOR) "." STRINGIZE(CLIENT_VERSION_MINOR) "." STRINGIZE(CLIENT_VERSION_BUILD)
- #ifdef BUILD_GIT_COMMIT
+ #define BUILD_DESC "v" PACKAGE_VERSION
+ #if CLIENT_VERSION_IS_RELEASE
+ #define BUILD_SUFFIX ""
+ #elif defined(BUILD_GIT_COMMIT)
#define BUILD_SUFFIX "-" BUILD_GIT_COMMIT
#elif defined(GIT_COMMIT_ID)
#define BUILD_SUFFIX "-g" GIT_COMMIT_ID
@@ -40,8 +42,6 @@ const std::string CLIENT_NAME("Satoshi");
#endif
#endif
-const std::string CLIENT_BUILD(BUILD_DESC BUILD_SUFFIX);
-
static std::string FormatVersion(int nVersion)
{
return strprintf("%d.%d.%d", nVersion / 10000, (nVersion / 100) % 100, nVersion % 100);
@@ -49,6 +49,7 @@ static std::string FormatVersion(int nVersion)
std::string FormatFullVersion()
{
+ static const std::string CLIENT_BUILD(BUILD_DESC BUILD_SUFFIX);
return CLIENT_BUILD;
}
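The clientversion.cpp hunks above also move CLIENT_BUILD out of global scope and into a function-local static, so the string is assembled on first use rather than during static initialization. A generic sketch of that pattern, with a placeholder version string instead of the real BUILD_DESC/BUILD_SUFFIX macros:

```cpp
#include <string>

std::string FormatFullVersionSketch()
{
    // Constructed once, on first call; C++11 guarantees this initialization is thread-safe.
    static const std::string client_build{"v22.0-unknown"}; // placeholder, not the real build string
    return client_build;
}
```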
diff --git a/src/clientversion.h b/src/clientversion.h
index 0ed3f68094..a3e6233437 100644
--- a/src/clientversion.h
+++ b/src/clientversion.h
@@ -36,7 +36,6 @@ static const int CLIENT_VERSION =
+ 1 * CLIENT_VERSION_BUILD;
extern const std::string CLIENT_NAME;
-extern const std::string CLIENT_BUILD;
std::string FormatFullVersion();
diff --git a/src/consensus/params.h b/src/consensus/params.h
index 174f4677fa..77bf7fd0d8 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -11,8 +11,11 @@
namespace Consensus {
-enum BuriedDeployment : int16_t
-{
+/**
+ * A buried deployment is one where the height of the activation has been hardcoded into
+ * the client implementation long after the consensus change has activated. See BIP 90.
+ */
+enum BuriedDeployment : int16_t {
// buried deployments get negative values to avoid overlap with DeploymentPos
DEPLOYMENT_HEIGHTINCB = std::numeric_limits<int16_t>::min(),
DEPLOYMENT_CLTV,
@@ -20,16 +23,15 @@ enum BuriedDeployment : int16_t
DEPLOYMENT_CSV,
DEPLOYMENT_SEGWIT,
};
-constexpr bool ValidDeployment(BuriedDeployment dep) { return DEPLOYMENT_HEIGHTINCB <= dep && dep <= DEPLOYMENT_SEGWIT; }
+constexpr bool ValidDeployment(BuriedDeployment dep) { return dep <= DEPLOYMENT_SEGWIT; }
-enum DeploymentPos : uint16_t
-{
+enum DeploymentPos : uint16_t {
DEPLOYMENT_TESTDUMMY,
DEPLOYMENT_TAPROOT, // Deployment of Schnorr/Taproot (BIPs 340-342)
// NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp
MAX_VERSION_BITS_DEPLOYMENTS
};
-constexpr bool ValidDeployment(DeploymentPos dep) { return DEPLOYMENT_TESTDUMMY <= dep && dep <= DEPLOYMENT_TAPROOT; }
+constexpr bool ValidDeployment(DeploymentPos dep) { return dep < MAX_VERSION_BITS_DEPLOYMENTS; }
/**
* Struct for each individual consensus rule change using BIP9.
diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp
index 88d8da6ed5..0ab790ccdc 100644
--- a/src/consensus/tx_verify.cpp
+++ b/src/consensus/tx_verify.cpp
@@ -144,7 +144,7 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& in
return nSigOps;
}
-int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
+int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, uint32_t flags)
{
int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;
diff --git a/src/consensus/tx_verify.h b/src/consensus/tx_verify.h
index d5fd43e131..264433c33d 100644
--- a/src/consensus/tx_verify.h
+++ b/src/consensus/tx_verify.h
@@ -49,10 +49,10 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& ma
* Compute total signature operation cost of a transaction.
* @param[in] tx Transaction for which we are computing the cost
* @param[in] inputs Map of previous transactions that have outputs we're spending
- * @param[out] flags Script verification flags
+ * @param[in] flags Script verification flags
* @return Total signature operation cost of tx
*/
-int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags);
+int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, uint32_t flags);
/**
* Check if transaction is final and can be included in a block with the
diff --git a/src/crypto/chacha_poly_aead.cpp b/src/crypto/chacha_poly_aead.cpp
index 0582a60c4f..b73b22a2b8 100644
--- a/src/crypto/chacha_poly_aead.cpp
+++ b/src/crypto/chacha_poly_aead.cpp
@@ -31,8 +31,9 @@ ChaCha20Poly1305AEAD::ChaCha20Poly1305AEAD(const unsigned char* K_1, size_t K_1_
{
assert(K_1_len == CHACHA20_POLY1305_AEAD_KEY_LEN);
assert(K_2_len == CHACHA20_POLY1305_AEAD_KEY_LEN);
- m_chacha_main.SetKey(K_1, CHACHA20_POLY1305_AEAD_KEY_LEN);
- m_chacha_header.SetKey(K_2, CHACHA20_POLY1305_AEAD_KEY_LEN);
+
+ m_chacha_header.SetKey(K_1, CHACHA20_POLY1305_AEAD_KEY_LEN);
+ m_chacha_main.SetKey(K_2, CHACHA20_POLY1305_AEAD_KEY_LEN);
// set the cached sequence number to uint64 max which hints for an unset cache.
// we can't hit uint64 max since the rekey rule (which resets the sequence number) is 1GB
diff --git a/src/deploymentstatus.cpp b/src/deploymentstatus.cpp
index 9007800421..bba86639a3 100644
--- a/src/deploymentstatus.cpp
+++ b/src/deploymentstatus.cpp
@@ -7,6 +7,8 @@
#include <consensus/params.h>
#include <versionbits.h>
+#include <type_traits>
+
VersionBitsCache g_versionbitscache;
/* Basic sanity checking for BuriedDeployment/DeploymentPos enums and
@@ -15,3 +17,18 @@ VersionBitsCache g_versionbitscache;
static_assert(ValidDeployment(Consensus::DEPLOYMENT_TESTDUMMY), "sanity check of DeploymentPos failed (TESTDUMMY not valid)");
static_assert(!ValidDeployment(Consensus::MAX_VERSION_BITS_DEPLOYMENTS), "sanity check of DeploymentPos failed (MAX value considered valid)");
static_assert(!ValidDeployment(static_cast<Consensus::BuriedDeployment>(Consensus::DEPLOYMENT_TESTDUMMY)), "sanity check of BuriedDeployment failed (overlaps with DeploymentPos)");
+
+/* ValidDeployment only checks upper bounds for ensuring validity.
+ * This checks that the lowest possible value of the type is also a
+ * (specific) valid deployment so that lower bounds don't need to be checked.
+ */
+
+template<typename T, T x>
+static constexpr bool is_minimum()
+{
+ using U = typename std::underlying_type<T>::type;
+ return x == std::numeric_limits<U>::min();
+}
+
+static_assert(is_minimum<Consensus::BuriedDeployment, Consensus::DEPLOYMENT_HEIGHTINCB>(), "heightincb is not minimum value for BuriedDeployment");
+static_assert(is_minimum<Consensus::DeploymentPos, Consensus::DEPLOYMENT_TESTDUMMY>(), "testdummy is not minimum value for DeploymentPos");
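The is_minimum() assertions above are what make the relaxed ValidDeployment() bounds in consensus/params.h safe: once the first enumerator is pinned to the underlying type's minimum, checking the upper bound alone covers the whole range. A self-contained illustration with a toy enum (the names here are invented for the example):

```cpp
#include <cstdint>
#include <limits>
#include <type_traits>

enum class ToyDeployment : int16_t {
    FIRST = std::numeric_limits<int16_t>::min(), // pinned to the lowest representable value
    SECOND,
};

// Compile-time check that enumerator x is the minimum of its underlying type.
template <typename T, T x>
constexpr bool is_minimum()
{
    using U = typename std::underlying_type<T>::type;
    return static_cast<U>(x) == std::numeric_limits<U>::min();
}

static_assert(is_minimum<ToyDeployment, ToyDeployment::FIRST>(),
              "lower-bound checks are unnecessary only if FIRST is the type's minimum");
```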
diff --git a/src/deploymentstatus.h b/src/deploymentstatus.h
index 84c5e54698..f95c5996f5 100644
--- a/src/deploymentstatus.h
+++ b/src/deploymentstatus.h
@@ -49,7 +49,7 @@ inline bool DeploymentEnabled(const Consensus::Params& params, Consensus::Buried
inline bool DeploymentEnabled(const Consensus::Params& params, Consensus::DeploymentPos dep)
{
assert(Consensus::ValidDeployment(dep));
- return params.vDeployments[dep].nTimeout != 0;
+ return params.vDeployments[dep].nStartTime != Consensus::BIP9Deployment::NEVER_ACTIVE;
}
#endif // BITCOIN_DEPLOYMENTSTATUS_H
diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp
index 95886d3138..2d897f4c40 100644
--- a/src/dummywallet.cpp
+++ b/src/dummywallet.cpp
@@ -28,6 +28,7 @@ void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const
"-addresstype",
"-avoidpartialspends",
"-changetype",
+ "-consolidatefeerate=<amt>",
"-disablewallet",
"-discardfee=<amt>",
"-fallbackfee=<amt>",
diff --git a/src/fs.cpp b/src/fs.cpp
index 4f20ca4d28..b9b3c46d8d 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -154,7 +154,10 @@ std::string get_filesystem_error_message(const fs::filesystem_error& e)
#ifdef __GLIBCXX__
// reference: https://github.com/gcc-mirror/gcc/blob/gcc-7_3_0-release/libstdc%2B%2B-v3/include/std/fstream#L270
-
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wswitch"
+#endif
static std::string openmodeToStr(std::ios_base::openmode mode)
{
switch (mode & ~std::ios_base::ate) {
@@ -192,6 +195,9 @@ static std::string openmodeToStr(std::ios_base::openmode mode)
return std::string();
}
}
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
void ifstream::open(const fs::path& p, std::ios_base::openmode mode)
{
@@ -242,7 +248,11 @@ void ofstream::close()
}
#else // __GLIBCXX__
+#if BOOST_VERSION >= 107700
+static_assert(sizeof(*BOOST_FILESYSTEM_C_STR(fs::path())) == sizeof(wchar_t),
+#else
static_assert(sizeof(*fs::path().BOOST_FILESYSTEM_C_STR) == sizeof(wchar_t),
+#endif // BOOST_VERSION >= 107700
"Warning: This build is using boost::filesystem ofstream and ifstream "
"implementations which will fail to open paths containing multibyte "
"characters. You should delete this static_assert to ignore this warning, "
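The new #pragma guards in fs.cpp suppress -Wswitch for one function and only when compiling with GCC, leaving Clang builds untouched. A minimal standalone example of the same guard pattern, using a toy enum rather than std::ios_base::openmode:

```cpp
enum class Fruit { APPLE, BANANA, CHERRY };

#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch"
#endif
const char* Describe(Fruit f)
{
    switch (f) { // CHERRY is intentionally unhandled; -Wswitch would normally warn here
    case Fruit::APPLE: return "apple";
    case Fruit::BANANA: return "banana";
    }
    return "other";
}
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
```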
diff --git a/src/hash.cpp b/src/hash.cpp
index cc46043c2b..3465caa3a9 100644
--- a/src/hash.cpp
+++ b/src/hash.cpp
@@ -47,8 +47,10 @@ unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vData
switch (vDataToHash.size() & 3) {
case 3:
k1 ^= tail[2] << 16;
+ [[fallthrough]];
case 2:
k1 ^= tail[1] << 8;
+ [[fallthrough]];
case 1:
k1 ^= tail[0];
k1 *= c1;
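The [[fallthrough]] attributes added above annotate MurmurHash3's intentional case fall-through so warnings like -Wimplicit-fallthrough stay quiet. A compact standalone sketch of the same tail-mixing shape (simplified, not the full hash):

```cpp
#include <cstddef>
#include <cstdint>

uint32_t MixTail(const unsigned char* tail, size_t len, uint32_t k1)
{
    switch (len & 3) {
    case 3:
        k1 ^= tail[2] << 16;
        [[fallthrough]]; // each case deliberately continues into the next
    case 2:
        k1 ^= tail[1] << 8;
        [[fallthrough]];
    case 1:
        k1 ^= tail[0];
    }
    return k1;
}
```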
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index e11e4acb5c..9ae592be79 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -10,6 +10,7 @@
#include <rpc/protocol.h>
#include <rpc/server.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <util/system.h>
#include <util/translation.h>
#include <walletinitinterface.h>
@@ -22,7 +23,7 @@
#include <set>
#include <string>
-#include <boost/algorithm/string.hpp> // boost::trim
+#include <boost/algorithm/string.hpp>
/** WWW-Authenticate to present with 401 Unauthorized response */
static const char* WWW_AUTH_HEADER_DATA = "Basic realm=\"jsonrpc\"";
@@ -130,8 +131,7 @@ static bool RPCAuthorized(const std::string& strAuth, std::string& strAuthUserna
return false;
if (strAuth.substr(0, 6) != "Basic ")
return false;
- std::string strUserPass64 = strAuth.substr(6);
- boost::trim(strUserPass64);
+ std::string strUserPass64 = TrimString(strAuth.substr(6));
std::string strUserPass = DecodeBase64(strUserPass64);
if (strUserPass.find(':') != std::string::npos)
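Swapping boost::trim for TrimString above drops another Boost call from httprpc.cpp. As a rough idea of what such a helper can look like, here is an illustrative trim function; it is a stand-in, not the actual util/string.h implementation:

```cpp
#include <string>

std::string TrimmedCopy(const std::string& str, const std::string& pattern = " \f\n\r\t\v")
{
    const auto front = str.find_first_not_of(pattern);
    if (front == std::string::npos) return {};
    const auto back = str.find_last_not_of(pattern);
    return str.substr(front, back - front + 1);
}
```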
diff --git a/src/i2p.cpp b/src/i2p.cpp
index 2ae164633b..5e7e42fb77 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -159,7 +159,7 @@ bool Session::Accept(Connection& conn)
const std::string& peer_dest =
conn.sock->RecvUntilTerminator('\n', MAX_WAIT_FOR_IO, *m_interrupt, MAX_MSG_SIZE);
- conn.peer = CService(DestB64ToAddr(peer_dest), Params().GetDefaultPort());
+ conn.peer = CService(DestB64ToAddr(peer_dest), I2P_SAM31_PORT);
return true;
}
@@ -172,6 +172,13 @@ bool Session::Accept(Connection& conn)
bool Session::Connect(const CService& to, Connection& conn, bool& proxy_error)
{
+ // Refuse connecting to arbitrary ports. We don't specify any destination port to the SAM proxy
+ // when connecting (SAM 3.1 does not use ports) and it forces/defaults it to I2P_SAM31_PORT.
+ if (to.GetPort() != I2P_SAM31_PORT) {
+ proxy_error = false;
+ return false;
+ }
+
proxy_error = true;
std::string session_id;
@@ -366,7 +373,7 @@ void Session::CreateIfNotCreatedAlready()
SendRequestAndGetReply(*sock, strprintf("SESSION CREATE STYLE=STREAM ID=%s DESTINATION=%s",
session_id, private_key_b64));
- m_my_addr = CService(DestBinToAddr(MyDestination()), Params().GetDefaultPort());
+ m_my_addr = CService(DestBinToAddr(MyDestination()), I2P_SAM31_PORT);
m_session_id = session_id;
m_control_sock = std::move(sock);
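The early return added to Session::Connect() above distinguishes a caller-side condition (an unsupported destination port) from a genuine proxy failure by clearing the proxy_error out-flag before bailing out. A stripped-down sketch of that guard pattern (constants and names here are illustrative only):

```cpp
#include <cstdint>

constexpr uint16_t SAM31_FIXED_PORT = 0; // value assumed for this sketch

bool ConnectSketch(uint16_t dest_port, bool& proxy_error)
{
    if (dest_port != SAM31_FIXED_PORT) {
        proxy_error = false; // caller asked for an unsupported port; don't blame the proxy
        return false;
    }
    proxy_error = true; // any failure past this point would be proxy-related
    // ... proxy handshake would go here ...
    proxy_error = false;
    return true;
}
```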
diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp
index 8c6046489b..9ab9209ca4 100644
--- a/src/index/coinstatsindex.cpp
+++ b/src/index/coinstatsindex.cpp
@@ -24,14 +24,14 @@ struct DBVal {
uint64_t bogo_size;
CAmount total_amount;
CAmount total_subsidy;
- CAmount block_unspendable_amount;
- CAmount block_prevout_spent_amount;
- CAmount block_new_outputs_ex_coinbase_amount;
- CAmount block_coinbase_amount;
- CAmount unspendables_genesis_block;
- CAmount unspendables_bip30;
- CAmount unspendables_scripts;
- CAmount unspendables_unclaimed_rewards;
+ CAmount total_unspendable_amount;
+ CAmount total_prevout_spent_amount;
+ CAmount total_new_outputs_ex_coinbase_amount;
+ CAmount total_coinbase_amount;
+ CAmount total_unspendables_genesis_block;
+ CAmount total_unspendables_bip30;
+ CAmount total_unspendables_scripts;
+ CAmount total_unspendables_unclaimed_rewards;
SERIALIZE_METHODS(DBVal, obj)
{
@@ -40,14 +40,14 @@ struct DBVal {
READWRITE(obj.bogo_size);
READWRITE(obj.total_amount);
READWRITE(obj.total_subsidy);
- READWRITE(obj.block_unspendable_amount);
- READWRITE(obj.block_prevout_spent_amount);
- READWRITE(obj.block_new_outputs_ex_coinbase_amount);
- READWRITE(obj.block_coinbase_amount);
- READWRITE(obj.unspendables_genesis_block);
- READWRITE(obj.unspendables_bip30);
- READWRITE(obj.unspendables_scripts);
- READWRITE(obj.unspendables_unclaimed_rewards);
+ READWRITE(obj.total_unspendable_amount);
+ READWRITE(obj.total_prevout_spent_amount);
+ READWRITE(obj.total_new_outputs_ex_coinbase_amount);
+ READWRITE(obj.total_coinbase_amount);
+ READWRITE(obj.total_unspendables_genesis_block);
+ READWRITE(obj.total_unspendables_bip30);
+ READWRITE(obj.total_unspendables_scripts);
+ READWRITE(obj.total_unspendables_unclaimed_rewards);
}
};
@@ -122,9 +122,12 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
uint256 expected_block_hash{pindex->pprev->GetBlockHash()};
if (read_out.first != expected_block_hash) {
+ LogPrintf("WARNING: previous block header belongs to unexpected block %s; expected %s\n",
+ read_out.first.ToString(), expected_block_hash.ToString());
+
if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) {
- return error("%s: previous block header belongs to unexpected block %s; expected %s",
- __func__, read_out.first.ToString(), expected_block_hash.ToString());
+ return error("%s: previous block header not found; expected %s",
+ __func__, expected_block_hash.ToString());
}
}
@@ -138,29 +141,29 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
// Skip duplicate txid coinbase transactions (BIP30).
if (is_bip30_block && tx->IsCoinBase()) {
- m_block_unspendable_amount += block_subsidy;
- m_unspendables_bip30 += block_subsidy;
+ m_total_unspendable_amount += block_subsidy;
+ m_total_unspendables_bip30 += block_subsidy;
continue;
}
- for (size_t j = 0; j < tx->vout.size(); ++j) {
+ for (uint32_t j = 0; j < tx->vout.size(); ++j) {
const CTxOut& out{tx->vout[j]};
Coin coin{out, pindex->nHeight, tx->IsCoinBase()};
- COutPoint outpoint{tx->GetHash(), static_cast<uint32_t>(j)};
+ COutPoint outpoint{tx->GetHash(), j};
// Skip unspendable coins
if (coin.out.scriptPubKey.IsUnspendable()) {
- m_block_unspendable_amount += coin.out.nValue;
- m_unspendables_scripts += coin.out.nValue;
+ m_total_unspendable_amount += coin.out.nValue;
+ m_total_unspendables_scripts += coin.out.nValue;
continue;
}
m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin)));
if (tx->IsCoinBase()) {
- m_block_coinbase_amount += coin.out.nValue;
+ m_total_coinbase_amount += coin.out.nValue;
} else {
- m_block_new_outputs_ex_coinbase_amount += coin.out.nValue;
+ m_total_new_outputs_ex_coinbase_amount += coin.out.nValue;
}
++m_transaction_output_count;
@@ -178,7 +181,7 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin)));
- m_block_prevout_spent_amount += coin.out.nValue;
+ m_total_prevout_spent_amount += coin.out.nValue;
--m_transaction_output_count;
m_total_amount -= coin.out.nValue;
@@ -188,17 +191,17 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
}
} else {
// genesis block
- m_block_unspendable_amount += block_subsidy;
- m_unspendables_genesis_block += block_subsidy;
+ m_total_unspendable_amount += block_subsidy;
+ m_total_unspendables_genesis_block += block_subsidy;
}
// If spent prevouts + block subsidy are still a higher amount than
// new outputs + coinbase + current unspendable amount this means
// the miner did not claim the full block reward. Unclaimed block
// rewards are also unspendable.
- const CAmount unclaimed_rewards{(m_block_prevout_spent_amount + m_total_subsidy) - (m_block_new_outputs_ex_coinbase_amount + m_block_coinbase_amount + m_block_unspendable_amount)};
- m_block_unspendable_amount += unclaimed_rewards;
- m_unspendables_unclaimed_rewards += unclaimed_rewards;
+ const CAmount unclaimed_rewards{(m_total_prevout_spent_amount + m_total_subsidy) - (m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount + m_total_unspendable_amount)};
+ m_total_unspendable_amount += unclaimed_rewards;
+ m_total_unspendables_unclaimed_rewards += unclaimed_rewards;
std::pair<uint256, DBVal> value;
value.first = pindex->GetBlockHash();
@@ -206,20 +209,23 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
value.second.bogo_size = m_bogo_size;
value.second.total_amount = m_total_amount;
value.second.total_subsidy = m_total_subsidy;
- value.second.block_unspendable_amount = m_block_unspendable_amount;
- value.second.block_prevout_spent_amount = m_block_prevout_spent_amount;
- value.second.block_new_outputs_ex_coinbase_amount = m_block_new_outputs_ex_coinbase_amount;
- value.second.block_coinbase_amount = m_block_coinbase_amount;
- value.second.unspendables_genesis_block = m_unspendables_genesis_block;
- value.second.unspendables_bip30 = m_unspendables_bip30;
- value.second.unspendables_scripts = m_unspendables_scripts;
- value.second.unspendables_unclaimed_rewards = m_unspendables_unclaimed_rewards;
+ value.second.total_unspendable_amount = m_total_unspendable_amount;
+ value.second.total_prevout_spent_amount = m_total_prevout_spent_amount;
+ value.second.total_new_outputs_ex_coinbase_amount = m_total_new_outputs_ex_coinbase_amount;
+ value.second.total_coinbase_amount = m_total_coinbase_amount;
+ value.second.total_unspendables_genesis_block = m_total_unspendables_genesis_block;
+ value.second.total_unspendables_bip30 = m_total_unspendables_bip30;
+ value.second.total_unspendables_scripts = m_total_unspendables_scripts;
+ value.second.total_unspendables_unclaimed_rewards = m_total_unspendables_unclaimed_rewards;
uint256 out;
m_muhash.Finalize(out);
value.second.muhash = out;
- return m_db->Write(DBHeightKey(pindex->nHeight), value) && m_db->Write(DB_MUHASH, m_muhash);
+ CDBBatch batch(*m_db);
+ batch.Write(DBHeightKey(pindex->nHeight), value);
+ batch.Write(DB_MUHASH, m_muhash);
+ return m_db->WriteBatch(batch);
}
static bool CopyHeightIndexToHashIndex(CDBIterator& db_it, CDBBatch& batch,
@@ -317,14 +323,14 @@ bool CoinStatsIndex::LookUpStats(const CBlockIndex* block_index, CCoinsStats& co
coins_stats.nBogoSize = entry.bogo_size;
coins_stats.nTotalAmount = entry.total_amount;
coins_stats.total_subsidy = entry.total_subsidy;
- coins_stats.block_unspendable_amount = entry.block_unspendable_amount;
- coins_stats.block_prevout_spent_amount = entry.block_prevout_spent_amount;
- coins_stats.block_new_outputs_ex_coinbase_amount = entry.block_new_outputs_ex_coinbase_amount;
- coins_stats.block_coinbase_amount = entry.block_coinbase_amount;
- coins_stats.unspendables_genesis_block = entry.unspendables_genesis_block;
- coins_stats.unspendables_bip30 = entry.unspendables_bip30;
- coins_stats.unspendables_scripts = entry.unspendables_scripts;
- coins_stats.unspendables_unclaimed_rewards = entry.unspendables_unclaimed_rewards;
+ coins_stats.total_unspendable_amount = entry.total_unspendable_amount;
+ coins_stats.total_prevout_spent_amount = entry.total_prevout_spent_amount;
+ coins_stats.total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount;
+ coins_stats.total_coinbase_amount = entry.total_coinbase_amount;
+ coins_stats.total_unspendables_genesis_block = entry.total_unspendables_genesis_block;
+ coins_stats.total_unspendables_bip30 = entry.total_unspendables_bip30;
+ coins_stats.total_unspendables_scripts = entry.total_unspendables_scripts;
+ coins_stats.total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards;
return true;
}
@@ -341,33 +347,31 @@ bool CoinStatsIndex::Init()
}
}
- if (BaseIndex::Init()) {
- const CBlockIndex* pindex{CurrentIndex()};
+ if (!BaseIndex::Init()) return false;
- if (pindex) {
- DBVal entry;
- if (!LookUpOne(*m_db, pindex, entry)) {
- return false;
- }
+ const CBlockIndex* pindex{CurrentIndex()};
- m_transaction_output_count = entry.transaction_output_count;
- m_bogo_size = entry.bogo_size;
- m_total_amount = entry.total_amount;
- m_total_subsidy = entry.total_subsidy;
- m_block_unspendable_amount = entry.block_unspendable_amount;
- m_block_prevout_spent_amount = entry.block_prevout_spent_amount;
- m_block_new_outputs_ex_coinbase_amount = entry.block_new_outputs_ex_coinbase_amount;
- m_block_coinbase_amount = entry.block_coinbase_amount;
- m_unspendables_genesis_block = entry.unspendables_genesis_block;
- m_unspendables_bip30 = entry.unspendables_bip30;
- m_unspendables_scripts = entry.unspendables_scripts;
- m_unspendables_unclaimed_rewards = entry.unspendables_unclaimed_rewards;
+ if (pindex) {
+ DBVal entry;
+ if (!LookUpOne(*m_db, pindex, entry)) {
+ return false;
}
- return true;
+ m_transaction_output_count = entry.transaction_output_count;
+ m_bogo_size = entry.bogo_size;
+ m_total_amount = entry.total_amount;
+ m_total_subsidy = entry.total_subsidy;
+ m_total_unspendable_amount = entry.total_unspendable_amount;
+ m_total_prevout_spent_amount = entry.total_prevout_spent_amount;
+ m_total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount;
+ m_total_coinbase_amount = entry.total_coinbase_amount;
+ m_total_unspendables_genesis_block = entry.total_unspendables_genesis_block;
+ m_total_unspendables_bip30 = entry.total_unspendables_bip30;
+ m_total_unspendables_scripts = entry.total_unspendables_scripts;
+ m_total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards;
}
- return false;
+ return true;
}
// Reverse a single block as part of a reorg
@@ -391,9 +395,12 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex
uint256 expected_block_hash{pindex->pprev->GetBlockHash()};
if (read_out.first != expected_block_hash) {
+ LogPrintf("WARNING: previous block header belongs to unexpected block %s; expected %s\n",
+ read_out.first.ToString(), expected_block_hash.ToString());
+
if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) {
- return error("%s: previous block header belongs to unexpected block %s; expected %s",
- __func__, read_out.first.ToString(), expected_block_hash.ToString());
+ return error("%s: previous block header not found; expected %s",
+ __func__, expected_block_hash.ToString());
}
}
}
@@ -402,24 +409,24 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex
for (size_t i = 0; i < block.vtx.size(); ++i) {
const auto& tx{block.vtx.at(i)};
- for (size_t j = 0; j < tx->vout.size(); ++j) {
+ for (uint32_t j = 0; j < tx->vout.size(); ++j) {
const CTxOut& out{tx->vout[j]};
- COutPoint outpoint{tx->GetHash(), static_cast<uint32_t>(j)};
+ COutPoint outpoint{tx->GetHash(), j};
Coin coin{out, pindex->nHeight, tx->IsCoinBase()};
// Skip unspendable coins
if (coin.out.scriptPubKey.IsUnspendable()) {
- m_block_unspendable_amount -= coin.out.nValue;
- m_unspendables_scripts -= coin.out.nValue;
+ m_total_unspendable_amount -= coin.out.nValue;
+ m_total_unspendables_scripts -= coin.out.nValue;
continue;
}
m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin)));
if (tx->IsCoinBase()) {
- m_block_coinbase_amount -= coin.out.nValue;
+ m_total_coinbase_amount -= coin.out.nValue;
} else {
- m_block_new_outputs_ex_coinbase_amount -= coin.out.nValue;
+ m_total_new_outputs_ex_coinbase_amount -= coin.out.nValue;
}
--m_transaction_output_count;
@@ -437,7 +444,7 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex
m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin)));
- m_block_prevout_spent_amount -= coin.out.nValue;
+ m_total_prevout_spent_amount -= coin.out.nValue;
m_transaction_output_count++;
m_total_amount += coin.out.nValue;
@@ -446,9 +453,9 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex
}
}
- const CAmount unclaimed_rewards{(m_block_new_outputs_ex_coinbase_amount + m_block_coinbase_amount + m_block_unspendable_amount) - (m_block_prevout_spent_amount + m_total_subsidy)};
- m_block_unspendable_amount -= unclaimed_rewards;
- m_unspendables_unclaimed_rewards -= unclaimed_rewards;
+ const CAmount unclaimed_rewards{(m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount + m_total_unspendable_amount) - (m_total_prevout_spent_amount + m_total_subsidy)};
+ m_total_unspendable_amount -= unclaimed_rewards;
+ m_total_unspendables_unclaimed_rewards -= unclaimed_rewards;
// Check that the rolled back internal values are consistent with the DB read out
uint256 out;
@@ -459,14 +466,14 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex
Assert(m_total_amount == read_out.second.total_amount);
Assert(m_bogo_size == read_out.second.bogo_size);
Assert(m_total_subsidy == read_out.second.total_subsidy);
- Assert(m_block_unspendable_amount == read_out.second.block_unspendable_amount);
- Assert(m_block_prevout_spent_amount == read_out.second.block_prevout_spent_amount);
- Assert(m_block_new_outputs_ex_coinbase_amount == read_out.second.block_new_outputs_ex_coinbase_amount);
- Assert(m_block_coinbase_amount == read_out.second.block_coinbase_amount);
- Assert(m_unspendables_genesis_block == read_out.second.unspendables_genesis_block);
- Assert(m_unspendables_bip30 == read_out.second.unspendables_bip30);
- Assert(m_unspendables_scripts == read_out.second.unspendables_scripts);
- Assert(m_unspendables_unclaimed_rewards == read_out.second.unspendables_unclaimed_rewards);
+ Assert(m_total_unspendable_amount == read_out.second.total_unspendable_amount);
+ Assert(m_total_prevout_spent_amount == read_out.second.total_prevout_spent_amount);
+ Assert(m_total_new_outputs_ex_coinbase_amount == read_out.second.total_new_outputs_ex_coinbase_amount);
+ Assert(m_total_coinbase_amount == read_out.second.total_coinbase_amount);
+ Assert(m_total_unspendables_genesis_block == read_out.second.total_unspendables_genesis_block);
+ Assert(m_total_unspendables_bip30 == read_out.second.total_unspendables_bip30);
+ Assert(m_total_unspendables_scripts == read_out.second.total_unspendables_scripts);
+ Assert(m_total_unspendables_unclaimed_rewards == read_out.second.total_unspendables_unclaimed_rewards);
return m_db->Write(DB_MUHASH, m_muhash);
}
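Beyond the m_block_* to m_total_* renames, WriteBlock() above now commits the height entry and the running muhash through one CDBBatch, so a crash between the two writes cannot leave the index half-updated. A hypothetical key-value sketch of that batching idea (the KvBatch/KvStore types below are illustrative, not Bitcoin Core's database classes):

```cpp
#include <map>
#include <string>
#include <utility>
#include <vector>

struct KvBatch {
    std::vector<std::pair<std::string, std::string>> ops;
    void Write(std::string key, std::string value) { ops.emplace_back(std::move(key), std::move(value)); }
};

struct KvStore {
    std::map<std::string, std::string> data;
    // Apply all queued operations together (a real backend would make this crash-safe).
    bool WriteBatch(const KvBatch& batch)
    {
        for (const auto& [k, v] : batch.ops) data[k] = v;
        return true;
    }
};

bool PersistBlock(KvStore& db, const std::string& height_key, const std::string& entry,
                  const std::string& muhash)
{
    KvBatch batch;
    batch.Write(height_key, entry);
    batch.Write("muhash", muhash);
    return db.WriteBatch(batch); // single commit instead of two independent writes
}
```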
diff --git a/src/index/coinstatsindex.h b/src/index/coinstatsindex.h
index 6149f9b4b3..a575b37c7c 100644
--- a/src/index/coinstatsindex.h
+++ b/src/index/coinstatsindex.h
@@ -25,14 +25,14 @@ private:
uint64_t m_bogo_size{0};
CAmount m_total_amount{0};
CAmount m_total_subsidy{0};
- CAmount m_block_unspendable_amount{0};
- CAmount m_block_prevout_spent_amount{0};
- CAmount m_block_new_outputs_ex_coinbase_amount{0};
- CAmount m_block_coinbase_amount{0};
- CAmount m_unspendables_genesis_block{0};
- CAmount m_unspendables_bip30{0};
- CAmount m_unspendables_scripts{0};
- CAmount m_unspendables_unclaimed_rewards{0};
+ CAmount m_total_unspendable_amount{0};
+ CAmount m_total_prevout_spent_amount{0};
+ CAmount m_total_new_outputs_ex_coinbase_amount{0};
+ CAmount m_total_coinbase_amount{0};
+ CAmount m_total_unspendables_genesis_block{0};
+ CAmount m_total_unspendables_bip30{0};
+ CAmount m_total_unspendables_scripts{0};
+ CAmount m_total_unspendables_unclaimed_rewards{0};
bool ReverseBlock(const CBlock& block, const CBlockIndex* pindex);
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 782e557478..cde9821f3d 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -204,7 +204,7 @@ bool TxIndex::Init()
// Attempt to migrate txindex from the old database to the new one. Even if
// chain_tip is null, the node could be reindexing and we still want to
// delete txindex records in the old database.
- if (!m_db->MigrateData(*pblocktree, m_chainstate->m_chain.GetLocator())) {
+ if (!m_db->MigrateData(*m_chainstate->m_blockman.m_block_tree_db, m_chainstate->m_chain.GetLocator())) {
return false;
}
diff --git a/src/init.cpp b/src/init.cpp
index ae96f510bc..b744298667 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -264,7 +264,6 @@ void Shutdown(NodeContext& node)
chainstate->ResetCoinsViews();
}
}
- pblocktree.reset();
}
for (const auto& client : node.chain_clients) {
client->stop();
@@ -427,7 +426,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-dnsseed", strprintf("Query for peer addresses via DNS lookup, if low on addresses (default: %u unless -connect used)", DEFAULT_DNSSEED), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-fixedseeds", strprintf("Allow fixed seeds if DNS seeds don't provide peers (default: %u)", DEFAULT_FIXEDSEEDS), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
- argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u). This limit does not apply to connections manually added via -addnode or the addnode RPC, which have a separate limit of %u.", DEFAULT_MAX_PEER_CONNECTIONS, MAX_ADDNODE_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -442,7 +441,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-port=<port>", strprintf("Listen for connections on <port>. Nodes not using the default ports (default: %u, testnet: %u, signet: %u, regtest: %u) are unlikely to get incoming connections.", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-port=<port>", strprintf("Listen for connections on <port>. Nodes not using the default ports (default: %u, testnet: %u, signet: %u, regtest: %u) are unlikely to get incoming connections. Not relevant for I2P (see doc/i2p.md).", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
argsman.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -502,7 +501,8 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-checklevel=<n>", strprintf("How thorough the block verification of -checkblocks is: %s (0-4, default: %u)", Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-checkblockindex", strprintf("Do a consistency check for the block tree, chainstate, and other validation data structures occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- argsman.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkaddrman=<n>", strprintf("Run addrman consistency checks every <n> operations. Use 0 to disable. (default: %u)", DEFAULT_ADDRMAN_CONSISTENCY_CHECKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkmempool=<n>", strprintf("Run mempool consistency checks every <n> transactions. Use 0 to disable. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block %s (default: %u)", defaultChainParams->Checkpoints().GetHeight(), DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
@@ -716,7 +716,7 @@ namespace { // Variables internal to initialization process only
int nMaxConnections;
int nUserMaxConnections;
int nFD;
-ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK | NODE_NETWORK_LIMITED);
+ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK | NODE_NETWORK_LIMITED | NODE_WITNESS);
int64_t peer_connect_timeout;
std::set<BlockFilterType> g_enabled_filter_types;
@@ -849,12 +849,22 @@ bool AppInitParameterInteraction(const ArgsManager& args)
return InitError(_("Prune mode is incompatible with -coinstatsindex."));
}
+ // If -forcednsseed is set to true, ensure -dnsseed has not been set to false
+ if (args.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED) && !args.GetBoolArg("-dnsseed", DEFAULT_DNSSEED)){
+ return InitError(_("Cannot set -forcednsseed to true when setting -dnsseed to false."));
+ }
+
// -bind and -whitebind can't be set when not listening
size_t nUserBind = args.GetArgs("-bind").size() + args.GetArgs("-whitebind").size();
if (nUserBind != 0 && !args.GetBoolArg("-listen", DEFAULT_LISTEN)) {
return InitError(Untranslated("Cannot set -bind or -whitebind together with -listen=0"));
}
+ // if listen=0, then disallow listenonion=1
+ if (!args.GetBoolArg("-listen", DEFAULT_LISTEN) && args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
+ return InitError(Untranslated("Cannot set -listen=0 together with -listenonion=1"));
+ }
+
// Make sure enough file descriptors are available
int nBind = std::max(nUserBind, size_t(1));
nUserMaxConnections = args.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
@@ -911,10 +921,11 @@ bool AppInitParameterInteraction(const ArgsManager& args)
// incremental relay fee sets the minimum feerate increase necessary for BIP 125 replacement in the mempool
// and the amount the mempool min fee increases above the feerate of txs evicted due to mempool limiting.
if (args.IsArgSet("-incrementalrelayfee")) {
- CAmount n = 0;
- if (!ParseMoney(args.GetArg("-incrementalrelayfee", ""), n))
+ if (std::optional<CAmount> inc_relay_fee = ParseMoney(args.GetArg("-incrementalrelayfee", ""))) {
+ ::incrementalRelayFee = CFeeRate{inc_relay_fee.value()};
+ } else {
return InitError(AmountErrMsg("incrementalrelayfee", args.GetArg("-incrementalrelayfee", "")));
- incrementalRelayFee = CFeeRate(n);
+ }
}
// block pruning; get the amount of disk space (in MiB) to allot for block & undo files
@@ -946,12 +957,12 @@ bool AppInitParameterInteraction(const ArgsManager& args)
}
if (args.IsArgSet("-minrelaytxfee")) {
- CAmount n = 0;
- if (!ParseMoney(args.GetArg("-minrelaytxfee", ""), n)) {
+ if (std::optional<CAmount> min_relay_fee = ParseMoney(args.GetArg("-minrelaytxfee", ""))) {
+ // High fee check is done afterward in CWallet::Create()
+ ::minRelayTxFee = CFeeRate{min_relay_fee.value()};
+ } else {
return InitError(AmountErrMsg("minrelaytxfee", args.GetArg("-minrelaytxfee", "")));
}
- // High fee check is done afterward in CWallet::Create()
- ::minRelayTxFee = CFeeRate(n);
} else if (incrementalRelayFee > ::minRelayTxFee) {
// Allow only setting incrementalRelayFee to control both
::minRelayTxFee = incrementalRelayFee;
@@ -961,18 +972,19 @@ bool AppInitParameterInteraction(const ArgsManager& args)
// Sanity check argument for min fee for including tx in block
// TODO: Harmonize which arguments need sanity checking and where that happens
if (args.IsArgSet("-blockmintxfee")) {
- CAmount n = 0;
- if (!ParseMoney(args.GetArg("-blockmintxfee", ""), n))
+ if (!ParseMoney(args.GetArg("-blockmintxfee", ""))) {
return InitError(AmountErrMsg("blockmintxfee", args.GetArg("-blockmintxfee", "")));
+ }
}
// Feerate used to define dust. Shouldn't be changed lightly as old
// implementations may inadvertently create non-standard transactions
if (args.IsArgSet("-dustrelayfee")) {
- CAmount n = 0;
- if (!ParseMoney(args.GetArg("-dustrelayfee", ""), n))
+ if (std::optional<CAmount> parsed = ParseMoney(args.GetArg("-dustrelayfee", ""))) {
+ dustRelayFee = CFeeRate{parsed.value()};
+ } else {
return InitError(AmountErrMsg("dustrelayfee", args.GetArg("-dustrelayfee", "")));
- dustRelayFee = CFeeRate(n);
+ }
}
fRequireStandard = !args.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
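The fee-option hunks above follow ParseMoney's new interface: instead of filling a CAmount out-parameter and returning bool, it now returns a value that is empty on failure. A small sketch of that parse-to-std::optional pattern (the parser below is a simplified stand-in, not the real ParseMoney):

```cpp
#include <cstdint>
#include <optional>
#include <stdexcept>
#include <string>

using AmountSketch = int64_t;

// Simplified stand-in: accepts plain non-negative integers only; the real parser handles decimals.
std::optional<AmountSketch> ParseAmountSketch(const std::string& s)
{
    try {
        size_t pos = 0;
        const long long v = std::stoll(s, &pos);
        if (pos != s.size() || v < 0) return std::nullopt;
        return v;
    } catch (const std::exception&) {
        return std::nullopt;
    }
}

bool ApplyFeeOption(const std::string& arg, AmountSketch& fee_out)
{
    if (std::optional<AmountSketch> parsed = ParseAmountSketch(arg)) {
        fee_out = parsed.value(); // use the parsed fee
        return true;
    }
    return false; // caller reports an InitError-style failure
}
```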
@@ -1159,8 +1171,52 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
fDiscover = args.GetBoolArg("-discover", true);
const bool ignores_incoming_txs{args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)};
- assert(!node.addrman);
- node.addrman = std::make_unique<CAddrMan>();
+ {
+ // Initialize addrman
+ assert(!node.addrman);
+
+ // Read asmap file if configured
+ std::vector<bool> asmap;
+ if (args.IsArgSet("-asmap")) {
+ fs::path asmap_path = fs::path(args.GetArg("-asmap", ""));
+ if (asmap_path.empty()) {
+ asmap_path = DEFAULT_ASMAP_FILENAME;
+ }
+ if (!asmap_path.is_absolute()) {
+ asmap_path = gArgs.GetDataDirNet() / asmap_path;
+ }
+ if (!fs::exists(asmap_path)) {
+ InitError(strprintf(_("Could not find asmap file %s"), asmap_path));
+ return false;
+ }
+ asmap = CAddrMan::DecodeAsmap(asmap_path);
+ if (asmap.size() == 0) {
+ InitError(strprintf(_("Could not parse asmap file %s"), asmap_path));
+ return false;
+ }
+ const uint256 asmap_version = SerializeHash(asmap);
+ LogPrintf("Using asmap version %s for IP bucketing\n", asmap_version.ToString());
+ } else {
+ LogPrintf("Using /16 prefix for IP bucketing\n");
+ }
+
+ auto check_addrman = std::clamp<int32_t>(args.GetArg("-checkaddrman", DEFAULT_ADDRMAN_CONSISTENCY_CHECKS), 0, 1000000);
+ node.addrman = std::make_unique<CAddrMan>(asmap, /* deterministic */ false, /* consistency_check_ratio */ check_addrman);
+
+ // Load addresses from peers.dat
+ uiInterface.InitMessage(_("Loading P2P addresses…").translated);
+ int64_t nStart = GetTimeMillis();
+ CAddrDB adb;
+ if (adb.Read(*node.addrman)) {
+ LogPrintf("Loaded %i addresses from peers.dat %dms\n", node.addrman->size(), GetTimeMillis() - nStart);
+ } else {
+ // Addrman can be in an inconsistent state after failure, reset it
+ node.addrman = std::make_unique<CAddrMan>(asmap, /* deterministic */ false, /* consistency_check_ratio */ check_addrman);
+ LogPrintf("Recreating peers.dat\n");
+ adb.Write(*node.addrman);
+ }
+ }
+
assert(!node.banman);
node.banman = std::make_unique<BanMan>(gArgs.GetDataDirNet() / "banlist", &uiInterface, args.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
assert(!node.connman);
@@ -1181,7 +1237,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
assert(!node.peerman);
node.peerman = PeerManager::make(chainparams, *node.connman, *node.addrman, node.banman.get(),
- *node.scheduler, chainman, *node.mempool, ignores_incoming_txs);
+ chainman, *node.mempool, ignores_incoming_txs);
RegisterValidationInterface(node.peerman.get());
// sanitize comments per BIP-0014, format user agent and check total size
@@ -1265,31 +1321,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
return InitError(ResolveErrMsg("externalip", strAddr));
}
- // Read asmap file if configured
- if (args.IsArgSet("-asmap")) {
- fs::path asmap_path = fs::path(args.GetArg("-asmap", ""));
- if (asmap_path.empty()) {
- asmap_path = DEFAULT_ASMAP_FILENAME;
- }
- if (!asmap_path.is_absolute()) {
- asmap_path = gArgs.GetDataDirNet() / asmap_path;
- }
- if (!fs::exists(asmap_path)) {
- InitError(strprintf(_("Could not find asmap file %s"), asmap_path));
- return false;
- }
- std::vector<bool> asmap = CAddrMan::DecodeAsmap(asmap_path);
- if (asmap.size() == 0) {
- InitError(strprintf(_("Could not parse asmap file %s"), asmap_path));
- return false;
- }
- const uint256 asmap_version = SerializeHash(asmap);
- node.connman->SetAsmap(std::move(asmap));
- LogPrintf("Using asmap version %s for IP bucketing\n", asmap_version.ToString());
- } else {
- LogPrintf("Using /16 prefix for IP bucketing\n");
- }
-
#if ENABLE_ZMQ
g_zmq_notification_interface = CZMQNotificationInterface::Create();
@@ -1349,12 +1380,13 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
const int64_t load_block_index_start_time = GetTimeMillis();
try {
LOCK(cs_main);
- chainman.InitializeChainstate(*Assert(node.mempool));
+ chainman.InitializeChainstate(Assert(node.mempool.get()));
chainman.m_total_coinstip_cache = nCoinCacheUsage;
chainman.m_total_coinsdb_cache = nCoinDBCache;
UnloadBlockIndex(node.mempool.get(), chainman);
+ auto& pblocktree{chainman.m_blockman.m_block_tree_db};
// new CBlockTreeDB tries to delete the existing file, which
// fails if it's still open from the previous loop. Close it first:
pblocktree.reset();
@@ -1588,12 +1620,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}
}
- if (DeploymentEnabled(chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
- // Advertise witness capabilities.
- // The option to not set NODE_WITNESS is only used in the tests and should be removed.
- nLocalServices = ServiceFlags(nLocalServices | NODE_WITNESS);
- }
-
// ********************************************************* Step 11: import blocks
if (!CheckDiskSpace(gArgs.GetDataDirNet())) {
@@ -1717,18 +1743,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
return InitError(ResolveErrMsg("bind", bind_arg));
}
- if (connOptions.onion_binds.empty()) {
- connOptions.onion_binds.push_back(DefaultOnionServiceTarget());
- }
-
- if (args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
- const auto bind_addr = connOptions.onion_binds.front();
- if (connOptions.onion_binds.size() > 1) {
- InitWarning(strprintf(_("More than one onion bind address is provided. Using %s for the automatically created Tor onion service."), bind_addr.ToStringIPPort()));
- }
- StartTorControl(bind_addr);
- }
-
for (const std::string& strBind : args.GetArgs("-whitebind")) {
NetWhitebindPermissions whitebind;
bilingual_str error;
@@ -1736,6 +1750,27 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
connOptions.vWhiteBinds.push_back(whitebind);
}
+ // If the user did not specify -bind= or -whitebind= then we bind
+ // on any address - 0.0.0.0 (IPv4) and :: (IPv6).
+ connOptions.bind_on_any = args.GetArgs("-bind").empty() && args.GetArgs("-whitebind").empty();
+
+ CService onion_service_target;
+ if (!connOptions.onion_binds.empty()) {
+ onion_service_target = connOptions.onion_binds.front();
+ } else {
+ onion_service_target = DefaultOnionServiceTarget();
+ connOptions.onion_binds.push_back(onion_service_target);
+ }
+
+ if (args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
+ if (connOptions.onion_binds.size() > 1) {
+ InitWarning(strprintf(_("More than one onion bind address is provided. Using %s "
+ "for the automatically created Tor onion service."),
+ onion_service_target.ToStringIPPort()));
+ }
+ StartTorControl(onion_service_target);
+ }
+
for (const auto& net : args.GetArgs("-whitelist")) {
NetWhitelistPermissions subnet;
bilingual_str error;
@@ -1786,6 +1821,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
banman->DumpBanlist();
}, DUMP_BANS_INTERVAL);
+ if (node.peerman) node.peerman->StartScheduledTasks(*node.scheduler);
+
#if HAVE_SYSTEM
StartupNotify(args);
#endif
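One behavioral thread running through the init.cpp hunks above is that NODE_WITNESS is now part of nLocalServices from the start, rather than being OR-ed in later behind a segwit check. A tiny sketch of the service-flag bitmask idea (the bit positions are illustrative, not the real protocol constants):

```cpp
#include <cstdint>

enum ServiceFlagsSketch : uint64_t {
    SF_NETWORK = 1 << 0,
    SF_WITNESS = 1 << 3,
    SF_NETWORK_LIMITED = 1 << 10,
};

// Advertised from startup; no later "OR in witness support" step is needed.
constexpr uint64_t LOCAL_SERVICES = SF_NETWORK | SF_NETWORK_LIMITED | SF_WITNESS;

static_assert((LOCAL_SERVICES & SF_WITNESS) != 0, "witness capability is always advertised");
```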
diff --git a/src/init.h b/src/init.h
index b856468e5d..5af6930a16 100644
--- a/src/init.h
+++ b/src/init.h
@@ -20,9 +20,6 @@ struct NodeContext;
namespace interfaces {
struct BlockAndHeaderTipInfo;
}
-namespace boost {
-class thread_group;
-} // namespace boost
/** Interrupt threads */
void Interrupt(NodeContext& node);
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 7cac435e96..eceede3c8f 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -35,7 +35,9 @@ namespace interfaces {
class Handler;
class Wallet;
-//! Helper for findBlock to selectively return pieces of block data.
+//! Helper for findBlock to selectively return pieces of block data. If block is
+//! found, data will be returned by setting specified output variables. If block
+//! is not found, output variables will keep their previous values.
class FoundBlock
{
public:
@@ -60,6 +62,7 @@ public:
bool* m_in_active_chain = nullptr;
const FoundBlock* m_next_block = nullptr;
CBlock* m_data = nullptr;
+ mutable bool found = false;
};
//! Interface giving clients (wallet processes, maybe other analysis tools in
@@ -262,11 +265,18 @@ public:
//! Current RPC serialization flags.
virtual int rpcSerializationFlags() = 0;
+ //! Get settings value.
+ virtual util::SettingsValue getSetting(const std::string& arg) = 0;
+
+ //! Get list of settings values.
+ virtual std::vector<util::SettingsValue> getSettingsList(const std::string& arg) = 0;
+
//! Return <datadir>/settings.json setting value.
virtual util::SettingsValue getRwSetting(const std::string& name) = 0;
- //! Write a setting to <datadir>/settings.json.
- virtual bool updateRwSetting(const std::string& name, const util::SettingsValue& value) = 0;
+ //! Write a setting to <datadir>/settings.json. Optionally just update the
+ //! setting in memory and do not write the file.
+ virtual bool updateRwSetting(const std::string& name, const util::SettingsValue& value, bool write=true) = 0;
//! Synchronously send transactionAddedToMempool notifications about all
//! current mempool transactions to the specified handler and return after
diff --git a/src/interfaces/ipc.h b/src/interfaces/ipc.h
index e9e6c78053..963649fc9a 100644
--- a/src/interfaces/ipc.h
+++ b/src/interfaces/ipc.h
@@ -9,6 +9,10 @@
#include <memory>
#include <typeindex>
+namespace ipc {
+struct Context;
+} // namespace ipc
+
namespace interfaces {
class Init;
@@ -58,6 +62,9 @@ public:
addCleanup(typeid(Interface), &iface, std::move(cleanup));
}
+ //! IPC context struct accessor (see struct definition for more description).
+ virtual ipc::Context& context() = 0;
+
protected:
//! Internal implementation of public addCleanup method (above) as a
//! type-erased virtual function, since template functions can't be virtual.
diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h
index fb1febc11b..a85db04b8b 100644
--- a/src/interfaces/wallet.h
+++ b/src/interfaces/wallet.h
@@ -332,6 +332,9 @@ public:
//! loaded at startup or by RPC.
using LoadWalletFn = std::function<void(std::unique_ptr<Wallet> wallet)>;
virtual std::unique_ptr<Handler> handleLoadWallet(LoadWalletFn fn) = 0;
+
+ //! Return pointer to internal context, useful for testing.
+ virtual WalletContext* context() { return nullptr; }
};
//! Information about one wallet address.
@@ -410,7 +413,7 @@ struct WalletTxOut
//! Return implementation of Wallet interface. This function is defined in
//! dummywallet.cpp and throws if the wallet component is not compiled.
-std::unique_ptr<Wallet> MakeWallet(const std::shared_ptr<CWallet>& wallet);
+std::unique_ptr<Wallet> MakeWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet);
//! Return implementation of ChainClient interface for a wallet client. This
//! function will be undefined in builds where ENABLE_WALLET is false.
diff --git a/src/ipc/capnp/context.h b/src/ipc/capnp/context.h
new file mode 100644
index 0000000000..06e1614494
--- /dev/null
+++ b/src/ipc/capnp/context.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_IPC_CAPNP_CONTEXT_H
+#define BITCOIN_IPC_CAPNP_CONTEXT_H
+
+#include <ipc/context.h>
+
+namespace ipc {
+namespace capnp {
+//! Cap'n Proto context struct. Generally the parent ipc::Context struct should
+//! be used instead of this struct to give all IPC protocols access to
+//! application state, so there aren't unnecessary differences between IPC
+//! protocols. But this specialized struct can be used to pass capnp-specific
+//! function and object types to capnp hooks.
+struct Context : ipc::Context
+{
+};
+} // namespace capnp
+} // namespace ipc
+
+#endif // BITCOIN_IPC_CAPNP_CONTEXT_H
diff --git a/src/ipc/capnp/protocol.cpp b/src/ipc/capnp/protocol.cpp
index 74c66c899a..37b57a9525 100644
--- a/src/ipc/capnp/protocol.cpp
+++ b/src/ipc/capnp/protocol.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <interfaces/init.h>
+#include <ipc/capnp/context.h>
#include <ipc/capnp/init.capnp.h>
#include <ipc/capnp/init.capnp.proxy.h>
#include <ipc/capnp/protocol.h>
@@ -54,7 +55,7 @@ public:
{
assert(!m_loop);
mp::g_thread_context.thread_name = mp::ThreadName(exe_name);
- m_loop.emplace(exe_name, &IpcLogFn, nullptr);
+ m_loop.emplace(exe_name, &IpcLogFn, &m_context);
mp::ServeStream<messages::Init>(*m_loop, fd, init);
m_loop->loop();
m_loop.reset();
@@ -63,13 +64,14 @@ public:
{
mp::ProxyTypeRegister::types().at(type)(iface).cleanup.emplace_back(std::move(cleanup));
}
+ Context& context() override { return m_context; }
void startLoop(const char* exe_name)
{
if (m_loop) return;
std::promise<void> promise;
m_loop_thread = std::thread([&] {
util::ThreadRename("capnp-loop");
- m_loop.emplace(exe_name, &IpcLogFn, nullptr);
+ m_loop.emplace(exe_name, &IpcLogFn, &m_context);
{
std::unique_lock<std::mutex> lock(m_loop->m_mutex);
m_loop->addClient(lock);
@@ -80,6 +82,7 @@ public:
});
promise.get_future().wait();
}
+ Context m_context;
std::thread m_loop_thread;
std::optional<mp::EventLoop> m_loop;
};
diff --git a/src/ipc/context.h b/src/ipc/context.h
new file mode 100644
index 0000000000..924d7d7450
--- /dev/null
+++ b/src/ipc/context.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_IPC_CONTEXT_H
+#define BITCOIN_IPC_CONTEXT_H
+
+namespace ipc {
+//! Context struct used to give IPC protocol implementations or implementation
+//! hooks access to application state, in case they need to run extra code that
+//! isn't needed within a single process, like code copying global state from an
+//! existing process to a new process when it's initialized, or code dealing
+//! with shared objects that are created or destroyed remotely.
+struct Context
+{
+};
+} // namespace ipc
+
+#endif // BITCOIN_IPC_CONTEXT_H
diff --git a/src/ipc/interfaces.cpp b/src/ipc/interfaces.cpp
index ad4b78ed81..580590fde9 100644
--- a/src/ipc/interfaces.cpp
+++ b/src/ipc/interfaces.cpp
@@ -60,6 +60,7 @@ public:
{
m_protocol->addCleanup(type, iface, std::move(cleanup));
}
+ Context& context() override { return m_protocol->context(); }
const char* m_exe_name;
const char* m_process_argv0;
interfaces::Init& m_init;
diff --git a/src/ipc/protocol.h b/src/ipc/protocol.h
index af955b0007..4cd892e411 100644
--- a/src/ipc/protocol.h
+++ b/src/ipc/protocol.h
@@ -12,6 +12,8 @@
#include <typeindex>
namespace ipc {
+struct Context;
+
//! IPC protocol interface for calling IPC methods over sockets.
//!
//! There may be different implementations of this interface for different IPC
@@ -33,6 +35,9 @@ public:
//! Add cleanup callback to interface that will run when the interface is
//! deleted.
virtual void addCleanup(std::type_index type, void* iface, std::function<void()> cleanup) = 0;
+
+ //! Context accessor.
+ virtual Context& context() = 0;
};
} // namespace ipc
diff --git a/src/key.cpp b/src/key.cpp
index dcad386e77..40df248e02 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -274,7 +274,7 @@ bool CKey::SignSchnorr(const uint256& hash, Span<unsigned char> sig, const uint2
uint256 tweak = XOnlyPubKey(pubkey_bytes).ComputeTapTweakHash(merkle_root->IsNull() ? nullptr : merkle_root);
if (!secp256k1_keypair_xonly_tweak_add(GetVerifyContext(), &keypair, tweak.data())) return false;
}
- bool ret = secp256k1_schnorrsig_sign(secp256k1_context_sign, sig.data(), hash.data(), &keypair, secp256k1_nonce_function_bip340, aux ? (void*)aux->data() : nullptr);
+ bool ret = secp256k1_schnorrsig_sign(secp256k1_context_sign, sig.data(), hash.data(), &keypair, aux ? (unsigned char*)aux->data() : nullptr);
memory_cleanse(&keypair, sizeof(keypair));
return ret;
}
@@ -357,6 +357,7 @@ void CExtKey::Decode(const unsigned char code[BIP32_EXTKEY_SIZE]) {
nChild = (code[5] << 24) | (code[6] << 16) | (code[7] << 8) | code[8];
memcpy(chaincode.begin(), code+9, 32);
key.Set(code+42, code+BIP32_EXTKEY_SIZE, true);
+ if ((nDepth == 0 && (nChild != 0 || ReadLE32(vchFingerprint) != 0)) || code[41] != 0) key = CKey();
}
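// For reference, the BIP32_EXTKEY_SIZE (74-byte) layout that Decode() walks above:
// depth (code[0]), parent fingerprint (code[1..4]), child number (code[5..8],
// big endian), chain code (code[9..40]), a 0x00 marker byte (code[41]) and the
// 32-byte private key (code[42..73]). The new check clears `key` when a depth-0
// master key carries a nonzero fingerprint or child number, or when the marker
// byte is not 0x00, so callers can reject the input; a minimal sketch:
//
//     CExtKey extkey;
//     extkey.Decode(code);
//     if (!extkey.key.IsValid()) { /* reject malformed extended key */ }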
bool ECC_InitSanityCheck() {
diff --git a/src/key.h b/src/key.h
index d47e54800c..92cbc1e899 100644
--- a/src/key.h
+++ b/src/key.h
@@ -133,10 +133,15 @@ public:
* optionally tweaked by *merkle_root. Additional nonce entropy can be provided through
* aux.
*
- * When merkle_root is not nullptr, this results in a signature with a modified key as
- * specified in BIP341:
- * - If merkle_root->IsNull(): key + H_TapTweak(pubkey)*G
- * - Otherwise: key + H_TapTweak(pubkey || *merkle_root)
+ * merkle_root is used to optionally perform tweaking of the private key, as specified
+ * in BIP341:
+ * - If merkle_root == nullptr: no tweaking is done, sign with key directly (this is
+ * used for signatures in BIP342 script).
+ * - If merkle_root->IsNull(): sign with key + H_TapTweak(pubkey) (this is used for
+ * key path spending when no scripts are present).
+ * - Otherwise: sign with key + H_TapTweak(pubkey || *merkle_root)
+ * (this is used for key path spending, with specific
+ * Merkle root of the script tree).
*/
bool SignSchnorr(const uint256& hash, Span<unsigned char> sig, const uint256* merkle_root = nullptr, const uint256* aux = nullptr) const;
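 * A minimal usage sketch of the three cases above (assumes a valid CKey `key`,
 * a uint256 `sighash`, a uint256 `merkle_root` for the script tree, and fresh
 * auxiliary randomness; buffer and variable names are illustrative):
 *
 *     unsigned char sig[64];
 *     uint256 aux_rand = GetRandHash();
 *     key.SignSchnorr(sighash, sig, nullptr, &aux_rand);      // BIP342 script signature, no tweak
 *     uint256 empty_root;                                     // IsNull() == true
 *     key.SignSchnorr(sighash, sig, &empty_root, &aux_rand);  // key path, no scripts
 *     key.SignSchnorr(sighash, sig, &merkle_root, &aux_rand); // key path with script tree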
diff --git a/src/logging.cpp b/src/logging.cpp
index e5187fd596..eb2c750296 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -8,6 +8,8 @@
#include <util/string.h>
#include <util/time.h>
+#include <algorithm>
+#include <array>
#include <mutex>
const char * const DEFAULT_DEBUGLOGFILE = "debug.log";
@@ -124,8 +126,7 @@ bool BCLog::Logger::DefaultShrinkDebugFile() const
return m_categories == BCLog::NONE;
}
-struct CLogCategoryDesc
-{
+struct CLogCategoryDesc {
BCLog::LogFlags flag;
std::string category;
};
@@ -158,6 +159,7 @@ const CLogCategoryDesc LogCategories[] =
{BCLog::VALIDATION, "validation"},
{BCLog::I2P, "i2p"},
{BCLog::IPC, "ipc"},
+ {BCLog::LOCK, "lock"},
{BCLog::ALL, "1"},
{BCLog::ALL, "all"},
};
@@ -179,15 +181,18 @@ bool GetLogCategory(BCLog::LogFlags& flag, const std::string& str)
std::vector<LogCategory> BCLog::Logger::LogCategoriesList() const
{
+ // Sort log categories by alphabetical order.
+ std::array<CLogCategoryDesc, std::size(LogCategories)> categories;
+ std::copy(std::begin(LogCategories), std::end(LogCategories), categories.begin());
+ std::sort(categories.begin(), categories.end(), [](auto a, auto b) { return a.category < b.category; });
+
std::vector<LogCategory> ret;
- for (const CLogCategoryDesc& category_desc : LogCategories) {
- // Omit the special cases.
- if (category_desc.flag != BCLog::NONE && category_desc.flag != BCLog::ALL) {
- LogCategory catActive;
- catActive.category = category_desc.category;
- catActive.active = WillLogCategory(category_desc.flag);
- ret.push_back(catActive);
- }
+ for (const CLogCategoryDesc& category_desc : categories) {
+ if (category_desc.flag == BCLog::NONE || category_desc.flag == BCLog::ALL) continue;
+ LogCategory catActive;
+ catActive.category = category_desc.category;
+ catActive.active = WillLogCategory(category_desc.flag);
+ ret.push_back(catActive);
}
return ret;
}
@@ -237,7 +242,7 @@ namespace BCLog {
}
return ret;
}
-}
+} // namespace BCLog
void BCLog::Logger::LogPrintStr(const std::string& str, const std::string& logging_function, const std::string& source_file, const int source_line)
{
diff --git a/src/logging.h b/src/logging.h
index d04bc99268..53a89d28bd 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -59,6 +59,7 @@ namespace BCLog {
VALIDATION = (1 << 21),
I2P = (1 << 22),
IPC = (1 << 23),
+ LOCK = (1 << 24),
ALL = ~(uint32_t)0,
};
@@ -138,9 +139,9 @@ namespace BCLog {
bool DisableCategory(const std::string& str);
bool WillLogCategory(LogFlags category) const;
- /** Returns a vector of the log categories */
+ /** Returns a vector of the log categories in alphabetical order. */
std::vector<LogCategory> LogCategoriesList() const;
- /** Returns a string with the log categories */
+ /** Returns a string with the log categories in alphabetical order. */
std::string LogCategoriesString() const
{
return Join(LogCategoriesList(), ", ", [&](const LogCategory& i) { return i.category; });
diff --git a/src/logging/timer.h b/src/logging/timer.h
index 159920e397..647e3fa30e 100644
--- a/src/logging/timer.h
+++ b/src/logging/timer.h
@@ -58,12 +58,14 @@ public:
return strprintf("%s: %s", m_prefix, msg);
}
- std::string units = "";
+ if (std::is_same<TimeType, std::chrono::microseconds>::value) {
+ return strprintf("%s: %s (%iμs)", m_prefix, msg, end_time.count());
+ }
+
+ std::string units;
float divisor = 1;
- if (std::is_same<TimeType, std::chrono::microseconds>::value) {
- units = "μs";
- } else if (std::is_same<TimeType, std::chrono::milliseconds>::value) {
+ if (std::is_same<TimeType, std::chrono::milliseconds>::value) {
units = "ms";
divisor = 1000.;
} else if (std::is_same<TimeType, std::chrono::seconds>::value) {
@@ -93,6 +95,8 @@ private:
} // namespace BCLog
+#define LOG_TIME_MICROS_WITH_CATEGORY(end_msg, log_category) \
+ BCLog::Timer<std::chrono::microseconds> PASTE2(logging_timer, __COUNTER__)(__func__, end_msg, log_category)
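+// A minimal usage sketch of the new macro (function and message are illustrative);
+// the timer reports when it leaves scope, e.g. "MyFunc: connect block (153μs)"
+// under the chosen category:
+//
+//     void MyFunc()
+//     {
+//         LOG_TIME_MICROS_WITH_CATEGORY("connect block", BCLog::BENCH);
+//         // ... timed work ...
+//     }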
#define LOG_TIME_MILLIS_WITH_CATEGORY(end_msg, log_category) \
BCLog::Timer<std::chrono::milliseconds> PASTE2(logging_timer, __COUNTER__)(__func__, end_msg, log_category)
#define LOG_TIME_SECONDS(end_msg) \
diff --git a/src/miner.cpp b/src/miner.cpp
index d9186a5d6d..168ade5507 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -73,11 +73,11 @@ static BlockAssembler::Options DefaultOptions()
// If -blockmaxweight is not given, limit to DEFAULT_BLOCK_MAX_WEIGHT
BlockAssembler::Options options;
options.nBlockMaxWeight = gArgs.GetArg("-blockmaxweight", DEFAULT_BLOCK_MAX_WEIGHT);
- CAmount n = 0;
- if (gArgs.IsArgSet("-blockmintxfee") && ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n)) {
- options.blockMinFeeRate = CFeeRate(n);
+ if (gArgs.IsArgSet("-blockmintxfee")) {
+ std::optional<CAmount> parsed = ParseMoney(gArgs.GetArg("-blockmintxfee", ""));
+ options.blockMinFeeRate = CFeeRate{parsed.value_or(DEFAULT_BLOCK_MIN_TX_FEE)};
} else {
- options.blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE);
+ options.blockMinFeeRate = CFeeRate{DEFAULT_BLOCK_MIN_TX_FEE};
}
return options;
}
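// A minimal sketch of the std::optional-based ParseMoney pattern used above
// (string value is illustrative); an unparsable amount now falls back to the
// default instead of silently leaving the fee rate unset:
//
//     const std::optional<CAmount> parsed{ParseMoney("0.00001")};
//     const CFeeRate min_fee_rate{parsed.value_or(DEFAULT_BLOCK_MIN_TX_FEE)};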
diff --git a/src/net.cpp b/src/net.cpp
index 60059249ed..35376b89ac 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -25,6 +25,7 @@
#include <util/sock.h>
#include <util/strencodings.h>
#include <util/thread.h>
+#include <util/trace.h>
#include <util/translation.h>
#ifdef WIN32
@@ -330,7 +331,7 @@ CNode* CConnman::FindNode(const std::string& addrName)
{
LOCK(cs_vNodes);
for (CNode* pnode : vNodes) {
- if (pnode->GetAddrName() == addrName) {
+ if (pnode->m_addr_name == addrName) {
return pnode;
}
}
@@ -402,7 +403,8 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime)/3600.0);
// Resolve
- const uint16_t default_port{Params().GetDefaultPort()};
+ const uint16_t default_port{pszDest != nullptr ? Params().GetDefaultPort(pszDest) :
+ Params().GetDefaultPort()};
if (pszDest) {
std::vector<CService> resolved;
if (Lookup(pszDest, resolved, default_port, fNameLookup && !HaveNameProxy(), 256) && !resolved.empty()) {
@@ -412,14 +414,10 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
return nullptr;
}
// It is possible that we already have a connection to the IP/port pszDest resolved to.
- // In that case, drop the connection that was just created, and return the existing CNode instead.
- // Also store the name we used to connect in that CNode, so that future FindNode() calls to that
- // name catch this early.
+ // In that case, drop the connection that was just created.
LOCK(cs_vNodes);
CNode* pnode = FindNode(static_cast<CService>(addrConnect));
- if (pnode)
- {
- pnode->MaybeSetAddrName(std::string(pszDest));
+ if (pnode) {
LogPrintf("Failed to open new connection, already connected\n");
return nullptr;
}
@@ -532,19 +530,8 @@ std::string ConnectionTypeAsString(ConnectionType conn_type)
assert(false);
}
-std::string CNode::GetAddrName() const {
- LOCK(cs_addrName);
- return addrName;
-}
-
-void CNode::MaybeSetAddrName(const std::string& addrNameIn) {
- LOCK(cs_addrName);
- if (addrName.empty()) {
- addrName = addrNameIn;
- }
-}
-
-CService CNode::GetAddrLocal() const {
+CService CNode::GetAddrLocal() const
+{
LOCK(cs_addrLocal);
return addrLocal;
}
@@ -565,14 +552,14 @@ Network CNode::ConnectedThroughNetwork() const
#undef X
#define X(name) stats.name = name
-void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
+void CNode::CopyStats(CNodeStats& stats, const std::vector<bool>& asmap)
{
stats.nodeid = this->GetId();
X(nServices);
X(addr);
X(addrBind);
stats.m_network = ConnectedThroughNetwork();
- stats.m_mapped_as = addr.GetMappedAS(m_asmap);
+ stats.m_mapped_as = addr.GetMappedAS(asmap);
if (m_tx_relay != nullptr) {
LOCK(m_tx_relay->cs_filter);
stats.fRelayTxes = m_tx_relay->fRelayTxes;
@@ -585,7 +572,7 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
X(nLastBlockTime);
X(nTimeConnected);
X(nTimeOffset);
- stats.addrName = GetAddrName();
+ X(m_addr_name);
X(nVersion);
{
LOCK(cs_SubVer);
@@ -936,14 +923,17 @@ void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& evicti
size_t num_protected{0};
while (num_protected < max_protect_by_network) {
+ // Count the number of disadvantaged networks from which we have peers to protect.
+ auto num_networks = std::count_if(networks.begin(), networks.end(), [](const Net& n) { return n.count; });
+ if (num_networks == 0) {
+ break;
+ }
const size_t disadvantaged_to_protect{max_protect_by_network - num_protected};
- const size_t protect_per_network{
- std::max(disadvantaged_to_protect / networks.size(), static_cast<size_t>(1))};
-
+ const size_t protect_per_network{std::max(disadvantaged_to_protect / num_networks, static_cast<size_t>(1))};
// Early exit flag if there are no remaining candidates by disadvantaged network.
bool protected_at_least_one{false};
- for (const Net& n : networks) {
+ for (Net& n : networks) {
if (n.count == 0) continue;
const size_t before = eviction_candidates.size();
EraseLastKElements(eviction_candidates, CompareNodeNetworkTime(n.is_local, n.id),
@@ -953,10 +943,12 @@ void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& evicti
const size_t after = eviction_candidates.size();
if (before > after) {
protected_at_least_one = true;
- num_protected += before - after;
+ const size_t delta{before - after};
+ num_protected += delta;
if (num_protected >= max_protect_by_network) {
break;
}
+ n.count -= delta;
}
}
if (!protected_at_least_one) {
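// A worked example of the per-network quota above (numbers are illustrative):
// with max_protect_by_network = 4 and candidates {onion: 3, localhost: 1},
//   pass 1: num_networks = 2, protect_per_network = max(4/2, 1) = 2
//           -> protects 2 onion peers and the single localhost peer
//              (its count drops to 0), num_protected = 3
//   pass 2: num_networks = 1, protect_per_network = max(1/1, 1) = 1
//           -> protects 1 more onion peer, num_protected reaches 4, loop ends.
// Decrementing n.count is what lets later passes skip exhausted networks.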
@@ -1206,16 +1198,29 @@ void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
bool CConnman::AddConnection(const std::string& address, ConnectionType conn_type)
{
- if (conn_type != ConnectionType::OUTBOUND_FULL_RELAY && conn_type != ConnectionType::BLOCK_RELAY) return false;
-
- const int max_connections = conn_type == ConnectionType::OUTBOUND_FULL_RELAY ? m_max_outbound_full_relay : m_max_outbound_block_relay;
+ std::optional<int> max_connections;
+ switch (conn_type) {
+ case ConnectionType::INBOUND:
+ case ConnectionType::MANUAL:
+ case ConnectionType::FEELER:
+ return false;
+ case ConnectionType::OUTBOUND_FULL_RELAY:
+ max_connections = m_max_outbound_full_relay;
+ break;
+ case ConnectionType::BLOCK_RELAY:
+ max_connections = m_max_outbound_block_relay;
+ break;
+ // no limit for ADDR_FETCH because -seednode has no limit either
+ case ConnectionType::ADDR_FETCH:
+ break;
+ } // no default case, so the compiler can warn about missing cases
// Count existing connections
int existing_connections = WITH_LOCK(cs_vNodes,
return std::count_if(vNodes.begin(), vNodes.end(), [conn_type](CNode* node) { return node->m_conn_type == conn_type; }););
// Max connections of specified type already exist
- if (existing_connections >= max_connections) return false;
+ if (max_connections != std::nullopt && existing_connections >= max_connections) return false;
// Max total outbound connections already exist
CSemaphoreGrant grant(*semOutbound, true);
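// A minimal usage sketch (address is illustrative; the regtest-only
// "addconnection" RPC exercises this path): AddConnection returns false when
// the type is unsupported or its outbound budget is already used up.
//
//     if (!connman.AddConnection("203.0.113.7:8333", ConnectionType::OUTBOUND_FULL_RELAY)) {
//         // no free full-relay slot
//     }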
@@ -1284,8 +1289,9 @@ void CConnman::NotifyNumConnectionsChanged()
}
if(vNodesSize != nPrevNodeCount) {
nPrevNodeCount = vNodesSize;
- if(clientInterface)
- clientInterface->NotifyNumConnectionsChanged(vNodesSize);
+ if (m_client_interface) {
+ m_client_interface->NotifyNumConnectionsChanged(vNodesSize);
+ }
}
}
@@ -1915,7 +1921,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
case ConnectionType::BLOCK_RELAY:
case ConnectionType::ADDR_FETCH:
case ConnectionType::FEELER:
- setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
+ setConnected.insert(pnode->addr.GetGroup(addrman.GetAsmap()));
} // no default case, so the compiler can warn about missing cases
}
}
@@ -1989,7 +1995,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
m_anchors.pop_back();
if (!addr.IsValid() || IsLocal(addr) || !IsReachable(addr) ||
!HasAllDesirableServiceFlags(addr.nServices) ||
- setConnected.count(addr.GetGroup(addrman.m_asmap))) continue;
+ setConnected.count(addr.GetGroup(addrman.GetAsmap()))) continue;
addrConnect = addr;
LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToString());
break;
@@ -2029,7 +2035,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
}
// Require outbound connections, other than feelers, to be to distinct network groups
- if (!fFeeler && setConnected.count(addr.GetGroup(addrman.m_asmap))) {
+ if (!fFeeler && setConnected.count(addr.GetGroup(addrman.GetAsmap()))) {
break;
}
@@ -2059,8 +2065,9 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// from advertising themselves as a service on another host and
// port, causing a DoS attack as nodes around the network attempt
// to connect to it fruitlessly.
- if (addr.GetPort() != Params().GetDefaultPort() && nTries < 50)
+ if (addr.GetPort() != Params().GetDefaultPort(addr.GetNetwork()) && nTries < 50) {
continue;
+ }
addrConnect = addr;
break;
@@ -2115,7 +2122,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const
if (pnode->addr.IsValid()) {
mapConnected[pnode->addr] = pnode->IsInboundConn();
}
- std::string addrName = pnode->GetAddrName();
+ std::string addrName{pnode->m_addr_name};
if (!addrName.empty()) {
mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast<const CService&>(pnode->addr));
}
@@ -2123,7 +2130,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() const
}
for (const std::string& strAddNode : lAddresses) {
- CService service(LookupNumeric(strAddNode, Params().GetDefaultPort()));
+ CService service(LookupNumeric(strAddNode, Params().GetDefaultPort(strAddNode)));
AddedNodeInfo addedNode{strAddNode, CService(), false, false};
if (service.IsValid()) {
// strAddNode is an IP:port
@@ -2427,7 +2434,9 @@ void CConnman::SetNetworkActive(bool active)
fNetworkActive = active;
- uiInterface.NotifyNetworkActiveChanged(fNetworkActive);
+ if (m_client_interface) {
+ m_client_interface->NotifyNetworkActiveChanged(fNetworkActive);
+ }
}
CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, CAddrMan& addrman_in, bool network_active)
@@ -2452,8 +2461,8 @@ bool CConnman::Bind(const CService &addr, unsigned int flags, NetPermissionFlags
}
bilingual_str strError;
if (!BindListenPort(addr, strError, permissions)) {
- if ((flags & BF_REPORT_ERROR) && clientInterface) {
- clientInterface->ThreadSafeMessageBox(strError, "", CClientUIInterface::MSG_ERROR);
+ if ((flags & BF_REPORT_ERROR) && m_client_interface) {
+ m_client_interface->ThreadSafeMessageBox(strError, "", CClientUIInterface::MSG_ERROR);
}
return false;
}
@@ -2465,30 +2474,25 @@ bool CConnman::Bind(const CService &addr, unsigned int flags, NetPermissionFlags
return true;
}
-bool CConnman::InitBinds(
- const std::vector<CService>& binds,
- const std::vector<NetWhitebindPermissions>& whiteBinds,
- const std::vector<CService>& onion_binds)
+bool CConnman::InitBinds(const Options& options)
{
bool fBound = false;
- for (const auto& addrBind : binds) {
+ for (const auto& addrBind : options.vBinds) {
fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR), NetPermissionFlags::None);
}
- for (const auto& addrBind : whiteBinds) {
+ for (const auto& addrBind : options.vWhiteBinds) {
fBound |= Bind(addrBind.m_service, (BF_EXPLICIT | BF_REPORT_ERROR), addrBind.m_flags);
}
- if (binds.empty() && whiteBinds.empty()) {
+ for (const auto& addr_bind : options.onion_binds) {
+ fBound |= Bind(addr_bind, BF_EXPLICIT | BF_DONT_ADVERTISE, NetPermissionFlags::None);
+ }
+ if (options.bind_on_any) {
struct in_addr inaddr_any;
inaddr_any.s_addr = htonl(INADDR_ANY);
struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT;
fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE, NetPermissionFlags::None);
fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE, NetPermissionFlags::None);
}
-
- for (const auto& addr_bind : onion_binds) {
- fBound |= Bind(addr_bind, BF_EXPLICIT | BF_DONT_ADVERTISE, NetPermissionFlags::None);
- }
-
return fBound;
}
@@ -2496,9 +2500,9 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
{
Init(connOptions);
- if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds, connOptions.onion_binds)) {
- if (clientInterface) {
- clientInterface->ThreadSafeMessageBox(
+ if (fListen && !InitBinds(connOptions)) {
+ if (m_client_interface) {
+ m_client_interface->ThreadSafeMessageBox(
_("Failed to listen on any port. Use -listen=0 if you want this."),
"", CClientUIInterface::MSG_ERROR);
}
@@ -2515,22 +2519,6 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
AddAddrFetch(strDest);
}
- if (clientInterface) {
- clientInterface->InitMessage(_("Loading P2P addresses…").translated);
- }
- // Load addresses from peers.dat
- int64_t nStart = GetTimeMillis();
- {
- CAddrDB adb;
- if (adb.Read(addrman))
- LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart);
- else {
- addrman.Clear(); // Addrman can be in an inconsistent state after failure, reset it
- LogPrintf("Recreating peers.dat\n");
- DumpAddresses();
- }
- }
-
if (m_use_addrman_outgoing) {
// Load addresses from anchors.dat
m_anchors = ReadAnchors(gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME);
@@ -2540,7 +2528,9 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
LogPrintf("%i block-relay-only anchors will be tried for connections.\n", m_anchors.size());
}
- uiInterface.InitMessage(_("Starting network threads…").translated);
+ if (m_client_interface) {
+ m_client_interface->InitMessage(_("Starting network threads…").translated);
+ }
fAddressesInitialized = true;
@@ -2578,8 +2568,8 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
threadOpenAddedConnections = std::thread(&util::TraceThread, "addcon", [this] { ThreadOpenAddedConnections(); });
if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) {
- if (clientInterface) {
- clientInterface->ThreadSafeMessageBox(
+ if (m_client_interface) {
+ m_client_interface->ThreadSafeMessageBox(
_("Cannot provide specific connections and have addrman find outgoing connections at the same."),
"", CClientUIInterface::MSG_ERROR);
}
@@ -2814,7 +2804,7 @@ void CConnman::GetNodeStats(std::vector<CNodeStats>& vstats) const
vstats.reserve(vNodes.size());
for (CNode* pnode : vNodes) {
vstats.emplace_back();
- pnode->copyStats(vstats.back(), addrman.m_asmap);
+ pnode->CopyStats(vstats.back(), addrman.GetAsmap());
}
}
@@ -2961,6 +2951,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const
: nTimeConnected(GetTimeSeconds()),
addr(addrIn),
addrBind(addrBindIn),
+ m_addr_name{addrNameIn.empty() ? addr.ToStringIPPort() : addrNameIn},
m_inbound_onion(inbound_onion),
nKeyedNetGroup(nKeyedNetGroupIn),
id(idIn),
@@ -2970,7 +2961,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const
{
if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND);
hSocket = hSocketIn;
- addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn;
if (conn_type_in != ConnectionType::BLOCK_RELAY) {
m_tx_relay = std::make_unique<TxRelay>();
}
@@ -2980,7 +2970,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const
mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0;
if (fLogIPs) {
- LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id);
+ LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", m_addr_name, id);
} else {
LogPrint(BCLog::NET, "Added connection peer=%d\n", id);
}
@@ -3002,11 +2992,20 @@ bool CConnman::NodeFullyConnected(const CNode* pnode)
void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
size_t nMessageSize = msg.data.size();
- LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.m_type), nMessageSize, pnode->GetId());
+ LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", msg.m_type, nMessageSize, pnode->GetId());
if (gArgs.GetBoolArg("-capturemessages", false)) {
CaptureMessage(pnode->addr, msg.m_type, msg.data, /* incoming */ false);
}
+ TRACE6(net, outbound_message,
+ pnode->GetId(),
+ pnode->m_addr_name.c_str(),
+ pnode->ConnectionTypeAsString().c_str(),
+ msg.m_type.c_str(),
+ msg.data.size(),
+ msg.data.data()
+ );
+
// make sure we use the appropriate network transport format
std::vector<unsigned char> serializedHeader;
pnode->m_serializer->prepareForTransport(msg, serializedHeader);
@@ -3068,7 +3067,7 @@ CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const
uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& ad) const
{
- std::vector<unsigned char> vchNetGroup(ad.GetGroup(addrman.m_asmap));
+ std::vector<unsigned char> vchNetGroup(ad.GetGroup(addrman.GetAsmap()));
return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup.data(), vchNetGroup.size()).Finalize();
}
diff --git a/src/net.h b/src/net.h
index 01658e8973..a884a4521d 100644
--- a/src/net.h
+++ b/src/net.h
@@ -79,9 +79,9 @@ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60;
/** Number of file descriptors required for message capture **/
static const int NUM_FDS_MESSAGE_CAPTURE = 1;
-static const bool DEFAULT_FORCEDNSSEED = false;
-static const bool DEFAULT_DNSSEED = true;
-static const bool DEFAULT_FIXEDSEEDS = true;
+static constexpr bool DEFAULT_FORCEDNSSEED{false};
+static constexpr bool DEFAULT_DNSSEED{true};
+static constexpr bool DEFAULT_FIXEDSEEDS{true};
static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;
static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000;
@@ -248,7 +248,7 @@ public:
int64_t nLastBlockTime;
int64_t nTimeConnected;
int64_t nTimeOffset;
- std::string addrName;
+ std::string m_addr_name;
int nVersion;
std::string cleanSubVer;
bool fInbound;
@@ -430,6 +430,7 @@ public:
const CAddress addr;
// Bind address of our side of the connection
const CAddress addrBind;
+ const std::string m_addr_name;
//! Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
const bool m_inbound_onion;
std::atomic<int> nVersion{0};
@@ -651,17 +652,13 @@ public:
void CloseSocketDisconnect();
- void copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap);
+ void CopyStats(CNodeStats& stats, const std::vector<bool>& asmap);
ServiceFlags GetLocalServices() const
{
return nLocalServices;
}
- std::string GetAddrName() const;
- //! Sets the addrName only if it was not previously set
- void MaybeSetAddrName(const std::string& addrNameIn);
-
std::string ConnectionTypeAsString() const { return ::ConnectionTypeAsString(m_conn_type); }
/** A ping-pong round trip has completed successfully. Update latest and minimum ping times. */
@@ -693,10 +690,7 @@ private:
//! service advertisements.
const ServiceFlags nLocalServices;
- std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
-
- mutable RecursiveMutex cs_addrName;
- std::string addrName GUARDED_BY(cs_addrName);
+ std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
// Our address, as reported by the peer
CService addrLocal GUARDED_BY(cs_addrLocal);
@@ -768,6 +762,9 @@ public:
std::vector<NetWhitebindPermissions> vWhiteBinds;
std::vector<CService> vBinds;
std::vector<CService> onion_binds;
+ /// True if the user did not specify -bind= or -whitebind= and thus
+ /// we should bind on `0.0.0.0` (IPv4) and `::` (IPv6).
+ bool bind_on_any;
bool m_use_addrman_outgoing = true;
std::vector<std::string> m_specified_outgoing;
std::vector<std::string> m_added_nodes;
@@ -784,7 +781,7 @@ public:
nMaxAddnode = connOptions.nMaxAddnode;
nMaxFeeler = connOptions.nMaxFeeler;
m_max_outbound = m_max_outbound_full_relay + m_max_outbound_block_relay + nMaxFeeler;
- clientInterface = connOptions.uiInterface;
+ m_client_interface = connOptions.uiInterface;
m_banman = connOptions.m_banman;
m_msgproc = connOptions.m_msgproc;
nSendBufferMaxSize = connOptions.nSendBufferMaxSize;
@@ -890,6 +887,7 @@ public:
*
* @param[in] address Address of node to try connecting to
* @param[in] conn_type ConnectionType::OUTBOUND_FULL_RELAY or ConnectionType::BLOCK_RELAY
+ * or ConnectionType::ADDR_FETCH
* @return bool Returns false if there are no available
* slots for this connection:
* - conn_type not a supported ConnectionType
@@ -945,8 +943,6 @@ public:
*/
std::chrono::microseconds PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval);
- void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = std::move(asmap); }
-
/** Return true if we should disconnect the peer for failing an inactivity check. */
bool ShouldRunInactivityChecks(const CNode& node, std::optional<int64_t> now=std::nullopt) const;
@@ -962,10 +958,7 @@ private:
bool BindListenPort(const CService& bindAddr, bilingual_str& strError, NetPermissionFlags permissions);
bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions);
- bool InitBinds(
- const std::vector<CService>& binds,
- const std::vector<NetWhitebindPermissions>& whiteBinds,
- const std::vector<CService>& onion_binds);
+ bool InitBinds(const Options& options);
void ThreadOpenAddedConnections();
void AddAddrFetch(const std::string& strDest);
@@ -1125,7 +1118,7 @@ private:
int nMaxFeeler;
int m_max_outbound;
bool m_use_addrman_outgoing;
- CClientUIInterface* clientInterface;
+ CClientUIInterface* m_client_interface;
NetEventsInterface* m_msgproc;
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
BanMan* m_banman;
diff --git a/src/net_permissions.h b/src/net_permissions.h
index c00689465e..bc979e3792 100644
--- a/src/net_permissions.h
+++ b/src/net_permissions.h
@@ -31,7 +31,8 @@ enum class NetPermissionFlags : uint32_t {
NoBan = (1U << 4) | Download,
// Can query the mempool
Mempool = (1U << 5),
- // Can request addrs without hitting a privacy-preserving cache
+ // Can request addrs without hitting a privacy-preserving cache, and send us
+ // unlimited amounts of addrs.
Addr = (1U << 7),
// True if the user did not specifically set fine grained permissions
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 315d2ac5cd..3ad34e83ba 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -34,6 +34,7 @@
#include <util/check.h> // For NDEBUG compile time check
#include <util/strencodings.h>
#include <util/system.h>
+#include <util/trace.h>
#include <validation.h>
#include <algorithm>
@@ -155,6 +156,13 @@ static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** The maximum number of address records permitted in an ADDR message. */
static constexpr size_t MAX_ADDR_TO_SEND{1000};
+/** The maximum rate of address records we're willing to process on average. Can be bypassed using
+ * the NetPermissionFlags::Addr permission. */
+static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
+/** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND
+ * based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR
+ * is exempt from this limit). */
+static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
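+// A minimal sketch of the refill rule these constants feed into further below
+// (hypothetical helper; elapsed time given in seconds):
+//
+//     double RefillAddrTokens(double bucket, double elapsed_seconds)
+//     {
+//         if (bucket >= MAX_ADDR_PROCESSING_TOKEN_BUCKET) return bucket; // already full
+//         return std::min<double>(bucket + elapsed_seconds * MAX_ADDR_RATE_PER_SECOND,
+//                                 MAX_ADDR_PROCESSING_TOKEN_BUCKET);
+//     }
+//
+// An idle peer therefore accrues 0.1 addr/s (~8640 addresses/day) but never
+// holds more than 1000 ready tokens; a GETADDR we sent adds a one-off
+// MAX_ADDR_TO_SEND allowance on top of that cap.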
// Internal stuff
namespace {
@@ -217,9 +225,31 @@ struct Peer {
/** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
std::vector<CAddress> m_addrs_to_send;
- /** Probabilistic filter of addresses that this peer already knows.
- * Used to avoid relaying addresses to this peer more than once. */
- const std::unique_ptr<CRollingBloomFilter> m_addr_known;
+ /** Probabilistic filter to track recent addr messages relayed with this
+ * peer. Used to avoid relaying redundant addresses to this peer.
+ *
+ * We initialize this filter for outbound peers (other than
+ * block-relay-only connections) or when an inbound peer sends us an
+ * address related message (ADDR, ADDRV2, GETADDR).
+ *
+ * Presence of this filter must correlate with m_addr_relay_enabled.
+ **/
+ std::unique_ptr<CRollingBloomFilter> m_addr_known;
+ /** Whether we are participating in address relay with this connection.
+ *
+ * We set this bool to true for outbound peers (other than
+ * block-relay-only connections), or when an inbound peer sends us an
+ * address related message (ADDR, ADDRV2, GETADDR).
+ *
+ * We use this bool to decide whether a peer is eligible for gossiping
+ * addr messages. This avoids relaying to peers that are unlikely to
+ * forward them, effectively blackholing self announcements. Reasons
+ * peers might not support addr relay on the link include that they connected
+ * to us as a block-relay-only peer or they are a light client.
+ *
+ * This field must correlate with whether m_addr_known has been
+ * initialized.*/
+ std::atomic_bool m_addr_relay_enabled{false};
/** Whether a getaddr request to this peer is outstanding. */
bool m_getaddr_sent{false};
/** Guards address sending timers. */
@@ -233,6 +263,15 @@ struct Peer {
std::atomic_bool m_wants_addrv2{false};
/** Whether this peer has already sent us a getaddr message. */
bool m_getaddr_recvd{false};
+ /** Number of addresses that can be processed from this peer. Start at 1 to
+ * permit self-announcement. */
+ double m_addr_token_bucket{1.0};
+ /** When m_addr_token_bucket was last updated */
+ std::chrono::microseconds m_addr_token_timestamp{GetTime<std::chrono::microseconds>()};
+ /** Total number of addresses that were dropped due to rate limiting. */
+ std::atomic<uint64_t> m_addr_rate_limited{0};
+ /** Total number of addresses that were processed (excludes rate-limited ones). */
+ std::atomic<uint64_t> m_addr_processed{0};
/** Set of txids to reconsider once their parent transactions have been accepted **/
std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
@@ -242,9 +281,8 @@ struct Peer {
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
- explicit Peer(NodeId id, bool addr_relay)
+ explicit Peer(NodeId id)
: m_id(id)
- , m_addr_known{addr_relay ? std::make_unique<CRollingBloomFilter>(5000, 0.001) : nullptr}
{}
};
@@ -254,7 +292,7 @@ class PeerManagerImpl final : public PeerManager
{
public:
PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs);
/** Overridden from CValidationInterface. */
@@ -271,6 +309,7 @@ public:
bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing);
/** Implement PeerManager */
+ void StartScheduledTasks(CScheduler& scheduler) override;
void CheckForStaleTipAndEvictPeers() override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override;
bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
@@ -380,7 +419,8 @@ private:
/** The height of the best chain */
std::atomic<int> m_best_height{-1};
- int64_t m_stale_tip_check_time; //!< Next time to check for stale tip
+ /** Next time to check for stale tip */
+ int64_t m_stale_tip_check_time{0};
/** Whether this node is running in blocks only mode */
const bool m_ignore_incoming_txs;
@@ -453,16 +493,26 @@ private:
*
* Memory used: 1.3 MB
*/
- std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
+ CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
/*
* Filter for transactions that have been recently confirmed.
* We use this to avoid requesting transactions that have already been
* confirmed.
+ *
+ * Blocks don't typically have more than 4000 transactions, so this should
+ * be at least six blocks (~1 hr) worth of transactions that we can store,
+ * inserting both a txid and wtxid for every observed transaction.
+ * If the number of transactions appearing in a block goes up, or if we are
+ * seeing getdata requests more than an hour after initial announcement, we
+ * can increase this number.
+ * The false positive rate of 1/1M should come out to less than 1
+ * transaction per day that would be inadvertently ignored (which is the
+ * same probability that we have in the reject filter).
*/
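+ // Sizing arithmetic implied above: 48'000 entries / 2 entries per transaction
+ // (txid + wtxid) ≈ 24'000 transactions ≈ 6 blocks of ~4'000 transactions,
+ // i.e. roughly an hour of confirmed transactions at 10-minute block spacing.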
Mutex m_recent_confirmed_transactions_mutex;
- std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex);
+ CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};
/** Have we requested this block from a peer */
bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -596,6 +646,14 @@ private:
* @param[in] vRecv The raw message received
*/
void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv);
+
+ /** Checks if address relay is permitted with peer. If needed, initializes
+ * the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
+ *
+ * @return True if address relay is enabled with peer
+ * False if address relay is disallowed
+ */
+ bool SetupAddressRelay(const CNode& node, Peer& peer);
};
} // namespace
@@ -716,11 +774,6 @@ static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
return &it->second;
}
-static bool RelayAddrsWithPeer(const Peer& peer)
-{
- return peer.m_addr_known != nullptr;
-}
-
/**
* Whether the peer supports the address. For example, a peer that does not
* implement BIP155 cannot receive Tor v3 addresses because it requires
@@ -861,7 +914,7 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
}
m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
- uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
+ uint64_t nCMPCTBLOCKVersion = 2;
if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
// As per BIP152, we only get 3 of our peers to announce
// blocks using compact encodings.
@@ -1034,25 +1087,25 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, int64_t nTime)
// Note that pnode->GetLocalServices() is a reflection of the local
// services we were offering when the CNode object was created for this
// peer.
- ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
+ uint64_t my_services{pnode.GetLocalServices()};
uint64_t nonce = pnode.GetLocalNonce();
const int nNodeStartingHeight{m_best_height};
NodeId nodeid = pnode.GetId();
CAddress addr = pnode.addr;
- CAddress addrYou = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ?
- addr :
- CAddress(CService(), addr.nServices);
- CAddress addrMe = CAddress(CService(), nLocalNodeServices);
+ CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
+ uint64_t your_services{addr.nServices};
const bool tx_relay = !m_ignore_incoming_txs && pnode.m_tx_relay != nullptr;
- m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
+ m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
+ your_services, addr_you, // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
+ my_services, CService(), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
nonce, strSubVersion, nNodeStartingHeight, tx_relay));
if (fLogIPs) {
- LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), tx_relay, nodeid);
+ LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToString(), tx_relay, nodeid);
} else {
- LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), tx_relay, nodeid);
+ LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
}
}
@@ -1101,9 +1154,7 @@ void PeerManagerImpl::InitializeNode(CNode *pnode)
assert(m_txrequest.Count(nodeid) == 0);
}
{
- // Addr relay is disabled for outbound block-relay-only peers to
- // prevent adversaries from inferring these links from addr traffic.
- PeerRef peer = std::make_shared<Peer>(nodeid, /* addr_relay = */ !pnode->IsBlockOnlyConn());
+ PeerRef peer = std::make_shared<Peer>(nodeid);
LOCK(m_peer_mutex);
m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer));
}
@@ -1178,6 +1229,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
assert(m_outbound_peers_with_protect_from_disconnect == 0);
assert(m_wtxid_relay_peers == 0);
assert(m_txrequest.Size() == 0);
+ assert(m_orphanage.Size() == 0);
}
} // cs_main
if (node.fSuccessfullyConnected && misbehavior == 0 &&
@@ -1239,6 +1291,9 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
}
stats.m_ping_wait = ping_wait;
+ stats.m_addr_processed = peer->m_addr_processed.load();
+ stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
+ stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
return true;
}
@@ -1262,14 +1317,20 @@ void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch, const s
if (peer == nullptr) return;
LOCK(peer->m_misbehavior_mutex);
+ const int score_before{peer->m_misbehavior_score};
peer->m_misbehavior_score += howmuch;
+ const int score_now{peer->m_misbehavior_score};
+
const std::string message_prefixed = message.empty() ? "" : (": " + message);
- if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
- LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
+ std::string warning;
+
+ if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) {
+ warning = " DISCOURAGE THRESHOLD EXCEEDED";
peer->m_should_discourage = true;
- } else {
- LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
}
+
+ LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n",
+ pnode, score_before, score_now, warning, message_prefixed);
}
bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
@@ -1359,14 +1420,14 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
}
std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs)
{
- return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman, banman, scheduler, chainman, pool, ignore_incoming_txs);
+ return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman, banman, chainman, pool, ignore_incoming_txs);
}
PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs)
: m_chainparams(chainparams),
m_connman(connman),
@@ -1374,23 +1435,12 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn
m_banman(banman),
m_chainman(chainman),
m_mempool(pool),
- m_stale_tip_check_time(0),
m_ignore_incoming_txs(ignore_incoming_txs)
{
- // Initialize global variables that cannot be constructed at startup.
- recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
-
- // Blocks don't typically have more than 4000 transactions, so this should
- // be at least six blocks (~1 hr) worth of transactions that we can store,
- // inserting both a txid and wtxid for every observed transaction.
- // If the number of transactions appearing in a block goes up, or if we are
- // seeing getdata requests more than an hour after initial announcement, we
- // can increase this number.
- // The false positive rate of 1/1M should come out to less than 1
- // transaction per day that would be inadvertently ignored (which is the
- // same probability that we have in the reject filter).
- m_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));
+}
+void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
+{
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
// combine them in one function and schedule at the quicker (peer-eviction)
@@ -1416,9 +1466,9 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock
{
LOCK(m_recent_confirmed_transactions_mutex);
for (const auto& ptx : pblock->vtx) {
- m_recent_confirmed_transactions->insert(ptx->GetHash());
+ m_recent_confirmed_transactions.insert(ptx->GetHash());
if (ptx->GetHash() != ptx->GetWitnessHash()) {
- m_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
+ m_recent_confirmed_transactions.insert(ptx->GetWitnessHash());
}
}
}
@@ -1442,7 +1492,7 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo
// presumably the most common case of relaying a confirmed transaction
// should be just after a new block containing it is found.
LOCK(m_recent_confirmed_transactions_mutex);
- m_recent_confirmed_transactions->reset();
+ m_recent_confirmed_transactions.reset();
}
// All of the following cache a recent block, and are protected by cs_most_recent_block
@@ -1582,14 +1632,13 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta
bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
{
- assert(recentRejects);
if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
// If the chain tip has changed previously rejected transactions
// might be now valid, e.g. due to a nLockTime'd tx becoming valid,
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
- recentRejects->reset();
+ m_recent_rejects.reset();
}
const uint256& hash = gtxid.GetHash();
@@ -1598,10 +1647,10 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
{
LOCK(m_recent_confirmed_transactions_mutex);
- if (m_recent_confirmed_transactions->contains(hash)) return true;
+ if (m_recent_confirmed_transactions.contains(hash)) return true;
}
- return recentRejects->contains(hash) || m_mempool.exists(gtxid);
+ return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid);
}
bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
@@ -1663,7 +1712,7 @@ void PeerManagerImpl::RelayAddress(NodeId originator,
LOCK(m_peer_mutex);
for (auto& [id, peer] : m_peer_map) {
- if (RelayAddrsWithPeer(*peer) && id != originator && IsAddrCompatible(*peer, addr)) {
+ if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
for (unsigned int i = 0; i < nRelayNodes; i++) {
if (hashKey > best[i].first) {
@@ -1955,7 +2004,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
uint32_t nFetchFlags = 0;
- if ((pfrom.GetLocalServices() & NODE_WITNESS) && State(pfrom.GetId())->fHaveWitness) {
+ if (State(pfrom.GetId())->fHaveWitness) {
nFetchFlags |= MSG_WITNESS_FLAG;
}
return nFetchFlags;
@@ -2220,8 +2269,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
// See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
- assert(recentRejects);
- recentRejects->insert(porphanTx->GetWitnessHash());
+ m_recent_rejects.insert(porphanTx->GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
// failure, since this check depends only on the txid
@@ -2233,7 +2281,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
// We only add the txid if it differs from the wtxid, to
// avoid wasting entries in the rolling bloom filter.
- recentRejects->insert(porphanTx->GetHash());
+ m_recent_rejects.insert(porphanTx->GetHash());
}
}
m_orphanage.EraseTx(orphanHash);
@@ -2439,21 +2487,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
int64_t nTime;
- CAddress addrMe;
- CAddress addrFrom;
+ CService addrMe;
uint64_t nNonce = 1;
- uint64_t nServiceInt;
ServiceFlags nServices;
int nVersion;
std::string cleanSubVer;
int starting_height = -1;
bool fRelay = true;
- vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
+ vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
if (nTime < 0) {
nTime = 0;
}
- nServices = ServiceFlags(nServiceInt);
+ vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
+ vRecv >> addrMe;
if (!pfrom.IsInboundConn())
{
m_addrman.SetServices(pfrom.addr, nServices);
@@ -2472,8 +2519,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
- if (!vRecv.empty())
- vRecv >> addrFrom >> nNonce;
+ if (!vRecv.empty()) {
+ // The version message includes information about the sending node which we don't use:
+ // - 8 bytes (service bits)
+ // - 16 bytes (ipv6 address)
+ // - 2 bytes (port)
+ vRecv.ignore(26);
+ vRecv >> nNonce;
+ }
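+ // For reference, the VERSION payload layout being parsed here (sizes in
+ // bytes; addr_recv/addr_from use the legacy pre-31402 CAddress serialization
+ // without nTime):
+ //   version 4 | services 8 | timestamp 8
+ //   addr_recv: services 8, ip 16, port 2
+ //   addr_from: services 8, ip 16, port 2   <- the 26 bytes ignored above
+ //   nonce 8 | user agent (varstr) | start height 4 | relay 1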
if (!vRecv.empty()) {
std::string strSubVer;
vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
@@ -2554,7 +2607,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
}
- if (!pfrom.IsInboundConn() && !pfrom.IsBlockOnlyConn()) {
+ // Self advertisement & GETADDR logic
+ if (!pfrom.IsInboundConn() && SetupAddressRelay(pfrom, *peer)) {
// For outbound peers, we try to relay our address (so that other
// nodes can try to find us more quickly, as we have no guarantee
// that an outbound peer is even aware of how to reach us) and do a
@@ -2563,8 +2617,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// empty and no one will know who we are, so these mechanisms are
// important to help us connect to the network.
//
- // We skip this for block-relay-only peers to avoid potentially leaking
- // information about our block-relay-only connections via address relay.
+ // We skip this for block-relay-only peers. We want to avoid
+ // potentially leaking addr information and we do not want to
+ // indicate to the peer that we will participate in addr relay.
if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload())
{
CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
@@ -2583,6 +2638,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Get recent addresses
m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
peer->m_getaddr_sent = true;
+ // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
+ // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
+ peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
}
if (!pfrom.IsInboundConn()) {
@@ -2667,8 +2725,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// they may wish to request compact blocks from us
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 2;
- if (pfrom.GetLocalServices() & NODE_WITNESS)
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
nCMPCTBLOCKVersion = 1;
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
}
@@ -2686,7 +2743,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 0;
vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
- if (nCMPCTBLOCKVersion == 1 || ((pfrom.GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
+ if (nCMPCTBLOCKVersion == 1 || nCMPCTBLOCKVersion == 2) {
LOCK(cs_main);
// fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
@@ -2700,10 +2757,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
pfrom.m_bip152_highbandwidth_from = fAnnounceUsingCMPCTBLOCK;
}
if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
- if (pfrom.GetLocalServices() & NODE_WITNESS)
- State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
- else
- State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
+ State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
}
}
return;
@@ -2763,10 +2817,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
s >> vAddr;
- if (!RelayAddrsWithPeer(*peer)) {
+ if (!SetupAddressRelay(pfrom, *peer)) {
LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
return;
}
+
if (vAddr.size() > MAX_ADDR_TO_SEND)
{
Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
@@ -2777,11 +2832,35 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
std::vector<CAddress> vAddrOk;
int64_t nNow = GetAdjustedTime();
int64_t nSince = nNow - 10 * 60;
+
+ // Update/increment addr rate limiting bucket.
+ const auto current_time = GetTime<std::chrono::microseconds>();
+ if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
+ // Don't increment bucket if it's already full
+ const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
+ const double increment = CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND;
+ peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
+ }
+ peer->m_addr_token_timestamp = current_time;
+
+ const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
+ uint64_t num_proc = 0;
+ uint64_t num_rate_limit = 0;
+ Shuffle(vAddr.begin(), vAddr.end(), FastRandomContext());
for (CAddress& addr : vAddr)
{
if (interruptMsgProc)
return;
+ // Apply rate limiting.
+ if (peer->m_addr_token_bucket < 1.0) {
+ if (rate_limited) {
+ ++num_rate_limit;
+ continue;
+ }
+ } else {
+ peer->m_addr_token_bucket -= 1.0;
+ }
// We only bother storing full nodes, though this may include
// things which we would not make an outbound connection to, in
// part because we may make feeler connections to them.
@@ -2795,6 +2874,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Do not process banned/discouraged addresses beyond remembering we received them
continue;
}
+ ++num_proc;
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
// Relay to a limited number of other nodes
@@ -2804,9 +2884,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (fReachable)
vAddrOk.push_back(addr);
}
+ peer->m_addr_processed += num_proc;
+ peer->m_addr_rate_limited += num_rate_limit;
+ LogPrint(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
+ vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
+
m_addrman.Add(vAddrOk, pfrom.addr, 2 * 60 * 60);
if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
- if (pfrom.IsAddrFetchConn()) {
+
+ // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
+ if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
LogPrint(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
pfrom.fDisconnect = true;
}
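
Aside: a minimal standalone sketch of the token-bucket scheme used in the addr-processing hunk above. RATE and CAP are placeholder constants, not the real MAX_ADDR_RATE_PER_SECOND / MAX_ADDR_PROCESSING_TOKEN_BUCKET values, and the refill here happens per call rather than once per message as in the patch.

    // addr_token_bucket_sketch.cpp -- illustrative only; constants are placeholders.
    #include <algorithm>
    #include <chrono>
    #include <cstdio>

    struct AddrTokenBucket {
        double tokens = 1.0;                    // new peers start with one token
        std::chrono::microseconds last{0};
        static constexpr double RATE = 0.1;     // tokens added per second (placeholder)
        static constexpr double CAP  = 1000.0;  // bucket cap (placeholder)

        // Returns true if one address may be processed now.
        bool Consume(std::chrono::microseconds now) {
            if (tokens < CAP) {                 // don't refill a bucket that is already full
                const auto dt = std::max(now - last, std::chrono::microseconds{0});
                tokens = std::min<double>(tokens + std::chrono::duration<double>(dt).count() * RATE, CAP);
            }
            last = now;
            if (tokens < 1.0) return false;     // rate limited: caller skips this address
            tokens -= 1.0;
            return true;
        }
    };

    int main() {
        using namespace std::chrono;
        AddrTokenBucket bucket;
        const auto now = duration_cast<microseconds>(steady_clock::now().time_since_epoch());
        int processed = 0, limited = 0;
        for (int i = 0; i < 50; ++i) {
            (bucket.Consume(now) ? processed : limited)++;
        }
        std::printf("processed=%d rate_limited=%d\n", processed, limited);
    }
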
@@ -3200,7 +3287,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
std::sort(unique_parents.begin(), unique_parents.end());
unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
for (const uint256& parent_txid : unique_parents) {
- if (recentRejects->contains(parent_txid)) {
+ if (m_recent_rejects.contains(parent_txid)) {
fRejectedParents = true;
break;
}
@@ -3241,8 +3328,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// regardless of what witness is provided, we will not accept
// this, so we don't need to allow for redownload of this txid
// from any of our non-wtxidrelay peers.
- recentRejects->insert(tx.GetHash());
- recentRejects->insert(tx.GetWitnessHash());
+ m_recent_rejects.insert(tx.GetHash());
+ m_recent_rejects.insert(tx.GetWitnessHash());
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
}
@@ -3261,8 +3348,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
- assert(recentRejects);
- recentRejects->insert(tx.GetWitnessHash());
+ m_recent_rejects.insert(tx.GetWitnessHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
@@ -3273,7 +3359,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// transactions are later received (resulting in
// parent-fetching by txid via the orphan-handling logic).
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
- recentRejects->insert(tx.GetHash());
+ m_recent_rejects.insert(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetHash());
}
if (RecursiveDynamicUsage(*ptx) < 100000) {
@@ -3282,21 +3368,21 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
}
- // If a tx has been detected by recentRejects, we will have reached
+ // If a tx has been detected by m_recent_rejects, we will have reached
// this point and the tx will have been ignored. Because we haven't run
// the tx through AcceptToMemoryPool, we won't have computed a DoS
// score for it or determined exactly why we consider it invalid.
//
// This means we won't penalize any peer subsequently relaying a DoSy
// tx (even if we penalized the first peer who gave it to us) because
- // we have to account for recentRejects showing false positives. In
+ // we have to account for m_recent_rejects showing false positives. In
// other words, we shouldn't penalize a peer if we aren't *sure* they
// submitted a DoSy tx.
//
- // Note that recentRejects doesn't just record DoSy or invalid
+ // Note that m_recent_rejects doesn't just record DoSy or invalid
// transactions, but any tx not accepted by the mempool, which may be
// due to node policy (vs. consensus). So we can't blanket penalize a
- // peer simply for relaying a tx that our recentRejects has caught,
+ // peer simply for relaying a tx that our m_recent_rejects has caught,
// regardless of false positives.
if (state.IsInvalid()) {
@@ -3665,6 +3751,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
+ // Since this must be an inbound connection, SetupAddressRelay will
+ // never fail.
+ Assume(SetupAddressRelay(pfrom, *peer));
+
// Only send one GetAddr response per connection to reduce resource waste
// and discourage addr stamping of INV announcements.
if (peer->m_getaddr_recvd) {
@@ -3994,6 +4084,15 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
}
CNetMessage& msg(msgs.front());
+ TRACE6(net, inbound_message,
+ pfrom->GetId(),
+ pfrom->m_addr_name.c_str(),
+ pfrom->ConnectionTypeAsString().c_str(),
+ msg.m_command.c_str(),
+ msg.m_recv.size(),
+ msg.m_recv.data()
+ );
+
if (gArgs.GetBoolArg("-capturemessages", false)) {
CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /* incoming */ true);
}
@@ -4243,7 +4342,7 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::mic
void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
{
// Nothing to do for non-address-relay peers
- if (!RelayAddrsWithPeer(peer)) return;
+ if (!peer.m_addr_relay_enabled) return;
LOCK(peer.m_addr_send_times_mutex);
// Periodically advertise our local address to the peer.
@@ -4371,6 +4470,22 @@ public:
};
}
+bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
+{
+ // We don't participate in addr relay with outbound block-relay-only
+ // connections to prevent providing adversaries with the additional
+ // information of addr traffic to infer the link.
+ if (node.IsBlockOnlyConn()) return false;
+
+ if (!peer.m_addr_relay_enabled.exchange(true)) {
+ // First addr message we have received from the peer, initialize
+ // m_addr_known
+ peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
+ }
+
+ return true;
+}
+
bool PeerManagerImpl::SendMessages(CNode* pto)
{
PeerRef peer = GetPeerRef(pto->GetId());
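
Aside: the exchange(true) call in SetupAddressRelay above makes the bloom-filter initialization run exactly once per peer, no matter which message handler reaches it first. A stripped-down sketch of that pattern with placeholder types (not the patch's Peer struct or CRollingBloomFilter):

    // one_time_init_sketch.cpp -- placeholder types; shows the std::atomic::exchange idiom.
    #include <atomic>
    #include <cstdio>
    #include <memory>

    struct FakeFilter { int entries = 0; };

    struct FakePeer {
        std::atomic<bool> addr_relay_enabled{false};
        std::unique_ptr<FakeFilter> addr_known;
    };

    bool SetupAddressRelaySketch(FakePeer& peer) {
        // exchange() returns the previous value, so only the first caller sees
        // false and performs the initialization; later callers just get true back.
        if (!peer.addr_relay_enabled.exchange(true)) {
            peer.addr_known = std::make_unique<FakeFilter>();
            std::puts("initialized addr_known");
        }
        return true;
    }

    int main() {
        FakePeer peer;
        SetupAddressRelaySketch(peer);  // prints once
        SetupAddressRelaySketch(peer);  // no-op on the second call
    }
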
@@ -4390,6 +4505,12 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
const auto current_time = GetTime<std::chrono::microseconds>();
+ if (pto->IsAddrFetchConn() && current_time - std::chrono::seconds(pto->nTimeConnected) > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
+ LogPrint(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
+ pto->fDisconnect = true;
+ return true;
+ }
+
MaybeSendPing(*pto, *peer, current_time);
// MaybeSendPing may have marked peer for disconnection
diff --git a/src/net_processing.h b/src/net_processing.h
index d5801aadd3..9d8d788583 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -29,16 +29,22 @@ struct CNodeStateStats {
int m_starting_height = -1;
std::chrono::microseconds m_ping_wait;
std::vector<int> vHeightInFlight;
+ uint64_t m_addr_processed = 0;
+ uint64_t m_addr_rate_limited = 0;
+ bool m_addr_relay_enabled{false};
};
class PeerManager : public CValidationInterface, public NetEventsInterface
{
public:
static std::unique_ptr<PeerManager> make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs);
virtual ~PeerManager() { }
+ /** Begin running background tasks, should only be called once */
+ virtual void StartScheduledTasks(CScheduler& scheduler) = 0;
+
/** Get statistics from node state */
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const = 0;
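
Aside: with the interface change above, construction and scheduling are split. A hedged usage fragment (not a standalone program; it assumes the surrounding objects exist with the names used in this header and only compiles inside the tree):

    // Hedged fragment: chainparams, connman, addrman, banman, chainman, mempool
    // and scheduler are assumed to already exist, roughly as in init code.
    auto peerman = PeerManager::make(chainparams, connman, addrman,
                                     &banman, chainman, mempool,
                                     /*ignore_incoming_txs=*/false);
    // Background tasks are now started separately, once, after construction:
    peerman->StartScheduledTasks(scheduler);
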
diff --git a/src/net_types.cpp b/src/net_types.cpp
new file mode 100644
index 0000000000..c8f57fe6c6
--- /dev/null
+++ b/src/net_types.cpp
@@ -0,0 +1,65 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <net_types.h>
+
+#include <netaddress.h>
+#include <netbase.h>
+#include <univalue.h>
+
+CBanEntry::CBanEntry(const UniValue& json)
+ : nVersion(json["version"].get_int()), nCreateTime(json["ban_created"].get_int64()),
+ nBanUntil(json["banned_until"].get_int64())
+{
+}
+
+UniValue CBanEntry::ToJson() const
+{
+ UniValue json(UniValue::VOBJ);
+ json.pushKV("version", nVersion);
+ json.pushKV("ban_created", nCreateTime);
+ json.pushKV("banned_until", nBanUntil);
+ return json;
+}
+
+static const char* BANMAN_JSON_ADDR_KEY = "address";
+
+/**
+ * Convert a `banmap_t` object to a JSON array.
+ * @param[in] bans Bans list to convert.
+ * @return a JSON array, similar to the one returned by the `listbanned` RPC. Suitable for
+ * passing to `BanMapFromJson()`.
+ */
+UniValue BanMapToJson(const banmap_t& bans)
+{
+ UniValue bans_json(UniValue::VARR);
+ for (const auto& it : bans) {
+ const auto& address = it.first;
+ const auto& ban_entry = it.second;
+ UniValue j = ban_entry.ToJson();
+ j.pushKV(BANMAN_JSON_ADDR_KEY, address.ToString());
+ bans_json.push_back(j);
+ }
+ return bans_json;
+}
+
+/**
+ * Convert a JSON array to a `banmap_t` object.
+ * @param[in] bans_json JSON to convert, must be as returned by `BanMapToJson()`.
+ * @param[out] bans Bans list to create from the JSON.
+ * @throws std::runtime_error if the JSON does not have the expected fields or they contain
+ * unparsable values.
+ */
+void BanMapFromJson(const UniValue& bans_json, banmap_t& bans)
+{
+ for (const auto& ban_entry_json : bans_json.getValues()) {
+ CSubNet subnet;
+ const auto& subnet_str = ban_entry_json[BANMAN_JSON_ADDR_KEY].get_str();
+ if (!LookupSubNet(subnet_str, subnet)) {
+ throw std::runtime_error(
+ strprintf("Cannot parse banned address or subnet: %s", subnet_str));
+ }
+ bans.insert_or_assign(subnet, CBanEntry{ban_entry_json});
+ }
+}
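
Aside: an illustrative round trip through the two helpers added above. This is a usage fragment, not a standalone program; it assumes the usual in-tree headers (net_types.h, netbase.h, univalue.h, util/time.h, cassert) and only compiles inside the tree.

    // Hedged fragment: serialize a ban list to JSON and read it back.
    banmap_t bans;
    CSubNet subnet;
    if (LookupSubNet("203.0.113.0/24", subnet)) {   // documentation range, example only
        bans.emplace(subnet, CBanEntry{/*nCreateTimeIn=*/GetTime()});
    }

    const UniValue json = BanMapToJson(bans);        // array of {address, version, ban_created, banned_until}
    banmap_t roundtrip;
    BanMapFromJson(json, roundtrip);                 // throws std::runtime_error on malformed input
    assert(roundtrip.size() == bans.size());
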
diff --git a/src/net_types.h b/src/net_types.h
index d55a8cde6c..ffdc24c772 100644
--- a/src/net_types.h
+++ b/src/net_types.h
@@ -5,11 +5,56 @@
#ifndef BITCOIN_NET_TYPES_H
#define BITCOIN_NET_TYPES_H
+#include <cstdint>
#include <map>
-class CBanEntry;
class CSubNet;
+class UniValue;
+
+class CBanEntry
+{
+public:
+ static constexpr int CURRENT_VERSION{1};
+ int nVersion{CBanEntry::CURRENT_VERSION};
+ int64_t nCreateTime{0};
+ int64_t nBanUntil{0};
+
+ CBanEntry() {}
+
+ explicit CBanEntry(int64_t nCreateTimeIn)
+ : nCreateTime{nCreateTimeIn} {}
+
+ /**
+ * Create a ban entry from JSON.
+ * @param[in] json A JSON representation of a ban entry, as created by `ToJson()`.
+ * @throw std::runtime_error if the JSON does not have the expected fields.
+ */
+ explicit CBanEntry(const UniValue& json);
+
+ /**
+ * Generate a JSON representation of this ban entry.
+ * @return JSON suitable for passing to the `CBanEntry(const UniValue&)` constructor.
+ */
+ UniValue ToJson() const;
+};
using banmap_t = std::map<CSubNet, CBanEntry>;
+/**
+ * Convert a `banmap_t` object to a JSON array.
+ * @param[in] bans Bans list to convert.
+ * @return a JSON array, similar to the one returned by the `listbanned` RPC. Suitable for
+ * passing to `BanMapFromJson()`.
+ */
+UniValue BanMapToJson(const banmap_t& bans);
+
+/**
+ * Convert a JSON array to a `banmap_t` object.
+ * @param[in] bans_json JSON to convert, must be as returned by `BanMapToJson()`.
+ * @param[out] bans Bans list to create from the JSON.
+ * @throws std::runtime_error if the JSON does not have the expected fields or they contain
+ * unparsable values.
+ */
+void BanMapFromJson(const UniValue& bans_json, banmap_t& bans);
+
#endif // BITCOIN_NET_TYPES_H
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 1ea3969978..e7b3377475 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -489,7 +489,7 @@ bool CNetAddr::IsValid() const
*/
bool CNetAddr::IsRoutable() const
{
- return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsRFC7343() || IsLocal() || IsInternal());
+ return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || IsRFC4193() || IsRFC4843() || IsRFC7343() || IsLocal() || IsInternal());
}
/**
diff --git a/src/netaddress.h b/src/netaddress.h
index dd47ab5749..eb35ed3fac 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -42,8 +42,7 @@ static constexpr int ADDRV2_FORMAT = 0x20000000;
* over all enum values and also `GetExtNetwork()` "extends" this enum by
* introducing standalone constants starting from `NET_MAX`.
*/
-enum Network
-{
+enum Network {
/// Addresses from these networks are not publicly routable on the global Internet.
NET_UNROUTABLE = 0,
@@ -73,16 +72,14 @@ enum Network
/// Prefix of an IPv6 address when it contains an embedded IPv4 address.
/// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155).
static const std::array<uint8_t, 12> IPV4_IN_IPV6_PREFIX{
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF
-};
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF};
/// Prefix of an IPv6 address when it contains an embedded TORv2 address.
/// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155).
/// Such dummy IPv6 addresses are guaranteed to not be publicly routable as they
/// fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses.
static const std::array<uint8_t, 6> TORV2_IN_IPV6_PREFIX{
- 0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43
-};
+ 0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43};
/// Prefix of an IPv6 address when it contains an embedded "internal" address.
/// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155).
@@ -112,359 +109,362 @@ static constexpr size_t ADDR_CJDNS_SIZE = 16;
/// Size of "internal" (NET_INTERNAL) address (in bytes).
static constexpr size_t ADDR_INTERNAL_SIZE = 10;
+/// SAM 3.1 and earlier do not support specifying ports and force the port to 0.
+static constexpr uint16_t I2P_SAM31_PORT{0};
+
/**
* Network address.
*/
class CNetAddr
{
- protected:
- /**
- * Raw representation of the network address.
- * In network byte order (big endian) for IPv4 and IPv6.
- */
- prevector<ADDR_IPV6_SIZE, uint8_t> m_addr{ADDR_IPV6_SIZE, 0x0};
-
- /**
- * Network to which this address belongs.
- */
- Network m_net{NET_IPV6};
-
- /**
- * Scope id if scoped/link-local IPV6 address.
- * See https://tools.ietf.org/html/rfc4007
- */
- uint32_t m_scope_id{0};
-
- public:
- CNetAddr();
- explicit CNetAddr(const struct in_addr& ipv4Addr);
- void SetIP(const CNetAddr& ip);
-
- /**
- * Set from a legacy IPv6 address.
- * Legacy IPv6 address may be a normal IPv6 address, or another address
- * (e.g. IPv4) disguised as IPv6. This encoding is used in the legacy
- * `addr` encoding.
- */
- void SetLegacyIPv6(Span<const uint8_t> ipv6);
-
- bool SetInternal(const std::string& name);
-
- /**
- * Parse a Tor or I2P address and set this object to it.
- * @param[in] addr Address to parse, for example
- * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion or
- * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p.
- * @returns Whether the operation was successful.
- * @see CNetAddr::IsTor(), CNetAddr::IsI2P()
- */
- bool SetSpecial(const std::string& addr);
-
- bool IsBindAny() const; // INADDR_ANY equivalent
- bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0)
- bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor)
- bool IsRFC1918() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
- bool IsRFC2544() const; // IPv4 inter-network communications (198.18.0.0/15)
- bool IsRFC6598() const; // IPv4 ISP-level NAT (100.64.0.0/10)
- bool IsRFC5737() const; // IPv4 documentation addresses (192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24)
- bool IsRFC3849() const; // IPv6 documentation address (2001:0DB8::/32)
- bool IsRFC3927() const; // IPv4 autoconfig (169.254.0.0/16)
- bool IsRFC3964() const; // IPv6 6to4 tunnelling (2002::/16)
- bool IsRFC4193() const; // IPv6 unique local (FC00::/7)
- bool IsRFC4380() const; // IPv6 Teredo tunnelling (2001::/32)
- bool IsRFC4843() const; // IPv6 ORCHID (deprecated) (2001:10::/28)
- bool IsRFC7343() const; // IPv6 ORCHIDv2 (2001:20::/28)
- bool IsRFC4862() const; // IPv6 autoconfig (FE80::/64)
- bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96)
- bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765)
- bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36)
- bool IsTor() const;
- bool IsI2P() const;
- bool IsCJDNS() const;
- bool IsLocal() const;
- bool IsRoutable() const;
- bool IsInternal() const;
- bool IsValid() const;
-
- /**
- * Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
- */
- bool IsAddrV1Compatible() const;
-
- enum Network GetNetwork() const;
- std::string ToString() const;
- std::string ToStringIP() const;
- uint64_t GetHash() const;
- bool GetInAddr(struct in_addr* pipv4Addr) const;
- Network GetNetClass() const;
-
- //! For IPv4, mapped IPv4, SIIT translated IPv4, Teredo, 6to4 tunneled addresses, return the relevant IPv4 address as a uint32.
- uint32_t GetLinkedIPv4() const;
- //! Whether this address has a linked IPv4 address (see GetLinkedIPv4()).
- bool HasLinkedIPv4() const;
-
- // The AS on the BGP path to the node we use to diversify
- // peers in AddrMan bucketing based on the AS infrastructure.
- // The ip->AS mapping depends on how asmap is constructed.
- uint32_t GetMappedAS(const std::vector<bool> &asmap) const;
-
- std::vector<unsigned char> GetGroup(const std::vector<bool> &asmap) const;
- std::vector<unsigned char> GetAddrBytes() const;
- int GetReachabilityFrom(const CNetAddr *paddrPartner = nullptr) const;
-
- explicit CNetAddr(const struct in6_addr& pipv6Addr, const uint32_t scope = 0);
- bool GetIn6Addr(struct in6_addr* pipv6Addr) const;
-
- friend bool operator==(const CNetAddr& a, const CNetAddr& b);
- friend bool operator!=(const CNetAddr& a, const CNetAddr& b) { return !(a == b); }
- friend bool operator<(const CNetAddr& a, const CNetAddr& b);
-
- /**
- * Whether this address should be relayed to other peers even if we can't reach it ourselves.
- */
- bool IsRelayable() const
- {
- return IsIPv4() || IsIPv6() || IsTor();
- }
+protected:
+ /**
+ * Raw representation of the network address.
+ * In network byte order (big endian) for IPv4 and IPv6.
+ */
+ prevector<ADDR_IPV6_SIZE, uint8_t> m_addr{ADDR_IPV6_SIZE, 0x0};
+
+ /**
+ * Network to which this address belongs.
+ */
+ Network m_net{NET_IPV6};
+
+ /**
+ * Scope id if scoped/link-local IPV6 address.
+ * See https://tools.ietf.org/html/rfc4007
+ */
+ uint32_t m_scope_id{0};
- /**
- * Serialize to a stream.
- */
- template <typename Stream>
- void Serialize(Stream& s) const
- {
- if (s.GetVersion() & ADDRV2_FORMAT) {
- SerializeV2Stream(s);
- } else {
- SerializeV1Stream(s);
- }
- }
+public:
+ CNetAddr();
+ explicit CNetAddr(const struct in_addr& ipv4Addr);
+ void SetIP(const CNetAddr& ip);
+
+ /**
+ * Set from a legacy IPv6 address.
+ * Legacy IPv6 address may be a normal IPv6 address, or another address
+ * (e.g. IPv4) disguised as IPv6. This encoding is used in the legacy
+ * `addr` encoding.
+ */
+ void SetLegacyIPv6(Span<const uint8_t> ipv6);
+
+ bool SetInternal(const std::string& name);
+
+ /**
+ * Parse a Tor or I2P address and set this object to it.
+ * @param[in] addr Address to parse, for example
+ * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion or
+ * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p.
+ * @returns Whether the operation was successful.
+ * @see CNetAddr::IsTor(), CNetAddr::IsI2P()
+ */
+ bool SetSpecial(const std::string& addr);
+
+ bool IsBindAny() const; // INADDR_ANY equivalent
+ bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0)
+ bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor)
+ bool IsRFC1918() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
+ bool IsRFC2544() const; // IPv4 inter-network communications (198.18.0.0/15)
+ bool IsRFC6598() const; // IPv4 ISP-level NAT (100.64.0.0/10)
+ bool IsRFC5737() const; // IPv4 documentation addresses (192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24)
+ bool IsRFC3849() const; // IPv6 documentation address (2001:0DB8::/32)
+ bool IsRFC3927() const; // IPv4 autoconfig (169.254.0.0/16)
+ bool IsRFC3964() const; // IPv6 6to4 tunnelling (2002::/16)
+ bool IsRFC4193() const; // IPv6 unique local (FC00::/7)
+ bool IsRFC4380() const; // IPv6 Teredo tunnelling (2001::/32)
+ bool IsRFC4843() const; // IPv6 ORCHID (deprecated) (2001:10::/28)
+ bool IsRFC7343() const; // IPv6 ORCHIDv2 (2001:20::/28)
+ bool IsRFC4862() const; // IPv6 autoconfig (FE80::/64)
+ bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96)
+ bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765)
+ bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36)
+ bool IsTor() const;
+ bool IsI2P() const;
+ bool IsCJDNS() const;
+ bool IsLocal() const;
+ bool IsRoutable() const;
+ bool IsInternal() const;
+ bool IsValid() const;
+
+ /**
+ * Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
+ */
+ bool IsAddrV1Compatible() const;
+
+ enum Network GetNetwork() const;
+ std::string ToString() const;
+ std::string ToStringIP() const;
+ uint64_t GetHash() const;
+ bool GetInAddr(struct in_addr* pipv4Addr) const;
+ Network GetNetClass() const;
+
+ //! For IPv4, mapped IPv4, SIIT translated IPv4, Teredo, 6to4 tunneled addresses, return the relevant IPv4 address as a uint32.
+ uint32_t GetLinkedIPv4() const;
+ //! Whether this address has a linked IPv4 address (see GetLinkedIPv4()).
+ bool HasLinkedIPv4() const;
+
+ // The AS on the BGP path to the node we use to diversify
+ // peers in AddrMan bucketing based on the AS infrastructure.
+ // The ip->AS mapping depends on how asmap is constructed.
+ uint32_t GetMappedAS(const std::vector<bool>& asmap) const;
+
+ std::vector<unsigned char> GetGroup(const std::vector<bool>& asmap) const;
+ std::vector<unsigned char> GetAddrBytes() const;
+ int GetReachabilityFrom(const CNetAddr* paddrPartner = nullptr) const;
+
+ explicit CNetAddr(const struct in6_addr& pipv6Addr, const uint32_t scope = 0);
+ bool GetIn6Addr(struct in6_addr* pipv6Addr) const;
+
+ friend bool operator==(const CNetAddr& a, const CNetAddr& b);
+ friend bool operator!=(const CNetAddr& a, const CNetAddr& b) { return !(a == b); }
+ friend bool operator<(const CNetAddr& a, const CNetAddr& b);
+
+ /**
+ * Whether this address should be relayed to other peers even if we can't reach it ourselves.
+ */
+ bool IsRelayable() const
+ {
+ return IsIPv4() || IsIPv6() || IsTor() || IsI2P();
+ }
- /**
- * Unserialize from a stream.
- */
- template <typename Stream>
- void Unserialize(Stream& s)
- {
- if (s.GetVersion() & ADDRV2_FORMAT) {
- UnserializeV2Stream(s);
- } else {
- UnserializeV1Stream(s);
- }
+ /**
+ * Serialize to a stream.
+ */
+ template <typename Stream>
+ void Serialize(Stream& s) const
+ {
+ if (s.GetVersion() & ADDRV2_FORMAT) {
+ SerializeV2Stream(s);
+ } else {
+ SerializeV1Stream(s);
}
+ }
- friend class CNetAddrHash;
- friend class CSubNet;
-
- private:
- /**
- * Parse a Tor address and set this object to it.
- * @param[in] addr Address to parse, must be a valid C string, for example
- * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.
- * @returns Whether the operation was successful.
- * @see CNetAddr::IsTor()
- */
- bool SetTor(const std::string& addr);
-
- /**
- * Parse an I2P address and set this object to it.
- * @param[in] addr Address to parse, must be a valid C string, for example
- * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p.
- * @returns Whether the operation was successful.
- * @see CNetAddr::IsI2P()
- */
- bool SetI2P(const std::string& addr);
-
- /**
- * BIP155 network ids recognized by this software.
- */
- enum BIP155Network : uint8_t {
- IPV4 = 1,
- IPV6 = 2,
- TORV2 = 3,
- TORV3 = 4,
- I2P = 5,
- CJDNS = 6,
- };
-
- /**
- * Size of CNetAddr when serialized as ADDRv1 (pre-BIP155) (in bytes).
- */
- static constexpr size_t V1_SERIALIZATION_SIZE = ADDR_IPV6_SIZE;
-
- /**
- * Maximum size of an address as defined in BIP155 (in bytes).
- * This is only the size of the address, not the entire CNetAddr object
- * when serialized.
- */
- static constexpr size_t MAX_ADDRV2_SIZE = 512;
-
- /**
- * Get the BIP155 network id of this address.
- * Must not be called for IsInternal() objects.
- * @returns BIP155 network id, except TORV2 which is no longer supported.
- */
- BIP155Network GetBIP155Network() const;
-
- /**
- * Set `m_net` from the provided BIP155 network id and size after validation.
- * @retval true the network was recognized, is valid and `m_net` was set
- * @retval false not recognised (from future?) and should be silently ignored
- * @throws std::ios_base::failure if the network is one of the BIP155 founding
- * networks (id 1..6) with wrong address size.
- */
- bool SetNetFromBIP155Network(uint8_t possible_bip155_net, size_t address_size);
-
- /**
- * Serialize in pre-ADDRv2/BIP155 format to an array.
- */
- void SerializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) const
- {
- size_t prefix_size;
-
- switch (m_net) {
- case NET_IPV6:
- assert(m_addr.size() == sizeof(arr));
- memcpy(arr, m_addr.data(), m_addr.size());
- return;
- case NET_IPV4:
- prefix_size = sizeof(IPV4_IN_IPV6_PREFIX);
- assert(prefix_size + m_addr.size() == sizeof(arr));
- memcpy(arr, IPV4_IN_IPV6_PREFIX.data(), prefix_size);
- memcpy(arr + prefix_size, m_addr.data(), m_addr.size());
- return;
- case NET_INTERNAL:
- prefix_size = sizeof(INTERNAL_IN_IPV6_PREFIX);
- assert(prefix_size + m_addr.size() == sizeof(arr));
- memcpy(arr, INTERNAL_IN_IPV6_PREFIX.data(), prefix_size);
- memcpy(arr + prefix_size, m_addr.data(), m_addr.size());
- return;
- case NET_ONION:
- case NET_I2P:
- case NET_CJDNS:
- break;
- case NET_UNROUTABLE:
- case NET_MAX:
- assert(false);
- } // no default case, so the compiler can warn about missing cases
-
- // Serialize ONION, I2P and CJDNS as all-zeros.
- memset(arr, 0x0, V1_SERIALIZATION_SIZE);
+ /**
+ * Unserialize from a stream.
+ */
+ template <typename Stream>
+ void Unserialize(Stream& s)
+ {
+ if (s.GetVersion() & ADDRV2_FORMAT) {
+ UnserializeV2Stream(s);
+ } else {
+ UnserializeV1Stream(s);
}
+ }
- /**
- * Serialize in pre-ADDRv2/BIP155 format to a stream.
- */
- template <typename Stream>
- void SerializeV1Stream(Stream& s) const
- {
- uint8_t serialized[V1_SERIALIZATION_SIZE];
+ friend class CNetAddrHash;
+ friend class CSubNet;
- SerializeV1Array(serialized);
+private:
+ /**
+ * Parse a Tor address and set this object to it.
+ * @param[in] addr Address to parse, must be a valid C string, for example
+ * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.
+ * @returns Whether the operation was successful.
+ * @see CNetAddr::IsTor()
+ */
+ bool SetTor(const std::string& addr);
+
+ /**
+ * Parse an I2P address and set this object to it.
+ * @param[in] addr Address to parse, must be a valid C string, for example
+ * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p.
+ * @returns Whether the operation was successful.
+ * @see CNetAddr::IsI2P()
+ */
+ bool SetI2P(const std::string& addr);
+
+ /**
+ * BIP155 network ids recognized by this software.
+ */
+ enum BIP155Network : uint8_t {
+ IPV4 = 1,
+ IPV6 = 2,
+ TORV2 = 3,
+ TORV3 = 4,
+ I2P = 5,
+ CJDNS = 6,
+ };
+
+ /**
+ * Size of CNetAddr when serialized as ADDRv1 (pre-BIP155) (in bytes).
+ */
+ static constexpr size_t V1_SERIALIZATION_SIZE = ADDR_IPV6_SIZE;
+
+ /**
+ * Maximum size of an address as defined in BIP155 (in bytes).
+ * This is only the size of the address, not the entire CNetAddr object
+ * when serialized.
+ */
+ static constexpr size_t MAX_ADDRV2_SIZE = 512;
+
+ /**
+ * Get the BIP155 network id of this address.
+ * Must not be called for IsInternal() objects.
+ * @returns BIP155 network id, except TORV2 which is no longer supported.
+ */
+ BIP155Network GetBIP155Network() const;
+
+ /**
+ * Set `m_net` from the provided BIP155 network id and size after validation.
+ * @retval true the network was recognized, is valid and `m_net` was set
+ * @retval false not recognised (from future?) and should be silently ignored
+ * @throws std::ios_base::failure if the network is one of the BIP155 founding
+ * networks (id 1..6) with wrong address size.
+ */
+ bool SetNetFromBIP155Network(uint8_t possible_bip155_net, size_t address_size);
+
+ /**
+ * Serialize in pre-ADDRv2/BIP155 format to an array.
+ */
+ void SerializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) const
+ {
+ size_t prefix_size;
+
+ switch (m_net) {
+ case NET_IPV6:
+ assert(m_addr.size() == sizeof(arr));
+ memcpy(arr, m_addr.data(), m_addr.size());
+ return;
+ case NET_IPV4:
+ prefix_size = sizeof(IPV4_IN_IPV6_PREFIX);
+ assert(prefix_size + m_addr.size() == sizeof(arr));
+ memcpy(arr, IPV4_IN_IPV6_PREFIX.data(), prefix_size);
+ memcpy(arr + prefix_size, m_addr.data(), m_addr.size());
+ return;
+ case NET_INTERNAL:
+ prefix_size = sizeof(INTERNAL_IN_IPV6_PREFIX);
+ assert(prefix_size + m_addr.size() == sizeof(arr));
+ memcpy(arr, INTERNAL_IN_IPV6_PREFIX.data(), prefix_size);
+ memcpy(arr + prefix_size, m_addr.data(), m_addr.size());
+ return;
+ case NET_ONION:
+ case NET_I2P:
+ case NET_CJDNS:
+ break;
+ case NET_UNROUTABLE:
+ case NET_MAX:
+ assert(false);
+ } // no default case, so the compiler can warn about missing cases
+
+ // Serialize ONION, I2P and CJDNS as all-zeros.
+ memset(arr, 0x0, V1_SERIALIZATION_SIZE);
+ }
- s << serialized;
- }
+ /**
+ * Serialize in pre-ADDRv2/BIP155 format to a stream.
+ */
+ template <typename Stream>
+ void SerializeV1Stream(Stream& s) const
+ {
+ uint8_t serialized[V1_SERIALIZATION_SIZE];
- /**
- * Serialize as ADDRv2 / BIP155.
- */
- template <typename Stream>
- void SerializeV2Stream(Stream& s) const
- {
- if (IsInternal()) {
- // Serialize NET_INTERNAL as embedded in IPv6. We need to
- // serialize such addresses from addrman.
- s << static_cast<uint8_t>(BIP155Network::IPV6);
- s << COMPACTSIZE(ADDR_IPV6_SIZE);
- SerializeV1Stream(s);
- return;
- }
+ SerializeV1Array(serialized);
- s << static_cast<uint8_t>(GetBIP155Network());
- s << m_addr;
- }
+ s << serialized;
+ }
- /**
- * Unserialize from a pre-ADDRv2/BIP155 format from an array.
- */
- void UnserializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE])
- {
- // Use SetLegacyIPv6() so that m_net is set correctly. For example
- // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4).
- SetLegacyIPv6(arr);
+ /**
+ * Serialize as ADDRv2 / BIP155.
+ */
+ template <typename Stream>
+ void SerializeV2Stream(Stream& s) const
+ {
+ if (IsInternal()) {
+ // Serialize NET_INTERNAL as embedded in IPv6. We need to
+ // serialize such addresses from addrman.
+ s << static_cast<uint8_t>(BIP155Network::IPV6);
+ s << COMPACTSIZE(ADDR_IPV6_SIZE);
+ SerializeV1Stream(s);
+ return;
}
- /**
- * Unserialize from a pre-ADDRv2/BIP155 format from a stream.
- */
- template <typename Stream>
- void UnserializeV1Stream(Stream& s)
- {
- uint8_t serialized[V1_SERIALIZATION_SIZE];
+ s << static_cast<uint8_t>(GetBIP155Network());
+ s << m_addr;
+ }
- s >> serialized;
+ /**
+ * Unserialize from a pre-ADDRv2/BIP155 format from an array.
+ */
+ void UnserializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE])
+ {
+ // Use SetLegacyIPv6() so that m_net is set correctly. For example
+ // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4).
+ SetLegacyIPv6(arr);
+ }
+
+ /**
+ * Unserialize from a pre-ADDRv2/BIP155 format from a stream.
+ */
+ template <typename Stream>
+ void UnserializeV1Stream(Stream& s)
+ {
+ uint8_t serialized[V1_SERIALIZATION_SIZE];
+
+ s >> serialized;
+
+ UnserializeV1Array(serialized);
+ }
+
+ /**
+ * Unserialize from a ADDRv2 / BIP155 format.
+ */
+ template <typename Stream>
+ void UnserializeV2Stream(Stream& s)
+ {
+ uint8_t bip155_net;
+ s >> bip155_net;
- UnserializeV1Array(serialized);
+ size_t address_size;
+ s >> COMPACTSIZE(address_size);
+
+ if (address_size > MAX_ADDRV2_SIZE) {
+ throw std::ios_base::failure(strprintf(
+ "Address too long: %u > %u", address_size, MAX_ADDRV2_SIZE));
}
- /**
- * Unserialize from a ADDRv2 / BIP155 format.
- */
- template <typename Stream>
- void UnserializeV2Stream(Stream& s)
- {
- uint8_t bip155_net;
- s >> bip155_net;
-
- size_t address_size;
- s >> COMPACTSIZE(address_size);
-
- if (address_size > MAX_ADDRV2_SIZE) {
- throw std::ios_base::failure(strprintf(
- "Address too long: %u > %u", address_size, MAX_ADDRV2_SIZE));
+ m_scope_id = 0;
+
+ if (SetNetFromBIP155Network(bip155_net, address_size)) {
+ m_addr.resize(address_size);
+ s >> MakeSpan(m_addr);
+
+ if (m_net != NET_IPV6) {
+ return;
}
- m_scope_id = 0;
-
- if (SetNetFromBIP155Network(bip155_net, address_size)) {
- m_addr.resize(address_size);
- s >> MakeSpan(m_addr);
-
- if (m_net != NET_IPV6) {
- return;
- }
-
- // Do some special checks on IPv6 addresses.
-
- // Recognize NET_INTERNAL embedded in IPv6, such addresses are not
- // gossiped but could be coming from addrman, when unserializing from
- // disk.
- if (HasPrefix(m_addr, INTERNAL_IN_IPV6_PREFIX)) {
- m_net = NET_INTERNAL;
- memmove(m_addr.data(), m_addr.data() + INTERNAL_IN_IPV6_PREFIX.size(),
- ADDR_INTERNAL_SIZE);
- m_addr.resize(ADDR_INTERNAL_SIZE);
- return;
- }
-
- if (!HasPrefix(m_addr, IPV4_IN_IPV6_PREFIX) &&
- !HasPrefix(m_addr, TORV2_IN_IPV6_PREFIX)) {
- return;
- }
-
- // IPv4 and TORv2 are not supposed to be embedded in IPv6 (like in V1
- // encoding). Unserialize as !IsValid(), thus ignoring them.
- } else {
- // If we receive an unknown BIP155 network id (from the future?) then
- // ignore the address - unserialize as !IsValid().
- s.ignore(address_size);
+ // Do some special checks on IPv6 addresses.
+
+ // Recognize NET_INTERNAL embedded in IPv6, such addresses are not
+ // gossiped but could be coming from addrman, when unserializing from
+ // disk.
+ if (HasPrefix(m_addr, INTERNAL_IN_IPV6_PREFIX)) {
+ m_net = NET_INTERNAL;
+ memmove(m_addr.data(), m_addr.data() + INTERNAL_IN_IPV6_PREFIX.size(),
+ ADDR_INTERNAL_SIZE);
+ m_addr.resize(ADDR_INTERNAL_SIZE);
+ return;
}
- // Mimic a default-constructed CNetAddr object which is !IsValid() and thus
- // will not be gossiped, but continue reading next addresses from the stream.
- m_net = NET_IPV6;
- m_addr.assign(ADDR_IPV6_SIZE, 0x0);
+ if (!HasPrefix(m_addr, IPV4_IN_IPV6_PREFIX) &&
+ !HasPrefix(m_addr, TORV2_IN_IPV6_PREFIX)) {
+ return;
+ }
+
+ // IPv4 and TORv2 are not supposed to be embedded in IPv6 (like in V1
+ // encoding). Unserialize as !IsValid(), thus ignoring them.
+ } else {
+ // If we receive an unknown BIP155 network id (from the future?) then
+ // ignore the address - unserialize as !IsValid().
+ s.ignore(address_size);
}
+
+ // Mimic a default-constructed CNetAddr object which is !IsValid() and thus
+ // will not be gossiped, but continue reading next addresses from the stream.
+ m_net = NET_IPV6;
+ m_addr.assign(ADDR_IPV6_SIZE, 0x0);
+ }
};
class CNetAddrHash
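
Aside: a standalone sketch of the ADDRv2 wire layout produced by SerializeV2Stream above: one BIP155 network-id byte, a compact-size length, then the raw address bytes. The compact-size handling is simplified to the single-byte case and the types are local stand-ins, not the real CNetAddr/stream classes.

    // addrv2_layout_sketch.cpp -- simplified; only handles lengths < 253.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum class Bip155 : uint8_t { IPV4 = 1, IPV6 = 2, TORV3 = 4, I2P = 5, CJDNS = 6 };

    std::vector<uint8_t> EncodeAddrV2(Bip155 net, const std::vector<uint8_t>& addr) {
        std::vector<uint8_t> out;
        out.push_back(static_cast<uint8_t>(net));          // 1 byte: BIP155 network id
        out.push_back(static_cast<uint8_t>(addr.size()));  // compact size (single-byte case only)
        out.insert(out.end(), addr.begin(), addr.end());   // raw address bytes
        return out;
    }

    int main() {
        const std::vector<uint8_t> ipv4{198, 51, 100, 7};  // documentation address, example only
        const auto wire = EncodeAddrV2(Bip155::IPV4, ipv4);
        for (uint8_t b : wire) std::printf("%02x ", b);    // expected: 01 04 c6 33 64 07
        std::printf("\n");
    }
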
@@ -485,104 +485,86 @@ private:
class CSubNet
{
- protected:
- /// Network (base) address
- CNetAddr network;
- /// Netmask, in network byte order
- uint8_t netmask[16];
- /// Is this value valid? (only used to signal parse errors)
- bool valid;
-
- bool SanityCheck() const;
-
- public:
- /**
- * Construct an invalid subnet (empty, `Match()` always returns false).
- */
- CSubNet();
-
- /**
- * Construct from a given network start and number of bits (CIDR mask).
- * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is
- * created.
- * @param[in] mask CIDR mask, must be in [0, 32] for IPv4 addresses and in [0, 128] for
- * IPv6 addresses. Otherwise an invalid subnet is created.
- */
- CSubNet(const CNetAddr& addr, uint8_t mask);
-
- /**
- * Construct from a given network start and mask.
- * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is
- * created.
- * @param[in] mask Network mask, must be of the same type as `addr` and not contain 0-bits
- * followed by 1-bits. Otherwise an invalid subnet is created.
- */
- CSubNet(const CNetAddr& addr, const CNetAddr& mask);
-
- /**
- * Construct a single-host subnet.
- * @param[in] addr The sole address to be contained in the subnet, can also be non-IPv[46].
- */
- explicit CSubNet(const CNetAddr& addr);
-
- bool Match(const CNetAddr &addr) const;
-
- std::string ToString() const;
- bool IsValid() const;
-
- friend bool operator==(const CSubNet& a, const CSubNet& b);
- friend bool operator!=(const CSubNet& a, const CSubNet& b) { return !(a == b); }
- friend bool operator<(const CSubNet& a, const CSubNet& b);
-
- SERIALIZE_METHODS(CSubNet, obj)
- {
- READWRITE(obj.network);
- if (obj.network.IsIPv4()) {
- // Before commit 102867c587f5f7954232fb8ed8e85cda78bb4d32, CSubNet used the last 4 bytes of netmask
- // to store the relevant bytes for an IPv4 mask. For compatibility reasons, keep doing so in
- // serialized form.
- unsigned char dummy[12] = {0};
- READWRITE(dummy);
- READWRITE(MakeSpan(obj.netmask).first(4));
- } else {
- READWRITE(obj.netmask);
- }
- READWRITE(obj.valid);
- // Mark invalid if the result doesn't pass sanity checking.
- SER_READ(obj, if (obj.valid) obj.valid = obj.SanityCheck());
- }
+protected:
+ /// Network (base) address
+ CNetAddr network;
+ /// Netmask, in network byte order
+ uint8_t netmask[16];
+ /// Is this value valid? (only used to signal parse errors)
+ bool valid;
+
+ bool SanityCheck() const;
+
+public:
+ /**
+ * Construct an invalid subnet (empty, `Match()` always returns false).
+ */
+ CSubNet();
+
+ /**
+ * Construct from a given network start and number of bits (CIDR mask).
+ * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is
+ * created.
+ * @param[in] mask CIDR mask, must be in [0, 32] for IPv4 addresses and in [0, 128] for
+ * IPv6 addresses. Otherwise an invalid subnet is created.
+ */
+ CSubNet(const CNetAddr& addr, uint8_t mask);
+
+ /**
+ * Construct from a given network start and mask.
+ * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is
+ * created.
+ * @param[in] mask Network mask, must be of the same type as `addr` and not contain 0-bits
+ * followed by 1-bits. Otherwise an invalid subnet is created.
+ */
+ CSubNet(const CNetAddr& addr, const CNetAddr& mask);
+
+ /**
+ * Construct a single-host subnet.
+ * @param[in] addr The sole address to be contained in the subnet, can also be non-IPv[46].
+ */
+ explicit CSubNet(const CNetAddr& addr);
+
+ bool Match(const CNetAddr& addr) const;
+
+ std::string ToString() const;
+ bool IsValid() const;
+
+ friend bool operator==(const CSubNet& a, const CSubNet& b);
+ friend bool operator!=(const CSubNet& a, const CSubNet& b) { return !(a == b); }
+ friend bool operator<(const CSubNet& a, const CSubNet& b);
};
/** A combination of a network address (CNetAddr) and a (TCP) port */
class CService : public CNetAddr
{
- protected:
- uint16_t port; // host order
-
- public:
- CService();
- CService(const CNetAddr& ip, uint16_t port);
- CService(const struct in_addr& ipv4Addr, uint16_t port);
- explicit CService(const struct sockaddr_in& addr);
- uint16_t GetPort() const;
- bool GetSockAddr(struct sockaddr* paddr, socklen_t *addrlen) const;
- bool SetSockAddr(const struct sockaddr* paddr);
- friend bool operator==(const CService& a, const CService& b);
- friend bool operator!=(const CService& a, const CService& b) { return !(a == b); }
- friend bool operator<(const CService& a, const CService& b);
- std::vector<unsigned char> GetKey() const;
- std::string ToString() const;
- std::string ToStringPort() const;
- std::string ToStringIPPort() const;
-
- CService(const struct in6_addr& ipv6Addr, uint16_t port);
- explicit CService(const struct sockaddr_in6& addr);
-
- SERIALIZE_METHODS(CService, obj)
- {
- READWRITEAS(CNetAddr, obj);
- READWRITE(Using<BigEndianFormatter<2>>(obj.port));
- }
+protected:
+ uint16_t port; // host order
+
+public:
+ CService();
+ CService(const CNetAddr& ip, uint16_t port);
+ CService(const struct in_addr& ipv4Addr, uint16_t port);
+ explicit CService(const struct sockaddr_in& addr);
+ uint16_t GetPort() const;
+ bool GetSockAddr(struct sockaddr* paddr, socklen_t* addrlen) const;
+ bool SetSockAddr(const struct sockaddr* paddr);
+ friend bool operator==(const CService& a, const CService& b);
+ friend bool operator!=(const CService& a, const CService& b) { return !(a == b); }
+ friend bool operator<(const CService& a, const CService& b);
+ std::vector<unsigned char> GetKey() const;
+ std::string ToString() const;
+ std::string ToStringPort() const;
+ std::string ToStringIPPort() const;
+
+ CService(const struct in6_addr& ipv6Addr, uint16_t port);
+ explicit CService(const struct sockaddr_in6& addr);
+
+ SERIALIZE_METHODS(CService, obj)
+ {
+ READWRITEAS(CNetAddr, obj);
+ READWRITE(Using<BigEndianFormatter<2>>(obj.port));
+ }
};
bool SanityCheckASMap(const std::vector<bool>& asmap);
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 0083b74b33..90f7ba191d 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -518,7 +518,7 @@ void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFile
}
nFile++;
}
- pblocktree->WriteReindexing(false);
+ WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
fReindex = false;
LogPrintf("Reindexing finished\n");
// To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
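
Aside: WITH_LOCK in the hunk above evaluates a single expression while holding the given lock. A rough standalone analogue of that pattern using std::mutex; this mimics the idea only and is not the macro's actual definition in sync.h.

    // with_lock_analogue.cpp -- a stand-in for the WITH_LOCK pattern, not the real macro.
    #include <cstdio>
    #include <mutex>

    std::mutex g_mutex;
    int g_flag = 1;

    // Run `fn` while holding `m`, returning whatever `fn` returns.
    template <typename Fn>
    auto WithLock(std::mutex& m, Fn&& fn) {
        std::lock_guard<std::mutex> lock(m);
        return fn();
    }

    int main() {
        // Comparable in spirit to:
        //   WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
        const int previous = WithLock(g_mutex, [] { int old = g_flag; g_flag = 0; return old; });
        std::printf("previous flag value: %d\n", previous);
    }
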
diff --git a/src/node/coinstats.h b/src/node/coinstats.h
index 8be256edc9..69e856dd15 100644
--- a/src/node/coinstats.h
+++ b/src/node/coinstats.h
@@ -45,15 +45,25 @@ struct CCoinsStats
bool index_used{false};
// Following values are only available from coinstats index
+
+ //! Total cumulative amount of block subsidies up to and including this block
CAmount total_subsidy{0};
- CAmount block_unspendable_amount{0};
- CAmount block_prevout_spent_amount{0};
- CAmount block_new_outputs_ex_coinbase_amount{0};
- CAmount block_coinbase_amount{0};
- CAmount unspendables_genesis_block{0};
- CAmount unspendables_bip30{0};
- CAmount unspendables_scripts{0};
- CAmount unspendables_unclaimed_rewards{0};
+ //! Total cumulative amount of unspendable coins up to and including this block
+ CAmount total_unspendable_amount{0};
+ //! Total cumulative amount of prevouts spent up to and including this block
+ CAmount total_prevout_spent_amount{0};
+ //! Total cumulative amount of outputs created up to and including this block
+ CAmount total_new_outputs_ex_coinbase_amount{0};
+ //! Total cumulative amount of coinbase outputs up to and including this block
+ CAmount total_coinbase_amount{0};
+ //! The unspendable coinbase amount from the genesis block
+ CAmount total_unspendables_genesis_block{0};
+ //! The two unspendable coinbase outputs total amount caused by BIP30
+ CAmount total_unspendables_bip30{0};
+ //! Total cumulative amount of outputs sent to unspendable scripts (OP_RETURN for example) up to and including this block
+ CAmount total_unspendables_scripts{0};
+ //! Total cumulative amount of coins lost due to unclaimed miner rewards up to and including this block
+ CAmount total_unspendables_unclaimed_rewards{0};
CCoinsStats(CoinStatsHashType hash_type) : m_hash_type(hash_type) {}
};
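
Aside: the renamed fields above fall into two families, per-chain running totals and an unspendable-amount breakdown. Assuming the accounting itself is unchanged by the rename, the four total_unspendables_* buckets should sum to total_unspendable_amount; a toy consistency check with made-up numbers (the invariant is an assumption here, not code from the patch):

    // coinstats_invariant_sketch.cpp -- made-up numbers; the invariant itself is an assumption.
    #include <cassert>
    #include <cstdint>

    using CAmountSketch = int64_t;  // satoshis, stand-in for CAmount

    struct UnspendableBreakdown {
        CAmountSketch genesis_block = 5000000000;      // 50 BTC genesis subsidy (unspendable by design)
        CAmountSketch bip30 = 10000000000;             // the two overwritten BIP30 coinbases (assumed 50 BTC each)
        CAmountSketch scripts = 12345;                 // e.g. OP_RETURN outputs
        CAmountSketch unclaimed_rewards = 67890;       // miner rewards never claimed
        CAmountSketch Total() const { return genesis_block + bip30 + scripts + unclaimed_rewards; }
    };

    int main() {
        UnspendableBreakdown b;
        const CAmountSketch total_unspendable_amount = b.Total();  // assumed invariant: sum of the buckets
        assert(total_unspendable_amount ==
               b.genesis_block + b.bip30 + b.scripts + b.unclaimed_rewards);
    }
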
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 183b5a5d91..b46ad0333e 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -334,6 +334,7 @@ bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLock<Rec
REVERSE_LOCK(lock);
if (!ReadBlockFromDisk(*block.m_data, index, Params().GetConsensus())) block.m_data->SetNull();
}
+ block.found = true;
return true;
}
@@ -660,6 +661,14 @@ public:
RPCRunLater(name, std::move(fn), seconds);
}
int rpcSerializationFlags() override { return RPCSerializationFlags(); }
+ util::SettingsValue getSetting(const std::string& name) override
+ {
+ return gArgs.GetSetting(name);
+ }
+ std::vector<util::SettingsValue> getSettingsList(const std::string& name) override
+ {
+ return gArgs.GetSettingsList(name);
+ }
util::SettingsValue getRwSetting(const std::string& name) override
{
util::SettingsValue result;
@@ -670,7 +679,7 @@ public:
});
return result;
}
- bool updateRwSetting(const std::string& name, const util::SettingsValue& value) override
+ bool updateRwSetting(const std::string& name, const util::SettingsValue& value, bool write) override
{
gArgs.LockSettings([&](util::Settings& settings) {
if (value.isNull()) {
@@ -679,7 +688,7 @@ public:
settings.rw_settings[name] = value;
}
});
- return gArgs.WriteSettingsFile();
+ return !write || gArgs.WriteSettingsFile();
}
void requestMempoolTransactions(Notifications& notifications) override
{
diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp
index f21b390915..2a7bcc057f 100644
--- a/src/node/transaction.cpp
+++ b/src/node/transaction.cpp
@@ -4,9 +4,12 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <consensus/validation.h>
+#include <index/txindex.h>
#include <net.h>
#include <net_processing.h>
+#include <node/blockstorage.h>
#include <node/context.h>
+#include <txmempool.h>
#include <validation.h>
#include <validationinterface.h>
#include <node/transaction.h>
@@ -28,65 +31,83 @@ static TransactionError HandleATMPError(const TxValidationState& state, std::str
TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef tx, std::string& err_string, const CAmount& max_tx_fee, bool relay, bool wait_callback)
{
- // BroadcastTransaction can be called by either sendrawtransaction RPC or wallet RPCs.
- // node.peerman is assigned both before chain clients and before RPC server is accepting calls,
- // and reset after chain clients and RPC sever are stopped. node.peerman should never be null here.
- assert(node.peerman);
+ // BroadcastTransaction can be called by either sendrawtransaction RPC or the wallet.
+ // chainman, mempool and peerman are initialized before the RPC server and wallet are started
+ // and reset after the RPC sever and wallet are stopped.
+ // and reset after the RPC server and wallet are stopped.
+ assert(node.chainman);
assert(node.mempool);
+ assert(node.peerman);
+
std::promise<void> promise;
- uint256 hashTx = tx->GetHash();
+ uint256 txid = tx->GetHash();
+ uint256 wtxid = tx->GetWitnessHash();
bool callback_set = false;
- { // cs_main scope
- assert(node.chainman);
- LOCK(cs_main);
- // If the transaction is already confirmed in the chain, don't do anything
- // and return early.
- CCoinsViewCache &view = node.chainman->ActiveChainstate().CoinsTip();
- for (size_t o = 0; o < tx->vout.size(); o++) {
- const Coin& existingCoin = view.AccessCoin(COutPoint(hashTx, o));
- // IsSpent doesn't mean the coin is spent, it means the output doesn't exist.
- // So if the output does exist, then this transaction exists in the chain.
- if (!existingCoin.IsSpent()) return TransactionError::ALREADY_IN_CHAIN;
- }
- if (!node.mempool->exists(hashTx)) {
- // Transaction is not already in the mempool.
- if (max_tx_fee > 0) {
- // First, call ATMP with test_accept and check the fee. If ATMP
- // fails here, return error immediately.
+ {
+ LOCK(cs_main);
+
+ // If the transaction is already confirmed in the chain, don't do anything
+ // and return early.
+ CCoinsViewCache &view = node.chainman->ActiveChainstate().CoinsTip();
+ for (size_t o = 0; o < tx->vout.size(); o++) {
+ const Coin& existingCoin = view.AccessCoin(COutPoint(txid, o));
+ // IsSpent doesn't mean the coin is spent, it means the output doesn't exist.
+ // So if the output does exist, then this transaction exists in the chain.
+ if (!existingCoin.IsSpent()) return TransactionError::ALREADY_IN_CHAIN;
+ }
+
+ if (auto mempool_tx = node.mempool->get(txid); mempool_tx) {
+ // There's already a transaction in the mempool with this txid. Don't
+ // try to submit this transaction to the mempool (since it'll be
+ // rejected as a TX_CONFLICT), but do attempt to reannounce the mempool
+ // transaction if relay=true.
+ //
+ // The mempool transaction may have the same or different witness (and
+ // wtxid) as this transaction. Use the mempool's wtxid for reannouncement.
+ wtxid = mempool_tx->GetWitnessHash();
+ } else {
+ // Transaction is not already in the mempool.
+ if (max_tx_fee > 0) {
+ // First, call ATMP with test_accept and check the fee. If ATMP
+ // fails here, return error immediately.
+ const MempoolAcceptResult result = AcceptToMemoryPool(node.chainman->ActiveChainstate(), *node.mempool, tx, false /* bypass_limits */,
+ true /* test_accept */);
+ if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
+ return HandleATMPError(result.m_state, err_string);
+ } else if (result.m_base_fees.value() > max_tx_fee) {
+ return TransactionError::MAX_FEE_EXCEEDED;
+ }
+ }
+ // Try to submit the transaction to the mempool.
const MempoolAcceptResult result = AcceptToMemoryPool(node.chainman->ActiveChainstate(), *node.mempool, tx, false /* bypass_limits */,
- true /* test_accept */);
+ false /* test_accept */);
if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
return HandleATMPError(result.m_state, err_string);
- } else if (result.m_base_fees.value() > max_tx_fee) {
- return TransactionError::MAX_FEE_EXCEEDED;
}
- }
- // Try to submit the transaction to the mempool.
- const MempoolAcceptResult result = AcceptToMemoryPool(node.chainman->ActiveChainstate(), *node.mempool, tx, false /* bypass_limits */,
- false /* test_accept */);
- if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
- return HandleATMPError(result.m_state, err_string);
- }
- // Transaction was accepted to the mempool.
+ // Transaction was accepted to the mempool.
- if (wait_callback) {
- // For transactions broadcast from outside the wallet, make sure
- // that the wallet has been notified of the transaction before
- // continuing.
- //
- // This prevents a race where a user might call sendrawtransaction
- // with a transaction to/from their wallet, immediately call some
- // wallet RPC, and get a stale result because callbacks have not
- // yet been processed.
- CallFunctionInValidationInterfaceQueue([&promise] {
- promise.set_value();
- });
- callback_set = true;
- }
- }
+ if (relay) {
+ // the mempool tracks locally submitted transactions to make a
+ // best-effort of initial broadcast
+ node.mempool->AddUnbroadcastTx(txid);
+ }
+ if (wait_callback) {
+ // For transactions broadcast from outside the wallet, make sure
+ // that the wallet has been notified of the transaction before
+ // continuing.
+ //
+ // This prevents a race where a user might call sendrawtransaction
+ // with a transaction to/from their wallet, immediately call some
+ // wallet RPC, and get a stale result because callbacks have not
+ // yet been processed.
+ CallFunctionInValidationInterfaceQueue([&promise] {
+ promise.set_value();
+ });
+ callback_set = true;
+ }
+ }
} // cs_main
if (callback_set) {
@@ -96,11 +117,41 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t
}
if (relay) {
- // the mempool tracks locally submitted transactions to make a
- // best-effort of initial broadcast
- node.mempool->AddUnbroadcastTx(hashTx);
- node.peerman->RelayTransaction(hashTx, tx->GetWitnessHash());
+ node.peerman->RelayTransaction(txid, wtxid);
}
return TransactionError::OK;
}
+
+CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
+{
+ if (mempool && !block_index) {
+ CTransactionRef ptx = mempool->get(hash);
+ if (ptx) return ptx;
+ }
+ if (g_txindex) {
+ CTransactionRef tx;
+ uint256 block_hash;
+ if (g_txindex->FindTx(hash, block_hash, tx)) {
+ if (!block_index || block_index->GetBlockHash() == block_hash) {
+ // Don't return the transaction if the provided block hash doesn't match.
+ // The case where a transaction appears in multiple blocks (e.g. reorgs or
+ // BIP30) is handled by the block lookup below.
+ hashBlock = block_hash;
+ return tx;
+ }
+ }
+ }
+ if (block_index) {
+ CBlock block;
+ if (ReadBlockFromDisk(block, block_index, consensusParams)) {
+ for (const auto& tx : block.vtx) {
+ if (tx->GetHash() == hash) {
+ hashBlock = block_index->GetBlockHash();
+ return tx;
+ }
+ }
+ }
+ }
+ return nullptr;
+}
diff --git a/src/node/transaction.h b/src/node/transaction.h
index 0c016ff04e..aed519cf7f 100644
--- a/src/node/transaction.h
+++ b/src/node/transaction.h
@@ -10,7 +10,12 @@
#include <primitives/transaction.h>
#include <util/error.h>
+class CBlockIndex;
+class CTxMemPool;
struct NodeContext;
+namespace Consensus {
+struct Params;
+}
/** Maximum fee rate for sendrawtransaction and testmempoolaccept RPC calls.
* Also used by the GUI when broadcasting a completed PSBT.
@@ -38,4 +43,19 @@ static const CFeeRate DEFAULT_MAX_RAW_TX_FEE_RATE{COIN / 10};
*/
[[nodiscard]] TransactionError BroadcastTransaction(NodeContext& node, CTransactionRef tx, std::string& err_string, const CAmount& max_tx_fee, bool relay, bool wait_callback);
+/**
+ * Return the transaction with the given hash.
+ * If mempool is provided and block_index is not provided, check the mempool first for the tx.
+ * If -txindex is available, check it next for the tx.
+ * Finally, if block_index is provided, check for the tx by reading the entire block from disk.
+ *
+ * @param[in] block_index The block to read from disk, or nullptr
+ * @param[in] mempool If provided, check mempool for tx
+ * @param[in] hash The txid
+ * @param[in] consensusParams The consensus parameters
+ * @param[out] hashBlock The block hash, if the tx was found via -txindex or block_index
+ * @returns The tx if found, otherwise nullptr
+ */
+CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock);
+
#endif // BITCOIN_NODE_TRANSACTION_H
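A hedged usage sketch of the GetTransaction() helper declared above, following the documented lookup order (mempool, then -txindex, then the block on disk). The wrapper function and its parameter set are illustrative, not part of the patch:

#include <node/transaction.h>
#include <uint256.h>

CTransactionRef LookupTxSketch(const CTxMemPool* mempool,
                               const CBlockIndex* block_index,
                               const uint256& txid,
                               const Consensus::Params& consensus_params)
{
    uint256 hash_block;
    CTransactionRef tx = GetTransaction(block_index, mempool, txid, consensus_params, hash_block);
    if (tx && !hash_block.IsNull()) {
        // The tx was found via -txindex or by reading block_index from disk;
        // hash_block identifies the containing block.
    }
    return tx;
}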
diff --git a/src/outputtype.cpp b/src/outputtype.cpp
index 8ede7b9974..b5f1df9792 100644
--- a/src/outputtype.cpp
+++ b/src/outputtype.cpp
@@ -13,6 +13,7 @@
#include <util/vector.h>
#include <assert.h>
+#include <optional>
#include <string>
static const std::string OUTPUT_TYPE_STRING_LEGACY = "legacy";
@@ -20,22 +21,18 @@ static const std::string OUTPUT_TYPE_STRING_P2SH_SEGWIT = "p2sh-segwit";
static const std::string OUTPUT_TYPE_STRING_BECH32 = "bech32";
static const std::string OUTPUT_TYPE_STRING_BECH32M = "bech32m";
-bool ParseOutputType(const std::string& type, OutputType& output_type)
+std::optional<OutputType> ParseOutputType(const std::string& type)
{
if (type == OUTPUT_TYPE_STRING_LEGACY) {
- output_type = OutputType::LEGACY;
- return true;
+ return OutputType::LEGACY;
} else if (type == OUTPUT_TYPE_STRING_P2SH_SEGWIT) {
- output_type = OutputType::P2SH_SEGWIT;
- return true;
+ return OutputType::P2SH_SEGWIT;
} else if (type == OUTPUT_TYPE_STRING_BECH32) {
- output_type = OutputType::BECH32;
- return true;
+ return OutputType::BECH32;
} else if (type == OUTPUT_TYPE_STRING_BECH32M) {
- output_type = OutputType::BECH32M;
- return true;
+ return OutputType::BECH32M;
}
- return false;
+ return std::nullopt;
}
const std::string& FormatOutputType(OutputType type)
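Call sites move from the bool-plus-out-parameter form to the std::optional return value. A minimal sketch of the updated call-site pattern, assuming an argument-parsing context; the wrapper name and error text are illustrative:

#include <outputtype.h>

#include <optional>
#include <stdexcept>
#include <string>

OutputType ParseOrThrowSketch(const std::string& type_string)
{
    // Previously: OutputType out; if (!ParseOutputType(type_string, out)) { /* error */ }
    const std::optional<OutputType> parsed = ParseOutputType(type_string);
    if (!parsed) {
        throw std::runtime_error("Unknown address type '" + type_string + "'");
    }
    return *parsed;
}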
diff --git a/src/outputtype.h b/src/outputtype.h
index 2b83235cd0..0de7689125 100644
--- a/src/outputtype.h
+++ b/src/outputtype.h
@@ -11,6 +11,7 @@
#include <script/standard.h>
#include <array>
+#include <optional>
#include <string>
#include <vector>
@@ -28,7 +29,7 @@ static constexpr auto OUTPUT_TYPES = std::array{
OutputType::BECH32M,
};
-[[nodiscard]] bool ParseOutputType(const std::string& str, OutputType& output_type);
+std::optional<OutputType> ParseOutputType(const std::string& str);
const std::string& FormatOutputType(OutputType type);
/**
diff --git a/src/policy/rbf.cpp b/src/policy/rbf.cpp
index 8125b41c41..43624c7993 100644
--- a/src/policy/rbf.cpp
+++ b/src/policy/rbf.cpp
@@ -3,6 +3,10 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <policy/rbf.h>
+
+#include <policy/settings.h>
+#include <tinyformat.h>
+#include <util/moneystr.h>
#include <util/rbf.h>
RBFTransactionState IsRBFOptIn(const CTransaction& tx, const CTxMemPool& pool)
@@ -42,3 +46,34 @@ RBFTransactionState IsRBFOptInEmptyMempool(const CTransaction& tx)
// If we don't have a local mempool we can only check the transaction itself.
return SignalsOptInRBF(tx) ? RBFTransactionState::REPLACEABLE_BIP125 : RBFTransactionState::UNKNOWN;
}
+
+bool GetEntriesForConflicts(const CTransaction& tx,
+ CTxMemPool& m_pool,
+ const CTxMemPool::setEntries& setIterConflicting,
+ CTxMemPool::setEntries& allConflicting,
+ std::string& err_string)
+{
+ AssertLockHeld(m_pool.cs);
+ const uint256 hash = tx.GetHash();
+ uint64_t nConflictingCount = 0;
+ for (const auto& mi : setIterConflicting) {
+ nConflictingCount += mi->GetCountWithDescendants();
+ // This potentially overestimates the number of actual descendants
+ // but we just want to be conservative to avoid doing too much
+ // work.
+ if (nConflictingCount > MAX_BIP125_REPLACEMENT_CANDIDATES) {
+ err_string = strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
+ hash.ToString(),
+ nConflictingCount,
+ MAX_BIP125_REPLACEMENT_CANDIDATES);
+ return false;
+ }
+ }
+ // If not too many to replace, then calculate the set of
+ // transactions that would have to be evicted
+ for (CTxMemPool::txiter it : setIterConflicting) {
+ m_pool.CalculateDescendants(it, allConflicting);
+ }
+ return true;
+}
+
diff --git a/src/policy/rbf.h b/src/policy/rbf.h
index e078070c1c..a67e9058df 100644
--- a/src/policy/rbf.h
+++ b/src/policy/rbf.h
@@ -7,6 +7,10 @@
#include <txmempool.h>
+/** Maximum number of transactions that can be replaced by BIP125 RBF (Rule #5). This includes all
+ * mempool conflicts and their descendants. */
+static constexpr uint32_t MAX_BIP125_REPLACEMENT_CANDIDATES{100};
+
/** The rbf state of unconfirmed transactions */
enum class RBFTransactionState {
/** Unconfirmed tx that does not signal rbf and is not in the mempool */
@@ -31,4 +35,19 @@ enum class RBFTransactionState {
RBFTransactionState IsRBFOptIn(const CTransaction& tx, const CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(pool.cs);
RBFTransactionState IsRBFOptInEmptyMempool(const CTransaction& tx);
+/** Get all descendants of setIterConflicting. Also enforce BIP125 Rule #5, "The number of original
+ * transactions to be replaced and their descendant transactions which will be evicted from the
+ * mempool must not exceed a total of 100 transactions." Quit as early as possible. There cannot be
+ * more than MAX_BIP125_REPLACEMENT_CANDIDATES potential entries.
+ * @param[in] setIterConflicting The set of iterators to mempool entries.
+ * @param[out] err_string Used to return errors, if any.
+ * @param[out] allConflicting Populated with all the mempool entries that would be replaced,
+ * which includes descendants of setIterConflicting. Not cleared at
+ * the start; any existing mempool entries will remain in the set.
+ * @returns false if Rule 5 is broken.
+ */
+bool GetEntriesForConflicts(const CTransaction& tx, CTxMemPool& m_pool,
+ const CTxMemPool::setEntries& setIterConflicting,
+ CTxMemPool::setEntries& allConflicting,
+ std::string& err_string) EXCLUSIVE_LOCKS_REQUIRED(m_pool.cs);
#endif // BITCOIN_POLICY_RBF_H
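A hedged sketch of how a caller might use GetEntriesForConflicts() while holding the mempool lock; computing setIterConflicting (the direct mempool conflicts of the replacement) is assumed to happen elsewhere, and the wrapper itself is illustrative:

#include <policy/rbf.h>
#include <txmempool.h>

#include <string>

bool CheckRule5Sketch(const CTransaction& tx, CTxMemPool& pool,
                      const CTxMemPool::setEntries& setIterConflicting,
                      std::string& err_string) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
{
    CTxMemPool::setEntries all_conflicting;
    if (!GetEntriesForConflicts(tx, pool, setIterConflicting, all_conflicting, err_string)) {
        // Rule #5 violated: more than MAX_BIP125_REPLACEMENT_CANDIDATES entries would be evicted.
        return false;
    }
    // all_conflicting now holds every mempool entry the replacement would evict.
    return true;
}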
diff --git a/src/protocol.h b/src/protocol.h
index f9248899dc..2149e45993 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -396,7 +396,6 @@ public:
// ambiguous what that would mean. Make sure no code relying on that is introduced:
assert(!(s.GetType() & SER_GETHASH));
bool use_v2;
- bool store_time;
if (s.GetType() & SER_DISK) {
// In the disk serialization format, the encoding (v1 or v2) is determined by a flag version
// that's part of the serialization itself. ADDRV2_FORMAT in the stream version only determines
@@ -413,24 +412,16 @@ public:
} else {
throw std::ios_base::failure("Unsupported CAddress disk format version");
}
- store_time = true;
} else {
// In the network serialization format, the encoding (v1 or v2) is determined directly by
// the value of ADDRV2_FORMAT in the stream version, as no explicitly encoded version
// exists in the stream.
assert(s.GetType() & SER_NETWORK);
use_v2 = s.GetVersion() & ADDRV2_FORMAT;
- // The only time we serialize a CAddress object without nTime is in
- // the initial VERSION messages which contain two CAddress records.
- // At that point, the serialization version is INIT_PROTO_VERSION.
- // After the version handshake, serialization version is >=
- // MIN_PEER_PROTO_VERSION and all ADDR messages are serialized with
- // nTime.
- store_time = s.GetVersion() != INIT_PROTO_VERSION;
}
SER_READ(obj, obj.nTime = TIME_INIT);
- if (store_time) READWRITE(obj.nTime);
+ READWRITE(obj.nTime);
// nServices is serialized as CompactSize in V2; as uint64_t in V1.
if (use_v2) {
uint64_t services_tmp;
@@ -445,7 +436,7 @@ public:
SerReadWriteMany(os, ser_action, ReadWriteAsHelper<CService>(obj));
}
- //! Always included in serialization, except in the network format on INIT_PROTO_VERSION.
+ //! Always included in serialization.
uint32_t nTime{TIME_INIT};
//! Serialized as uint64_t in V1, and as CompactSize in V2.
ServiceFlags nServices{NODE_NONE};
diff --git a/src/pubkey.cpp b/src/pubkey.cpp
index 175a39b805..d14a20b870 100644
--- a/src/pubkey.cpp
+++ b/src/pubkey.cpp
@@ -180,6 +180,23 @@ XOnlyPubKey::XOnlyPubKey(Span<const unsigned char> bytes)
std::copy(bytes.begin(), bytes.end(), m_keydata.begin());
}
+std::vector<CKeyID> XOnlyPubKey::GetKeyIDs() const
+{
+ std::vector<CKeyID> out;
+ // For now, use the old full-pubkey-based key derivation logic. As keys are indexed by
+ // Hash160(full pubkey), we need to return both a version prefixed with 0x02 and one
+ // with 0x03.
+ unsigned char b[33] = {0x02};
+ std::copy(m_keydata.begin(), m_keydata.end(), b + 1);
+ CPubKey fullpubkey;
+ fullpubkey.Set(b, b + 33);
+ out.push_back(fullpubkey.GetID());
+ b[0] = 0x03;
+ fullpubkey.Set(b, b + 33);
+ out.push_back(fullpubkey.GetID());
+ return out;
+}
+
bool XOnlyPubKey::IsFullyValid() const
{
secp256k1_xonly_pubkey pubkey;
@@ -191,7 +208,7 @@ bool XOnlyPubKey::VerifySchnorr(const uint256& msg, Span<const unsigned char> si
assert(sigbytes.size() == 64);
secp256k1_xonly_pubkey pubkey;
if (!secp256k1_xonly_pubkey_parse(secp256k1_context_verify, &pubkey, m_keydata.data())) return false;
- return secp256k1_schnorrsig_verify(secp256k1_context_verify, sigbytes.data(), msg.begin(), &pubkey);
+ return secp256k1_schnorrsig_verify(secp256k1_context_verify, sigbytes.data(), msg.begin(), 32, &pubkey);
}
static const CHashWriter HASHER_TAPTWEAK = TaggedHash("TapTweak");
@@ -333,6 +350,7 @@ void CExtPubKey::Decode(const unsigned char code[BIP32_EXTKEY_SIZE]) {
nChild = (code[5] << 24) | (code[6] << 16) | (code[7] << 8) | code[8];
memcpy(chaincode.begin(), code+9, 32);
pubkey.Set(code+41, code+BIP32_EXTKEY_SIZE);
+ if ((nDepth == 0 && (nChild != 0 || ReadLE32(vchFingerprint) != 0)) || !pubkey.IsFullyValid()) pubkey = CPubKey();
}
bool CExtPubKey::Derive(CExtPubKey &out, unsigned int _nChild) const {
diff --git a/src/pubkey.h b/src/pubkey.h
index eec34a89c2..861a2cf500 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -267,6 +267,11 @@ public:
/** Construct a Taproot tweaked output point with this point as internal key. */
std::optional<std::pair<XOnlyPubKey, bool>> CreateTapTweak(const uint256* merkle_root) const;
+ /** Returns a list of CKeyIDs for the CPubKeys that could have been used to create this XOnlyPubKey.
+ * This is needed for key lookups since keys are indexed by CKeyID.
+ */
+ std::vector<CKeyID> GetKeyIDs() const;
+
const unsigned char& operator[](int pos) const { return *(m_keydata.begin() + pos); }
const unsigned char* data() const { return m_keydata.begin(); }
static constexpr size_t size() { return decltype(m_keydata)::size(); }
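A hedged sketch of the lookup pattern GetKeyIDs() enables: an x-only key maps to two candidate full pubkeys (0x02 and 0x03 prefixes), so a CKeyID-indexed store is probed with both. The predicate parameter stands in for whatever key store the caller has and is not an API from the codebase:

#include <pubkey.h>

#include <optional>

std::optional<CKeyID> FindKeyIDSketch(const XOnlyPubKey& xonly,
                                      bool (*have_key)(const CKeyID&))
{
    for (const CKeyID& keyid : xonly.GetKeyIDs()) {
        if (have_key(keyid)) return keyid; // first candidate present in the store
    }
    return std::nullopt;
}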
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 442c813a5a..f6ea147ddb 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -7,12 +7,19 @@
#endif
#include <qt/bitcoin.h>
-#include <qt/bitcoingui.h>
#include <chainparams.h>
+#include <init.h>
+#include <interfaces/handler.h>
+#include <interfaces/node.h>
+#include <node/context.h>
+#include <node/ui_interface.h>
+#include <noui.h>
+#include <qt/bitcoingui.h>
#include <qt/clientmodel.h>
#include <qt/guiconstants.h>
#include <qt/guiutil.h>
+#include <qt/initexecutor.h>
#include <qt/intro.h>
#include <qt/networkstyle.h>
#include <qt/optionsmodel.h>
@@ -20,6 +27,12 @@
#include <qt/splashscreen.h>
#include <qt/utilitydialog.h>
#include <qt/winshutdownmonitor.h>
+#include <uint256.h>
+#include <util/string.h>
+#include <util/system.h>
+#include <util/threadnames.h>
+#include <util/translation.h>
+#include <validation.h>
#ifdef ENABLE_WALLET
#include <qt/paymentserver.h>
@@ -27,18 +40,6 @@
#include <qt/walletmodel.h>
#endif // ENABLE_WALLET
-#include <init.h>
-#include <interfaces/handler.h>
-#include <interfaces/node.h>
-#include <node/context.h>
-#include <node/ui_interface.h>
-#include <noui.h>
-#include <uint256.h>
-#include <util/system.h>
-#include <util/threadnames.h>
-#include <util/translation.h>
-#include <validation.h>
-
#include <boost/signals2/connection.hpp>
#include <memory>
@@ -144,56 +145,61 @@ static void initTranslations(QTranslator &qtTranslatorBase, QTranslator &qtTrans
QApplication::installTranslator(&translator);
}
-/* qDebug() message handler --> debug.log */
-void DebugMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString &msg)
+static bool InitSettings()
{
- Q_UNUSED(context);
- if (type == QtDebugMsg) {
- LogPrint(BCLog::QT, "GUI: %s\n", msg.toStdString());
- } else {
- LogPrintf("GUI: %s\n", msg.toStdString());
+ if (!gArgs.GetSettingsPath()) {
+ return true; // Do nothing if settings file disabled.
}
-}
-
-BitcoinCore::BitcoinCore(interfaces::Node& node) :
- QObject(), m_node(node)
-{
-}
-void BitcoinCore::handleRunawayException(const std::exception *e)
-{
- PrintExceptionContinue(e, "Runaway exception");
- Q_EMIT runawayException(QString::fromStdString(m_node.getWarnings().translated));
-}
+ std::vector<std::string> errors;
+ if (!gArgs.ReadSettingsFile(&errors)) {
+ bilingual_str error = _("Settings file could not be read");
+ InitError(Untranslated(strprintf("%s:\n%s\n", error.original, MakeUnorderedList(errors))));
+
+ QMessageBox messagebox(QMessageBox::Critical, PACKAGE_NAME, QString::fromStdString(strprintf("%s.", error.translated)), QMessageBox::Reset | QMessageBox::Abort);
+ /*: Explanatory text shown on startup when the settings file cannot be read.
+ Prompts user to make a choice between resetting or aborting. */
+ messagebox.setInformativeText(QObject::tr("Do you want to reset settings to default values, or to abort without making changes?"));
+ messagebox.setDetailedText(QString::fromStdString(MakeUnorderedList(errors)));
+ messagebox.setTextFormat(Qt::PlainText);
+ messagebox.setDefaultButton(QMessageBox::Reset);
+ switch (messagebox.exec()) {
+ case QMessageBox::Reset:
+ break;
+ case QMessageBox::Abort:
+ return false;
+ default:
+ assert(false);
+ }
+ }
-void BitcoinCore::initialize()
-{
- try
- {
- util::ThreadRename("qt-init");
- qDebug() << __func__ << ": Running initialization in thread";
- interfaces::BlockAndHeaderTipInfo tip_info;
- bool rv = m_node.appInitMain(&tip_info);
- Q_EMIT initializeResult(rv, tip_info);
- } catch (const std::exception& e) {
- handleRunawayException(&e);
- } catch (...) {
- handleRunawayException(nullptr);
+ errors.clear();
+ if (!gArgs.WriteSettingsFile(&errors)) {
+ bilingual_str error = _("Settings file could not be written");
+ InitError(Untranslated(strprintf("%s:\n%s\n", error.original, MakeUnorderedList(errors))));
+
+ QMessageBox messagebox(QMessageBox::Critical, PACKAGE_NAME, QString::fromStdString(strprintf("%s.", error.translated)), QMessageBox::Ok);
+ /*: Explanatory text shown on startup when the settings file could not be written.
+ Prompts user to check that we have the ability to write to the file.
+ Explains that the user has the option of running without a settings file. */
+ messagebox.setInformativeText(QObject::tr("A fatal error occurred. Check that settings file is writable, or try running with -nosettings."));
+ messagebox.setDetailedText(QString::fromStdString(MakeUnorderedList(errors)));
+ messagebox.setTextFormat(Qt::PlainText);
+ messagebox.setDefaultButton(QMessageBox::Ok);
+ messagebox.exec();
+ return false;
}
+ return true;
}
-void BitcoinCore::shutdown()
+/* qDebug() message handler --> debug.log */
+void DebugMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString &msg)
{
- try
- {
- qDebug() << __func__ << ": Running Shutdown in thread";
- m_node.appShutdown();
- qDebug() << __func__ << ": Shutdown finished";
- Q_EMIT shutdownResult();
- } catch (const std::exception& e) {
- handleRunawayException(&e);
- } catch (...) {
- handleRunawayException(nullptr);
+ Q_UNUSED(context);
+ if (type == QtDebugMsg) {
+ LogPrint(BCLog::QT, "GUI: %s\n", msg.toStdString());
+ } else {
+ LogPrintf("GUI: %s\n", msg.toStdString());
}
}
@@ -202,7 +208,6 @@ static const char* qt_argv = "bitcoin-qt";
BitcoinApplication::BitcoinApplication():
QApplication(qt_argc, const_cast<char **>(&qt_argv)),
- coreThread(nullptr),
optionsModel(nullptr),
clientModel(nullptr),
window(nullptr),
@@ -230,13 +235,7 @@ void BitcoinApplication::setupPlatformStyle()
BitcoinApplication::~BitcoinApplication()
{
- if(coreThread)
- {
- qDebug() << __func__ << ": Stopping thread";
- coreThread->quit();
- coreThread->wait();
- qDebug() << __func__ << ": Stopped thread";
- }
+ m_executor.reset();
delete window;
window = nullptr;
@@ -291,22 +290,15 @@ bool BitcoinApplication::baseInitialize()
void BitcoinApplication::startThread()
{
- if(coreThread)
- return;
- coreThread = new QThread(this);
- BitcoinCore *executor = new BitcoinCore(node());
- executor->moveToThread(coreThread);
+ assert(!m_executor);
+ m_executor.emplace(node());
/* communication to and from thread */
- connect(executor, &BitcoinCore::initializeResult, this, &BitcoinApplication::initializeResult);
- connect(executor, &BitcoinCore::shutdownResult, this, &BitcoinApplication::shutdownResult);
- connect(executor, &BitcoinCore::runawayException, this, &BitcoinApplication::handleRunawayException);
- connect(this, &BitcoinApplication::requestedInitialize, executor, &BitcoinCore::initialize);
- connect(this, &BitcoinApplication::requestedShutdown, executor, &BitcoinCore::shutdown);
- /* make sure executor object is deleted in its own thread */
- connect(coreThread, &QThread::finished, executor, &QObject::deleteLater);
-
- coreThread->start();
+ connect(&m_executor.value(), &InitExecutor::initializeResult, this, &BitcoinApplication::initializeResult);
+ connect(&m_executor.value(), &InitExecutor::shutdownResult, this, &BitcoinApplication::shutdownResult);
+ connect(&m_executor.value(), &InitExecutor::runawayException, this, &BitcoinApplication::handleRunawayException);
+ connect(this, &BitcoinApplication::requestedInitialize, &m_executor.value(), &InitExecutor::initialize);
+ connect(this, &BitcoinApplication::requestedShutdown, &m_executor.value(), &InitExecutor::shutdown);
}
void BitcoinApplication::parameterSetup()
@@ -339,7 +331,6 @@ void BitcoinApplication::requestShutdown()
shutdownWindow.reset(ShutdownWindow::showShutdownWindow(window));
qDebug() << __func__ << ": Requesting shutdown";
- startThread();
window->hide();
// Must disconnect node signals otherwise current thread can deadlock since
// no event loop is running.
@@ -352,6 +343,17 @@ void BitcoinApplication::requestShutdown()
window->setClientModel(nullptr);
pollShutdownTimer->stop();
+#ifdef ENABLE_WALLET
+ // Delete wallet controller here manually, instead of relying on Qt object
+ // tracking (https://doc.qt.io/qt-5/objecttrees.html). This makes sure
+ // walletmodel m_handle_* notification handlers are deleted before wallets
+ // are unloaded, which can simplify wallet implementations. It also avoids
+ // these notifications having to be handled while GUI objects are being
+ // destroyed, making GUI code less fragile as well.
+ delete m_wallet_controller;
+ m_wallet_controller = nullptr;
+#endif // ENABLE_WALLET
+
delete clientModel;
clientModel = nullptr;
@@ -569,9 +571,8 @@ int GuiMain(int argc, char* argv[])
// Parse URIs on command line -- this can affect Params()
PaymentServer::ipcParseCommandLine(argc, argv);
#endif
- if (!gArgs.InitSettings(error)) {
- InitError(Untranslated(error));
- QMessageBox::critical(nullptr, PACKAGE_NAME, QObject::tr("Error initializing settings: %1").arg(QString::fromStdString(error)));
+
+ if (!InitSettings()) {
return EXIT_FAILURE;
}
diff --git a/src/qt/bitcoin.h b/src/qt/bitcoin.h
index f9fab0534b..ed2f26b7f3 100644
--- a/src/qt/bitcoin.h
+++ b/src/qt/bitcoin.h
@@ -9,11 +9,14 @@
#include <config/bitcoin-config.h>
#endif
-#include <QApplication>
+#include <interfaces/node.h>
+#include <qt/initexecutor.h>
+
#include <assert.h>
#include <memory>
+#include <optional>
-#include <interfaces/node.h>
+#include <QApplication>
class BitcoinGUI;
class ClientModel;
@@ -26,31 +29,6 @@ class WalletController;
class WalletModel;
-/** Class encapsulating Bitcoin Core startup and shutdown.
- * Allows running startup and shutdown in a different thread from the UI thread.
- */
-class BitcoinCore: public QObject
-{
- Q_OBJECT
-public:
- explicit BitcoinCore(interfaces::Node& node);
-
-public Q_SLOTS:
- void initialize();
- void shutdown();
-
-Q_SIGNALS:
- void initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info);
- void shutdownResult();
- void runawayException(const QString &message);
-
-private:
- /// Pass fatal exception message to UI thread
- void handleRunawayException(const std::exception *e);
-
- interfaces::Node& m_node;
-};
-
/** Main Bitcoin application object */
class BitcoinApplication: public QApplication
{
@@ -112,7 +90,7 @@ Q_SIGNALS:
void windowShown(BitcoinGUI* window);
private:
- QThread *coreThread;
+ std::optional<InitExecutor> m_executor;
OptionsModel *optionsModel;
ClientModel *clientModel;
BitcoinGUI *window;
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index f8aeb01659..862bdd3bfe 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -110,6 +110,10 @@ BitcoinGUI::BitcoinGUI(interfaces::Node& node, const PlatformStyle *_platformSty
connect(activity, &CreateWalletActivity::finished, activity, &QObject::deleteLater);
activity->create();
});
+ connect(walletFrame, &WalletFrame::message, [this](const QString& title, const QString& message, unsigned int style) {
+ this->message(title, message, style);
+ });
+ connect(walletFrame, &WalletFrame::currentWalletSet, [this] { updateWalletStatus(); });
setCentralWidget(walletFrame);
} else
#endif // ENABLE_WALLET
@@ -326,7 +330,7 @@ void BitcoinGUI::createActions()
verifyMessageAction->setStatusTip(tr("Verify messages to ensure they were signed with specified Bitcoin addresses"));
m_load_psbt_action = new QAction(tr("&Load PSBT from file…"), this);
m_load_psbt_action->setStatusTip(tr("Load Partially Signed Bitcoin Transaction"));
- m_load_psbt_clipboard_action = new QAction(tr("Load PSBT from clipboard…"), this);
+ m_load_psbt_clipboard_action = new QAction(tr("Load PSBT from &clipboard…"), this);
m_load_psbt_clipboard_action->setStatusTip(tr("Load Partially Signed Bitcoin Transaction from clipboard"));
openRPCConsoleAction = new QAction(tr("Node window"), this);
@@ -483,7 +487,7 @@ void BitcoinGUI::createMenuBar()
QMenu* window_menu = appMenuBar->addMenu(tr("&Window"));
- QAction* minimize_action = window_menu->addAction(tr("Minimize"));
+ QAction* minimize_action = window_menu->addAction(tr("&Minimize"));
minimize_action->setShortcut(QKeySequence(Qt::CTRL + Qt::Key_M));
connect(minimize_action, &QAction::triggered, [] {
QApplication::activeWindow()->showMinimized();
@@ -591,8 +595,8 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel, interfaces::BlockAndH
connect(_clientModel, &ClientModel::numConnectionsChanged, this, &BitcoinGUI::setNumConnections);
connect(_clientModel, &ClientModel::networkActiveChanged, this, &BitcoinGUI::setNetworkActive);
- modalOverlay->setKnownBestHeight(tip_info->header_height, QDateTime::fromTime_t(tip_info->header_time));
- setNumBlocks(tip_info->block_height, QDateTime::fromTime_t(tip_info->block_time), tip_info->verification_progress, false, SynchronizationState::INIT_DOWNLOAD);
+ modalOverlay->setKnownBestHeight(tip_info->header_height, QDateTime::fromSecsSinceEpoch(tip_info->header_time));
+ setNumBlocks(tip_info->block_height, QDateTime::fromSecsSinceEpoch(tip_info->block_time), tip_info->verification_progress, false, SynchronizationState::INIT_DOWNLOAD);
connect(_clientModel, &ClientModel::numBlocksChanged, this, &BitcoinGUI::setNumBlocks);
// Receive and report messages from client model
@@ -672,8 +676,8 @@ void BitcoinGUI::addWallet(WalletModel* walletModel)
{
if (!walletFrame) return;
- WalletView* wallet_view = new WalletView(platformStyle, walletFrame);
- if (!walletFrame->addWallet(walletModel, wallet_view)) return;
+ WalletView* wallet_view = new WalletView(walletModel, platformStyle, walletFrame);
+ if (!walletFrame->addView(wallet_view)) return;
rpcConsole->addWallet(walletModel);
if (m_wallet_selector->count() == 0) {
@@ -682,8 +686,6 @@ void BitcoinGUI::addWallet(WalletModel* walletModel)
m_wallet_selector_label_action->setVisible(true);
m_wallet_selector_action->setVisible(true);
}
- const QString display_name = walletModel->getDisplayName();
- m_wallet_selector->addItem(display_name, QVariant::fromValue(walletModel));
connect(wallet_view, &WalletView::outOfSyncWarningClicked, this, &BitcoinGUI::showModalOverlay);
connect(wallet_view, &WalletView::transactionClicked, this, &BitcoinGUI::gotoHistoryPage);
@@ -693,9 +695,10 @@ void BitcoinGUI::addWallet(WalletModel* walletModel)
});
connect(wallet_view, &WalletView::encryptionStatusChanged, this, &BitcoinGUI::updateWalletStatus);
connect(wallet_view, &WalletView::incomingTransaction, this, &BitcoinGUI::incomingTransaction);
- connect(wallet_view, &WalletView::hdEnabledStatusChanged, this, &BitcoinGUI::updateWalletStatus);
connect(this, &BitcoinGUI::setPrivacy, wallet_view, &WalletView::setPrivacy);
wallet_view->setPrivacy(isPrivacyModeActivated());
+ const QString display_name = walletModel->getDisplayName();
+ m_wallet_selector->addItem(display_name, QVariant::fromValue(walletModel));
}
void BitcoinGUI::removeWallet(WalletModel* walletModel)
@@ -1337,9 +1340,8 @@ void BitcoinGUI::setEncryptionStatus(int status)
void BitcoinGUI::updateWalletStatus()
{
- if (!walletFrame) {
- return;
- }
+ assert(walletFrame);
+
WalletView * const walletView = walletFrame->currentWalletView();
if (!walletView) {
return;
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index bb2073b9fe..c86cb16af6 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -216,7 +216,7 @@ bool ClientModel::isReleaseVersion() const
QString ClientModel::formatClientStartupTime() const
{
- return QDateTime::fromTime_t(GetStartupTime()).toString();
+ return QDateTime::fromSecsSinceEpoch(GetStartupTime()).toString();
}
QString ClientModel::dataDir() const
@@ -294,7 +294,7 @@ static void BlockTipChanged(ClientModel* clientmodel, SynchronizationState sync_
bool invoked = QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection,
Q_ARG(int, tip.block_height),
- Q_ARG(QDateTime, QDateTime::fromTime_t(tip.block_time)),
+ Q_ARG(QDateTime, QDateTime::fromSecsSinceEpoch(tip.block_time)),
Q_ARG(double, verificationProgress),
Q_ARG(bool, fHeader),
Q_ARG(SynchronizationState, sync_state));
diff --git a/src/qt/createwalletdialog.cpp b/src/qt/createwalletdialog.cpp
index dc24bbc6a6..f9a61c3e60 100644
--- a/src/qt/createwalletdialog.cpp
+++ b/src/qt/createwalletdialog.cpp
@@ -32,7 +32,7 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) :
// set to true, enable it when isEncryptWalletChecked is false.
ui->disable_privkeys_checkbox->setEnabled(!checked);
#ifdef ENABLE_EXTERNAL_SIGNER
- ui->external_signer_checkbox->setEnabled(!checked);
+ ui->external_signer_checkbox->setEnabled(m_has_signers && !checked);
#endif
// When the disable_privkeys_checkbox is disabled, uncheck it.
if (!ui->disable_privkeys_checkbox->isEnabled()) {
@@ -115,7 +115,8 @@ CreateWalletDialog::~CreateWalletDialog()
void CreateWalletDialog::setSigners(const std::vector<ExternalSigner>& signers)
{
- if (!signers.empty()) {
+ m_has_signers = !signers.empty();
+ if (m_has_signers) {
ui->external_signer_checkbox->setEnabled(true);
ui->external_signer_checkbox->setChecked(true);
ui->encrypt_wallet_checkbox->setEnabled(false);
diff --git a/src/qt/createwalletdialog.h b/src/qt/createwalletdialog.h
index 25ddf97585..fc13cc44eb 100644
--- a/src/qt/createwalletdialog.h
+++ b/src/qt/createwalletdialog.h
@@ -35,6 +35,7 @@ public:
private:
Ui::CreateWalletDialog *ui;
+ bool m_has_signers = false;
};
#endif // BITCOIN_QT_CREATEWALLETDIALOG_H
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index bd72328c02..2ff1445709 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -51,20 +51,20 @@
</spacer>
</item>
<item>
- <layout class="QHBoxLayout" name="horizontalLayout_Main_Prune">
- <item>
- <widget class="QCheckBox" name="prune">
- <property name="toolTip">
- <string>Enabling pruning significantly reduces the disk space required to store transactions. All blocks are still fully validated. Reverting this setting requires re-downloading the entire blockchain.</string>
- </property>
- <property name="text">
- <string>Prune &amp;block storage to</string>
- </property>
- </widget>
- </item>
- <item>
- <widget class="QSpinBox" name="pruneSize"/>
- </item>
+ <layout class="QHBoxLayout" name="horizontalLayout_Main_Prune">
+ <item>
+ <widget class="QCheckBox" name="prune">
+ <property name="toolTip">
+ <string>Enabling pruning significantly reduces the disk space required to store transactions. All blocks are still fully validated. Reverting this setting requires re-downloading the entire blockchain.</string>
+ </property>
+ <property name="text">
+ <string>Prune &amp;block storage to</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QSpinBox" name="pruneSize"/>
+ </item>
<item>
<widget class="QLabel" name="pruneSizeUnitLabel">
<property name="text">
@@ -201,6 +201,16 @@
</attribute>
<layout class="QVBoxLayout" name="verticalLayout_Wallet">
<item>
+ <widget class="QCheckBox" name="subFeeFromAmount">
+ <property name="toolTip">
+ <string extracomment="Tooltip text for Options window setting that sets subtracting the fee from a sending amount as default.">Whether to set subtract fee from amount as default or not.</string>
+ </property>
+ <property name="text">
+ <string extracomment="An Options window setting to set subtracting the fee from a sending amount as default.">Subtract &amp;fee from amount by default</string>
+ </property>
+ </widget>
+ </item>
+ <item>
<widget class="QGroupBox" name="groupBox">
<property name="title">
<string>Expert</string>
@@ -235,27 +245,27 @@
<string>External Signer (e.g. hardware wallet)</string>
</property>
<layout class="QVBoxLayout" name="verticalLayoutHww">
- <item>
- <layout class="QHBoxLayout" name="horizontalLayoutHww">
- <item>
- <widget class="QLabel" name="externalSignerPathLabel">
- <property name="text">
- <string>&amp;External signer script path</string>
- </property>
- <property name="buddy">
- <cstring>externalSignerPath</cstring>
- </property>
- </widget>
- </item>
- <item>
- <widget class="QLineEdit" name="externalSignerPath">
- <property name="toolTip">
- <string>Full path to a Bitcoin Core compatible script (e.g. C:\Downloads\hwi.exe or /Users/you/Downloads/hwi.py). Beware: malware can steal your coins!</string>
- </property>
- </widget>
- </item>
- </layout>
- </item>
+ <item>
+ <layout class="QHBoxLayout" name="horizontalLayoutHww">
+ <item>
+ <widget class="QLabel" name="externalSignerPathLabel">
+ <property name="text">
+ <string>&amp;External signer script path</string>
+ </property>
+ <property name="buddy">
+ <cstring>externalSignerPath</cstring>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QLineEdit" name="externalSignerPath">
+ <property name="toolTip">
+ <string>Full path to a Bitcoin Core compatible script (e.g. C:\Downloads\hwi.exe or /Users/you/Downloads/hwi.py). Beware: malware can steal your coins!</string>
+ </property>
+ </widget>
+ </item>
+ </layout>
+ </item>
</layout>
</widget>
</item>
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index ecdfce2f5a..e98e50ba14 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -81,7 +81,7 @@ QString dateTimeStr(const QDateTime &date)
QString dateTimeStr(qint64 nTime)
{
- return dateTimeStr(QDateTime::fromTime_t((qint32)nTime));
+ return dateTimeStr(QDateTime::fromSecsSinceEpoch(nTime));
}
QFont fixedPitchFont(bool use_embedded_font)
diff --git a/src/qt/initexecutor.cpp b/src/qt/initexecutor.cpp
new file mode 100644
index 0000000000..7060f74dab
--- /dev/null
+++ b/src/qt/initexecutor.cpp
@@ -0,0 +1,66 @@
+// Copyright (c) 2014-2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <qt/initexecutor.h>
+
+#include <interfaces/node.h>
+#include <util/system.h>
+#include <util/threadnames.h>
+
+#include <exception>
+
+#include <QDebug>
+#include <QObject>
+#include <QString>
+#include <QThread>
+
+InitExecutor::InitExecutor(interfaces::Node& node)
+ : QObject(), m_node(node)
+{
+ this->moveToThread(&m_thread);
+ m_thread.start();
+}
+
+InitExecutor::~InitExecutor()
+{
+ qDebug() << __func__ << ": Stopping thread";
+ m_thread.quit();
+ m_thread.wait();
+ qDebug() << __func__ << ": Stopped thread";
+}
+
+void InitExecutor::handleRunawayException(const std::exception* e)
+{
+ PrintExceptionContinue(e, "Runaway exception");
+ Q_EMIT runawayException(QString::fromStdString(m_node.getWarnings().translated));
+}
+
+void InitExecutor::initialize()
+{
+ try {
+ util::ThreadRename("qt-init");
+ qDebug() << __func__ << ": Running initialization in thread";
+ interfaces::BlockAndHeaderTipInfo tip_info;
+ bool rv = m_node.appInitMain(&tip_info);
+ Q_EMIT initializeResult(rv, tip_info);
+ } catch (const std::exception& e) {
+ handleRunawayException(&e);
+ } catch (...) {
+ handleRunawayException(nullptr);
+ }
+}
+
+void InitExecutor::shutdown()
+{
+ try {
+ qDebug() << __func__ << ": Running Shutdown in thread";
+ m_node.appShutdown();
+ qDebug() << __func__ << ": Shutdown finished";
+ Q_EMIT shutdownResult();
+ } catch (const std::exception& e) {
+ handleRunawayException(&e);
+ } catch (...) {
+ handleRunawayException(nullptr);
+ }
+}
diff --git a/src/qt/initexecutor.h b/src/qt/initexecutor.h
new file mode 100644
index 0000000000..319ce40465
--- /dev/null
+++ b/src/qt/initexecutor.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2014-2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_QT_INITEXECUTOR_H
+#define BITCOIN_QT_INITEXECUTOR_H
+
+#include <interfaces/node.h>
+
+#include <exception>
+
+#include <QObject>
+#include <QThread>
+
+QT_BEGIN_NAMESPACE
+class QString;
+QT_END_NAMESPACE
+
+/** Class encapsulating Bitcoin Core startup and shutdown.
+ * Allows running startup and shutdown in a different thread from the UI thread.
+ */
+class InitExecutor : public QObject
+{
+ Q_OBJECT
+public:
+ explicit InitExecutor(interfaces::Node& node);
+ ~InitExecutor();
+
+public Q_SLOTS:
+ void initialize();
+ void shutdown();
+
+Q_SIGNALS:
+ void initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info);
+ void shutdownResult();
+ void runawayException(const QString& message);
+
+private:
+ /// Pass fatal exception message to UI thread
+ void handleRunawayException(const std::exception* e);
+
+ interfaces::Node& m_node;
+ QThread m_thread;
+};
+
+#endif // BITCOIN_QT_INITEXECUTOR_H
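InitExecutor follows the standard Qt pattern of a QObject that owns its worker QThread: the object moves itself onto the thread in the constructor and stops the thread in the destructor, so its slots execute off the GUI thread. A minimal sketch of that pattern with illustrative names (Worker, doWork) not taken from the codebase:

#include <QObject>
#include <QThread>

class Worker : public QObject
{
    Q_OBJECT
public:
    Worker() { moveToThread(&m_thread); m_thread.start(); }
    ~Worker() override { m_thread.quit(); m_thread.wait(); }

public Q_SLOTS:
    void doWork() { /* runs on m_thread, not on the GUI thread */ }

Q_SIGNALS:
    void done();

private:
    QThread m_thread;
};

Because the object's thread affinity is m_thread, connecting a GUI-thread signal to doWork() yields a queued connection, which is how the requestedInitialize/requestedShutdown connections in bitcoin.cpp dispatch work to the executor.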
diff --git a/src/qt/locale/bitcoin_en.ts b/src/qt/locale/bitcoin_en.ts
index 7026f49c01..47c002498a 100644
--- a/src/qt/locale/bitcoin_en.ts
+++ b/src/qt/locale/bitcoin_en.ts
@@ -749,8 +749,8 @@ Signing is only possible with addresses of the type &apos;legacy&apos;.</source>
<source>%n active connection(s) to Bitcoin network.</source>
<extracomment>A substring of the tooltip.</extracomment>
<translation type="unfinished">
- <numerusform></numerusform>
- <numerusform></numerusform>
+ <numerusform>%n active connection to Bitcoin network.</numerusform>
+ <numerusform>%n active connections to Bitcoin network.</numerusform>
</translation>
</message>
<message>
@@ -1376,8 +1376,8 @@ Signing is only possible with addresses of the type &apos;legacy&apos;.</source>
<source>(sufficient to restore backups %n day(s) old)</source>
<extracomment>Explanatory text on the capability of the current prune target.</extracomment>
<translation type="unfinished">
- <numerusform></numerusform>
- <numerusform></numerusform>
+ <numerusform>(sufficient to restore backups %n day old)</numerusform>
+ <numerusform>(sufficient to restore backups %n days old)</numerusform>
</translation>
</message>
<message>
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index b12fe96567..5ad4fc9b33 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -239,6 +239,7 @@ void OptionsDialog::setMapper()
/* Wallet */
mapper->addMapping(ui->spendZeroConfChange, OptionsModel::SpendZeroConfChange);
mapper->addMapping(ui->coinControlFeatures, OptionsModel::CoinControlFeatures);
+ mapper->addMapping(ui->subFeeFromAmount, OptionsModel::SubFeeFromAmount);
mapper->addMapping(ui->externalSignerPath, OptionsModel::ExternalSignerPath);
/* Network */
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index 24a4e9ee96..d87fc1f84a 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -124,6 +124,11 @@ void OptionsModel::Init(bool resetSettings)
if (!gArgs.SoftSetArg("-signer", settings.value("external_signer_path").toString().toStdString())) {
addOverriddenOption("-signer");
}
+
+ if (!settings.contains("SubFeeFromAmount")) {
+ settings.setValue("SubFeeFromAmount", false);
+ }
+ m_sub_fee_from_amount = settings.value("SubFeeFromAmount", false).toBool();
#endif
// Network
@@ -335,6 +340,8 @@ QVariant OptionsModel::data(const QModelIndex & index, int role) const
return settings.value("bSpendZeroConfChange");
case ExternalSignerPath:
return settings.value("external_signer_path");
+ case SubFeeFromAmount:
+ return m_sub_fee_from_amount;
#endif
case DisplayUnit:
return nDisplayUnit;
@@ -460,6 +467,10 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in
setRestartRequired(true);
}
break;
+ case SubFeeFromAmount:
+ m_sub_fee_from_amount = value.toBool();
+ settings.setValue("SubFeeFromAmount", m_sub_fee_from_amount);
+ break;
#endif
case DisplayUnit:
setDisplayUnit(value);
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index 535843e8ba..203ee27ad8 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -61,6 +61,7 @@ public:
Language, // QString
UseEmbeddedMonospacedFont, // bool
CoinControlFeatures, // bool
+ SubFeeFromAmount, // bool
ThreadsScriptVerif, // int
Prune, // bool
PruneSize, // int
@@ -88,6 +89,7 @@ public:
QString getThirdPartyTxUrls() const { return strThirdPartyTxUrls; }
bool getUseEmbeddedMonospacedFont() const { return m_use_embedded_monospaced_font; }
bool getCoinControlFeatures() const { return fCoinControlFeatures; }
+ bool getSubFeeFromAmount() const { return m_sub_fee_from_amount; }
const QString& getOverriddenByCommandLine() { return strOverriddenByCommandLine; }
/* Explicit setters */
@@ -112,6 +114,7 @@ private:
QString strThirdPartyTxUrls;
bool m_use_embedded_monospaced_font;
bool fCoinControlFeatures;
+ bool m_sub_fee_from_amount;
/* settings that were overridden by command-line */
QString strOverriddenByCommandLine;
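The SubFeeFromAmount handling in OptionsModel::Init() follows the model's usual QSettings pattern: persist a default if the key is absent, then cache the value in a member. A minimal sketch of that pattern with an illustrative key name, not one from the codebase:

#include <QSettings>

bool LoadBoolOptionSketch(QSettings& settings)
{
    if (!settings.contains("SomeBoolOption")) {
        settings.setValue("SomeBoolOption", false); // write the default once
    }
    return settings.value("SomeBoolOption", false).toBool(); // value the model caches
}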
diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp
index 1fd1ff3142..864a62edc8 100644
--- a/src/qt/overviewpage.cpp
+++ b/src/qt/overviewpage.cpp
@@ -69,20 +69,18 @@ public:
foreground = brush.color();
}
- painter->setPen(foreground);
- QRect boundingRect;
- painter->drawText(addressRect, Qt::AlignLeft | Qt::AlignVCenter, address, &boundingRect);
- int address_rect_min_width = boundingRect.width();
-
- if (index.data(TransactionTableModel::WatchonlyRole).toBool())
- {
+ if (index.data(TransactionTableModel::WatchonlyRole).toBool()) {
QIcon iconWatchonly = qvariant_cast<QIcon>(index.data(TransactionTableModel::WatchonlyDecorationRole));
- QRect watchonlyRect(boundingRect.right() + 5, mainRect.top()+ypad+halfheight, 16, halfheight);
+ QRect watchonlyRect(addressRect.left(), addressRect.top(), 16, addressRect.height());
iconWatchonly = platformStyle->TextColorIcon(iconWatchonly);
iconWatchonly.paint(painter, watchonlyRect);
- address_rect_min_width += 5 + watchonlyRect.width();
+ addressRect.setLeft(addressRect.left() + watchonlyRect.width() + 5);
}
+ painter->setPen(foreground);
+ QRect boundingRect;
+ painter->drawText(addressRect, Qt::AlignLeft | Qt::AlignVCenter, address, &boundingRect);
+
if(amount < 0)
{
foreground = COLOR_NEGATIVE;
@@ -109,7 +107,8 @@ public:
QRect date_bounding_rect;
painter->drawText(amountRect, Qt::AlignLeft | Qt::AlignVCenter, GUIUtil::dateTimeStr(date), &date_bounding_rect);
- const int minimum_width = std::max(address_rect_min_width, amount_bounding_rect.width() + date_bounding_rect.width());
+ // 0.4*date_bounding_rect.width() is used to visually distinguish a date from an amount.
+ const int minimum_width = 1.4 * date_bounding_rect.width() + amount_bounding_rect.width();
const auto search = m_minimum_width.find(index.row());
if (search == m_minimum_width.end() || search->second != minimum_width) {
m_minimum_width[index.row()] = minimum_width;
diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp
index b324693692..433a1ea934 100644
--- a/src/qt/peertablemodel.cpp
+++ b/src/qt/peertablemodel.cpp
@@ -72,8 +72,13 @@ QVariant PeerTableModel::data(const QModelIndex& index, int role) const
case NetNodeId:
return (qint64)rec->nodeStats.nodeid;
case Address:
- // prepend to peer address down-arrow symbol for inbound connection and up-arrow for outbound connection
- return QString::fromStdString((rec->nodeStats.fInbound ? "↓ " : "↑ ") + rec->nodeStats.addrName);
+ return QString::fromStdString(rec->nodeStats.m_addr_name);
+ case Direction:
+ return QString(rec->nodeStats.fInbound ?
+ //: An Inbound Connection from a Peer.
+ tr("Inbound") :
+ //: An Outbound Connection to a Peer.
+ tr("Outbound"));
case ConnectionType:
return GUIUtil::ConnectionTypeToQString(rec->nodeStats.m_conn_type, /* prepend_direction */ false);
case Network:
@@ -94,6 +99,7 @@ QVariant PeerTableModel::data(const QModelIndex& index, int role) const
return QVariant(Qt::AlignRight | Qt::AlignVCenter);
case Address:
return {};
+ case Direction:
case ConnectionType:
case Network:
return QVariant(Qt::AlignCenter);
@@ -179,5 +185,7 @@ void PeerTableModel::refresh()
m_peers_data.swap(new_peers_data);
}
- Q_EMIT changed();
+ const auto top_left = index(0, 0);
+ const auto bottom_right = index(rowCount() - 1, columnCount() - 1);
+ Q_EMIT dataChanged(top_left, bottom_right);
}
diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h
index 0ff1b5dba7..40265ee266 100644
--- a/src/qt/peertablemodel.h
+++ b/src/qt/peertablemodel.h
@@ -48,6 +48,7 @@ public:
enum ColumnIndex {
NetNodeId = 0,
Address,
+ Direction,
ConnectionType,
Network,
Ping,
@@ -73,9 +74,6 @@ public:
public Q_SLOTS:
void refresh();
-Q_SIGNALS:
- void changed();
-
private:
//! Internal peer data structure.
QList<CNodeCombinedStats> m_peers_data{};
@@ -87,6 +85,9 @@ private:
/*: Title of Peers Table column which contains the
IP/Onion/I2P address of the connected peer. */
tr("Address"),
+ /*: Title of Peers Table column which indicates the direction
+ the peer connection was initiated from. */
+ tr("Direction"),
/*: Title of Peers Table column which describes the type of
peer connection. The "type" describes why the connection exists. */
tr("Type"),
diff --git a/src/qt/peertablesortproxy.cpp b/src/qt/peertablesortproxy.cpp
index 78932da8d4..419133bc32 100644
--- a/src/qt/peertablesortproxy.cpp
+++ b/src/qt/peertablesortproxy.cpp
@@ -25,7 +25,9 @@ bool PeerTableSortProxy::lessThan(const QModelIndex& left_index, const QModelInd
case PeerTableModel::NetNodeId:
return left_stats.nodeid < right_stats.nodeid;
case PeerTableModel::Address:
- return left_stats.addrName.compare(right_stats.addrName) < 0;
+ return left_stats.m_addr_name.compare(right_stats.m_addr_name) < 0;
+ case PeerTableModel::Direction:
+ return left_stats.fInbound > right_stats.fInbound; // default sort Inbound, then Outbound
case PeerTableModel::ConnectionType:
return left_stats.m_conn_type < right_stats.m_conn_type;
case PeerTableModel::Network:
diff --git a/src/qt/psbtoperationsdialog.cpp b/src/qt/psbtoperationsdialog.cpp
index 2adfeeaaf0..289fb9f7c8 100644
--- a/src/qt/psbtoperationsdialog.cpp
+++ b/src/qt/psbtoperationsdialog.cpp
@@ -47,18 +47,22 @@ void PSBTOperationsDialog::openWithPSBT(PartiallySignedTransaction psbtx)
{
m_transaction_data = psbtx;
- bool complete;
- size_t n_could_sign;
- FinalizePSBT(psbtx); // Make sure all existing signatures are fully combined before checking for completeness.
- TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, &n_could_sign, m_transaction_data, complete);
- if (err != TransactionError::OK) {
- showStatus(tr("Failed to load transaction: %1")
- .arg(QString::fromStdString(TransactionErrorString(err).translated)), StatusLevel::ERR);
- return;
+ bool complete = FinalizePSBT(psbtx); // Make sure all existing signatures are fully combined before checking for completeness.
+ if (m_wallet_model) {
+ size_t n_could_sign;
+ TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, &n_could_sign, m_transaction_data, complete);
+ if (err != TransactionError::OK) {
+ showStatus(tr("Failed to load transaction: %1")
+ .arg(QString::fromStdString(TransactionErrorString(err).translated)),
+ StatusLevel::ERR);
+ return;
+ }
+ m_ui->signTransactionButton->setEnabled(!complete && !m_wallet_model->wallet().privateKeysDisabled() && n_could_sign > 0);
+ } else {
+ m_ui->signTransactionButton->setEnabled(false);
}
m_ui->broadcastTransactionButton->setEnabled(complete);
- m_ui->signTransactionButton->setEnabled(!complete && !m_wallet_model->wallet().privateKeysDisabled() && n_could_sign > 0);
updateTransactionDisplay();
}
@@ -133,7 +137,7 @@ void PSBTOperationsDialog::saveTransaction() {
}
CTxDestination address;
ExtractDestination(out.scriptPubKey, address);
- QString amount = BitcoinUnits::format(m_wallet_model->getOptionsModel()->getDisplayUnit(), out.nValue);
+ QString amount = BitcoinUnits::format(m_client_model->getOptionsModel()->getDisplayUnit(), out.nValue);
QString address_str = QString::fromStdString(EncodeDestination(address));
filename_suggestion.append(address_str + "-" + amount);
first = false;
@@ -224,6 +228,10 @@ void PSBTOperationsDialog::showStatus(const QString &msg, StatusLevel level) {
}
size_t PSBTOperationsDialog::couldSignInputs(const PartiallySignedTransaction &psbtx) {
+ if (!m_wallet_model) {
+ return 0;
+ }
+
size_t n_signed;
bool complete;
TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, false /* bip32derivs */, &n_signed, m_transaction_data, complete);
@@ -246,7 +254,10 @@ void PSBTOperationsDialog::showTransactionStatus(const PartiallySignedTransactio
case PSBTRole::SIGNER: {
QString need_sig_text = tr("Transaction still needs signature(s).");
StatusLevel level = StatusLevel::INFO;
- if (m_wallet_model->wallet().privateKeysDisabled()) {
+ if (!m_wallet_model) {
+ need_sig_text += " " + tr("(But no wallet is loaded.)");
+ level = StatusLevel::WARN;
+ } else if (m_wallet_model->wallet().privateKeysDisabled()) {
need_sig_text += " " + tr("(But this wallet cannot sign transactions.)");
level = StatusLevel::WARN;
} else if (n_could_sign < 1) {
diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp
index ec3d970a7f..ab8225e19f 100644
--- a/src/qt/recentrequeststablemodel.cpp
+++ b/src/qt/recentrequeststablemodel.cpp
@@ -234,7 +234,7 @@ bool RecentRequestEntryLessThan::operator()(const RecentRequestEntry& left, cons
switch(column)
{
case RecentRequestsTableModel::Date:
- return pLeft->date.toTime_t() < pRight->date.toTime_t();
+ return pLeft->date.toSecsSinceEpoch() < pRight->date.toSecsSinceEpoch();
case RecentRequestsTableModel::Label:
return pLeft->recipient.label < pRight->recipient.label;
case RecentRequestsTableModel::Message:
diff --git a/src/qt/recentrequeststablemodel.h b/src/qt/recentrequeststablemodel.h
index b817b64e77..c489c0eaf4 100644
--- a/src/qt/recentrequeststablemodel.h
+++ b/src/qt/recentrequeststablemodel.h
@@ -7,6 +7,8 @@
#include <qt/sendcoinsrecipient.h>
+#include <string>
+
#include <QAbstractTableModel>
#include <QStringList>
#include <QDateTime>
@@ -26,9 +28,9 @@ public:
SERIALIZE_METHODS(RecentRequestEntry, obj) {
unsigned int date_timet;
- SER_WRITE(obj, date_timet = obj.date.toTime_t());
+ SER_WRITE(obj, date_timet = obj.date.toSecsSinceEpoch());
READWRITE(obj.nVersion, obj.id, date_timet, obj.recipient);
- SER_READ(obj, obj.date = QDateTime::fromTime_t(date_timet));
+ SER_READ(obj, obj.date = QDateTime::fromSecsSinceEpoch(date_timet));
}
};
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index c973fdbe78..829f7add80 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -35,6 +35,7 @@
#endif
#include <QAbstractButton>
+#include <QAbstractItemModel>
#include <QDateTime>
#include <QFont>
#include <QKeyEvent>
@@ -287,6 +288,7 @@ bool RPCConsole::RPCParseCommandLine(interfaces::Node* node, std::string &strRes
}
if (breakParsing)
break;
+ [[fallthrough]];
}
case STATE_ARGUMENT: // In or after argument
case STATE_EATING_SPACES_IN_ARG:
@@ -400,6 +402,7 @@ bool RPCConsole::RPCParseCommandLine(interfaces::Node* node, std::string &strRes
strResult = lastResult.get_str();
else
strResult = lastResult.write(2);
+ [[fallthrough]];
case STATE_ARGUMENT:
case STATE_EATING_SPACES:
return true;
@@ -648,7 +651,7 @@ void RPCConsole::setClientModel(ClientModel *model, int bestblock_height, int64_
setNumConnections(model->getNumConnections());
connect(model, &ClientModel::numConnectionsChanged, this, &RPCConsole::setNumConnections);
- setNumBlocks(bestblock_height, QDateTime::fromTime_t(bestblock_date), verification_progress, false);
+ setNumBlocks(bestblock_height, QDateTime::fromSecsSinceEpoch(bestblock_date), verification_progress, false);
connect(model, &ClientModel::numBlocksChanged, this, &RPCConsole::setNumBlocks);
updateNetworkState();
@@ -686,7 +689,7 @@ void RPCConsole::setClientModel(ClientModel *model, int bestblock_height, int64_
// peer table signal handling - update peer details when selecting new node
connect(ui->peerWidget->selectionModel(), &QItemSelectionModel::selectionChanged, this, &RPCConsole::updateDetailWidget);
- connect(model->getPeerTableModel(), &PeerTableModel::changed, this, &RPCConsole::updateDetailWidget);
+ connect(model->getPeerTableModel(), &QAbstractItemModel::dataChanged, [this] { updateDetailWidget(); });
// set up ban table
ui->banlistWidget->setModel(model->getBanTableModel());
@@ -703,6 +706,13 @@ void RPCConsole::setClientModel(ClientModel *model, int bestblock_height, int64_
// create ban table context menu
banTableContextMenu = new QMenu(this);
+ /*: Context menu action to copy the IP/Netmask of a banned peer.
+ IP/Netmask is the combination of a peer's IP address and its Netmask.
+ For IP address see: https://en.wikipedia.org/wiki/IP_address */
+ banTableContextMenu->addAction(tr("&Copy IP/Netmask"), [this] {
+ GUIUtil::copyEntryData(ui->banlistWidget, BanTableModel::Address, Qt::DisplayRole);
+ });
+ banTableContextMenu->addSeparator();
banTableContextMenu->addAction(tr("&Unban"), this, &RPCConsole::unbanSelectedNode);
connect(ui->banlistWidget, &QTableView::customContextMenuRequested, this, &RPCConsole::showBanTableContextMenu);
@@ -1126,7 +1136,7 @@ void RPCConsole::updateDetailWidget()
}
const auto stats = selected_peers.first().data(PeerTableModel::StatsRole).value<CNodeCombinedStats*>();
// update the detail ui with latest node information
- QString peerAddrDetails(QString::fromStdString(stats->nodeStats.addrName) + " ");
+ QString peerAddrDetails(QString::fromStdString(stats->nodeStats.m_addr_name) + " ");
peerAddrDetails += tr("(peer: %1)").arg(QString::number(stats->nodeStats.nodeid));
if (!stats->nodeStats.addrLocal.empty())
peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal));
diff --git a/src/qt/sendcoinsentry.cpp b/src/qt/sendcoinsentry.cpp
index 683c0441fa..5fa5165615 100644
--- a/src/qt/sendcoinsentry.cpp
+++ b/src/qt/sendcoinsentry.cpp
@@ -97,7 +97,9 @@ void SendCoinsEntry::clear()
ui->payTo->clear();
ui->addAsLabel->clear();
ui->payAmount->clear();
- ui->checkboxSubtractFeeFromAmount->setCheckState(Qt::Unchecked);
+ if (model && model->getOptionsModel()) {
+ ui->checkboxSubtractFeeFromAmount->setChecked(model->getOptionsModel()->getSubFeeFromAmount());
+ }
ui->messageTextLabel->clear();
ui->messageTextLabel->hide();
ui->messageLabel->hide();
diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp
index 39c69fe184..f4d561286e 100644
--- a/src/qt/test/addressbooktests.cpp
+++ b/src/qt/test/addressbooktests.cpp
@@ -109,9 +109,10 @@ void TestAddAddressesToSendBook(interfaces::Node& node)
std::unique_ptr<const PlatformStyle> platformStyle(PlatformStyle::instantiate("other"));
OptionsModel optionsModel;
ClientModel clientModel(node, &optionsModel);
- AddWallet(wallet);
- WalletModel walletModel(interfaces::MakeWallet(wallet), clientModel, platformStyle.get());
- RemoveWallet(wallet, std::nullopt);
+ WalletContext& context = *node.walletClient().context();
+ AddWallet(context, wallet);
+ WalletModel walletModel(interfaces::MakeWallet(context, wallet), clientModel, platformStyle.get());
+ RemoveWallet(context, wallet, /* load_on_start= */ std::nullopt);
EditAddressDialog editAddressDialog(EditAddressDialog::NewSendingAddress);
editAddressDialog.setModel(walletModel.getAddressTableModel());
diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp
index 9c31cd50df..8489b33144 100644
--- a/src/qt/test/apptests.cpp
+++ b/src/qt/test/apptests.cpp
@@ -12,7 +12,6 @@
#include <qt/rpcconsole.h>
#include <shutdown.h>
#include <test/util/setup_common.h>
-#include <univalue.h>
#include <validation.h>
#if defined(HAVE_CONFIG_H)
@@ -21,8 +20,10 @@
#include <QAction>
#include <QLineEdit>
+#include <QRegularExpression>
#include <QScopedPointer>
#include <QSignalSpy>
+#include <QString>
#include <QTest>
#include <QTextEdit>
#include <QtGlobal>
@@ -30,6 +31,13 @@
#include <QtTest/QtTestGui>
namespace {
+//! Use a regex to find a string group inside the console output
+QString FindInConsole(const QString& output, const QString& pattern)
+{
+ const QRegularExpression re(pattern);
+ return re.match(output).captured(1);
+}
+
//! Call getblockchaininfo RPC and check first field of JSON output.
void TestRpcCommand(RPCConsole* console)
{
@@ -41,10 +49,9 @@ void TestRpcCommand(RPCConsole* console)
QTest::keyClick(lineEdit, Qt::Key_Return);
QVERIFY(mw_spy.wait(1000));
QCOMPARE(mw_spy.count(), 4);
- QString output = messagesWidget->toPlainText();
- UniValue value;
- value.read(output.right(output.size() - output.lastIndexOf(QChar::ObjectReplacementCharacter) - 1).toStdString());
- QCOMPARE(value["chain"].get_str(), std::string("regtest"));
+ const QString output = messagesWidget->toPlainText();
+ const QString pattern = QStringLiteral("\"chain\": \"(\\w+)\"");
+ QCOMPARE(FindInConsole(output, pattern), QString("regtest"));
}
} // namespace
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index eb86f027ef..7d66f67f8a 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -8,6 +8,7 @@
#include <interfaces/node.h>
#include <qt/bitcoin.h>
+#include <qt/initexecutor.h>
#include <qt/test/apptests.h>
#include <qt/test/rpcnestedtests.h>
#include <qt/test/uritests.h>
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index e883337fb5..89f2258c0d 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -164,9 +164,10 @@ void TestGUI(interfaces::Node& node)
TransactionView transactionView(platformStyle.get());
OptionsModel optionsModel;
ClientModel clientModel(node, &optionsModel);
- AddWallet(wallet);
- WalletModel walletModel(interfaces::MakeWallet(wallet), clientModel, platformStyle.get());
- RemoveWallet(wallet, std::nullopt);
+ WalletContext& context = *node.walletClient().context();
+ AddWallet(context, wallet);
+ WalletModel walletModel(interfaces::MakeWallet(context, wallet), clientModel, platformStyle.get());
+ RemoveWallet(context, wallet, /* load_on_start= */ std::nullopt);
sendCoinsDialog.setModel(&walletModel);
transactionView.setModel(&walletModel);
diff --git a/src/qt/transactionfilterproxy.cpp b/src/qt/transactionfilterproxy.cpp
index a631f497af..57c05a647e 100644
--- a/src/qt/transactionfilterproxy.cpp
+++ b/src/qt/transactionfilterproxy.cpp
@@ -7,17 +7,12 @@
#include <qt/transactiontablemodel.h>
#include <qt/transactionrecord.h>
+#include <algorithm>
#include <cstdlib>
-
-// Earliest date that can be represented (far in the past)
-const QDateTime TransactionFilterProxy::MIN_DATE = QDateTime::fromTime_t(0);
-// Last date that can be represented (far in the future)
-const QDateTime TransactionFilterProxy::MAX_DATE = QDateTime::fromTime_t(0xFFFFFFFF);
+#include <optional>
TransactionFilterProxy::TransactionFilterProxy(QObject *parent) :
QSortFilterProxyModel(parent),
- dateFrom(MIN_DATE),
- dateTo(MAX_DATE),
m_search_string(),
typeFilter(ALL_TYPES),
watchOnlyFilter(WatchOnlyFilter_All),
@@ -46,8 +41,8 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex &
return false;
QDateTime datetime = index.data(TransactionTableModel::DateRole).toDateTime();
- if (datetime < dateFrom || datetime > dateTo)
- return false;
+ if (dateFrom && datetime < *dateFrom) return false;
+ if (dateTo && datetime > *dateTo) return false;
QString address = index.data(TransactionTableModel::AddressRole).toString();
QString label = index.data(TransactionTableModel::LabelRole).toString();
@@ -65,10 +60,10 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex &
return true;
}
-void TransactionFilterProxy::setDateRange(const QDateTime &from, const QDateTime &to)
+void TransactionFilterProxy::setDateRange(const std::optional<QDateTime>& from, const std::optional<QDateTime>& to)
{
- this->dateFrom = from;
- this->dateTo = to;
+ dateFrom = from;
+ dateTo = to;
invalidateFilter();
}
diff --git a/src/qt/transactionfilterproxy.h b/src/qt/transactionfilterproxy.h
index 693b363692..09bc9e75db 100644
--- a/src/qt/transactionfilterproxy.h
+++ b/src/qt/transactionfilterproxy.h
@@ -10,6 +10,8 @@
#include <QDateTime>
#include <QSortFilterProxyModel>
+#include <optional>
+
/** Filter the transaction list according to pre-specified rules. */
class TransactionFilterProxy : public QSortFilterProxyModel
{
@@ -18,10 +20,6 @@ class TransactionFilterProxy : public QSortFilterProxyModel
public:
explicit TransactionFilterProxy(QObject *parent = nullptr);
- /** Earliest date that can be represented (far in the past) */
- static const QDateTime MIN_DATE;
- /** Last date that can be represented (far in the future) */
- static const QDateTime MAX_DATE;
/** Type filter bit field (all types) */
static const quint32 ALL_TYPES = 0xFFFFFFFF;
@@ -34,7 +32,8 @@ public:
WatchOnlyFilter_No
};
- void setDateRange(const QDateTime &from, const QDateTime &to);
+ /** Filter transactions between date range. Use std::nullopt for open range. */
+ void setDateRange(const std::optional<QDateTime>& from, const std::optional<QDateTime>& to);
void setSearchString(const QString &);
/**
@note Type filter takes a bit field created with TYPE() or ALL_TYPES
@@ -55,8 +54,8 @@ protected:
bool filterAcceptsRow(int source_row, const QModelIndex & source_parent) const override;
private:
- QDateTime dateFrom;
- QDateTime dateTo;
+ std::optional<QDateTime> dateFrom;
+ std::optional<QDateTime> dateTo;
QString m_search_string;
quint32 typeFilter;
WatchOnlyFilter watchOnlyFilter;
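
These hunks replace the MIN_DATE/MAX_DATE sentinels with std::optional bounds, so an unset bound simply means "no limit". A minimal sketch of the resulting range test, assuming plain int64_t timestamps stand in for QDateTime:

    #include <cassert>
    #include <cstdint>
    #include <optional>

    // Unset bounds are open-ended; no sentinel dates are needed.
    static bool InRange(int64_t t, const std::optional<int64_t>& from, const std::optional<int64_t>& to)
    {
        if (from && t < *from) return false;
        if (to && t > *to) return false;
        return true;
    }

    int main()
    {
        assert(InRange(100, std::nullopt, std::nullopt)); // "All": fully open range
        assert(InRange(100, 50, std::nullopt));           // "Today" and friends: lower bound only
        assert(!InRange(10, 50, std::nullopt));
        return 0;
    }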
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index b68ceaedbb..23590ea4d2 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -610,7 +610,7 @@ QVariant TransactionTableModel::data(const QModelIndex &index, int role) const
case TypeRole:
return rec->type;
case DateRole:
- return QDateTime::fromTime_t(static_cast<uint>(rec->time));
+ return QDateTime::fromSecsSinceEpoch(rec->time);
case WatchonlyRole:
return rec->involvesWatchAddress;
case WatchonlyDecorationRole:
@@ -630,7 +630,7 @@ QVariant TransactionTableModel::data(const QModelIndex &index, int role) const
case TxPlainTextRole:
{
QString details;
- QDateTime date = QDateTime::fromTime_t(static_cast<uint>(rec->time));
+ QDateTime date = QDateTime::fromSecsSinceEpoch(rec->time);
QString txLabel = walletModel->getAddressTableModel()->labelForAddress(QString::fromStdString(rec->address));
details.append(date.toString("M/d/yy HH:mm"));
diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp
index 83d17a32c0..908cb917f1 100644
--- a/src/qt/transactionview.cpp
+++ b/src/qt/transactionview.cpp
@@ -19,6 +19,8 @@
#include <node/ui_interface.h>
+#include <optional>
+
#include <QApplication>
#include <QComboBox>
#include <QDateTimeEdit>
@@ -266,26 +268,26 @@ void TransactionView::chooseDate(int idx)
{
case All:
transactionProxyModel->setDateRange(
- TransactionFilterProxy::MIN_DATE,
- TransactionFilterProxy::MAX_DATE);
+ std::nullopt,
+ std::nullopt);
break;
case Today:
transactionProxyModel->setDateRange(
GUIUtil::StartOfDay(current),
- TransactionFilterProxy::MAX_DATE);
+ std::nullopt);
break;
case ThisWeek: {
// Find last Monday
QDate startOfWeek = current.addDays(-(current.dayOfWeek()-1));
transactionProxyModel->setDateRange(
GUIUtil::StartOfDay(startOfWeek),
- TransactionFilterProxy::MAX_DATE);
+ std::nullopt);
} break;
case ThisMonth:
transactionProxyModel->setDateRange(
GUIUtil::StartOfDay(QDate(current.year(), current.month(), 1)),
- TransactionFilterProxy::MAX_DATE);
+ std::nullopt);
break;
case LastMonth:
transactionProxyModel->setDateRange(
@@ -295,7 +297,7 @@ void TransactionView::chooseDate(int idx)
case ThisYear:
transactionProxyModel->setDateRange(
GUIUtil::StartOfDay(QDate(current.year(), 1, 1)),
- TransactionFilterProxy::MAX_DATE);
+ std::nullopt);
break;
case Range:
dateRangeWidget->setVisible(true);
diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp
index a1f357e0db..f694fbecb5 100644
--- a/src/qt/walletframe.cpp
+++ b/src/qt/walletframe.cpp
@@ -4,12 +4,18 @@
#include <qt/walletframe.h>
+#include <node/ui_interface.h>
+#include <psbt.h>
+#include <qt/guiutil.h>
#include <qt/overviewpage.h>
+#include <qt/psbtoperationsdialog.h>
#include <qt/walletmodel.h>
#include <qt/walletview.h>
#include <cassert>
+#include <QApplication>
+#include <QClipboard>
#include <QGroupBox>
#include <QHBoxLayout>
#include <QLabel>
@@ -58,14 +64,13 @@ void WalletFrame::setClientModel(ClientModel *_clientModel)
}
}
-bool WalletFrame::addWallet(WalletModel* walletModel, WalletView* walletView)
+bool WalletFrame::addView(WalletView* walletView)
{
- if (!clientModel || !walletModel) return false;
+ if (!clientModel) return false;
- if (mapWalletViews.count(walletModel) > 0) return false;
+ if (mapWalletViews.count(walletView->getWalletModel()) > 0) return false;
walletView->setClientModel(clientModel);
- walletView->setWalletModel(walletModel);
walletView->showOutOfSyncWarning(bOutOfSync);
WalletView* current_wallet_view = currentWalletView();
@@ -76,7 +81,7 @@ bool WalletFrame::addWallet(WalletModel* walletModel, WalletView* walletView)
}
walletStack->addWidget(walletView);
- mapWalletViews[walletModel] = walletView;
+ mapWalletViews[walletView->getWalletModel()] = walletView;
return true;
}
@@ -103,7 +108,8 @@ void WalletFrame::setCurrentWallet(WalletModel* wallet_model)
walletView->updateGeometry();
walletStack->setCurrentWidget(walletView);
- walletView->updateEncryptionStatus();
+
+ Q_EMIT currentWalletSet();
}
void WalletFrame::removeWallet(WalletModel* wallet_model)
@@ -184,10 +190,40 @@ void WalletFrame::gotoVerifyMessageTab(QString addr)
void WalletFrame::gotoLoadPSBT(bool from_clipboard)
{
- WalletView *walletView = currentWalletView();
- if (walletView) {
- walletView->gotoLoadPSBT(from_clipboard);
+ std::string data;
+
+ if (from_clipboard) {
+ std::string raw = QApplication::clipboard()->text().toStdString();
+ bool invalid;
+ data = DecodeBase64(raw, &invalid);
+ if (invalid) {
+ Q_EMIT message(tr("Error"), tr("Unable to decode PSBT from clipboard (invalid base64)"), CClientUIInterface::MSG_ERROR);
+ return;
+ }
+ } else {
+ QString filename = GUIUtil::getOpenFileName(this,
+ tr("Load Transaction Data"), QString(),
+ tr("Partially Signed Transaction (*.psbt)"), nullptr);
+ if (filename.isEmpty()) return;
+ if (GetFileSize(filename.toLocal8Bit().data(), MAX_FILE_SIZE_PSBT) == MAX_FILE_SIZE_PSBT) {
+ Q_EMIT message(tr("Error"), tr("PSBT file must be smaller than 100 MiB"), CClientUIInterface::MSG_ERROR);
+ return;
+ }
+ std::ifstream in(filename.toLocal8Bit().data(), std::ios::binary);
+ data = std::string(std::istreambuf_iterator<char>{in}, {});
}
+
+ std::string error;
+ PartiallySignedTransaction psbtx;
+ if (!DecodeRawPSBT(psbtx, data, error)) {
+ Q_EMIT message(tr("Error"), tr("Unable to decode PSBT") + "\n" + QString::fromStdString(error), CClientUIInterface::MSG_ERROR);
+ return;
+ }
+
+ PSBTOperationsDialog* dlg = new PSBTOperationsDialog(this, currentWalletModel(), clientModel);
+ dlg->openWithPSBT(psbtx);
+ dlg->setAttribute(Qt::WA_DeleteOnClose);
+ dlg->exec();
}
void WalletFrame::encryptWallet()
diff --git a/src/qt/walletframe.h b/src/qt/walletframe.h
index 4f77bd716f..cfca5c4c5c 100644
--- a/src/qt/walletframe.h
+++ b/src/qt/walletframe.h
@@ -35,7 +35,7 @@ public:
void setClientModel(ClientModel *clientModel);
- bool addWallet(WalletModel* walletModel, WalletView* walletView);
+ bool addView(WalletView* walletView);
void setCurrentWallet(WalletModel* wallet_model);
void removeWallet(WalletModel* wallet_model);
void removeAllWallets();
@@ -48,6 +48,8 @@ public:
Q_SIGNALS:
void createWalletButtonClicked();
+ void message(const QString& title, const QString& message, unsigned int style);
+ void currentWalletSet();
private:
QStackedWidget *walletStack;
diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp
index 3b8cf4c7ed..309806a1c4 100644
--- a/src/qt/walletview.cpp
+++ b/src/qt/walletview.cpp
@@ -8,7 +8,6 @@
#include <qt/askpassphrasedialog.h>
#include <qt/clientmodel.h>
#include <qt/guiutil.h>
-#include <qt/psbtoperationsdialog.h>
#include <qt/optionsmodel.h>
#include <qt/overviewpage.h>
#include <qt/platformstyle.h>
@@ -21,32 +20,34 @@
#include <interfaces/node.h>
#include <node/ui_interface.h>
-#include <psbt.h>
#include <util/strencodings.h>
#include <QAction>
#include <QActionGroup>
-#include <QApplication>
-#include <QClipboard>
#include <QFileDialog>
#include <QHBoxLayout>
#include <QProgressDialog>
#include <QPushButton>
#include <QVBoxLayout>
-WalletView::WalletView(const PlatformStyle *_platformStyle, QWidget *parent):
- QStackedWidget(parent),
- clientModel(nullptr),
- walletModel(nullptr),
- platformStyle(_platformStyle)
+WalletView::WalletView(WalletModel* wallet_model, const PlatformStyle* _platformStyle, QWidget* parent)
+ : QStackedWidget(parent),
+ clientModel(nullptr),
+ walletModel(wallet_model),
+ platformStyle(_platformStyle)
{
+ assert(walletModel);
+
// Create tabs
overviewPage = new OverviewPage(platformStyle);
+ overviewPage->setWalletModel(walletModel);
transactionsPage = new QWidget(this);
QVBoxLayout *vbox = new QVBoxLayout();
QHBoxLayout *hbox_buttons = new QHBoxLayout();
transactionView = new TransactionView(platformStyle, this);
+ transactionView->setModel(walletModel);
+
vbox->addWidget(transactionView);
QPushButton *exportButton = new QPushButton(tr("&Export"), this);
exportButton->setToolTip(tr("Export the data in the current tab to a file"));
@@ -59,10 +60,16 @@ WalletView::WalletView(const PlatformStyle *_platformStyle, QWidget *parent):
transactionsPage->setLayout(vbox);
receiveCoinsPage = new ReceiveCoinsDialog(platformStyle);
+ receiveCoinsPage->setModel(walletModel);
+
sendCoinsPage = new SendCoinsDialog(platformStyle);
+ sendCoinsPage->setModel(walletModel);
usedSendingAddressesPage = new AddressBookPage(platformStyle, AddressBookPage::ForEditing, AddressBookPage::SendingTab, this);
+ usedSendingAddressesPage->setModel(walletModel->getAddressTableModel());
+
usedReceivingAddressesPage = new AddressBookPage(platformStyle, AddressBookPage::ForEditing, AddressBookPage::ReceivingTab, this);
+ usedReceivingAddressesPage->setModel(walletModel->getAddressTableModel());
addWidget(overviewPage);
addWidget(transactionsPage);
@@ -88,6 +95,21 @@ WalletView::WalletView(const PlatformStyle *_platformStyle, QWidget *parent):
connect(transactionView, &TransactionView::message, this, &WalletView::message);
connect(this, &WalletView::setPrivacy, overviewPage, &OverviewPage::setPrivacy);
+
+ // Receive and pass through messages from wallet model
+ connect(walletModel, &WalletModel::message, this, &WalletView::message);
+
+ // Handle changes in encryption status
+ connect(walletModel, &WalletModel::encryptionStatusChanged, this, &WalletView::encryptionStatusChanged);
+
+ // Balloon pop-up for new transaction
+ connect(walletModel->getTransactionTableModel(), &TransactionTableModel::rowsInserted, this, &WalletView::processNewTransaction);
+
+ // Ask for passphrase if needed
+ connect(walletModel, &WalletModel::requireUnlock, this, &WalletView::unlockWallet);
+
+ // Show progress dialog
+ connect(walletModel, &WalletModel::showProgress, this, &WalletView::showProgress);
}
WalletView::~WalletView()
@@ -100,49 +122,15 @@ void WalletView::setClientModel(ClientModel *_clientModel)
overviewPage->setClientModel(_clientModel);
sendCoinsPage->setClientModel(_clientModel);
- if (walletModel) walletModel->setClientModel(_clientModel);
-}
-
-void WalletView::setWalletModel(WalletModel *_walletModel)
-{
- this->walletModel = _walletModel;
-
- // Put transaction list in tabs
- transactionView->setModel(_walletModel);
- overviewPage->setWalletModel(_walletModel);
- receiveCoinsPage->setModel(_walletModel);
- sendCoinsPage->setModel(_walletModel);
- usedReceivingAddressesPage->setModel(_walletModel ? _walletModel->getAddressTableModel() : nullptr);
- usedSendingAddressesPage->setModel(_walletModel ? _walletModel->getAddressTableModel() : nullptr);
-
- if (_walletModel)
- {
- // Receive and pass through messages from wallet model
- connect(_walletModel, &WalletModel::message, this, &WalletView::message);
-
- // Handle changes in encryption status
- connect(_walletModel, &WalletModel::encryptionStatusChanged, this, &WalletView::encryptionStatusChanged);
- updateEncryptionStatus();
-
- // update HD status
- Q_EMIT hdEnabledStatusChanged();
-
- // Balloon pop-up for new transaction
- connect(_walletModel->getTransactionTableModel(), &TransactionTableModel::rowsInserted, this, &WalletView::processNewTransaction);
-
- // Ask for passphrase if needed
- connect(_walletModel, &WalletModel::requireUnlock, this, &WalletView::unlockWallet);
-
- // Show progress dialog
- connect(_walletModel, &WalletModel::showProgress, this, &WalletView::showProgress);
- }
+ walletModel->setClientModel(_clientModel);
}
void WalletView::processNewTransaction(const QModelIndex& parent, int start, int /*end*/)
{
// Prevent balloon-spam when initial block download is in progress
- if (!walletModel || !clientModel || clientModel->node().isInitialBlockDownload())
+ if (!clientModel || clientModel->node().isInitialBlockDownload()) {
return;
+ }
TransactionTableModel *ttm = walletModel->getTransactionTableModel();
if (!ttm || ttm->processingQueuedTransactions())
@@ -205,44 +193,6 @@ void WalletView::gotoVerifyMessageTab(QString addr)
signVerifyMessageDialog->setAddress_VM(addr);
}
-void WalletView::gotoLoadPSBT(bool from_clipboard)
-{
- std::string data;
-
- if (from_clipboard) {
- std::string raw = QApplication::clipboard()->text().toStdString();
- bool invalid;
- data = DecodeBase64(raw, &invalid);
- if (invalid) {
- Q_EMIT message(tr("Error"), tr("Unable to decode PSBT from clipboard (invalid base64)"), CClientUIInterface::MSG_ERROR);
- return;
- }
- } else {
- QString filename = GUIUtil::getOpenFileName(this,
- tr("Load Transaction Data"), QString(),
- tr("Partially Signed Transaction (*.psbt)"), nullptr);
- if (filename.isEmpty()) return;
- if (GetFileSize(filename.toLocal8Bit().data(), MAX_FILE_SIZE_PSBT) == MAX_FILE_SIZE_PSBT) {
- Q_EMIT message(tr("Error"), tr("PSBT file must be smaller than 100 MiB"), CClientUIInterface::MSG_ERROR);
- return;
- }
- std::ifstream in(filename.toLocal8Bit().data(), std::ios::binary);
- data = std::string(std::istreambuf_iterator<char>{in}, {});
- }
-
- std::string error;
- PartiallySignedTransaction psbtx;
- if (!DecodeRawPSBT(psbtx, data, error)) {
- Q_EMIT message(tr("Error"), tr("Unable to decode PSBT") + "\n" + QString::fromStdString(error), CClientUIInterface::MSG_ERROR);
- return;
- }
-
- PSBTOperationsDialog* dlg = new PSBTOperationsDialog(this, walletModel, clientModel);
- dlg->openWithPSBT(psbtx);
- dlg->setAttribute(Qt::WA_DeleteOnClose);
- dlg->exec();
-}
-
bool WalletView::handlePaymentRequest(const SendCoinsRecipient& recipient)
{
return sendCoinsPage->handlePaymentRequest(recipient);
@@ -253,20 +203,13 @@ void WalletView::showOutOfSyncWarning(bool fShow)
overviewPage->showOutOfSyncWarning(fShow);
}
-void WalletView::updateEncryptionStatus()
-{
- Q_EMIT encryptionStatusChanged();
-}
-
void WalletView::encryptWallet()
{
- if(!walletModel)
- return;
AskPassphraseDialog dlg(AskPassphraseDialog::Encrypt, this);
dlg.setModel(walletModel);
dlg.exec();
- updateEncryptionStatus();
+ Q_EMIT encryptionStatusChanged();
}
void WalletView::backupWallet()
@@ -298,8 +241,6 @@ void WalletView::changePassphrase()
void WalletView::unlockWallet()
{
- if(!walletModel)
- return;
// Unlock wallet when requested by wallet model
if (walletModel->getEncryptionStatus() == WalletModel::Locked)
{
@@ -311,17 +252,11 @@ void WalletView::unlockWallet()
void WalletView::usedSendingAddresses()
{
- if(!walletModel)
- return;
-
GUIUtil::bringToFront(usedSendingAddressesPage);
}
void WalletView::usedReceivingAddresses()
{
- if(!walletModel)
- return;
-
GUIUtil::bringToFront(usedReceivingAddressesPage);
}
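
The WalletView changes above move from setter injection (setWalletModel) to constructor injection with a const member pointer, which is what allows the repeated if(!walletModel) guards in the slots to be dropped. A generic sketch of that pattern, using hypothetical Model/View types rather than the real classes:

    #include <cassert>

    struct Model { int balance{0}; };

    class View
    {
    public:
        explicit View(Model* model) : m_model{model} { assert(m_model); } // fail fast, once
        int balance() const { return m_model->balance; } // no per-slot null checks needed
    private:
        Model* const m_model; // bound at construction, never reseated
    };

    int main()
    {
        Model m{42};
        View v{&m};
        return v.balance() == 42 ? 0 : 1;
    }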
diff --git a/src/qt/walletview.h b/src/qt/walletview.h
index fedf06b710..eebc163624 100644
--- a/src/qt/walletview.h
+++ b/src/qt/walletview.h
@@ -35,19 +35,14 @@ class WalletView : public QStackedWidget
Q_OBJECT
public:
- explicit WalletView(const PlatformStyle *platformStyle, QWidget *parent);
+ explicit WalletView(WalletModel* wallet_model, const PlatformStyle* platformStyle, QWidget* parent);
~WalletView();
/** Set the client model.
The client model represents the part of the core that communicates with the P2P network, and is wallet-agnostic.
*/
void setClientModel(ClientModel *clientModel);
- WalletModel *getWalletModel() { return walletModel; }
- /** Set the wallet model.
- The wallet model represents a bitcoin wallet, and offers access to the list of transactions, address book and sending
- functionality.
- */
- void setWalletModel(WalletModel *walletModel);
+ WalletModel* getWalletModel() const noexcept { return walletModel; }
bool handlePaymentRequest(const SendCoinsRecipient& recipient);
@@ -55,7 +50,12 @@ public:
private:
ClientModel *clientModel;
- WalletModel *walletModel;
+
+ //!
+ //! The wallet model represents a bitcoin wallet, and offers access to
+ //! the list of transactions, address book and sending functionality.
+ //!
+ WalletModel* const walletModel;
OverviewPage *overviewPage;
QWidget *transactionsPage;
@@ -83,8 +83,6 @@ public Q_SLOTS:
void gotoSignMessageTab(QString addr = "");
/** Show Sign/Verify Message dialog and switch to verify message tab */
void gotoVerifyMessageTab(QString addr = "");
- /** Load Partially Signed Bitcoin Transaction */
- void gotoLoadPSBT(bool from_clipboard = false);
/** Show incoming transaction notification for new transactions.
@@ -105,9 +103,6 @@ public Q_SLOTS:
/** Show used receiving addresses */
void usedReceivingAddresses();
- /** Re-emit encryption status signal */
- void updateEncryptionStatus();
-
/** Show progress dialog e.g. for rescan */
void showProgress(const QString &title, int nProgress);
@@ -119,8 +114,6 @@ Q_SIGNALS:
void message(const QString &title, const QString &message, unsigned int style);
/** Encryption status of wallet changed */
void encryptionStatusChanged();
- /** HD-Enabled status of wallet changed (only possible during startup) */
- void hdEnabledStatusChanged();
/** Notify that a new transaction appeared */
void incomingTransaction(const QString& date, int unit, const CAmount& amount, const QString& type, const QString& address, const QString& label, const QString& walletName);
/** Notify that the out of sync warning icon has been pressed */
diff --git a/src/qt/winshutdownmonitor.h b/src/qt/winshutdownmonitor.h
index 8edb98c744..bf399edcf3 100644
--- a/src/qt/winshutdownmonitor.h
+++ b/src/qt/winshutdownmonitor.h
@@ -17,7 +17,7 @@ class WinShutdownMonitor : public QAbstractNativeEventFilter
{
public:
/** Implements QAbstractNativeEventFilter interface for processing Windows messages */
- bool nativeEventFilter(const QByteArray &eventType, void *pMessage, long *pnResult);
+ bool nativeEventFilter(const QByteArray &eventType, void *pMessage, long *pnResult) override;
/** Register the reason for blocking shutdown on Windows to allow clean client exit */
static void registerShutdownBlockReason(const QString& strReason, const HWND& mainWinId);
diff --git a/src/rest.cpp b/src/rest.cpp
index d599f381e3..e50ab33e54 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -524,6 +524,7 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
// convert hex to bin, continue then with bin part
std::vector<unsigned char> strRequestV = ParseHex(strRequestMutable);
strRequestMutable.assign(strRequestV.begin(), strRequestV.end());
+ [[fallthrough]];
}
case RetFormat::BINARY: {
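
The [[fallthrough]] added here (and in the getblocktemplate hunk further down) is the C++17 attribute that marks deliberate case fall-through so -Wimplicit-fallthrough stays quiet. A minimal sketch of the idiom, unrelated to the rest.cpp logic itself:

    #include <cstdio>

    static void describe(int fmt)
    {
        switch (fmt) {
        case 0:
            std::puts("hex request: converted to binary first");
            [[fallthrough]]; // intentional: continue with the shared binary handling
        case 1:
            std::puts("binary request handled");
            break;
        default:
            std::puts("unsupported format");
            break;
        }
    }

    int main()
    {
        describe(0);
        return 0;
    }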
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index b630458f23..909019d796 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -36,6 +36,7 @@
#include <txmempool.h>
#include <undo.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <util/system.h>
#include <util/translation.h>
#include <validation.h>
@@ -1120,13 +1121,13 @@ static RPCHelpMan gettxoutsetinfo()
{RPCResult::Type::STR_AMOUNT, "total_unspendable_amount", "The total amount of coins permanently excluded from the UTXO set (only available if coinstatsindex is used)"},
{RPCResult::Type::OBJ, "block_info", "Info on amounts in the block at this block height (only available if coinstatsindex is used)",
{
- {RPCResult::Type::STR_AMOUNT, "prevout_spent", ""},
- {RPCResult::Type::STR_AMOUNT, "coinbase", ""},
- {RPCResult::Type::STR_AMOUNT, "new_outputs_ex_coinbase", ""},
- {RPCResult::Type::STR_AMOUNT, "unspendable", ""},
+ {RPCResult::Type::STR_AMOUNT, "prevout_spent", "Total amount of all prevouts spent in this block"},
+ {RPCResult::Type::STR_AMOUNT, "coinbase", "Coinbase subsidy amount of this block"},
+ {RPCResult::Type::STR_AMOUNT, "new_outputs_ex_coinbase", "Total amount of new outputs created by this block"},
+ {RPCResult::Type::STR_AMOUNT, "unspendable", "Total amount of unspendable outputs created in this block"},
{RPCResult::Type::OBJ, "unspendables", "Detailed view of the unspendable categories",
{
- {RPCResult::Type::STR_AMOUNT, "genesis_block", ""},
+ {RPCResult::Type::STR_AMOUNT, "genesis_block", "The unspendable amount of the Genesis block subsidy"},
{RPCResult::Type::STR_AMOUNT, "bip30", "Transactions overridden by duplicates (no longer possible with BIP30)"},
{RPCResult::Type::STR_AMOUNT, "scripts", "Amounts sent to scripts that are unspendable (for example OP_RETURN outputs)"},
{RPCResult::Type::STR_AMOUNT, "unclaimed_rewards", "Fee rewards that miners did not claim in their coinbase transaction"},
@@ -1178,6 +1179,18 @@ static RPCHelpMan gettxoutsetinfo()
pindex = ParseHashOrHeight(request.params[1], chainman);
}
+ if (stats.index_requested && g_coin_stats_index) {
+ if (!g_coin_stats_index->BlockUntilSyncedToCurrentChain()) {
+ const IndexSummary summary{g_coin_stats_index->GetSummary()};
+
+ // If a specific block was requested and the index has already synced past that height, we can return the
+ // data already even though the index is not fully synced yet.
+ if (pindex->nHeight > summary.best_block_height) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("Unable to get data because coinstatsindex is still syncing. Current height: %d", summary.best_block_height));
+ }
+ }
+ }
+
if (GetUTXOStats(coins_view, *blockman, stats, node.rpc_interruption_point, pindex)) {
ret.pushKV("height", (int64_t)stats.nHeight);
ret.pushKV("bestblock", stats.hashBlock.GetHex());
@@ -1194,7 +1207,7 @@ static RPCHelpMan gettxoutsetinfo()
ret.pushKV("transactions", static_cast<int64_t>(stats.nTransactions));
ret.pushKV("disk_size", stats.nDiskSize);
} else {
- ret.pushKV("total_unspendable_amount", ValueFromAmount(stats.block_unspendable_amount));
+ ret.pushKV("total_unspendable_amount", ValueFromAmount(stats.total_unspendable_amount));
CCoinsStats prev_stats{hash_type};
@@ -1203,28 +1216,21 @@ static RPCHelpMan gettxoutsetinfo()
}
UniValue block_info(UniValue::VOBJ);
- block_info.pushKV("prevout_spent", ValueFromAmount(stats.block_prevout_spent_amount - prev_stats.block_prevout_spent_amount));
- block_info.pushKV("coinbase", ValueFromAmount(stats.block_coinbase_amount - prev_stats.block_coinbase_amount));
- block_info.pushKV("new_outputs_ex_coinbase", ValueFromAmount(stats.block_new_outputs_ex_coinbase_amount - prev_stats.block_new_outputs_ex_coinbase_amount));
- block_info.pushKV("unspendable", ValueFromAmount(stats.block_unspendable_amount - prev_stats.block_unspendable_amount));
+ block_info.pushKV("prevout_spent", ValueFromAmount(stats.total_prevout_spent_amount - prev_stats.total_prevout_spent_amount));
+ block_info.pushKV("coinbase", ValueFromAmount(stats.total_coinbase_amount - prev_stats.total_coinbase_amount));
+ block_info.pushKV("new_outputs_ex_coinbase", ValueFromAmount(stats.total_new_outputs_ex_coinbase_amount - prev_stats.total_new_outputs_ex_coinbase_amount));
+ block_info.pushKV("unspendable", ValueFromAmount(stats.total_unspendable_amount - prev_stats.total_unspendable_amount));
UniValue unspendables(UniValue::VOBJ);
- unspendables.pushKV("genesis_block", ValueFromAmount(stats.unspendables_genesis_block - prev_stats.unspendables_genesis_block));
- unspendables.pushKV("bip30", ValueFromAmount(stats.unspendables_bip30 - prev_stats.unspendables_bip30));
- unspendables.pushKV("scripts", ValueFromAmount(stats.unspendables_scripts - prev_stats.unspendables_scripts));
- unspendables.pushKV("unclaimed_rewards", ValueFromAmount(stats.unspendables_unclaimed_rewards - prev_stats.unspendables_unclaimed_rewards));
+ unspendables.pushKV("genesis_block", ValueFromAmount(stats.total_unspendables_genesis_block - prev_stats.total_unspendables_genesis_block));
+ unspendables.pushKV("bip30", ValueFromAmount(stats.total_unspendables_bip30 - prev_stats.total_unspendables_bip30));
+ unspendables.pushKV("scripts", ValueFromAmount(stats.total_unspendables_scripts - prev_stats.total_unspendables_scripts));
+ unspendables.pushKV("unclaimed_rewards", ValueFromAmount(stats.total_unspendables_unclaimed_rewards - prev_stats.total_unspendables_unclaimed_rewards));
block_info.pushKV("unspendables", unspendables);
ret.pushKV("block_info", block_info);
}
} else {
- if (g_coin_stats_index) {
- const IndexSummary summary{g_coin_stats_index->GetSummary()};
-
- if (!summary.synced) {
- throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("Unable to read UTXO set because coinstatsindex is still syncing. Current height: %d", summary.best_block_height));
- }
- }
throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set");
}
return ret;
@@ -1323,7 +1329,7 @@ static RPCHelpMan verifychain()
"\nVerifies blockchain database.\n",
{
{"checklevel", RPCArg::Type::NUM, RPCArg::DefaultHint{strprintf("%d, range=0-4", DEFAULT_CHECKLEVEL)},
- strprintf("How thorough the block verification is:\n - %s", Join(CHECKLEVEL_DOC, "\n- "))},
+ strprintf("How thorough the block verification is:\n%s", MakeUnorderedList(CHECKLEVEL_DOC))},
{"nblocks", RPCArg::Type::NUM, RPCArg::DefaultHint{strprintf("%d, 0=all", DEFAULT_CHECKBLOCKS)}, "The number of blocks to check."},
},
RPCResult{
@@ -1350,10 +1356,7 @@ static RPCHelpMan verifychain()
static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue& softforks, const Consensus::Params& params, Consensus::BuriedDeployment dep)
{
// For buried deployments.
- // A buried deployment is one where the height of the activation has been hardcoded into
- // the client implementation long after the consensus change has activated. See BIP 90.
- // Buried deployments with activation height value of
- // std::numeric_limits<int>::max() are disabled and thus hidden.
+
if (!DeploymentEnabled(params, dep)) return;
UniValue rv(UniValue::VOBJ);
@@ -1368,8 +1371,8 @@ static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue&
static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue& softforks, const Consensus::Params& consensusParams, Consensus::DeploymentPos id)
{
// For BIP9 deployments.
- // Deployments that are never active are hidden.
- if (consensusParams.vDeployments[id].nStartTime == Consensus::BIP9Deployment::NEVER_ACTIVE) return;
+
+ if (!DeploymentEnabled(consensusParams, id)) return;
UniValue bip9(UniValue::VOBJ);
const ThresholdState thresholdState = g_versionbitscache.State(active_chain_tip, consensusParams, id);
@@ -1380,23 +1383,24 @@ static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue&
case ThresholdState::ACTIVE: bip9.pushKV("status", "active"); break;
case ThresholdState::FAILED: bip9.pushKV("status", "failed"); break;
}
- if (ThresholdState::STARTED == thresholdState)
- {
+ const bool has_signal = (ThresholdState::STARTED == thresholdState || ThresholdState::LOCKED_IN == thresholdState);
+ if (has_signal) {
bip9.pushKV("bit", consensusParams.vDeployments[id].bit);
}
bip9.pushKV("start_time", consensusParams.vDeployments[id].nStartTime);
bip9.pushKV("timeout", consensusParams.vDeployments[id].nTimeout);
int64_t since_height = g_versionbitscache.StateSinceHeight(active_chain_tip, consensusParams, id);
bip9.pushKV("since", since_height);
- if (ThresholdState::STARTED == thresholdState)
- {
+ if (has_signal) {
UniValue statsUV(UniValue::VOBJ);
BIP9Stats statsStruct = g_versionbitscache.Statistics(active_chain_tip, consensusParams, id);
statsUV.pushKV("period", statsStruct.period);
- statsUV.pushKV("threshold", statsStruct.threshold);
statsUV.pushKV("elapsed", statsStruct.elapsed);
statsUV.pushKV("count", statsStruct.count);
- statsUV.pushKV("possible", statsStruct.possible);
+ if (ThresholdState::LOCKED_IN != thresholdState) {
+ statsUV.pushKV("threshold", statsStruct.threshold);
+ statsUV.pushKV("possible", statsStruct.possible);
+ }
bip9.pushKV("statistics", statsUV);
}
bip9.pushKV("min_activation_height", consensusParams.vDeployments[id].min_activation_height);
@@ -1425,7 +1429,8 @@ RPCHelpMan getblockchaininfo()
{RPCResult::Type::NUM, "headers", "the current number of headers we have validated"},
{RPCResult::Type::STR, "bestblockhash", "the hash of the currently best block"},
{RPCResult::Type::NUM, "difficulty", "the current difficulty"},
- {RPCResult::Type::NUM, "mediantime", "median time for the current best block"},
+ {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME},
{RPCResult::Type::NUM, "verificationprogress", "estimate of verification progress [0..1]"},
{RPCResult::Type::BOOL, "initialblockdownload", "(debug information) estimate of whether this node is in Initial Block Download mode"},
{RPCResult::Type::STR_HEX, "chainwork", "total amount of work in active chain, in hexadecimal"},
@@ -1442,18 +1447,18 @@ RPCHelpMan getblockchaininfo()
{RPCResult::Type::OBJ, "bip9", "status of bip9 softforks (only for \"bip9\" type)",
{
{RPCResult::Type::STR, "status", "one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\""},
- {RPCResult::Type::NUM, "bit", "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)"},
+ {RPCResult::Type::NUM, "bit", "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" and \"locked_in\" status)"},
{RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"},
{RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"},
{RPCResult::Type::NUM, "since", "height of the first block to which the status applies"},
{RPCResult::Type::NUM, "min_activation_height", "minimum height of blocks for which the rules may be enforced"},
- {RPCResult::Type::OBJ, "statistics", "numeric statistics about BIP9 signalling for a softfork (only for \"started\" status)",
+ {RPCResult::Type::OBJ, "statistics", "numeric statistics about signalling for a softfork (only for \"started\" and \"locked_in\" status)",
{
- {RPCResult::Type::NUM, "period", "the length in blocks of the BIP9 signalling period"},
- {RPCResult::Type::NUM, "threshold", "the number of blocks with the version bit set required to activate the feature"},
+ {RPCResult::Type::NUM, "period", "the length in blocks of the signalling period"},
+ {RPCResult::Type::NUM, "threshold", "the number of blocks with the version bit set required to activate the feature (only for \"started\" status)"},
{RPCResult::Type::NUM, "elapsed", "the number of blocks elapsed since the beginning of the current period"},
{RPCResult::Type::NUM, "count", "the number of blocks with the version bit set in the current period"},
- {RPCResult::Type::BOOL, "possible", "returns false if there are not enough blocks left in this period to pass activation threshold"},
+ {RPCResult::Type::BOOL, "possible", "returns false if there are not enough blocks left in this period to pass activation threshold (only for \"started\" status)"},
}},
}},
{RPCResult::Type::NUM, "height", "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"},
@@ -1481,6 +1486,7 @@ RPCHelpMan getblockchaininfo()
obj.pushKV("headers", pindexBestHeader ? pindexBestHeader->nHeight : -1);
obj.pushKV("bestblockhash", tip->GetBlockHash().GetHex());
obj.pushKV("difficulty", (double)GetDifficulty(tip));
+ obj.pushKV("time", (int64_t)tip->nTime);
obj.pushKV("mediantime", (int64_t)tip->GetMedianTimePast());
obj.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), tip));
obj.pushKV("initialblockdownload", active_chainstate.IsInitialBlockDownload());
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 9c8582c7a3..4357ab2bb3 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -142,6 +142,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "importmulti", 0, "requests" },
{ "importmulti", 1, "options" },
{ "importdescriptors", 0, "requests" },
+ { "listdescriptors", 0, "private" },
{ "verifychain", 0, "checklevel" },
{ "verifychain", 1, "nblocks" },
{ "getblockstats", 0, "hash_or_height" },
@@ -186,6 +187,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "createwallet", 5, "descriptors"},
{ "createwallet", 6, "load_on_startup"},
{ "createwallet", 7, "external_signer"},
+ { "restorewallet", 2, "load_on_startup"},
{ "loadwallet", 1, "load_on_startup"},
{ "unloadwallet", 1, "load_on_startup"},
{ "getnodeaddresses", 0, "count"},
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 2762d78493..27cbb3a702 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -850,7 +850,7 @@ static RPCHelpMan getblocktemplate()
case ThresholdState::LOCKED_IN:
// Ensure bit is set in block version
pblock->nVersion |= g_versionbitscache.Mask(consensusParams, pos);
- // FALL THROUGH to get vbavailable set...
+ [[fallthrough]];
case ThresholdState::STARTED:
{
const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos];
@@ -1089,7 +1089,8 @@ static RPCHelpMan estimatesmartfee()
"have been observed to make an estimate for any number of blocks."},
}},
RPCExamples{
- HelpExampleCli("estimatesmartfee", "6")
+ HelpExampleCli("estimatesmartfee", "6") +
+ HelpExampleRpc("estimatesmartfee", "6")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index 5178ce60e8..e5804211f3 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -24,6 +24,7 @@
#include <util/strencodings.h>
#include <util/system.h>
+#include <optional>
#include <stdint.h>
#include <tuple>
#ifdef HAVE_MALLOC_INFO
@@ -108,7 +109,7 @@ static RPCHelpMan createmultisig()
"\nCreate a multisig address from 2 public keys\n"
+ HelpExampleCli("createmultisig", "2 \"[\\\"03789ed0bb717d88f7d321a368d905e7430207ebbd82bd342cf11ae157a7ace5fd\\\",\\\"03dbc6764b8884a92e871274b87583e6d5c2a58819473e17e107ef3f6aa5a61626\\\"]\"") +
"\nAs a JSON-RPC call\n"
- + HelpExampleRpc("createmultisig", "2, \"[\\\"03789ed0bb717d88f7d321a368d905e7430207ebbd82bd342cf11ae157a7ace5fd\\\",\\\"03dbc6764b8884a92e871274b87583e6d5c2a58819473e17e107ef3f6aa5a61626\\\"]\"")
+ + HelpExampleRpc("createmultisig", "2, [\"03789ed0bb717d88f7d321a368d905e7430207ebbd82bd342cf11ae157a7ace5fd\",\"03dbc6764b8884a92e871274b87583e6d5c2a58819473e17e107ef3f6aa5a61626\"]")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
@@ -128,12 +129,13 @@ static RPCHelpMan createmultisig()
// Get the output type
OutputType output_type = OutputType::LEGACY;
if (!request.params[2].isNull()) {
- if (!ParseOutputType(request.params[2].get_str(), output_type)) {
+ std::optional<OutputType> parsed = ParseOutputType(request.params[2].get_str());
+ if (!parsed) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[2].get_str()));
- }
- if (output_type == OutputType::BECH32M) {
+ } else if (parsed.value() == OutputType::BECH32M) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "createmultisig cannot create bech32m multisig addresses");
}
+ output_type = parsed.value();
}
// Construct using pay-to-script-hash:
@@ -156,6 +158,8 @@ static RPCHelpMan createmultisig()
static RPCHelpMan getdescriptorinfo()
{
+ const std::string EXAMPLE_DESCRIPTOR = "wpkh([d34db33f/84h/0h/0h]0279be667ef9dcbbac55a06295Ce870b07029Bfcdb2dce28d959f2815b16f81798)";
+
return RPCHelpMan{"getdescriptorinfo",
{"\nAnalyses a descriptor.\n"},
{
@@ -173,7 +177,8 @@ static RPCHelpMan getdescriptorinfo()
},
RPCExamples{
"Analyse a descriptor\n" +
- HelpExampleCli("getdescriptorinfo", "\"wpkh([d34db33f/84h/0h/0h]0279be667ef9dcbbac55a06295Ce870b07029Bfcdb2dce28d959f2815b16f81798)\"")
+ HelpExampleCli("getdescriptorinfo", "\"" + EXAMPLE_DESCRIPTOR + "\"") +
+ HelpExampleRpc("getdescriptorinfo", "\"" + EXAMPLE_DESCRIPTOR + "\"")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
@@ -199,6 +204,8 @@ static RPCHelpMan getdescriptorinfo()
static RPCHelpMan deriveaddresses()
{
+ const std::string EXAMPLE_DESCRIPTOR = "wpkh([d34db33f/84h/0h/0h]xpub6DJ2dNUysrn5Vt36jH2KLBT2i1auw1tTSSomg8PhqNiUtx8QX2SvC9nrHu81fT41fvDUnhMjEzQgXnQjKEu3oaqMSzhSrHMxyyoEAmUHQbY/0/*)#cjjspncu";
+
return RPCHelpMan{"deriveaddresses",
{"\nDerives one or more addresses corresponding to an output descriptor.\n"
"Examples of output descriptors are:\n"
@@ -221,7 +228,8 @@ static RPCHelpMan deriveaddresses()
},
RPCExamples{
"First three native segwit receive addresses\n" +
- HelpExampleCli("deriveaddresses", "\"wpkh([d34db33f/84h/0h/0h]xpub6DJ2dNUysrn5Vt36jH2KLBT2i1auw1tTSSomg8PhqNiUtx8QX2SvC9nrHu81fT41fvDUnhMjEzQgXnQjKEu3oaqMSzhSrHMxyyoEAmUHQbY/0/*)#cjjspncu\" \"[0,2]\"")
+ HelpExampleCli("deriveaddresses", "\"" + EXAMPLE_DESCRIPTOR + "\" \"[0,2]\"") +
+ HelpExampleRpc("deriveaddresses", "\"" + EXAMPLE_DESCRIPTOR + "\", \"[0,2]\"")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
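
createmultisig now consumes a ParseOutputType that returns std::optional instead of filling an out-parameter. A small sketch of that parse idiom, with a hypothetical ParseChoice standing in for the real function:

    #include <cassert>
    #include <optional>
    #include <string>

    enum class Choice { Legacy, Bech32 };

    // Hypothetical parser; the real ParseOutputType lives elsewhere in the tree.
    static std::optional<Choice> ParseChoice(const std::string& s)
    {
        if (s == "legacy") return Choice::Legacy;
        if (s == "bech32") return Choice::Bech32;
        return std::nullopt; // unknown input: no out-parameter, no sentinel value
    }

    int main()
    {
        assert(ParseChoice("bech32") == Choice::Bech32);
        assert(!ParseChoice("p2tr").has_value());
        return 0;
    }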
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 3013c76825..0f554ec5e7 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -150,6 +150,9 @@ static RPCHelpMan getpeerinfo()
{
{RPCResult::Type::NUM, "n", "The heights of blocks we're currently asking from this peer"},
}},
+ {RPCResult::Type::BOOL, "addr_relay_enabled", "Whether we participate in address relay with this peer"},
+ {RPCResult::Type::NUM, "addr_processed", "The total number of addresses processed, excluding those dropped due to rate limiting"},
+ {RPCResult::Type::NUM, "addr_rate_limited", "The total number of addresses dropped due to rate limiting"},
{RPCResult::Type::ARR, "permissions", "Any special permissions that have been granted to this peer",
{
{RPCResult::Type::STR, "permission_type", Join(NET_PERMISSIONS_DOC, ",\n") + ".\n"},
@@ -194,7 +197,7 @@ static RPCHelpMan getpeerinfo()
CNodeStateStats statestats;
bool fStateStats = peerman.GetNodeStateStats(stats.nodeid, statestats);
obj.pushKV("id", stats.nodeid);
- obj.pushKV("addr", stats.addrName);
+ obj.pushKV("addr", stats.m_addr_name);
if (stats.addrBind.IsValid()) {
obj.pushKV("addrbind", stats.addrBind.ToString());
}
@@ -242,6 +245,9 @@ static RPCHelpMan getpeerinfo()
heights.push_back(height);
}
obj.pushKV("inflight", heights);
+ obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled);
+ obj.pushKV("addr_processed", statestats.m_addr_processed);
+ obj.pushKV("addr_rate_limited", statestats.m_addr_rate_limited);
}
UniValue permissions(UniValue::VARR);
for (const auto& permission : NetPermissions::ToStrings(stats.m_permissionFlags)) {
@@ -337,7 +343,7 @@ static RPCHelpMan addconnection()
"\nOpen an outbound connection to a specified node. This RPC is for testing only.\n",
{
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP address and port to attempt connecting to."},
- {"connection_type", RPCArg::Type::STR, RPCArg::Optional::NO, "Type of connection to open, either \"outbound-full-relay\" or \"block-relay-only\"."},
+ {"connection_type", RPCArg::Type::STR, RPCArg::Optional::NO, "Type of connection to open (\"outbound-full-relay\", \"block-relay-only\" or \"addr-fetch\")."},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -363,6 +369,8 @@ static RPCHelpMan addconnection()
conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
} else if (conn_type_in == "block-relay-only") {
conn_type = ConnectionType::BLOCK_RELAY;
+ } else if (conn_type_in == "addr-fetch") {
+ conn_type = ConnectionType::ADDR_FETCH;
} else {
throw JSONRPCError(RPC_INVALID_PARAMETER, self.ToString());
}
@@ -943,7 +951,7 @@ static RPCHelpMan addpeeraddress()
address.nTime = GetAdjustedTime();
// The source address is set equal to the address. This is equivalent to the peer
// announcing itself.
- if (node.addrman->Add(address, address)) success = true;
+ if (node.addrman->Add({address}, address)) success = true;
}
obj.pushKV("success", success);
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index ccb3123714..00e77d89e5 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -74,12 +74,10 @@ static RPCHelpMan getrawtransaction()
"getrawtransaction",
"\nReturn the raw transaction data.\n"
- "\nBy default this function only works for mempool transactions. When called with a blockhash\n"
- "argument, getrawtransaction will return the transaction if the specified block is available and\n"
- "the transaction is found in that block. When called without a blockhash argument, getrawtransaction\n"
- "will return the transaction if it is in the mempool, or if -txindex is enabled and the transaction\n"
- "is in a block in the blockchain.\n"
-
+ "\nBy default, this call only returns a transaction if it is in the mempool. If -txindex is enabled\n"
+ "and no blockhash argument is passed, it will return the transaction if it is in the mempool or any block.\n"
+ "If a blockhash argument is passed, it will return the transaction if\n"
+ "the specified block is available and the transaction is in that block.\n"
"\nHint: Use gettransaction for wallet transactions.\n"
"\nIf verbose is 'true', returns an Object with information about 'txid'.\n"
@@ -894,8 +892,7 @@ static RPCHelpMan testmempoolaccept()
"\nThis checks if transactions violate the consensus or policy rules.\n"
"\nSee sendrawtransaction call.\n",
{
- {"rawtxs", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of hex strings of raw transactions.\n"
- " Length must be one for now.",
+ {"rawtxs", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of hex strings of raw transactions.",
{
{"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""},
},
@@ -906,7 +903,7 @@ static RPCHelpMan testmempoolaccept()
RPCResult{
RPCResult::Type::ARR, "", "The result of the mempool acceptance test for each raw transaction in the input array.\n"
"Returns results for each transaction in the same order they were passed in.\n"
- "It is possible for transactions to not be fully validated ('allowed' unset) if another transaction failed.\n",
+ "Transactions that cannot be fully validated due to failures in other transactions will not contain an 'allowed' result.\n",
{
{RPCResult::Type::OBJ, "", "",
{
diff --git a/src/rpc/rawtransaction_util.cpp b/src/rpc/rawtransaction_util.cpp
index 122a92f084..f21eddf56c 100644
--- a/src/rpc/rawtransaction_util.cpp
+++ b/src/rpc/rawtransaction_util.cpp
@@ -18,6 +18,7 @@
#include <univalue.h>
#include <util/rbf.h>
#include <util/strencodings.h>
+#include <util/translation.h>
CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniValue& outputs_in, const UniValue& locktime, bool rbf)
{
@@ -280,22 +281,22 @@ void SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore,
int nHashType = ParseSighashString(hashType);
// Script verification errors
- std::map<int, std::string> input_errors;
+ std::map<int, bilingual_str> input_errors;
bool complete = SignTransaction(mtx, keystore, coins, nHashType, input_errors);
SignTransactionResultToJSON(mtx, complete, coins, input_errors, result);
}
-void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, const std::map<int, std::string>& input_errors, UniValue& result)
+void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, const std::map<int, bilingual_str>& input_errors, UniValue& result)
{
// Make errors UniValue
UniValue vErrors(UniValue::VARR);
for (const auto& err_pair : input_errors) {
- if (err_pair.second == "Missing amount") {
+ if (err_pair.second.original == "Missing amount") {
// This particular error needs to be an exception for some reason
throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing amount for %s", coins.at(mtx.vin.at(err_pair.first).prevout).out.ToString()));
}
- TxInErrorToJSON(mtx.vin.at(err_pair.first), vErrors, err_pair.second);
+ TxInErrorToJSON(mtx.vin.at(err_pair.first), vErrors, err_pair.second.original);
}
result.pushKV("hex", EncodeHexTx(CTransaction(mtx)));
diff --git a/src/rpc/rawtransaction_util.h b/src/rpc/rawtransaction_util.h
index ce7d5834fa..d2e116f7ee 100644
--- a/src/rpc/rawtransaction_util.h
+++ b/src/rpc/rawtransaction_util.h
@@ -8,6 +8,7 @@
#include <map>
#include <string>
+struct bilingual_str;
class FillableSigningProvider;
class UniValue;
struct CMutableTransaction;
@@ -25,7 +26,7 @@ class SigningProvider;
* @param result JSON object where signed transaction results accumulate
*/
void SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, const std::map<COutPoint, Coin>& coins, const UniValue& hashType, UniValue& result);
-void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, const std::map<int, std::string>& input_errors, UniValue& result);
+void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, const std::map<int, bilingual_str>& input_errors, UniValue& result);
/**
* Parse a prevtxs UniValue array and get the map of coins from it
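
Switching input_errors from std::string to bilingual_str lets the signing code carry translatable messages while the RPC layer keeps emitting the untranslated text via .original. A stripped-down sketch of the distinction; the struct below is a simplified assumption for illustration, not the real util/translation.h:

    #include <cassert>
    #include <string>

    struct bilingual_str {
        std::string original;   // untranslated English, stable across locales
        std::string translated; // what a GUI would display
    };

    static bilingual_str Untranslated(std::string s) { return {s, s}; }

    int main()
    {
        const bilingual_str err{Untranslated("Missing amount")};
        assert(err.original == "Missing amount"); // RPC output and comparisons use .original
        return 0;
    }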
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 682b55742a..621a1b9fd6 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -1242,14 +1242,8 @@ std::unique_ptr<PubkeyProvider> InferXOnlyPubkey(const XOnlyPubKey& xkey, ParseS
CPubKey pubkey(full_key);
std::unique_ptr<PubkeyProvider> key_provider = std::make_unique<ConstPubkeyProvider>(0, pubkey, true);
KeyOriginInfo info;
- if (provider.GetKeyOrigin(pubkey.GetID(), info)) {
+ if (provider.GetKeyOriginByXOnly(xkey, info)) {
return std::make_unique<OriginPubkeyProvider>(0, std::move(info), std::move(key_provider));
- } else {
- full_key[0] = 0x03;
- pubkey = CPubKey(full_key);
- if (provider.GetKeyOrigin(pubkey.GetID(), info)) {
- return std::make_unique<OriginPubkeyProvider>(0, std::move(info), std::move(key_provider));
- }
}
return key_provider;
}
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index ef48f89965..eafa9840d7 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1807,16 +1807,16 @@ bool GenericTransactionSignatureChecker<T>::CheckSequence(const CScriptNum& nSeq
template class GenericTransactionSignatureChecker<CTransaction>;
template class GenericTransactionSignatureChecker<CMutableTransaction>;
-static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CScript& scriptPubKey, unsigned int flags, SigVersion sigversion, const BaseSignatureChecker& checker, ScriptExecutionData& execdata, ScriptError* serror)
+static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CScript& exec_script, unsigned int flags, SigVersion sigversion, const BaseSignatureChecker& checker, ScriptExecutionData& execdata, ScriptError* serror)
{
std::vector<valtype> stack{stack_span.begin(), stack_span.end()};
if (sigversion == SigVersion::TAPSCRIPT) {
// OP_SUCCESSx processing overrides everything, including stack element size limits
- CScript::const_iterator pc = scriptPubKey.begin();
- while (pc < scriptPubKey.end()) {
+ CScript::const_iterator pc = exec_script.begin();
+ while (pc < exec_script.end()) {
opcodetype opcode;
- if (!scriptPubKey.GetOp(pc, opcode)) {
+ if (!exec_script.GetOp(pc, opcode)) {
// Note how this condition would not be reached if an unknown OP_SUCCESSx was found
return set_error(serror, SCRIPT_ERR_BAD_OPCODE);
}
@@ -1839,7 +1839,7 @@ static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CS
}
// Run the script interpreter.
- if (!EvalScript(stack, scriptPubKey, flags, checker, sigversion, execdata, serror)) return false;
+ if (!EvalScript(stack, exec_script, flags, checker, sigversion, execdata, serror)) return false;
// Scripts inside witness implicitly require cleanstack behaviour
if (stack.size() != 1) return set_error(serror, SCRIPT_ERR_CLEANSTACK);
@@ -1874,9 +1874,9 @@ static bool VerifyTaprootCommitment(const std::vector<unsigned char>& control, c
assert(control.size() >= TAPROOT_CONTROL_BASE_SIZE);
assert(program.size() >= uint256::size());
//! The internal pubkey (x-only, so no Y coordinate parity).
- const XOnlyPubKey p{uint256(std::vector<unsigned char>(control.begin() + 1, control.begin() + TAPROOT_CONTROL_BASE_SIZE))};
+ const XOnlyPubKey p{Span<const unsigned char>{control.data() + 1, control.data() + TAPROOT_CONTROL_BASE_SIZE}};
//! The output pubkey (taken from the scriptPubKey).
- const XOnlyPubKey q{uint256(program)};
+ const XOnlyPubKey q{program};
// Compute the Merkle root from the leaf and the provided path.
const uint256 merkle_root = ComputeTaprootMerkleRoot(control, tapleaf_hash);
// Verify that the output pubkey matches the tweaked internal pubkey, after correcting for parity.
diff --git a/src/script/interpreter.h b/src/script/interpreter.h
index 034c937b99..ab49e84577 100644
--- a/src/script/interpreter.h
+++ b/src/script/interpreter.h
@@ -39,8 +39,7 @@ enum
* All flags are intended to be soft forks: the set of acceptable scripts under
* flags (A | B) is a subset of the acceptable scripts under flag (A).
*/
-enum
-{
+enum : uint32_t {
SCRIPT_VERIFY_NONE = 0,
// Evaluate P2SH subscripts (BIP16).
@@ -140,6 +139,10 @@ enum
// Making unknown public key versions (in BIP 342 scripts) non-standard
SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE = (1U << 20),
+
+ // Constants to point to the highest flag in use. Add new flags above this line.
+ //
+ SCRIPT_VERIFY_END_MARKER
};
bool CheckSignatureEncoding(const std::vector<unsigned char> &vchSig, unsigned int flags, ScriptError* serror);
@@ -167,6 +170,13 @@ struct PrecomputedTransactionData
PrecomputedTransactionData() = default;
+ /** Initialize this PrecomputedTransactionData with transaction data.
+ *
+ * @param[in] tx The transaction for which data is being precomputed.
+ * @param[in] spent_outputs The CTxOuts being spent, one for each tx.vin, in order.
+ * @param[in] force Whether to precompute data for all optional features,
+ * regardless of what is in the inputs (used at signing
+ * time, when the inputs aren't filled in yet). */
template <class T>
void Init(const T& tx, std::vector<CTxOut>&& spent_outputs, bool force = false);
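
The SCRIPT_VERIFY_END_MARKER enumerator added above gives callers such as tests and fuzzers a way to derive the highest defined flag without hard-coding it. A toy sketch of the technique, using hypothetical flag names rather than the real script-verify set:

    #include <cassert>
    #include <cstdint>

    enum : uint32_t {
        FLAG_NONE = 0,
        FLAG_A = (1U << 0),
        FLAG_B = (1U << 1),
        // Add new flags above this line.
        FLAG_END_MARKER
    };

    int main()
    {
        const uint32_t highest_flag = FLAG_END_MARKER - 1;  // marker sits one past the top flag
        const uint32_t all_known = (highest_flag << 1) - 1; // every defined bit, assuming contiguous bits
        assert(highest_flag == FLAG_B);
        assert(all_known == (FLAG_A | FLAG_B));
        return 0;
    }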
diff --git a/src/script/script.h b/src/script/script.h
index 974cde4984..8cd1cc3855 100644
--- a/src/script/script.h
+++ b/src/script/script.h
@@ -6,6 +6,7 @@
#ifndef BITCOIN_SCRIPT_SCRIPT_H
#define BITCOIN_SCRIPT_SCRIPT_H
+#include <attributes.h>
#include <crypto/common.h>
#include <prevector.h>
#include <serialize.h>
@@ -438,9 +439,9 @@ public:
/** Delete non-existent operator to defend against future introduction */
CScript& operator<<(const CScript& b) = delete;
- CScript& operator<<(int64_t b) { return push_int64(b); }
+ CScript& operator<<(int64_t b) LIFETIMEBOUND { return push_int64(b); }
- CScript& operator<<(opcodetype opcode)
+ CScript& operator<<(opcodetype opcode) LIFETIMEBOUND
{
if (opcode < 0 || opcode > 0xff)
throw std::runtime_error("CScript::operator<<(): invalid opcode");
@@ -448,13 +449,13 @@ public:
return *this;
}
- CScript& operator<<(const CScriptNum& b)
+ CScript& operator<<(const CScriptNum& b) LIFETIMEBOUND
{
*this << b.getvch();
return *this;
}
- CScript& operator<<(const std::vector<unsigned char>& b)
+ CScript& operator<<(const std::vector<unsigned char>& b) LIFETIMEBOUND
{
if (b.size() < OP_PUSHDATA1)
{
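
The LIFETIMEBOUND annotations let clang warn when the reference returned by operator<< is bound to a CScript temporary that is about to be destroyed. A minimal sketch of the mechanism, assuming LIFETIMEBOUND expands to clang's [[clang::lifetimebound]] (as attributes.h typically defines it) and using a hypothetical Builder type:

    #if defined(__clang__)
    #define SKETCH_LIFETIMEBOUND [[clang::lifetimebound]]
    #else
    #define SKETCH_LIFETIMEBOUND
    #endif

    struct Builder {
        int value{0};
        Builder& add(int v) SKETCH_LIFETIMEBOUND { value += v; return *this; }
    };

    int main()
    {
        Builder b;
        b.add(1).add(2); // fine: chained on an lvalue that outlives the returned references
        // const Builder& r = Builder{}.add(3); // clang warns: reference bound to a temporary
        return b.value == 3 ? 0 : 1;
    }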
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 65276f641f..b912b00365 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -11,6 +11,7 @@
#include <script/signingprovider.h>
#include <script/standard.h>
#include <uint256.h>
+#include <util/translation.h>
#include <util/vector.h>
typedef std::vector<unsigned char> valtype;
@@ -59,22 +60,7 @@ bool MutableTransactionSignatureCreator::CreateSchnorrSig(const SigningProvider&
assert(sigversion == SigVersion::TAPROOT || sigversion == SigVersion::TAPSCRIPT);
CKey key;
- {
- // For now, use the old full pubkey-based key derivation logic. As it indexed by
- // Hash160(full pubkey), we need to try both a version prefixed with 0x02, and one
- // with 0x03.
- unsigned char b[33] = {0x02};
- std::copy(pubkey.begin(), pubkey.end(), b + 1);
- CPubKey fullpubkey;
- fullpubkey.Set(b, b + 33);
- CKeyID keyid = fullpubkey.GetID();
- if (!provider.GetKey(keyid, key)) {
- b[0] = 0x03;
- fullpubkey.Set(b, b + 33);
- CKeyID keyid = fullpubkey.GetID();
- if (!provider.GetKey(keyid, key)) return false;
- }
- }
+ if (!provider.GetKeyByXOnly(pubkey, key)) return false;
// BIP341/BIP342 signing needs lots of precomputed transaction data. While some
// (non-SIGHASH_DEFAULT) sighash modes exist that can work with just some subset
@@ -612,21 +598,24 @@ bool IsSolvable(const SigningProvider& provider, const CScript& script)
bool IsSegWitOutput(const SigningProvider& provider, const CScript& script)
{
- std::vector<valtype> solutions;
- auto whichtype = Solver(script, solutions);
- if (whichtype == TxoutType::WITNESS_V0_SCRIPTHASH || whichtype == TxoutType::WITNESS_V0_KEYHASH || whichtype == TxoutType::WITNESS_UNKNOWN) return true;
- if (whichtype == TxoutType::SCRIPTHASH) {
- auto h160 = uint160(solutions[0]);
- CScript subscript;
- if (provider.GetCScript(CScriptID{h160}, subscript)) {
- whichtype = Solver(subscript, solutions);
- if (whichtype == TxoutType::WITNESS_V0_SCRIPTHASH || whichtype == TxoutType::WITNESS_V0_KEYHASH || whichtype == TxoutType::WITNESS_UNKNOWN) return true;
+ int version;
+ valtype program;
+ if (script.IsWitnessProgram(version, program)) return true;
+ if (script.IsPayToScriptHash()) {
+ std::vector<valtype> solutions;
+ auto whichtype = Solver(script, solutions);
+ if (whichtype == TxoutType::SCRIPTHASH) {
+ auto h160 = uint160(solutions[0]);
+ CScript subscript;
+ if (provider.GetCScript(CScriptID{h160}, subscript)) {
+ if (subscript.IsWitnessProgram(version, program)) return true;
+ }
}
}
return false;
}
-bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, const std::map<COutPoint, Coin>& coins, int nHashType, std::map<int, std::string>& input_errors)
+bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, const std::map<COutPoint, Coin>& coins, int nHashType, std::map<int, bilingual_str>& input_errors)
{
bool fHashSingle = ((nHashType & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE);
@@ -636,29 +625,26 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore,
PrecomputedTransactionData txdata;
std::vector<CTxOut> spent_outputs;
- spent_outputs.resize(mtx.vin.size());
- bool have_all_spent_outputs = true;
- for (unsigned int i = 0; i < mtx.vin.size(); i++) {
+ for (unsigned int i = 0; i < mtx.vin.size(); ++i) {
CTxIn& txin = mtx.vin[i];
auto coin = coins.find(txin.prevout);
if (coin == coins.end() || coin->second.IsSpent()) {
- have_all_spent_outputs = false;
+ txdata.Init(txConst, /* spent_outputs */ {}, /* force */ true);
+ break;
} else {
- spent_outputs[i] = CTxOut(coin->second.out.nValue, coin->second.out.scriptPubKey);
+ spent_outputs.emplace_back(coin->second.out.nValue, coin->second.out.scriptPubKey);
}
}
- if (have_all_spent_outputs) {
+ if (spent_outputs.size() == mtx.vin.size()) {
txdata.Init(txConst, std::move(spent_outputs), true);
- } else {
- txdata.Init(txConst, {}, true);
}
// Sign what we can:
- for (unsigned int i = 0; i < mtx.vin.size(); i++) {
+ for (unsigned int i = 0; i < mtx.vin.size(); ++i) {
CTxIn& txin = mtx.vin[i];
auto coin = coins.find(txin.prevout);
if (coin == coins.end() || coin->second.IsSpent()) {
- input_errors[i] = "Input not found or already spent";
+ input_errors[i] = _("Input not found or already spent");
continue;
}
const CScript& prevPubKey = coin->second.out.scriptPubKey;
@@ -674,7 +660,7 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore,
// amount must be specified for valid segwit signature
if (amount == MAX_MONEY && !txin.scriptWitness.IsNull()) {
- input_errors[i] = "Missing amount";
+ input_errors[i] = _("Missing amount");
continue;
}
@@ -682,12 +668,12 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore,
if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount, txdata, MissingDataBehavior::FAIL), &serror)) {
if (serror == SCRIPT_ERR_INVALID_STACK_OPERATION) {
// Unable to sign input and verification failed (possible attempt to partially sign).
- input_errors[i] = "Unable to sign input, invalid stack size (possibly missing key)";
+ input_errors[i] = Untranslated("Unable to sign input, invalid stack size (possibly missing key)");
} else if (serror == SCRIPT_ERR_SIG_NULLFAIL) {
// Verification failed (possibly due to insufficient signatures).
- input_errors[i] = "CHECK(MULTI)SIG failing with non-zero signature (possibly need more signatures)";
+ input_errors[i] = Untranslated("CHECK(MULTI)SIG failing with non-zero signature (possibly need more signatures)");
} else {
- input_errors[i] = ScriptErrorString(serror);
+ input_errors[i] = Untranslated(ScriptErrorString(serror));
}
} else {
// If this input succeeds, make sure there is no error set for it
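The hunks above switch SignTransaction's error map from std::string to bilingual_str. A minimal caller sketch, assuming `mtx`, `keystore` and `coins` are already populated (hypothetical names) and that <script/sign.h> and <util/translation.h> are included:

    // Surface per-input errors in both untranslated and translated form.
    std::map<int, bilingual_str> input_errors;
    if (!SignTransaction(mtx, &keystore, coins, SIGHASH_ALL, input_errors)) {
        for (const auto& [index, err] : input_errors) {
            // err.original is stable for logs/RPC; err.translated is what a GUI shows.
            LogPrintf("input %d: %s\n", index, err.original);
        }
    }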
diff --git a/src/script/sign.h b/src/script/sign.h
index b4e7318892..6d3479c143 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -21,6 +21,7 @@ class CScript;
class CTransaction;
class SigningProvider;
+struct bilingual_str;
struct CMutableTransaction;
/** Interface for signature creators. */
@@ -44,8 +45,8 @@ class MutableTransactionSignatureCreator : public BaseSignatureCreator {
const PrecomputedTransactionData* m_txdata;
public:
- MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn = SIGHASH_ALL);
- MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData* txdata, int nHashTypeIn = SIGHASH_ALL);
+ MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn);
+ MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData* txdata, int nHashTypeIn);
const BaseSignatureChecker& Checker() const override { return checker; }
bool CreateSig(const SigningProvider& provider, std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override;
bool CreateSchnorrSig(const SigningProvider& provider, std::vector<unsigned char>& sig, const XOnlyPubKey& pubkey, const uint256* leaf_hash, const uint256* merkle_root, SigVersion sigversion) const override;
@@ -178,6 +179,6 @@ bool IsSolvable(const SigningProvider& provider, const CScript& script);
bool IsSegWitOutput(const SigningProvider& provider, const CScript& script);
/** Sign the CMutableTransaction */
-bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* provider, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors);
+bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* provider, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors);
#endif // BITCOIN_SCRIPT_SIGN_H
diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h
index 939ae10622..fbce61c6a9 100644
--- a/src/script/signingprovider.h
+++ b/src/script/signingprovider.h
@@ -26,6 +26,30 @@ public:
virtual bool HaveKey(const CKeyID &address) const { return false; }
virtual bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const { return false; }
virtual bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const { return false; }
+
+ bool GetKeyByXOnly(const XOnlyPubKey& pubkey, CKey& key) const
+ {
+ for (const auto& id : pubkey.GetKeyIDs()) {
+ if (GetKey(id, key)) return true;
+ }
+ return false;
+ }
+
+ bool GetPubKeyByXOnly(const XOnlyPubKey& pubkey, CPubKey& out) const
+ {
+ for (const auto& id : pubkey.GetKeyIDs()) {
+ if (GetPubKey(id, out)) return true;
+ }
+ return false;
+ }
+
+ bool GetKeyOriginByXOnly(const XOnlyPubKey& pubkey, KeyOriginInfo& info) const
+ {
+ for (const auto& id : pubkey.GetKeyIDs()) {
+ if (GetKeyOrigin(id, info)) return true;
+ }
+ return false;
+ }
};
extern const SigningProvider& DUMMY_SIGNING_PROVIDER;
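The new *ByXOnly helpers centralize the lookup that CreateSchnorrSig previously open-coded. A hedged sketch of what XOnlyPubKey::GetKeyIDs() is assumed to return (its implementation is not part of this hunk): the key IDs of the two full pubkeys formed by prefixing the 32-byte x coordinate with each parity byte.

    // Sketch only, under the assumption stated above; not the actual implementation.
    std::vector<CKeyID> GetKeyIDsSketch(const XOnlyPubKey& xonly)
    {
        std::vector<CKeyID> ids;
        unsigned char full[33];
        std::copy(xonly.begin(), xonly.end(), full + 1);
        for (const unsigned char prefix : {0x02, 0x03}) {
            full[0] = prefix;                 // try the even, then the odd parity
            CPubKey pubkey(full, full + 33);
            ids.push_back(pubkey.GetID());    // Hash160 of the full pubkey
        }
        return ids;
    }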
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index b8349bb9ab..67a79a157c 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -504,6 +504,7 @@ WitnessV1Taproot TaprootBuilder::GetOutput() { return WitnessV1Taproot{m_output_
TaprootSpendData TaprootBuilder::GetSpendData() const
{
+ assert(IsComplete());
TaprootSpendData spd;
spd.merkle_root = m_branch.size() == 0 ? uint256() : m_branch[0]->hash;
spd.internal_key = m_internal_key;
diff --git a/src/script/standard.h b/src/script/standard.h
index ac4e2f3276..78492733db 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -227,8 +227,11 @@ struct TaprootSpendData
/** The Merkle root of the script tree (0 if no scripts). */
uint256 merkle_root;
/** Map from (script, leaf_version) to (sets of) control blocks.
- * The control blocks are sorted by size, so that the signing logic can
- * easily prefer the cheapest one. */
+ * More than one control block for a given script is only possible if it
+ * appears in multiple branches of the tree. We keep them all so that
+ * inference can reconstruct the full tree. Within each set, the control
+ * blocks are sorted by size, so that the signing logic can easily
+ * prefer the cheapest one. */
std::map<std::pair<CScript, int>, std::set<std::vector<unsigned char>, ShortestVectorFirstComparator>> scripts;
/** Merge other TaprootSpendData (for the same scriptPubKey) into this. */
void Merge(TaprootSpendData other);
@@ -252,7 +255,7 @@ private:
/** Merkle hash of this node. */
uint256 hash;
/** Tracked leaves underneath this node (either from the node itself, or its children).
- * The merkle_branch field for each is the partners to get to *this* node. */
+ * The merkle_branch field of each contains the hashing partners needed to get to *this* node. */
std::vector<LeafInfo> leaves;
};
/** Whether the builder is in a valid state so far. */
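Because each set in TaprootSpendData::scripts is ordered by ShortestVectorFirstComparator, signing code can take *begin() to get the cheapest control block. A hedged lookup sketch (the helper name is hypothetical):

    // Return the smallest control block for (script, leaf_version), if any.
    std::optional<std::vector<unsigned char>> CheapestControlBlock(
        const TaprootSpendData& spd, const CScript& script, int leaf_version)
    {
        const auto it = spd.scripts.find({script, leaf_version});
        if (it == spd.scripts.end() || it->second.empty()) return std::nullopt;
        return *it->second.begin(); // sets are sorted shortest-first
    }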
diff --git a/src/secp256k1/.cirrus.yml b/src/secp256k1/.cirrus.yml
index 506a860336..bf71a70839 100644
--- a/src/secp256k1/.cirrus.yml
+++ b/src/secp256k1/.cirrus.yml
@@ -1,21 +1,28 @@
env:
- WIDEMUL: auto
+ ### compiler options
+ HOST:
+ # Specific warnings can be disabled with -Wno-error=foo.
+ # -pedantic-errors is not equivalent to -Werror=pedantic and thus not implied by -Werror according to the GCC manual.
+ WERROR_CFLAGS: -Werror -pedantic-errors
+ MAKEFLAGS: -j2
+ BUILD: check
+ ### secp256k1 config
STATICPRECOMPUTATION: yes
ECMULTGENPRECISION: auto
ASM: no
- BUILD: check
+ WIDEMUL: auto
WITH_VALGRIND: yes
- RUN_VALGRIND: no
EXTRAFLAGS:
- HOST:
+ ### secp256k1 modules
+ EXPERIMENTAL: no
ECDH: no
RECOVERY: no
SCHNORRSIG: no
- EXPERIMENTAL: no
- CTIMETEST: yes
+ ### test options
+ TEST_ITERS:
BENCH: yes
- ITERS: 2
- MAKEFLAGS: -j2
+ BENCH_ITERS: 2
+ CTIMETEST: yes
cat_logs_snippet: &CAT_LOGS
always:
@@ -63,27 +70,8 @@ task:
- env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no}
- env: {CPPFLAGS: -DDETERMINISTIC}
- env: {CFLAGS: -O0, CTIMETEST: no}
- - env:
- CFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer"
- LDFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer"
- UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1"
- ASM: x86_64
- ECDH: yes
- RECOVERY: yes
- EXPERIMENTAL: yes
- SCHNORRSIG: yes
- CTIMETEST: no
- env: { ECMULTGENPRECISION: 2 }
- env: { ECMULTGENPRECISION: 8 }
- - env:
- RUN_VALGRIND: yes
- ASM: x86_64
- ECDH: yes
- RECOVERY: yes
- EXPERIMENTAL: yes
- SCHNORRSIG: yes
- EXTRAFLAGS: "--disable-openssl-tests"
- BUILD:
matrix:
- env:
CC: gcc
@@ -111,6 +99,7 @@ task:
CC: i686-linux-gnu-gcc
- env:
CC: clang --target=i686-pc-linux-gnu -isystem /usr/i686-linux-gnu/include
+ << : *MERGE_BASE
test_script:
- ./ci/cirrus.sh
<< : *CAT_LOGS
@@ -181,9 +170,9 @@ task:
cpu: 1
memory: 1G
env:
- QEMU_CMD: qemu-s390x
+ WRAPPER_CMD: qemu-s390x
+ TEST_ITERS: 16
HOST: s390x-linux-gnu
- BUILD:
WITH_VALGRIND: no
ECDH: yes
RECOVERY: yes
@@ -196,3 +185,158 @@ task:
- rm /etc/ld.so.cache
- ./ci/cirrus.sh
<< : *CAT_LOGS
+
+task:
+ name: "ARM32: Linux (Debian stable, QEMU)"
+ container:
+ dockerfile: ci/linux-debian.Dockerfile
+ cpu: 1
+ memory: 1G
+ env:
+ WRAPPER_CMD: qemu-arm
+ TEST_ITERS: 16
+ HOST: arm-linux-gnueabihf
+ WITH_VALGRIND: no
+ ECDH: yes
+ RECOVERY: yes
+ EXPERIMENTAL: yes
+ SCHNORRSIG: yes
+ CTIMETEST: no
+ matrix:
+ - env: {}
+ - env: {ASM: arm}
+ << : *MERGE_BASE
+ test_script:
+ - ./ci/cirrus.sh
+ << : *CAT_LOGS
+
+task:
+ name: "ARM64: Linux (Debian stable, QEMU)"
+ container:
+ dockerfile: ci/linux-debian.Dockerfile
+ cpu: 1
+ memory: 1G
+ env:
+ WRAPPER_CMD: qemu-aarch64
+ TEST_ITERS: 16
+ HOST: aarch64-linux-gnu
+ WITH_VALGRIND: no
+ ECDH: yes
+ RECOVERY: yes
+ EXPERIMENTAL: yes
+ SCHNORRSIG: yes
+ CTIMETEST: no
+ << : *MERGE_BASE
+ test_script:
+ - ./ci/cirrus.sh
+ << : *CAT_LOGS
+
+task:
+ name: "ppc64le: Linux (Debian stable, QEMU)"
+ container:
+ dockerfile: ci/linux-debian.Dockerfile
+ cpu: 1
+ memory: 1G
+ env:
+ WRAPPER_CMD: qemu-ppc64le
+ TEST_ITERS: 16
+ HOST: powerpc64le-linux-gnu
+ WITH_VALGRIND: no
+ ECDH: yes
+ RECOVERY: yes
+ EXPERIMENTAL: yes
+ SCHNORRSIG: yes
+ CTIMETEST: no
+ << : *MERGE_BASE
+ test_script:
+ - ./ci/cirrus.sh
+ << : *CAT_LOGS
+
+task:
+ name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)"
+ container:
+ dockerfile: ci/linux-debian.Dockerfile
+ cpu: 1
+ memory: 1G
+ env:
+ WRAPPER_CMD: wine64-stable
+ TEST_ITERS: 16
+ HOST: x86_64-w64-mingw32
+ WITH_VALGRIND: no
+ ECDH: yes
+ RECOVERY: yes
+ EXPERIMENTAL: yes
+ SCHNORRSIG: yes
+ CTIMETEST: no
+ << : *MERGE_BASE
+ test_script:
+ - ./ci/cirrus.sh
+ << : *CAT_LOGS
+
+# Sanitizers
+task:
+ container:
+ dockerfile: ci/linux-debian.Dockerfile
+ cpu: 1
+ memory: 1G
+ env:
+ ECDH: yes
+ RECOVERY: yes
+ EXPERIMENTAL: yes
+ SCHNORRSIG: yes
+ CTIMETEST: no
+ EXTRAFLAGS: "--disable-openssl-tests"
+ matrix:
+ - name: "Valgrind (memcheck)"
+ env:
+ # The `--error-exitcode` is required to make the test fail if valgrind finds errors; otherwise it returns 0 (https://www.valgrind.org/docs/manual/manual-core.html)
+ WRAPPER_CMD: "valgrind --error-exitcode=42"
+ TEST_ITERS: 16
+ - name: "UBSan, ASan, LSan"
+ env:
+ CFLAGS: "-fsanitize=undefined,address"
+ CFLAGS_FOR_BUILD: "-fsanitize=undefined,address"
+ UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1"
+ ASAN_OPTIONS: "strict_string_checks=1:detect_stack_use_after_return=1:detect_leaks=1"
+ LSAN_OPTIONS: "use_unaligned=1"
+ TEST_ITERS: 32
+ # Try to cover many configurations with just a tiny matrix.
+ matrix:
+ - env:
+ ASM: auto
+ STATICPRECOMPUTATION: yes
+ - env:
+ ASM: no
+ STATICPRECOMPUTATION: no
+ ECMULTGENPRECISION: 2
+ matrix:
+ - env:
+ CC: clang
+ - env:
+ HOST: i686-linux-gnu
+ CC: i686-linux-gnu-gcc
+ << : *MERGE_BASE
+ test_script:
+ - ./ci/cirrus.sh
+ << : *CAT_LOGS
+
+task:
+ name: "C++ -fpermissive"
+ container:
+ dockerfile: ci/linux-debian.Dockerfile
+ cpu: 1
+ memory: 1G
+ env:
+ # ./configure correctly errors out when given CC=g++.
+ # We hack around this by passing CC=g++ only to make.
+ CC: gcc
+ MAKEFLAGS: -j2 CC=g++ CFLAGS=-fpermissive
+ WERROR_CFLAGS:
+ EXPERIMENTAL: yes
+ ECDH: yes
+ RECOVERY: yes
+ SCHNORRSIG: yes
+ << : *MERGE_BASE
+ test_script:
+ - ./ci/cirrus.sh
+ << : *CAT_LOGS
diff --git a/src/secp256k1/.gitignore b/src/secp256k1/.gitignore
index ccdef02b29..79b740db8a 100644
--- a/src/secp256k1/.gitignore
+++ b/src/secp256k1/.gitignore
@@ -23,6 +23,7 @@ aclocal.m4
autom4te.cache/
config.log
config.status
+conftest*
*.tar.gz
*.la
libtool
@@ -33,6 +34,14 @@ libtool
*~
*.log
*.trs
+
+coverage/
+coverage.html
+coverage.*.html
+*.gcda
+*.gcno
+*.gcov
+
src/libsecp256k1-config.h
src/libsecp256k1-config.h.in
src/ecmult_static_context.h
diff --git a/src/secp256k1/Makefile.am b/src/secp256k1/Makefile.am
index 58c9635e53..1e03560884 100644
--- a/src/secp256k1/Makefile.am
+++ b/src/secp256k1/Makefile.am
@@ -1,5 +1,9 @@
ACLOCAL_AMFLAGS = -I build-aux/m4
+# AM_CFLAGS will be automatically prepended to CFLAGS by Automake when compiling some foo
+# which does not have an explicit foo_CFLAGS variable set.
+AM_CFLAGS = $(SECP_CFLAGS)
+
lib_LTLIBRARIES = libsecp256k1.la
include_HEADERS = include/secp256k1.h
include_HEADERS += include/secp256k1_preallocated.h
@@ -68,7 +72,7 @@ endif
endif
libsecp256k1_la_SOURCES = src/secp256k1.c
-libsecp256k1_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
+libsecp256k1_la_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
libsecp256k1_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB)
if VALGRIND_ENABLED
@@ -81,27 +85,27 @@ noinst_PROGRAMS += bench_verify bench_sign bench_internal bench_ecmult
bench_verify_SOURCES = src/bench_verify.c
bench_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
# SECP_TEST_INCLUDES are only used here for CRYPTO_CPPFLAGS
-bench_verify_CPPFLAGS = -DSECP256K1_BUILD $(SECP_TEST_INCLUDES)
+bench_verify_CPPFLAGS = $(SECP_TEST_INCLUDES)
bench_sign_SOURCES = src/bench_sign.c
bench_sign_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
bench_internal_SOURCES = src/bench_internal.c
bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB)
-bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES)
+bench_internal_CPPFLAGS = $(SECP_INCLUDES)
bench_ecmult_SOURCES = src/bench_ecmult.c
bench_ecmult_LDADD = $(SECP_LIBS) $(COMMON_LIB)
-bench_ecmult_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES)
+bench_ecmult_CPPFLAGS = $(SECP_INCLUDES)
endif
TESTS =
if USE_TESTS
noinst_PROGRAMS += tests
tests_SOURCES = src/tests.c
-tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
+tests_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
if VALGRIND_ENABLED
tests_CPPFLAGS += -DVALGRIND
noinst_PROGRAMS += valgrind_ctime_test
valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c
-valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_LIBS) $(COMMON_LIB)
+valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
endif
if !ENABLE_COVERAGE
tests_CPPFLAGS += -DVERIFY
@@ -114,7 +118,7 @@ endif
if USE_EXHAUSTIVE_TESTS
noinst_PROGRAMS += exhaustive_tests
exhaustive_tests_SOURCES = src/tests_exhaustive.c
-exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDES)
+exhaustive_tests_CPPFLAGS = -I$(top_srcdir)/src $(SECP_INCLUDES)
if !ENABLE_COVERAGE
exhaustive_tests_CPPFLAGS += -DVERIFY
endif
@@ -129,10 +133,10 @@ CPPFLAGS_FOR_BUILD +=-I$(top_srcdir) -I$(builddir)/src
gen_context_OBJECTS = gen_context.o
gen_context_BIN = gen_context$(BUILD_EXEEXT)
gen_%.o: src/gen_%.c src/libsecp256k1-config.h
- $(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@
+ $(CC_FOR_BUILD) $(DEFS) $(CPPFLAGS_FOR_BUILD) $(SECP_CFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@
$(gen_context_BIN): $(gen_context_OBJECTS)
- $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@
+ $(CC_FOR_BUILD) $(SECP_CFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@
$(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h
$(tests_OBJECTS): src/ecmult_static_context.h
diff --git a/src/secp256k1/README.md b/src/secp256k1/README.md
index 197a56fff8..182c29d9ce 100644
--- a/src/secp256k1/README.md
+++ b/src/secp256k1/README.md
@@ -17,6 +17,7 @@ Features:
* Suitable for embedded systems.
* Optional module for public key recovery.
* Optional module for ECDH key exchange.
+* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki) (experimental).
Experimental features have not received enough scrutiny to satisfy the standard of quality of this library but are made available for testing and review by the community. The APIs of these features should not be considered stable.
@@ -96,7 +97,8 @@ To create a report, `gcovr` is recommended, as it includes branch coverage repor
To create a HTML report with coloured and annotated source code:
- $ gcovr --exclude 'src/bench*' --html --html-details -o coverage.html
+ $ mkdir -p coverage
+ $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html
Reporting a vulnerability
------------
diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
index e57888ca18..8245b2b863 100644
--- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4
+++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
@@ -82,3 +82,19 @@ if test x"$has_valgrind" != x"yes"; then
AC_CHECK_HEADER([valgrind/memcheck.h], [has_valgrind=yes; AC_DEFINE(HAVE_VALGRIND,1,[Define this symbol if valgrind is installed])])
fi
])
+
+dnl SECP_TRY_APPEND_CFLAGS(flags, VAR)
+dnl Append flags to VAR if CC accepts them.
+AC_DEFUN([SECP_TRY_APPEND_CFLAGS], [
+ AC_MSG_CHECKING([if ${CC} supports $1])
+ SECP_TRY_APPEND_CFLAGS_saved_CFLAGS="$CFLAGS"
+ CFLAGS="$1 $CFLAGS"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], [flag_works=yes], [flag_works=no])
+ AC_MSG_RESULT($flag_works)
+ CFLAGS="$SECP_TRY_APPEND_CFLAGS_saved_CFLAGS"
+ if test x"$flag_works" = x"yes"; then
+ $2="$$2 $1"
+ fi
+ unset flag_works
+ AC_SUBST($2)
+])
diff --git a/src/secp256k1/ci/cirrus.sh b/src/secp256k1/ci/cirrus.sh
index f26ca98d1d..27db1e6779 100755
--- a/src/secp256k1/ci/cirrus.sh
+++ b/src/secp256k1/ci/cirrus.sh
@@ -25,42 +25,27 @@ valgrind --version || true
make
# Print information about binaries so that we can see that the architecture is correct
-file *tests || true
+file *tests* || true
file bench_* || true
file .libs/* || true
-if [ -n "$BUILD" ]
-then
- make "$BUILD"
-fi
+# This tells `make check` to wrap test invocations.
+export LOG_COMPILER="$WRAPPER_CMD"
-if [ "$RUN_VALGRIND" = "yes" ]
-then
- # the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html)
- valgrind --error-exitcode=42 ./tests 16
- valgrind --error-exitcode=42 ./exhaustive_tests
-fi
+# This limits the iterations in the tests and benchmarks.
+export SECP256K1_TEST_ITERS="$TEST_ITERS"
+export SECP256K1_BENCH_ITERS="$BENCH_ITERS"
-if [ -n "$QEMU_CMD" ]
-then
- $QEMU_CMD ./tests 16
- $QEMU_CMD ./exhaustive_tests
-fi
+make "$BUILD"
if [ "$BENCH" = "yes" ]
then
# Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool
EXEC='./libtool --mode=execute'
- if [ -n "$QEMU_CMD" ]
- then
- EXEC="$EXEC $QEMU_CMD"
- fi
- if [ "$RUN_VALGRIND" = "yes" ]
+ if [ -n "$WRAPPER_CMD" ]
then
- EXEC="$EXEC valgrind --error-exitcode=42"
+ EXEC="$EXEC $WRAPPER_CMD"
fi
- # This limits the iterations in the benchmarks below to ITER iterations.
- export SECP256K1_BENCH_ITERS="$ITERS"
{
$EXEC ./bench_ecmult
$EXEC ./bench_internal
diff --git a/src/secp256k1/ci/linux-debian.Dockerfile b/src/secp256k1/ci/linux-debian.Dockerfile
index 5967cf8b31..2c02ed69d0 100644
--- a/src/secp256k1/ci/linux-debian.Dockerfile
+++ b/src/secp256k1/ci/linux-debian.Dockerfile
@@ -2,12 +2,24 @@ FROM debian:stable
RUN dpkg --add-architecture i386
RUN dpkg --add-architecture s390x
+RUN dpkg --add-architecture armhf
+RUN dpkg --add-architecture arm64
+RUN dpkg --add-architecture ppc64el
RUN apt-get update
# dpkg-dev: to make pkg-config work in cross-builds
+# llvm: for llvm-symbolizer, which is used by clang's UBSan for symbolized stack traces
RUN apt-get install --no-install-recommends --no-upgrade -y \
git ca-certificates \
make automake libtool pkg-config dpkg-dev valgrind qemu-user \
- gcc clang libc6-dbg \
- gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 \
- gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x
+ gcc clang llvm libc6-dbg \
+ g++ \
+ gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan5:i386 \
+ gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \
+ gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \
+ gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \
+ gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \
+ wine gcc-mingw-w64-x86-64
+
+# Run a dummy command in wine to make it set up configuration
+RUN wine64-stable xcopy || true
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
index 1ed991afa7..9969cfa343 100644
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -8,10 +8,6 @@ AH_TOP([#define LIBSECP256K1_CONFIG_H])
AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/])
AM_INIT_AUTOMAKE([foreign subdir-objects])
-# Set -g if CFLAGS are not already set, which matches the default autoconf
-# behavior (see PROG_CC in the Autoconf manual) with the exception that we don't
-# set -O2 here because we set it in any case (see further down).
-: ${CFLAGS="-g"}
LT_INIT
# Make the compilation flags quiet unless V=1 is used.
@@ -42,8 +38,8 @@ AM_PROG_AS
case $host_os in
*darwin*)
if test x$cross_compiling != xyes; then
- AC_PATH_PROG([BREW],brew,)
- if test x$BREW != x; then
+ AC_CHECK_PROG([BREW], brew, brew)
+ if test x$BREW = xbrew; then
# These Homebrew packages may be keg-only, meaning that they won't be found
# in expected paths because they may conflict with system files. Ask
# Homebrew where each one is located, then adjust paths accordingly.
@@ -58,10 +54,10 @@ case $host_os in
VALGRIND_CPPFLAGS="-I$valgrind_prefix/include"
fi
else
- AC_PATH_PROG([PORT],port,)
+ AC_CHECK_PROG([PORT], port, port)
# If homebrew isn't installed and macports is, add the macports default paths
# as a last resort.
- if test x$PORT != x; then
+ if test x$PORT = xport; then
CPPFLAGS="$CPPFLAGS -isystem /opt/local/include"
LDFLAGS="$LDFLAGS -L/opt/local/lib"
fi
@@ -70,35 +66,41 @@ case $host_os in
;;
esac
-CFLAGS="-W $CFLAGS"
-
-warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
-saved_CFLAGS="$CFLAGS"
-CFLAGS="$warn_CFLAGS $CFLAGS"
-AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}])
-AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
- [ AC_MSG_RESULT([yes]) ],
- [ AC_MSG_RESULT([no])
- CFLAGS="$saved_CFLAGS"
- ])
-
-saved_CFLAGS="$CFLAGS"
-CFLAGS="-Wconditional-uninitialized $CFLAGS"
-AC_MSG_CHECKING([if ${CC} supports -Wconditional-uninitialized])
-AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
- [ AC_MSG_RESULT([yes]) ],
- [ AC_MSG_RESULT([no])
- CFLAGS="$saved_CFLAGS"
- ])
-
-saved_CFLAGS="$CFLAGS"
-CFLAGS="-fvisibility=hidden $CFLAGS"
-AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden])
-AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
- [ AC_MSG_RESULT([yes]) ],
- [ AC_MSG_RESULT([no])
- CFLAGS="$saved_CFLAGS"
- ])
+# Try if some desirable compiler flags are supported and append them to SECP_CFLAGS.
+#
+# These are our own flags, so we append them to our own SECP_CFLAGS variable (instead of CFLAGS) as
+# recommended in the automake manual (Section "Flag Variables Ordering"). CFLAGS belongs to the user
+# and we are not supposed to touch it. In the Makefile, we will need to ensure that SECP_CFLAGS
+# is prepended to CFLAGS when invoking the compiler so that the user always has the last word (flag).
+#
+# Another advantage of not touching CFLAGS is that the contents of CFLAGS will be picked up by
+# libtool for compiling helper executables. For example, when compiling for Windows, libtool will
+# generate entire wrapper executables (instead of simple wrapper scripts as on Unix) to ensure
+# proper operation of uninstalled programs linked by libtool against the uninstalled shared library.
+# These executables are compiled from a C source file for which our flags may not be appropriate,
+# e.g., the -std=c89 flag has led to undesirable warnings in the past.
+#
+# TODO We should analogously not touch CPPFLAGS and LDFLAGS but currently there are no issues.
+AC_DEFUN([SECP_TRY_APPEND_DEFAULT_CFLAGS], [
+ # Try to append -Werror=unknown-warning-option to CFLAGS temporarily. Otherwise clang will
+ # not error out if it gets unknown warning flags and the checks here will always succeed
+ # no matter if clang knows the flag or not.
+ SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS="$CFLAGS"
+ SECP_TRY_APPEND_CFLAGS([-Werror=unknown-warning-option], CFLAGS)
+
+ SECP_TRY_APPEND_CFLAGS([-std=c89 -pedantic -Wno-long-long -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef], $1) # GCC >= 3.0, -Wlong-long is implied by -pedantic.
+ SECP_TRY_APPEND_CFLAGS([-Wno-overlength-strings], $1) # GCC >= 4.2, -Woverlength-strings is implied by -pedantic.
+ SECP_TRY_APPEND_CFLAGS([-Wall], $1) # GCC >= 2.95 and probably many other compilers
+ SECP_TRY_APPEND_CFLAGS([-Wno-unused-function], $1) # GCC >= 3.0, -Wunused-function is implied by -Wall.
+ SECP_TRY_APPEND_CFLAGS([-Wextra], $1) # GCC >= 3.4, this is the newer name of -W, which we don't use because older GCCs will warn about unused functions.
+ SECP_TRY_APPEND_CFLAGS([-Wcast-align], $1) # GCC >= 2.95
+ SECP_TRY_APPEND_CFLAGS([-Wcast-align=strict], $1) # GCC >= 8.0
+ SECP_TRY_APPEND_CFLAGS([-Wconditional-uninitialized], $1) # Clang >= 3.0 only
+ SECP_TRY_APPEND_CFLAGS([-fvisibility=hidden], $1) # GCC >= 4.0
+
+ CFLAGS="$SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS"
+])
+SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS)
###
### Define config arguments
@@ -213,10 +215,14 @@ AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])
if test x"$enable_coverage" = x"yes"; then
AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code])
- CFLAGS="-O0 --coverage $CFLAGS"
+ SECP_CFLAGS="-O0 --coverage $SECP_CFLAGS"
LDFLAGS="--coverage $LDFLAGS"
else
- CFLAGS="-O2 $CFLAGS"
+ # Most likely the CFLAGS already contain -O2 because that is autoconf's default.
+ # We still add it here because passing it twice is not an issue, and handling
+ # this case would just add unnecessary complexity (see #896).
+ SECP_CFLAGS="-O2 $SECP_CFLAGS"
+ SECP_CFLAGS_FOR_BUILD="-O2 $SECP_CFLAGS_FOR_BUILD"
fi
if test x"$req_asm" = x"auto"; then
@@ -351,6 +357,9 @@ if test x"$enable_valgrind" = x"yes"; then
SECP_INCLUDES="$SECP_INCLUDES $VALGRIND_CPPFLAGS"
fi
+# Add -Werror and similar flags passed from the outside (for testing, e.g., in CI)
+SECP_CFLAGS="$SECP_CFLAGS $WERROR_CFLAGS"
+
# Handle static precomputation (after everything which modifies CFLAGS and friends)
if test x"$use_ecmult_static_precomputation" != x"no"; then
if test x"$cross_compiling" = x"no"; then
@@ -360,8 +369,9 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then
fi
# If we're not cross-compiling, simply use the same compiler for building the static precomputation code.
CC_FOR_BUILD="$CC"
- CFLAGS_FOR_BUILD="$CFLAGS"
CPPFLAGS_FOR_BUILD="$CPPFLAGS"
+ SECP_CFLAGS_FOR_BUILD="$SECP_CFLAGS"
+ CFLAGS_FOR_BUILD="$CFLAGS"
LDFLAGS_FOR_BUILD="$LDFLAGS"
else
AX_PROG_CC_FOR_BUILD
@@ -371,22 +381,14 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then
cross_compiling=no
SAVE_CC="$CC"
CC="$CC_FOR_BUILD"
- SAVE_CFLAGS="$CFLAGS"
- CFLAGS="$CFLAGS_FOR_BUILD"
SAVE_CPPFLAGS="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS_FOR_BUILD"
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS_FOR_BUILD"
SAVE_LDFLAGS="$LDFLAGS"
LDFLAGS="$LDFLAGS_FOR_BUILD"
- warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function"
- saved_CFLAGS="$CFLAGS"
- CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS"
- AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}])
- AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
- [ AC_MSG_RESULT([yes]) ],
- [ AC_MSG_RESULT([no])
- CFLAGS="$saved_CFLAGS"
- ])
+ SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS_FOR_BUILD)
AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}])
AC_RUN_IFELSE(
@@ -394,19 +396,17 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then
[working_native_cc=yes],
[working_native_cc=no],[:])
- CFLAGS_FOR_BUILD="$CFLAGS"
-
# Restore the environment
cross_compiling=$save_cross_compiling
CC="$SAVE_CC"
- CFLAGS="$SAVE_CFLAGS"
CPPFLAGS="$SAVE_CPPFLAGS"
+ CFLAGS="$SAVE_CFLAGS"
LDFLAGS="$SAVE_LDFLAGS"
if test x"$working_native_cc" = x"no"; then
AC_MSG_RESULT([no])
set_precomp=no
- m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.])
+ m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CPPFLAGS_FOR_BUILD, CFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.])
if test x"$use_ecmult_static_precomputation" = x"yes"; then
AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
else
@@ -419,8 +419,9 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then
fi
AC_SUBST(CC_FOR_BUILD)
- AC_SUBST(CFLAGS_FOR_BUILD)
AC_SUBST(CPPFLAGS_FOR_BUILD)
+ AC_SUBST(SECP_CFLAGS_FOR_BUILD)
+ AC_SUBST(CFLAGS_FOR_BUILD)
AC_SUBST(LDFLAGS_FOR_BUILD)
else
set_precomp=no
@@ -490,6 +491,7 @@ AC_SUBST(SECP_INCLUDES)
AC_SUBST(SECP_LIBS)
AC_SUBST(SECP_TEST_LIBS)
AC_SUBST(SECP_TEST_INCLUDES)
+AC_SUBST(SECP_CFLAGS)
AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"])
AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"])
AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"])
@@ -532,13 +534,15 @@ fi
echo
echo " valgrind = $enable_valgrind"
echo " CC = $CC"
-echo " CFLAGS = $CFLAGS"
echo " CPPFLAGS = $CPPFLAGS"
+echo " SECP_CFLAGS = $SECP_CFLAGS"
+echo " CFLAGS = $CFLAGS"
echo " LDFLAGS = $LDFLAGS"
echo
if test x"$set_precomp" = x"yes"; then
echo " CC_FOR_BUILD = $CC_FOR_BUILD"
-echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD"
echo " CPPFLAGS_FOR_BUILD = $CPPFLAGS_FOR_BUILD"
+echo " SECP_CFLAGS_FOR_BUILD = $SECP_CFLAGS_FOR_BUILD"
+echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD"
echo " LDFLAGS_FOR_BUILD = $LDFLAGS_FOR_BUILD"
fi
diff --git a/src/secp256k1/contrib/lax_der_parsing.c b/src/secp256k1/contrib/lax_der_parsing.c
index c1627e37e9..bf562303ed 100644
--- a/src/secp256k1/contrib/lax_der_parsing.c
+++ b/src/secp256k1/contrib/lax_der_parsing.c
@@ -5,7 +5,6 @@
***********************************************************************/
#include <string.h>
-#include <secp256k1.h>
#include "lax_der_parsing.h"
@@ -121,7 +120,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_
/* Copy R value */
if (rlen > 32) {
overflow = 1;
- } else {
+ } else if (rlen) {
memcpy(tmpsig + 32 - rlen, input + rpos, rlen);
}
@@ -133,7 +132,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_
/* Copy S value */
if (slen > 32) {
overflow = 1;
- } else {
+ } else if (slen) {
memcpy(tmpsig + 64 - slen, input + spos, slen);
}
diff --git a/src/secp256k1/contrib/lax_der_parsing.h b/src/secp256k1/contrib/lax_der_parsing.h
index 6b7255e28f..034a38e6a0 100644
--- a/src/secp256k1/contrib/lax_der_parsing.h
+++ b/src/secp256k1/contrib/lax_der_parsing.h
@@ -51,7 +51,13 @@
#ifndef SECP256K1_CONTRIB_LAX_DER_PARSING_H
#define SECP256K1_CONTRIB_LAX_DER_PARSING_H
+/* #include secp256k1.h only when it hasn't been included yet.
+ This enables this file to be #included directly in other project
+ files (such as tests.c) without the need to set an explicit -I flag,
+ which would be necessary to locate secp256k1.h. */
+#ifndef SECP256K1_H
#include <secp256k1.h>
+#endif
#ifdef __cplusplus
extern "C" {
diff --git a/src/secp256k1/contrib/lax_der_privatekey_parsing.c b/src/secp256k1/contrib/lax_der_privatekey_parsing.c
index 429760fbb6..a1b8200079 100644
--- a/src/secp256k1/contrib/lax_der_privatekey_parsing.c
+++ b/src/secp256k1/contrib/lax_der_privatekey_parsing.c
@@ -5,7 +5,6 @@
***********************************************************************/
#include <string.h>
-#include <secp256k1.h>
#include "lax_der_privatekey_parsing.h"
@@ -45,7 +44,7 @@ int ec_privkey_import_der(const secp256k1_context* ctx, unsigned char *out32, co
if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) {
return 0;
}
- memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
+ if (privkey[1]) memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
if (!secp256k1_ec_seckey_verify(ctx, out32)) {
memset(out32, 0, 32);
return 0;
diff --git a/src/secp256k1/contrib/lax_der_privatekey_parsing.h b/src/secp256k1/contrib/lax_der_privatekey_parsing.h
index 602c7c556a..1a8ad8ae0c 100644
--- a/src/secp256k1/contrib/lax_der_privatekey_parsing.h
+++ b/src/secp256k1/contrib/lax_der_privatekey_parsing.h
@@ -28,7 +28,13 @@
#ifndef SECP256K1_CONTRIB_BER_PRIVATEKEY_H
#define SECP256K1_CONTRIB_BER_PRIVATEKEY_H
+/* #include secp256k1.h only when it hasn't been included yet.
+ This enables this file to be #included directly in other project
+ files (such as tests.c) without the need to set an explicit -I flag,
+ which would be necessary to locate secp256k1.h. */
+#ifndef SECP256K1_H
#include <secp256k1.h>
+#endif
#ifdef __cplusplus
extern "C" {
diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h
index d368488af2..7be7fd5723 100644
--- a/src/secp256k1/include/secp256k1.h
+++ b/src/secp256k1/include/secp256k1.h
@@ -7,7 +7,9 @@ extern "C" {
#include <stddef.h>
-/* These rules specify the order of arguments in API calls:
+/* Unless explicitly stated all pointer arguments must not be NULL.
+ *
+ * The following rules specify the order of arguments in API calls:
*
* 1. Context pointers go first, followed by output arguments, combined
* output/input arguments, and finally input-only arguments.
@@ -61,8 +63,9 @@ typedef struct secp256k1_scratch_space_struct secp256k1_scratch_space;
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
- * If you need to convert to a format suitable for storage, transmission, or
- * comparison, use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse.
+ * If you need to convert to a format suitable for storage or transmission,
+ * use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse. To
+ * compare keys, use secp256k1_ec_pubkey_cmp.
*/
typedef struct {
unsigned char data[64];
@@ -127,6 +130,17 @@ typedef int (*secp256k1_nonce_function)(
# define SECP256K1_INLINE inline
# endif
+/** When this header is used at build time, the SECP256K1_BUILD define needs to be set
+ * to correctly set up export attributes and nullness checks. This is normally done
+ * by secp256k1.c, but to guard against this header being included before secp256k1.c
+ * has had a chance to set the define (e.g. via test harnesses that just include
+ * secp256k1.c), we set SECP256K1_NO_BUILD when this header is processed without the
+ * BUILD define so that this condition can be caught.
+ */
+#ifndef SECP256K1_BUILD
+# define SECP256K1_NO_BUILD
+#endif
+
#ifndef SECP256K1_API
# if defined(_WIN32)
# ifdef SECP256K1_BUILD
@@ -370,6 +384,21 @@ SECP256K1_API int secp256k1_ec_pubkey_serialize(
unsigned int flags
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
+/** Compare two public keys using the lexicographic order of their compressed serialization.
+ *
+ * Returns: <0 if the first public key is less than the second
+ * >0 if the first public key is greater than the second
+ * 0 if the two public keys are equal
+ * Args: ctx: a secp256k1 context object.
+ * In: pubkey1: first public key to compare
+ * pubkey2: second public key to compare
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_cmp(
+ const secp256k1_context* ctx,
+ const secp256k1_pubkey* pubkey1,
+ const secp256k1_pubkey* pubkey2
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+
/** Parse an ECDSA signature in compact (64 bytes) format.
*
* Returns: 1 when the signature could be parsed, 0 otherwise.
@@ -764,6 +793,31 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine(
size_t n
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+/** Compute a tagged hash as defined in BIP-340.
+ *
+ * This is useful for creating a message hash and achieving domain separation
+ * through an application-specific tag. This function returns
+ * SHA256(SHA256(tag)||SHA256(tag)||msg). Therefore, tagged hash
+ * implementations optimized for a specific tag can precompute the SHA256 state
+ * after hashing the tag hashes.
+ *
+ * Returns 0 if the arguments are invalid and 1 otherwise.
+ * Args: ctx: pointer to a context object
+ * Out: hash32: pointer to a 32-byte array to store the resulting hash
+ * In: tag: pointer to an array containing the tag
+ * taglen: length of the tag array
+ * msg: pointer to an array containing the message
+ * msglen: length of the message array
+ */
+SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_tagged_sha256(
+ const secp256k1_context* ctx,
+ unsigned char *hash32,
+ const unsigned char *tag,
+ size_t taglen,
+ const unsigned char *msg,
+ size_t msglen
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5);
+
#ifdef __cplusplus
}
#endif
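A hedged usage sketch for the two new entry points above; `ctx`, `pk1` and `pk2` are assumed to be an already-created context and two parsed public keys:

    unsigned char hash32[32];
    const unsigned char tag[] = {'m', 'y', '-', 'a', 'p', 'p'}; /* example tag */
    const unsigned char msg[] = {'h', 'e', 'l', 'l', 'o'};      /* example message */
    if (secp256k1_tagged_sha256(ctx, hash32, tag, sizeof(tag), msg, sizeof(msg))) {
        /* hash32 == SHA256(SHA256(tag) || SHA256(tag) || msg) */
    }
    if (secp256k1_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0) {
        /* pk1 sorts before pk2 in compressed-serialization order */
    }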
diff --git a/src/secp256k1/include/secp256k1_extrakeys.h b/src/secp256k1/include/secp256k1_extrakeys.h
index 6fc7b290f8..0a37fb6b9d 100644
--- a/src/secp256k1/include/secp256k1_extrakeys.h
+++ b/src/secp256k1/include/secp256k1_extrakeys.h
@@ -15,9 +15,9 @@ extern "C" {
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
- * If you need to convert to a format suitable for storage, transmission, or
- * comparison, use secp256k1_xonly_pubkey_serialize and
- * secp256k1_xonly_pubkey_parse.
+ * If you need to convert to a format suitable for storage or transmission,
+ * use secp256k1_xonly_pubkey_serialize and secp256k1_xonly_pubkey_parse. To
+ * compare keys, use secp256k1_xonly_pubkey_cmp.
*/
typedef struct {
unsigned char data[64];
@@ -67,6 +67,21 @@ SECP256K1_API int secp256k1_xonly_pubkey_serialize(
const secp256k1_xonly_pubkey* pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+/** Compare two x-only public keys using lexicographic order
+ *
+ * Returns: <0 if the first public key is less than the second
+ * >0 if the first public key is greater than the second
+ * 0 if the two public keys are equal
+ * Args: ctx: a secp256k1 context object.
+ * In: pk1: first public key to compare
+ * pk2: second public key to compare
+ */
+SECP256K1_API int secp256k1_xonly_pubkey_cmp(
+ const secp256k1_context* ctx,
+ const secp256k1_xonly_pubkey* pk1,
+ const secp256k1_xonly_pubkey* pk2
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
+
/** Converts a secp256k1_pubkey into a secp256k1_xonly_pubkey.
*
* Returns: 1 if the public key was successfully converted
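A hedged sketch of using the new comparison function for canonical key ordering, e.g. with qsort; `g_ctx` is an assumed, already-created context:

    static const secp256k1_context* g_ctx; /* assumed to be initialized elsewhere */

    static int xonly_cmp_wrapper(const void* a, const void* b)
    {
        return secp256k1_xonly_pubkey_cmp(g_ctx,
                                          (const secp256k1_xonly_pubkey*)a,
                                          (const secp256k1_xonly_pubkey*)b);
    }
    /* Usage: qsort(keys, n_keys, sizeof(secp256k1_xonly_pubkey), xonly_cmp_wrapper); */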
diff --git a/src/secp256k1/include/secp256k1_schnorrsig.h b/src/secp256k1/include/secp256k1_schnorrsig.h
index 0150cd3395..74cbcac45e 100644
--- a/src/secp256k1/include/secp256k1_schnorrsig.h
+++ b/src/secp256k1/include/secp256k1_schnorrsig.h
@@ -23,24 +23,29 @@ extern "C" {
*
* Returns: 1 if a nonce was successfully generated. 0 will cause signing to
* return an error.
- * Out: nonce32: pointer to a 32-byte array to be filled by the function.
- * In: msg32: the 32-byte message hash being verified (will not be NULL)
- * key32: pointer to a 32-byte secret key (will not be NULL)
- * xonly_pk32: the 32-byte serialized xonly pubkey corresponding to key32
- * (will not be NULL)
- * algo16: pointer to a 16-byte array describing the signature
- * algorithm (will not be NULL).
- * data: Arbitrary data pointer that is passed through.
+ * Out: nonce32: pointer to a 32-byte array to be filled by the function
+ * In: msg: the message being verified. Is NULL if and only if msglen
+ * is 0.
+ * msglen: the length of the message
+ * key32: pointer to a 32-byte secret key (will not be NULL)
+ * xonly_pk32: the 32-byte serialized xonly pubkey corresponding to key32
+ * (will not be NULL)
+ * algo: pointer to an array describing the signature
+ * algorithm (will not be NULL)
+ * algolen: the length of the algo array
+ * data: arbitrary data pointer that is passed through
*
* Except for test cases, this function should compute some cryptographic hash of
* the message, the key, the pubkey, the algorithm description, and data.
*/
typedef int (*secp256k1_nonce_function_hardened)(
unsigned char *nonce32,
- const unsigned char *msg32,
+ const unsigned char *msg,
+ size_t msglen,
const unsigned char *key32,
const unsigned char *xonly_pk32,
- const unsigned char *algo16,
+ const unsigned char *algo,
+ size_t algolen,
void *data
);
@@ -50,59 +55,113 @@ typedef int (*secp256k1_nonce_function_hardened)(
*
* If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
* auxiliary random data as defined in BIP-340. If the data pointer is NULL,
- * schnorrsig_sign does not produce BIP-340 compliant signatures. The algo16
- * argument must be non-NULL, otherwise the function will fail and return 0.
- * The hash will be tagged with algo16 after removing all terminating null
- * bytes. Therefore, to create BIP-340 compliant signatures, algo16 must be set
- * to "BIP0340/nonce\0\0\0"
+ * the nonce derivation procedure follows BIP-340 by setting the auxiliary
+ * random data to zero. The algo argument must be non-NULL, otherwise the
+ * function will fail and return 0. The hash will be tagged with algo.
+ * Therefore, to create BIP-340 compliant signatures, algo must be set to
+ * "BIP0340/nonce" and algolen to 13.
*/
SECP256K1_API extern const secp256k1_nonce_function_hardened secp256k1_nonce_function_bip340;
+/** Data structure that contains additional arguments for schnorrsig_sign_custom.
+ *
+ * A schnorrsig_extraparams structure object can be initialized correctly by
+ * setting it to SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT.
+ *
+ * Members:
+ * magic: set to SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC at initialization
+ * and has no other function than making sure the object is
+ * initialized.
+ * noncefp: pointer to a nonce generation function. If NULL,
+ * secp256k1_nonce_function_bip340 is used
+ * ndata: pointer to arbitrary data used by the nonce generation function
+ * (can be NULL). If it is non-NULL and
+ * secp256k1_nonce_function_bip340 is used, then ndata must be a
+ * pointer to 32-byte auxiliary randomness as per BIP-340.
+ */
+typedef struct {
+ unsigned char magic[4];
+ secp256k1_nonce_function_hardened noncefp;
+ void* ndata;
+} secp256k1_schnorrsig_extraparams;
+
+#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC { 0xda, 0x6f, 0xb3, 0x8c }
+#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT {\
+ SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC,\
+ NULL,\
+ NULL\
+}
+
/** Create a Schnorr signature.
*
* Does _not_ strictly follow BIP-340 because it does not verify the resulting
* signature. Instead, you can manually use secp256k1_schnorrsig_verify and
* abort if it fails.
*
- * Otherwise BIP-340 compliant if the noncefp argument is NULL or
- * secp256k1_nonce_function_bip340 and the ndata argument is 32-byte auxiliary
- * randomness.
+ * This function only signs 32-byte messages. If you have messages of a
+ * different size (or the same size but without a context-specific tag
+ * prefix), it is recommended to create a 32-byte message hash with
+ * secp256k1_tagged_sha256 and then sign the hash. Tagged hashing allows
+ * providing a context-specific tag for domain separation. This prevents
+ * signatures from being valid in multiple contexts by accident.
*
* Returns 1 on success, 0 on failure.
* Args: ctx: pointer to a context object, initialized for signing (cannot be NULL)
* Out: sig64: pointer to a 64-byte array to store the serialized signature (cannot be NULL)
* In: msg32: the 32-byte message being signed (cannot be NULL)
* keypair: pointer to an initialized keypair (cannot be NULL)
- * noncefp: pointer to a nonce generation function. If NULL, secp256k1_nonce_function_bip340 is used
- * ndata: pointer to arbitrary data used by the nonce generation
- * function (can be NULL). If it is non-NULL and
- * secp256k1_nonce_function_bip340 is used, then ndata must be a
- * pointer to 32-byte auxiliary randomness as per BIP-340.
+ * aux_rand32: 32 bytes of fresh randomness. While recommended to provide
+ * this, it is only supplemental to security and can be NULL. See
+ * BIP-340 "Default Signing" for a full explanation of this
+ * argument and for guidance if randomness is expensive.
*/
SECP256K1_API int secp256k1_schnorrsig_sign(
const secp256k1_context* ctx,
unsigned char *sig64,
const unsigned char *msg32,
const secp256k1_keypair *keypair,
- secp256k1_nonce_function_hardened noncefp,
- void *ndata
+ unsigned char *aux_rand32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
+/** Create a Schnorr signature with a more flexible API.
+ *
+ * Same arguments as secp256k1_schnorrsig_sign except that it allows signing
+ * variable length messages and accepts a pointer to an extraparams object that
+ * allows customizing signing by passing additional arguments.
+ *
+ * Creates the same signatures as schnorrsig_sign if msglen is 32 and
+ * extraparams.ndata is the same as aux_rand32.
+ *
+ * In: msg: the message being signed. Can only be NULL if msglen is 0.
+ * msglen: length of the message
+ * extraparams: pointer to an extraparams object (can be NULL)
+ */
+SECP256K1_API int secp256k1_schnorrsig_sign_custom(
+ const secp256k1_context* ctx,
+ unsigned char *sig64,
+ const unsigned char *msg,
+ size_t msglen,
+ const secp256k1_keypair *keypair,
+ secp256k1_schnorrsig_extraparams *extraparams
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5);
+
/** Verify a Schnorr signature.
*
* Returns: 1: correct signature
* 0: incorrect signature
* Args: ctx: a secp256k1 context object, initialized for verification.
* In: sig64: pointer to the 64-byte signature to verify (cannot be NULL)
- * msg32: the 32-byte message being verified (cannot be NULL)
+ * msg: the message being verified. Can only be NULL if msglen is 0.
+ * msglen: length of the message
* pubkey: pointer to an x-only public key to verify with (cannot be NULL)
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorrsig_verify(
const secp256k1_context* ctx,
const unsigned char *sig64,
- const unsigned char *msg32,
+ const unsigned char *msg,
+ size_t msglen,
const secp256k1_xonly_pubkey *pubkey
-) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5);
#ifdef __cplusplus
}
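A hedged end-to-end sketch of the reworked signing API above; `ctx`, `keypair` and `xonly_pk` are assumed to be created and initialized elsewhere:

    unsigned char sig64[64];
    unsigned char aux_rand32[32] = {0}; /* fill with fresh randomness in real code */
    unsigned char msg32[32] = {0};      /* e.g. output of secp256k1_tagged_sha256 */

    if (!secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypair, aux_rand32)) {
        /* handle signing failure */
    }
    if (!secp256k1_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &xonly_pk)) {
        /* signature did not verify */
    }

    /* Variable-length messages go through sign_custom with default extraparams: */
    secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
    const unsigned char msg[] = {'m', 's', 'g'};
    secp256k1_schnorrsig_sign_custom(ctx, sig64, msg, sizeof(msg), &keypair, &extraparams);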
diff --git a/src/secp256k1/obj/.gitignore b/src/secp256k1/obj/.gitignore
deleted file mode 100644
index e69de29bb2..0000000000
--- a/src/secp256k1/obj/.gitignore
+++ /dev/null
diff --git a/src/secp256k1/src/bench_ecdh.c b/src/secp256k1/src/bench_ecdh.c
index ab4b8f4244..cb020d26b4 100644
--- a/src/secp256k1/src/bench_ecdh.c
+++ b/src/secp256k1/src/bench_ecdh.c
@@ -6,8 +6,8 @@
#include <string.h>
-#include "include/secp256k1.h"
-#include "include/secp256k1_ecdh.h"
+#include "../include/secp256k1.h"
+#include "../include/secp256k1_ecdh.h"
#include "util.h"
#include "bench.h"
diff --git a/src/secp256k1/src/bench_ecmult.c b/src/secp256k1/src/bench_ecmult.c
index 204e85a5dd..1d463f92d0 100644
--- a/src/secp256k1/src/bench_ecmult.c
+++ b/src/secp256k1/src/bench_ecmult.c
@@ -5,7 +5,8 @@
***********************************************************************/
#include <stdio.h>
-#include "include/secp256k1.h"
+#include "secp256k1.c"
+#include "../include/secp256k1.h"
#include "util.h"
#include "hash_impl.h"
@@ -14,33 +15,177 @@
#include "scalar_impl.h"
#include "ecmult_impl.h"
#include "bench.h"
-#include "secp256k1.c"
#define POINTS 32768
+void help(char **argv) {
+ printf("Benchmark EC multiplication algorithms\n");
+ printf("\n");
+ printf("Usage: %s <help|pippenger_wnaf|strauss_wnaf|simple>\n", argv[0]);
+ printf("The output shows the number of multiplied and summed points right after the\n");
+ printf("function name. The letter 'g' indicates that one of the points is the generator.\n");
+ printf("The benchmarks are divided by the number of points.\n");
+ printf("\n");
+ printf("default (ecmult_multi): picks pippenger_wnaf or strauss_wnaf depending on the\n");
+ printf(" batch size\n");
+ printf("pippenger_wnaf: for all batch sizes\n");
+ printf("strauss_wnaf: for all batch sizes\n");
+ printf("simple: multiply and sum each point individually\n");
+}
+
typedef struct {
/* Setup once in advance */
secp256k1_context* ctx;
secp256k1_scratch_space* scratch;
secp256k1_scalar* scalars;
secp256k1_ge* pubkeys;
+ secp256k1_gej* pubkeys_gej;
secp256k1_scalar* seckeys;
secp256k1_gej* expected_output;
secp256k1_ecmult_multi_func ecmult_multi;
- /* Changes per test */
+ /* Changes per benchmark */
size_t count;
int includes_g;
- /* Changes per test iteration */
+ /* Changes per benchmark iteration, used to pick different scalars and pubkeys
+ * in each run. */
size_t offset1;
size_t offset2;
- /* Test output. */
+ /* Benchmark output. */
secp256k1_gej* output;
} bench_data;
-static int bench_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, void* arg) {
+/* Hashes x into [0, POINTS) twice and stores the results in offset1 and offset2. */
+static void hash_into_offset(bench_data* data, size_t x) {
+ data->offset1 = (x * 0x537b7f6f + 0x8f66a481) % POINTS;
+ data->offset2 = (x * 0x7f6f537b + 0x6a1a8f49) % POINTS;
+}
+
+/* Check correctness of the benchmark by computing
+ * sum(outputs) ?= (sum(scalars_gen) + sum(seckeys)*sum(scalars))*G */
+static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) {
+ int i;
+ secp256k1_gej sum_output, tmp;
+ secp256k1_scalar sum_scalars;
+
+ secp256k1_gej_set_infinity(&sum_output);
+ secp256k1_scalar_clear(&sum_scalars);
+ for (i = 0; i < iters; ++i) {
+ secp256k1_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL);
+ if (scalar_gen_offset != NULL) {
+ secp256k1_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]);
+ }
+ if (seckey_offset != NULL) {
+ secp256k1_scalar s = data->seckeys[(*seckey_offset+i) % POINTS];
+ secp256k1_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]);
+ secp256k1_scalar_add(&sum_scalars, &sum_scalars, &s);
+ }
+ }
+ secp256k1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars);
+ secp256k1_gej_neg(&tmp, &tmp);
+ secp256k1_gej_add_var(&tmp, &tmp, &sum_output, NULL);
+ CHECK(secp256k1_gej_is_infinity(&tmp));
+}
+
+static void bench_ecmult_setup(void* arg) {
+ bench_data* data = (bench_data*)arg;
+ /* Re-randomize offset to ensure that we're using different scalars and
+ * group elements in each run. */
+ hash_into_offset(data, data->offset1);
+}
+
+static void bench_ecmult_gen(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ int i;
+
+ for (i = 0; i < iters; ++i) {
+ secp256k1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]);
+ }
+}
+
+static void bench_ecmult_gen_teardown(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters);
+}
+
+static void bench_ecmult_const(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ int i;
+
+ for (i = 0; i < iters; ++i) {
+ secp256k1_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256);
+ }
+}
+
+static void bench_ecmult_const_teardown(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters);
+}
+
+static void bench_ecmult_1(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ int i;
+
+ for (i = 0; i < iters; ++i) {
+ secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL);
+ }
+}
+
+static void bench_ecmult_1_teardown(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters);
+}
+
+static void bench_ecmult_1g(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ secp256k1_scalar zero;
+ int i;
+
+ secp256k1_scalar_set_int(&zero, 0);
+ for (i = 0; i < iters; ++i) {
+ secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]);
+ }
+}
+
+static void bench_ecmult_1g_teardown(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters);
+}
+
+static void bench_ecmult_2g(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ int i;
+
+ for (i = 0; i < iters/2; ++i) {
+ secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]);
+ }
+}
+
+static void bench_ecmult_2g_teardown(void* arg, int iters) {
+ bench_data* data = (bench_data*)arg;
+ bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, &data->offset1, iters/2);
+}
+
+static void run_ecmult_bench(bench_data* data, int iters) {
+ char str[32];
+ sprintf(str, "ecmult_gen");
+ run_benchmark(str, bench_ecmult_gen, bench_ecmult_setup, bench_ecmult_gen_teardown, data, 10, iters);
+ sprintf(str, "ecmult_const");
+ run_benchmark(str, bench_ecmult_const, bench_ecmult_setup, bench_ecmult_const_teardown, data, 10, iters);
+ /* ecmult with a non-generator point */
+ sprintf(str, "ecmult 1");
+ run_benchmark(str, bench_ecmult_1, bench_ecmult_setup, bench_ecmult_1_teardown, data, 10, iters);
+ /* ecmult with generator point */
+ sprintf(str, "ecmult 1g");
+ run_benchmark(str, bench_ecmult_1g, bench_ecmult_setup, bench_ecmult_1g_teardown, data, 10, iters);
+ /* ecmult with generator and non-generator point. The reported time is per point. */
+ sprintf(str, "ecmult 2g");
+ run_benchmark(str, bench_ecmult_2g, bench_ecmult_setup, bench_ecmult_2g_teardown, data, 10, 2*iters);
+}
+
+static int bench_ecmult_multi_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, void* arg) {
bench_data* data = (bench_data*)arg;
if (data->includes_g) ++idx;
if (idx == 0) {
@@ -53,7 +198,7 @@ static int bench_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, vo
return 1;
}
-static void bench_ecmult(void* arg, int iters) {
+static void bench_ecmult_multi(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int includes_g = data->includes_g;
@@ -62,19 +207,18 @@ static void bench_ecmult(void* arg, int iters) {
iters = iters / data->count;
for (iter = 0; iter < iters; ++iter) {
- data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g);
+ data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_ecmult_multi_callback, arg, count - includes_g);
data->offset1 = (data->offset1 + count) % POINTS;
data->offset2 = (data->offset2 + count - 1) % POINTS;
}
}
-static void bench_ecmult_setup(void* arg) {
+static void bench_ecmult_multi_setup(void* arg) {
bench_data* data = (bench_data*)arg;
- data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS;
- data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS;
+ hash_into_offset(data, data->count);
}
-static void bench_ecmult_teardown(void* arg, int iters) {
+static void bench_ecmult_multi_teardown(void* arg, int iters) {
bench_data* data = (bench_data*)arg;
int iter;
iters = iters / data->count;
@@ -88,7 +232,7 @@ static void bench_ecmult_teardown(void* arg, int iters) {
static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) {
secp256k1_sha256 sha256;
- unsigned char c[11] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0};
+ unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0};
unsigned char buf[32];
int overflow = 0;
c[6] = num;
@@ -102,7 +246,7 @@ static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) {
CHECK(!overflow);
}
-static void run_test(bench_data* data, size_t count, int includes_g, int num_iters) {
+static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_g, int num_iters) {
char str[32];
static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
size_t iters = 1 + num_iters / count;
@@ -112,8 +256,7 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite
data->includes_g = includes_g;
/* Compute (the negation of) the expected results directly. */
- data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS;
- data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS;
+ hash_into_offset(data, data->count);
for (iter = 0; iter < iters; ++iter) {
secp256k1_scalar tmp;
secp256k1_scalar total = data->scalars[(data->offset1++) % POINTS];
@@ -127,25 +270,26 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite
}
/* Run the benchmark. */
- sprintf(str, includes_g ? "ecmult_%ig" : "ecmult_%i", (int)count);
- run_benchmark(str, bench_ecmult, bench_ecmult_setup, bench_ecmult_teardown, data, 10, count * iters);
+ sprintf(str, includes_g ? "ecmult_multi %ig" : "ecmult_multi %i", (int)count);
+ run_benchmark(str, bench_ecmult_multi, bench_ecmult_multi_setup, bench_ecmult_multi_teardown, data, 10, count * iters);
}
int main(int argc, char **argv) {
bench_data data;
int i, p;
- secp256k1_gej* pubkeys_gej;
size_t scratch_size;
int iters = get_iters(10000);
- data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
- scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
- data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size);
data.ecmult_multi = secp256k1_ecmult_multi_var;
if (argc > 1) {
- if(have_flag(argc, argv, "pippenger_wnaf")) {
+ if(have_flag(argc, argv, "-h")
+ || have_flag(argc, argv, "--help")
+ || have_flag(argc, argv, "help")) {
+ help(argv);
+ return 1;
+ } else if(have_flag(argc, argv, "pippenger_wnaf")) {
printf("Using pippenger_wnaf:\n");
data.ecmult_multi = secp256k1_ecmult_pippenger_batch_single;
} else if(have_flag(argc, argv, "strauss_wnaf")) {
@@ -153,39 +297,48 @@ int main(int argc, char **argv) {
data.ecmult_multi = secp256k1_ecmult_strauss_batch_single;
} else if(have_flag(argc, argv, "simple")) {
printf("Using simple algorithm:\n");
- data.ecmult_multi = secp256k1_ecmult_multi_var;
- secp256k1_scratch_space_destroy(data.ctx, data.scratch);
- data.scratch = NULL;
} else {
- fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]);
- fprintf(stderr, "Use 'pippenger_wnaf', 'strauss_wnaf', 'simple' or no argument to benchmark a combined algorithm.\n");
+ fprintf(stderr, "%s: unrecognized argument '%s'.\n\n", argv[0], argv[1]);
+ help(argv);
return 1;
}
}
+ data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16;
+ if (!have_flag(argc, argv, "simple")) {
+ data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size);
+ } else {
+ data.scratch = NULL;
+ }
+
/* Allocate stuff */
data.scalars = malloc(sizeof(secp256k1_scalar) * POINTS);
data.seckeys = malloc(sizeof(secp256k1_scalar) * POINTS);
data.pubkeys = malloc(sizeof(secp256k1_ge) * POINTS);
+ data.pubkeys_gej = malloc(sizeof(secp256k1_gej) * POINTS);
data.expected_output = malloc(sizeof(secp256k1_gej) * (iters + 1));
data.output = malloc(sizeof(secp256k1_gej) * (iters + 1));
/* Generate a set of scalars, and private/public keypairs. */
- pubkeys_gej = malloc(sizeof(secp256k1_gej) * POINTS);
- secp256k1_gej_set_ge(&pubkeys_gej[0], &secp256k1_ge_const_g);
+ secp256k1_gej_set_ge(&data.pubkeys_gej[0], &secp256k1_ge_const_g);
secp256k1_scalar_set_int(&data.seckeys[0], 1);
for (i = 0; i < POINTS; ++i) {
generate_scalar(i, &data.scalars[i]);
if (i) {
- secp256k1_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL);
+ secp256k1_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL);
secp256k1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]);
}
}
- secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS);
- free(pubkeys_gej);
+ secp256k1_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS);
+
+
+ /* Initialize offset1 and offset2 */
+ hash_into_offset(&data, 0);
+ run_ecmult_bench(&data, iters);
for (i = 1; i <= 8; ++i) {
- run_test(&data, i, 1, iters);
+ run_ecmult_multi_bench(&data, i, 1, iters);
}
/* This is disabled with low count of iterations because the loop runs 77 times even with iters=1
@@ -194,7 +347,7 @@ int main(int argc, char **argv) {
if (iters > 2) {
for (p = 0; p <= 11; ++p) {
for (i = 9; i <= 16; ++i) {
- run_test(&data, i << p, 1, iters);
+ run_ecmult_multi_bench(&data, i << p, 1, iters);
}
}
}
@@ -205,6 +358,7 @@ int main(int argc, char **argv) {
secp256k1_context_destroy(data.ctx);
free(data.scalars);
free(data.pubkeys);
+ free(data.pubkeys_gej);
free(data.seckeys);
free(data.output);
free(data.expected_output);
diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c
index 73b8a24ccb..161b1c4a47 100644
--- a/src/secp256k1/src/bench_internal.c
+++ b/src/secp256k1/src/bench_internal.c
@@ -5,7 +5,8 @@
***********************************************************************/
#include <stdio.h>
-#include "include/secp256k1.h"
+#include "secp256k1.c"
+#include "../include/secp256k1.h"
#include "assumptions.h"
#include "util.h"
@@ -16,7 +17,6 @@
#include "ecmult_const_impl.h"
#include "ecmult_impl.h"
#include "bench.h"
-#include "secp256k1.c"
typedef struct {
secp256k1_scalar scalar[2];
diff --git a/src/secp256k1/src/bench_recover.c b/src/secp256k1/src/bench_recover.c
index 3f6270ce84..4bcac19dc0 100644
--- a/src/secp256k1/src/bench_recover.c
+++ b/src/secp256k1/src/bench_recover.c
@@ -4,8 +4,8 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
-#include "include/secp256k1.h"
-#include "include/secp256k1_recovery.h"
+#include "../include/secp256k1.h"
+#include "../include/secp256k1_recovery.h"
#include "util.h"
#include "bench.h"
diff --git a/src/secp256k1/src/bench_schnorrsig.c b/src/secp256k1/src/bench_schnorrsig.c
index f7f591c41d..d95bc00f48 100644
--- a/src/secp256k1/src/bench_schnorrsig.c
+++ b/src/secp256k1/src/bench_schnorrsig.c
@@ -8,11 +8,13 @@
#include <stdlib.h>
-#include "include/secp256k1.h"
-#include "include/secp256k1_schnorrsig.h"
+#include "../include/secp256k1.h"
+#include "../include/secp256k1_schnorrsig.h"
#include "util.h"
#include "bench.h"
+#define MSGLEN 32
+
typedef struct {
secp256k1_context *ctx;
int n;
@@ -26,13 +28,13 @@ typedef struct {
void bench_schnorrsig_sign(void* arg, int iters) {
bench_schnorrsig_data *data = (bench_schnorrsig_data *)arg;
int i;
- unsigned char msg[32] = "benchmarkexamplemessagetemplate";
+ unsigned char msg[MSGLEN] = {0};
unsigned char sig[64];
for (i = 0; i < iters; i++) {
msg[0] = i;
msg[1] = i >> 8;
- CHECK(secp256k1_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL));
+ CHECK(secp256k1_schnorrsig_sign_custom(data->ctx, sig, msg, MSGLEN, data->keypairs[i], NULL));
}
}
@@ -43,7 +45,7 @@ void bench_schnorrsig_verify(void* arg, int iters) {
for (i = 0; i < iters; i++) {
secp256k1_xonly_pubkey pk;
CHECK(secp256k1_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1);
- CHECK(secp256k1_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk));
+ CHECK(secp256k1_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], MSGLEN, &pk));
}
}
@@ -58,9 +60,10 @@ int main(void) {
data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *));
+ CHECK(MSGLEN >= 4);
for (i = 0; i < iters; i++) {
unsigned char sk[32];
- unsigned char *msg = (unsigned char *)malloc(32);
+ unsigned char *msg = (unsigned char *)malloc(MSGLEN);
unsigned char *sig = (unsigned char *)malloc(64);
secp256k1_keypair *keypair = (secp256k1_keypair *)malloc(sizeof(*keypair));
unsigned char *pk_char = (unsigned char *)malloc(32);
@@ -69,7 +72,7 @@ int main(void) {
msg[1] = sk[1] = i >> 8;
msg[2] = sk[2] = i >> 16;
msg[3] = sk[3] = i >> 24;
- memset(&msg[4], 'm', 28);
+ memset(&msg[4], 'm', MSGLEN - 4);
memset(&sk[4], 's', 28);
data.keypairs[i] = keypair;
@@ -78,7 +81,7 @@ int main(void) {
data.sigs[i] = sig;
CHECK(secp256k1_keypair_create(data.ctx, keypair, sk));
- CHECK(secp256k1_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL));
+ CHECK(secp256k1_schnorrsig_sign_custom(data.ctx, sig, msg, MSGLEN, keypair, NULL));
CHECK(secp256k1_keypair_xonly_pub(data.ctx, &pk, NULL, keypair));
CHECK(secp256k1_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1);
}
diff --git a/src/secp256k1/src/bench_sign.c b/src/secp256k1/src/bench_sign.c
index 933f367c4b..f659c18c92 100644
--- a/src/secp256k1/src/bench_sign.c
+++ b/src/secp256k1/src/bench_sign.c
@@ -4,7 +4,7 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
-#include "include/secp256k1.h"
+#include "../include/secp256k1.h"
#include "util.h"
#include "bench.h"
diff --git a/src/secp256k1/src/bench_verify.c b/src/secp256k1/src/bench_verify.c
index c56aefd369..565ae4beec 100644
--- a/src/secp256k1/src/bench_verify.c
+++ b/src/secp256k1/src/bench_verify.c
@@ -7,7 +7,7 @@
#include <stdio.h>
#include <string.h>
-#include "include/secp256k1.h"
+#include "../include/secp256k1.h"
#include "util.h"
#include "bench.h"
diff --git a/src/secp256k1/src/ecdsa_impl.h b/src/secp256k1/src/ecdsa_impl.h
index 156a33d112..c32141e887 100644
--- a/src/secp256k1/src/ecdsa_impl.h
+++ b/src/secp256k1/src/ecdsa_impl.h
@@ -140,7 +140,7 @@ static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char
overflow = 1;
}
if (!overflow) {
- memcpy(ra + 32 - rlen, *sig, rlen);
+ if (rlen) memcpy(ra + 32 - rlen, *sig, rlen);
secp256k1_scalar_set_b32(r, ra, &overflow);
}
if (overflow) {
diff --git a/src/secp256k1/src/ecmult.h b/src/secp256k1/src/ecmult.h
index 7ab617e20e..84537bbfed 100644
--- a/src/secp256k1/src/ecmult.h
+++ b/src/secp256k1/src/ecmult.h
@@ -17,7 +17,6 @@ typedef struct {
secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
} secp256k1_ecmult_context;
-static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx);
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc);
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src);
diff --git a/src/secp256k1/src/ecmult_gen.h b/src/secp256k1/src/ecmult_gen.h
index 539618dcbb..05cf4d52cc 100644
--- a/src/secp256k1/src/ecmult_gen.h
+++ b/src/secp256k1/src/ecmult_gen.h
@@ -35,7 +35,6 @@ typedef struct {
secp256k1_gej initial;
} secp256k1_ecmult_gen_context;
-static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx);
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, void **prealloc);
static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context* src);
diff --git a/src/secp256k1/src/gen_context.c b/src/secp256k1/src/gen_context.c
index 024c557261..f9176eb996 100644
--- a/src/secp256k1/src/gen_context.c
+++ b/src/secp256k1/src/gen_context.c
@@ -13,7 +13,13 @@
/* We can't require the precomputed tables when creating them. */
#undef USE_ECMULT_STATIC_PRECOMPUTATION
-#include "include/secp256k1.h"
+/* In principle we could use ASM, but this yields only a minor speedup in
+ build time and it's very complicated. In particular when cross-compiling, we'd
+ need to build the ASM for the build and the host machine. */
+#undef USE_EXTERNAL_ASM
+#undef USE_ASM_X86_64
+
+#include "../include/secp256k1.h"
#include "assumptions.h"
#include "util.h"
#include "field_impl.h"
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
index 19ebd8f44e..47aea32be1 100644
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -100,8 +100,8 @@ static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
secp256k1_fe z2, z3;
- r->infinity = a->infinity;
if (a->infinity) {
+ secp256k1_ge_set_infinity(r);
return;
}
secp256k1_fe_inv_var(&a->z, &a->z);
@@ -110,8 +110,7 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
secp256k1_fe_mul(&a->x, &a->x, &z2);
secp256k1_fe_mul(&a->y, &a->y, &z3);
secp256k1_fe_set_int(&a->z, 1);
- r->x = a->x;
- r->y = a->y;
+ secp256k1_ge_set_xy(r, &a->x, &a->y);
}
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) {
@@ -120,7 +119,9 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
size_t last_i = SIZE_MAX;
for (i = 0; i < len; i++) {
- if (!a[i].infinity) {
+ if (a[i].infinity) {
+ secp256k1_ge_set_infinity(&r[i]);
+ } else {
/* Use destination's x coordinates as scratch space */
if (last_i == SIZE_MAX) {
r[i].x = a[i].z;
@@ -148,7 +149,6 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
r[last_i].x = u;
for (i = 0; i < len; i++) {
- r[i].infinity = a[i].infinity;
if (!a[i].infinity) {
secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
}
@@ -311,7 +311,7 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
* point will be gibberish (z = 0 but infinity = 0).
*/
if (a->infinity) {
- r->infinity = 1;
+ secp256k1_gej_set_infinity(r);
if (rzr != NULL) {
secp256k1_fe_set_int(rzr, 1);
}
diff --git a/src/secp256k1/src/modules/ecdh/main_impl.h b/src/secp256k1/src/modules/ecdh/main_impl.h
index 1ac67086be..5408c9de70 100644
--- a/src/secp256k1/src/modules/ecdh/main_impl.h
+++ b/src/secp256k1/src/modules/ecdh/main_impl.h
@@ -7,8 +7,8 @@
#ifndef SECP256K1_MODULE_ECDH_MAIN_H
#define SECP256K1_MODULE_ECDH_MAIN_H
-#include "include/secp256k1_ecdh.h"
-#include "ecmult_const_impl.h"
+#include "../../../include/secp256k1_ecdh.h"
+#include "../../ecmult_const_impl.h"
static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x32, const unsigned char *y32, void *data) {
unsigned char version = (y32[31] & 0x01) | 0x02;
diff --git a/src/secp256k1/src/modules/extrakeys/main_impl.h b/src/secp256k1/src/modules/extrakeys/main_impl.h
index 7390b22718..8607bbede7 100644
--- a/src/secp256k1/src/modules/extrakeys/main_impl.h
+++ b/src/secp256k1/src/modules/extrakeys/main_impl.h
@@ -7,8 +7,8 @@
#ifndef SECP256K1_MODULE_EXTRAKEYS_MAIN_H
#define SECP256K1_MODULE_EXTRAKEYS_MAIN_H
-#include "include/secp256k1.h"
-#include "include/secp256k1_extrakeys.h"
+#include "../../../include/secp256k1.h"
+#include "../../../include/secp256k1_extrakeys.h"
static SECP256K1_INLINE int secp256k1_xonly_pubkey_load(const secp256k1_context* ctx, secp256k1_ge *ge, const secp256k1_xonly_pubkey *pubkey) {
return secp256k1_pubkey_load(ctx, ge, (const secp256k1_pubkey *) pubkey);
@@ -55,6 +55,32 @@ int secp256k1_xonly_pubkey_serialize(const secp256k1_context* ctx, unsigned char
return 1;
}
+int secp256k1_xonly_pubkey_cmp(const secp256k1_context* ctx, const secp256k1_xonly_pubkey* pk0, const secp256k1_xonly_pubkey* pk1) {
+ unsigned char out[2][32];
+ const secp256k1_xonly_pubkey* pk[2];
+ int i;
+
+ VERIFY_CHECK(ctx != NULL);
+ pk[0] = pk0; pk[1] = pk1;
+ for (i = 0; i < 2; i++) {
+ /* If the public key is NULL or invalid, xonly_pubkey_serialize will
+ * call the illegal_callback and return 0. In that case we will
+ * serialize the key as all zeros which is less than any valid public
+ * key. This results in consistent comparisons even if NULL or invalid
+ * pubkeys are involved and prevents edge cases such as sorting
+ * algorithms that use this function and do not terminate as a
+ * result. */
+ if (!secp256k1_xonly_pubkey_serialize(ctx, out[i], pk[i])) {
+ /* Note that xonly_pubkey_serialize should already set the output to
+ * zero in that case, but it's not guaranteed by the API, we can't
+ * test it and writing a VERIFY_CHECK is more complex than
+ * explicitly memsetting (again). */
+ memset(out[i], 0, sizeof(out[i]));
+ }
+ }
+ return secp256k1_memcmp_var(out[0], out[1], sizeof(out[1]));
+}
+
/** Keeps a group element as is if it has an even Y and otherwise negates it.
* y_parity is set to 0 in the former case and to 1 in the latter case.
* Requires that the coordinates of r are normalized. */
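
The new secp256k1_xonly_pubkey_cmp above orders two x-only public keys by their 32-byte serializations, returning a negative, zero or positive value like memcmp (NULL or invalid keys compare as all-zero serializations). A minimal usage sketch, assuming two valid 32-byte x-only serializations and a context created with SECP256K1_CONTEXT_NONE; the helper name and the <secp256k1_extrakeys.h> include path are illustrative:

#include <secp256k1_extrakeys.h>

/* Returns <0, 0 or >0 according to the ordering of the two serialized keys;
 * returns 0 if either key fails to parse (treated as "equal" for this sketch). */
static int xonly_cmp_example(const secp256k1_context *ctx,
                             const unsigned char ser_a[32],
                             const unsigned char ser_b[32]) {
    secp256k1_xonly_pubkey a, b;
    if (!secp256k1_xonly_pubkey_parse(ctx, &a, ser_a)) return 0;
    if (!secp256k1_xonly_pubkey_parse(ctx, &b, ser_b)) return 0;
    /* Comparison is on the 32-byte x-only serializations of a and b. */
    return secp256k1_xonly_pubkey_cmp(ctx, &a, &b);
}
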
diff --git a/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h
index 0aca4fb72d..d4a2f5bdf4 100644
--- a/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h
+++ b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h
@@ -8,7 +8,7 @@
#define SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H
#include "src/modules/extrakeys/main_impl.h"
-#include "include/secp256k1_extrakeys.h"
+#include "../../../include/secp256k1_extrakeys.h"
static void test_exhaustive_extrakeys(const secp256k1_context *ctx, const secp256k1_ge* group) {
secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
diff --git a/src/secp256k1/src/modules/extrakeys/tests_impl.h b/src/secp256k1/src/modules/extrakeys/tests_impl.h
index 9473a7dd48..4a5952714c 100644
--- a/src/secp256k1/src/modules/extrakeys/tests_impl.h
+++ b/src/secp256k1/src/modules/extrakeys/tests_impl.h
@@ -7,7 +7,7 @@
#ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_H
#define SECP256K1_MODULE_EXTRAKEYS_TESTS_H
-#include "secp256k1_extrakeys.h"
+#include "../../../include/secp256k1_extrakeys.h"
static secp256k1_context* api_test_context(int flags, int *ecount) {
secp256k1_context *ctx0 = secp256k1_context_create(flags);
@@ -137,6 +137,43 @@ void test_xonly_pubkey(void) {
secp256k1_context_destroy(verify);
}
+void test_xonly_pubkey_comparison(void) {
+ unsigned char pk1_ser[32] = {
+ 0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11,
+ 0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23
+ };
+ const unsigned char pk2_ser[32] = {
+ 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d,
+ 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c
+ };
+ secp256k1_xonly_pubkey pk1;
+ secp256k1_xonly_pubkey pk2;
+ int ecount = 0;
+ secp256k1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);
+
+ CHECK(secp256k1_xonly_pubkey_parse(none, &pk1, pk1_ser) == 1);
+ CHECK(secp256k1_xonly_pubkey_parse(none, &pk2, pk2_ser) == 1);
+
+ CHECK(secp256k1_xonly_pubkey_cmp(none, NULL, &pk2) < 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, NULL) > 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0);
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0);
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0);
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk2, &pk2) == 0);
+ CHECK(ecount == 2);
+ memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0);
+ CHECK(ecount == 6);
+
+ secp256k1_context_destroy(none);
+}
+
void test_xonly_pubkey_tweak(void) {
unsigned char zeros64[64] = { 0 };
unsigned char overflows[32];
@@ -540,6 +577,7 @@ void run_extrakeys_tests(void) {
test_xonly_pubkey_tweak();
test_xonly_pubkey_tweak_check();
test_xonly_pubkey_tweak_recursive();
+ test_xonly_pubkey_comparison();
/* keypair tests */
test_keypair();
diff --git a/src/secp256k1/src/modules/recovery/main_impl.h b/src/secp256k1/src/modules/recovery/main_impl.h
index 7a440a729b..9e19f2a2dc 100644
--- a/src/secp256k1/src/modules/recovery/main_impl.h
+++ b/src/secp256k1/src/modules/recovery/main_impl.h
@@ -7,7 +7,7 @@
#ifndef SECP256K1_MODULE_RECOVERY_MAIN_H
#define SECP256K1_MODULE_RECOVERY_MAIN_H
-#include "include/secp256k1_recovery.h"
+#include "../../../include/secp256k1_recovery.h"
static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) {
(void)ctx;
diff --git a/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
index 0ba9409c69..590a972ed3 100644
--- a/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
+++ b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
@@ -8,7 +8,7 @@
#define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
#include "src/modules/recovery/main_impl.h"
-#include "include/secp256k1_recovery.h"
+#include "../../../include/secp256k1_recovery.h"
void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k;
diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h
index 22e1b33a5a..693b78f034 100644
--- a/src/secp256k1/src/modules/schnorrsig/main_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h
@@ -7,9 +7,9 @@
#ifndef SECP256K1_MODULE_SCHNORRSIG_MAIN_H
#define SECP256K1_MODULE_SCHNORRSIG_MAIN_H
-#include "include/secp256k1.h"
-#include "include/secp256k1_schnorrsig.h"
-#include "hash.h"
+#include "../../../include/secp256k1.h"
+#include "../../../include/secp256k1_schnorrsig.h"
+#include "../../hash.h"
/* Initializes SHA256 with fixed midstate. This midstate was computed by applying
* SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */
@@ -43,16 +43,18 @@ static void secp256k1_nonce_function_bip340_sha256_tagged_aux(secp256k1_sha256 *
sha->bytes = 64;
}
-/* algo16 argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340
+/* algo argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340
* by using the correct tagged hash function. */
-static const unsigned char bip340_algo16[16] = "BIP0340/nonce\0\0\0";
+static const unsigned char bip340_algo[13] = "BIP0340/nonce";
-static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
+static const unsigned char schnorrsig_extraparams_magic[4] = SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC;
+
+static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) {
secp256k1_sha256 sha;
unsigned char masked_key[32];
int i;
- if (algo16 == NULL) {
+ if (algo == NULL) {
return 0;
}
@@ -65,18 +67,14 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms
}
}
- /* Tag the hash with algo16 which is important to avoid nonce reuse across
+ /* Tag the hash with algo which is important to avoid nonce reuse across
* algorithms. If this nonce function is used in BIP-340 signing as defined
* in the spec, an optimized tagging implementation is used. */
- if (secp256k1_memcmp_var(algo16, bip340_algo16, 16) == 0) {
+ if (algolen == sizeof(bip340_algo)
+ && secp256k1_memcmp_var(algo, bip340_algo, algolen) == 0) {
secp256k1_nonce_function_bip340_sha256_tagged(&sha);
} else {
- int algo16_len = 16;
- /* Remove terminating null bytes */
- while (algo16_len > 0 && !algo16[algo16_len - 1]) {
- algo16_len--;
- }
- secp256k1_sha256_initialize_tagged(&sha, algo16, algo16_len);
+ secp256k1_sha256_initialize_tagged(&sha, algo, algolen);
}
/* Hash (masked-)key||pk||msg using the tagged hash as per the spec */
@@ -86,7 +84,7 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms
secp256k1_sha256_write(&sha, key32, 32);
}
secp256k1_sha256_write(&sha, xonly_pk32, 32);
- secp256k1_sha256_write(&sha, msg32, 32);
+ secp256k1_sha256_write(&sha, msg, msglen);
secp256k1_sha256_finalize(&sha, nonce32);
return 1;
}
@@ -108,23 +106,23 @@ static void secp256k1_schnorrsig_sha256_tagged(secp256k1_sha256 *sha) {
sha->bytes = 64;
}
-static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32)
+static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg, size_t msglen, const unsigned char *pubkey32)
{
unsigned char buf[32];
secp256k1_sha256 sha;
- /* tagged hash(r.x, pk.x, msg32) */
+ /* tagged hash(r.x, pk.x, msg) */
secp256k1_schnorrsig_sha256_tagged(&sha);
secp256k1_sha256_write(&sha, r32, 32);
secp256k1_sha256_write(&sha, pubkey32, 32);
- secp256k1_sha256_write(&sha, msg32, 32);
+ secp256k1_sha256_write(&sha, msg, msglen);
secp256k1_sha256_finalize(&sha, buf);
/* Set scalar e to the challenge hash modulo the curve order as per
* BIP340. */
secp256k1_scalar_set_b32(e, buf, NULL);
}
-int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) {
+int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) {
secp256k1_scalar sk;
secp256k1_scalar e;
secp256k1_scalar k;
@@ -139,7 +137,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(sig64 != NULL);
- ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(msg != NULL || msglen == 0);
ARG_CHECK(keypair != NULL);
if (noncefp == NULL) {
@@ -156,7 +154,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
secp256k1_scalar_get_b32(seckey, &sk);
secp256k1_fe_get_b32(pk_buf, &pk.x);
- ret &= !!noncefp(buf, msg32, seckey, pk_buf, bip340_algo16, ndata);
+ ret &= !!noncefp(buf, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata);
secp256k1_scalar_set_b32(&k, buf, NULL);
ret &= !secp256k1_scalar_is_zero(&k);
secp256k1_scalar_cmov(&k, &secp256k1_scalar_one, !ret);
@@ -174,7 +172,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
secp256k1_fe_normalize_var(&r.x);
secp256k1_fe_get_b32(&sig64[0], &r.x);
- secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf);
+ secp256k1_schnorrsig_challenge(&e, &sig64[0], msg, msglen, pk_buf);
secp256k1_scalar_mul(&e, &e, &sk);
secp256k1_scalar_add(&e, &e, &k);
secp256k1_scalar_get_b32(&sig64[32], &e);
@@ -187,7 +185,26 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
return ret;
}
-int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_xonly_pubkey *pubkey) {
+int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, unsigned char *aux_rand32) {
+ return secp256k1_schnorrsig_sign_internal(ctx, sig64, msg32, 32, keypair, secp256k1_nonce_function_bip340, aux_rand32);
+}
+
+int secp256k1_schnorrsig_sign_custom(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const secp256k1_keypair *keypair, secp256k1_schnorrsig_extraparams *extraparams) {
+ secp256k1_nonce_function_hardened noncefp = NULL;
+ void *ndata = NULL;
+ VERIFY_CHECK(ctx != NULL);
+
+ if (extraparams != NULL) {
+ ARG_CHECK(secp256k1_memcmp_var(extraparams->magic,
+ schnorrsig_extraparams_magic,
+ sizeof(extraparams->magic)) == 0);
+ noncefp = extraparams->noncefp;
+ ndata = extraparams->ndata;
+ }
+ return secp256k1_schnorrsig_sign_internal(ctx, sig64, msg, msglen, keypair, noncefp, ndata);
+}
+
+int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, const secp256k1_xonly_pubkey *pubkey) {
secp256k1_scalar s;
secp256k1_scalar e;
secp256k1_gej rj;
@@ -201,7 +218,7 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
ARG_CHECK(sig64 != NULL);
- ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(msg != NULL || msglen == 0);
ARG_CHECK(pubkey != NULL);
if (!secp256k1_fe_set_b32(&rx, &sig64[0])) {
@@ -219,7 +236,7 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha
/* Compute e. */
secp256k1_fe_get_b32(buf, &pk.x);
- secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, buf);
+ secp256k1_schnorrsig_challenge(&e, &sig64[0], msg, msglen, buf);
/* Compute rj = s*G + (-e)*pkj */
secp256k1_scalar_negate(&e, &e);
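
As the hunks above show, secp256k1_schnorrsig_sign_custom and the updated secp256k1_schnorrsig_verify take an arbitrary-length message plus an extraparams struct, replacing the old fixed 32-byte message and the noncefp/ndata arguments of schnorrsig_sign. A minimal sign-and-verify sketch, assuming a context created with SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY, a valid 32-byte secret key and optional 32 bytes of auxiliary randomness; the helper name and include path are illustrative:

#include <secp256k1_schnorrsig.h>

/* Signs msg (any length, may be NULL when msglen == 0) and verifies the result.
 * aux32 may be NULL; with the default nonce function it plays the same role as
 * aux_rand32 in secp256k1_schnorrsig_sign. Returns 1 on success. */
static int sign_custom_example(const secp256k1_context *ctx,
                               const unsigned char sk32[32],
                               const unsigned char *msg, size_t msglen,
                               unsigned char *aux32) {
    secp256k1_keypair keypair;
    secp256k1_xonly_pubkey pk;
    secp256k1_schnorrsig_extraparams params = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
    unsigned char sig[64];

    if (!secp256k1_keypair_create(ctx, &keypair, sk32)) return 0;
    if (!secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair)) return 0;
    params.ndata = aux32; /* leave params.noncefp NULL to use nonce_function_bip340 */
    if (!secp256k1_schnorrsig_sign_custom(ctx, sig, msg, msglen, &keypair, &params)) return 0;
    return secp256k1_schnorrsig_verify(ctx, sig, msg, msglen, &pk);
}
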
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
index b4a428729f..d8df9dd2df 100644
--- a/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
@@ -7,7 +7,7 @@
#ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H
#define SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H
-#include "include/secp256k1_schnorrsig.h"
+#include "../../../include/secp256k1_schnorrsig.h"
#include "src/modules/schnorrsig/main_impl.h"
static const unsigned char invalid_pubkey_bytes[][32] = {
@@ -58,15 +58,19 @@ static const unsigned char invalid_pubkey_bytes[][32] = {
#define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0]))
-static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
+static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg,
+ size_t msglen,
const unsigned char *key32, const unsigned char *xonly_pk32,
- const unsigned char *algo16, void* data) {
+ const unsigned char *algo, size_t algolen,
+ void* data) {
secp256k1_scalar s;
int *idata = data;
- (void)msg32;
+ (void)msg;
+ (void)msglen;
(void)key32;
(void)xonly_pk32;
- (void)algo16;
+ (void)algo;
+ (void)algolen;
secp256k1_scalar_set_int(&s, *idata);
secp256k1_scalar_get_b32(nonce32, &s);
return 1;
@@ -101,7 +105,7 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons
secp256k1_scalar e;
unsigned char msg32[32];
secp256k1_testrand256(msg32);
- secp256k1_schnorrsig_challenge(&e, sig64, msg32, pk32);
+ secp256k1_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
/* Iterate over the possible valid last 32 bytes in the signature.
@@ -119,7 +123,7 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons
secp256k1_testrand256(sig64 + 32);
expect_valid = 0;
}
- valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]);
+ valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]);
CHECK(valid == expect_valid);
count_valid += valid;
}
@@ -137,6 +141,8 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons
static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const secp256k1_keypair* keypairs, const int* parities) {
int d, k;
uint64_t iter = 0;
+ secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
+
/* Loop over keys. */
for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) {
int actual_d = d;
@@ -149,19 +155,21 @@ static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsign
unsigned char sig64[64];
int actual_k = k;
if (skip_section(&iter)) continue;
+ extraparams.noncefp = secp256k1_hardened_nonce_function_smallint;
+ extraparams.ndata = &k;
if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k;
/* Generate random messages until all challenges have been tried. */
while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
secp256k1_scalar e;
secp256k1_testrand256(msg32);
- secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]);
+ secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
secp256k1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER;
unsigned char expected_s_bytes[32];
secp256k1_scalar_get_b32(expected_s_bytes, &expected_s);
/* Invoke the real function to construct a signature. */
- CHECK(secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], secp256k1_hardened_nonce_function_smallint, &k));
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig64, msg32, sizeof(msg32), &keypairs[d - 1], &extraparams));
/* The first 32 bytes must match the xonly pubkey for the specified k. */
CHECK(secp256k1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0);
/* The last 32 bytes must match the expected s value. */
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
index 338462fc9d..59357afa99 100644
--- a/src/secp256k1/src/modules/schnorrsig/tests_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
@@ -7,16 +7,16 @@
#ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_H
#define SECP256K1_MODULE_SCHNORRSIG_TESTS_H
-#include "secp256k1_schnorrsig.h"
+#include "../../../include/secp256k1_schnorrsig.h"
/* Checks that a bit flip in the n_flip-th argument (that has n_bytes many
* bytes) changes the hash function
*/
-void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) {
+void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes, size_t msglen, size_t algolen) {
unsigned char nonces[2][32];
- CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1);
+ CHECK(nonce_function_bip340(nonces[0], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1);
secp256k1_testrand_flip(args[n_flip], n_bytes);
- CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1);
+ CHECK(nonce_function_bip340(nonces[1], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1);
CHECK(secp256k1_memcmp_var(nonces[0], nonces[1], 32) != 0);
}
@@ -34,11 +34,13 @@ void test_sha256_eq(const secp256k1_sha256 *sha1, const secp256k1_sha256 *sha2)
void run_nonce_function_bip340_tests(void) {
unsigned char tag[13] = "BIP0340/nonce";
unsigned char aux_tag[11] = "BIP0340/aux";
- unsigned char algo16[16] = "BIP0340/nonce\0\0\0";
+ unsigned char algo[13] = "BIP0340/nonce";
+ size_t algolen = sizeof(algo);
secp256k1_sha256 sha;
secp256k1_sha256 sha_optimized;
unsigned char nonce[32];
unsigned char msg[32];
+ size_t msglen = sizeof(msg);
unsigned char key[32];
unsigned char pk[32];
unsigned char aux_rand[32];
@@ -68,33 +70,45 @@ void run_nonce_function_bip340_tests(void) {
args[0] = msg;
args[1] = key;
args[2] = pk;
- args[3] = algo16;
+ args[3] = algo;
args[4] = aux_rand;
for (i = 0; i < count; i++) {
- nonce_function_bip340_bitflip(args, 0, 32);
- nonce_function_bip340_bitflip(args, 1, 32);
- nonce_function_bip340_bitflip(args, 2, 32);
- /* Flip algo16 special case "BIP0340/nonce" */
- nonce_function_bip340_bitflip(args, 3, 16);
- /* Flip algo16 again */
- nonce_function_bip340_bitflip(args, 3, 16);
- nonce_function_bip340_bitflip(args, 4, 32);
+ nonce_function_bip340_bitflip(args, 0, 32, msglen, algolen);
+ nonce_function_bip340_bitflip(args, 1, 32, msglen, algolen);
+ nonce_function_bip340_bitflip(args, 2, 32, msglen, algolen);
+ /* Flip algo special case "BIP0340/nonce" */
+ nonce_function_bip340_bitflip(args, 3, algolen, msglen, algolen);
+ /* Flip algo again */
+ nonce_function_bip340_bitflip(args, 3, algolen, msglen, algolen);
+ nonce_function_bip340_bitflip(args, 4, 32, msglen, algolen);
}
- /* NULL algo16 is disallowed */
- CHECK(nonce_function_bip340(nonce, msg, key, pk, NULL, NULL) == 0);
- /* Empty algo16 is fine */
- memset(algo16, 0x00, 16);
- CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
- /* algo16 with terminating null bytes is fine */
- algo16[1] = 65;
- CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
- /* Other algo16 is fine */
- memset(algo16, 0xFF, 16);
- CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
+ /* NULL algo is disallowed */
+ CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, NULL, 0, NULL) == 0);
+ CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1);
+ /* Other algo is fine */
+ secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, algo, algolen);
+ CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1);
+
+ for (i = 0; i < count; i++) {
+ unsigned char nonce2[32];
+ uint32_t offset = secp256k1_testrand_int(msglen - 1);
+ size_t msglen_tmp = (msglen + offset) % msglen;
+ size_t algolen_tmp;
+
+ /* Different msglen gives different nonce */
+ CHECK(nonce_function_bip340(nonce2, msg, msglen_tmp, key, pk, algo, algolen, NULL) == 1);
+ CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0);
+
+ /* Different algolen gives different nonce */
+ offset = secp256k1_testrand_int(algolen - 1);
+ algolen_tmp = (algolen + offset) % algolen;
+ CHECK(nonce_function_bip340(nonce2, msg, msglen, key, pk, algo, algolen_tmp, NULL) == 1);
+ CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0);
+ }
/* NULL aux_rand argument is allowed. */
- CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1);
+ CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1);
}
void test_schnorrsig_api(void) {
@@ -103,10 +117,12 @@ void test_schnorrsig_api(void) {
unsigned char sk3[32];
unsigned char msg[32];
secp256k1_keypair keypairs[3];
- secp256k1_keypair invalid_keypair = { 0 };
+ secp256k1_keypair invalid_keypair = {{ 0 }};
secp256k1_xonly_pubkey pk[3];
secp256k1_xonly_pubkey zero_pk;
unsigned char sig[64];
+ secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
+ secp256k1_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL};
/** setup **/
secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
@@ -138,36 +154,60 @@ void test_schnorrsig_api(void) {
/** main test body **/
ecount = 0;
- CHECK(secp256k1_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0);
+ CHECK(secp256k1_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, NULL, NULL) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL) == 0);
+ CHECK(ecount == 6);
+
+ ecount = 0;
+ CHECK(secp256k1_schnorrsig_sign_custom(none, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0);
CHECK(ecount == 1);
- CHECK(secp256k1_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0);
+ CHECK(secp256k1_schnorrsig_sign_custom(vrfy, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0);
CHECK(ecount == 2);
- CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1);
CHECK(ecount == 2);
- CHECK(secp256k1_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, NULL, msg, sizeof(msg), &keypairs[0], &extraparams) == 0);
CHECK(ecount == 3);
- CHECK(secp256k1_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, NULL, sizeof(msg), &keypairs[0], &extraparams) == 0);
CHECK(ecount == 4);
- CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, NULL, 0, &keypairs[0], &extraparams) == 1);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), NULL, &extraparams) == 0);
CHECK(ecount == 5);
- CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &invalid_keypair, &extraparams) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1);
CHECK(ecount == 6);
+ CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams) == 0);
+ CHECK(ecount == 7);
ecount = 0;
- CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1);
- CHECK(secp256k1_schnorrsig_verify(none, sig, msg, &pk[0]) == 0);
+ CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1);
+ CHECK(secp256k1_schnorrsig_verify(none, sig, msg, sizeof(msg), &pk[0]) == 0);
CHECK(ecount == 1);
- CHECK(secp256k1_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0);
+ CHECK(secp256k1_schnorrsig_verify(sign, sig, msg, sizeof(msg), &pk[0]) == 0);
CHECK(ecount == 2);
- CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &pk[0]) == 1);
CHECK(ecount == 2);
- CHECK(secp256k1_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, NULL, msg, sizeof(msg), &pk[0]) == 0);
CHECK(ecount == 3);
- CHECK(secp256k1_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, NULL, sizeof(msg), &pk[0]) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, NULL, 0, &pk[0]) == 0);
CHECK(ecount == 4);
- CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, NULL) == 0);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), NULL) == 0);
CHECK(ecount == 5);
- CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0);
+ CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &zero_pk) == 0);
CHECK(ecount == 6);
secp256k1_context_destroy(none);
@@ -179,7 +219,7 @@ void test_schnorrsig_api(void) {
/* Checks that hash initialized by secp256k1_schnorrsig_sha256_tagged has the
* expected state. */
void test_schnorrsig_sha256_tagged(void) {
- char tag[17] = "BIP0340/challenge";
+ unsigned char tag[17] = "BIP0340/challenge";
secp256k1_sha256 sha;
secp256k1_sha256 sha_optimized;
@@ -190,19 +230,19 @@ void test_schnorrsig_sha256_tagged(void) {
/* Helper function for schnorrsig_bip_vectors
* Signs the message and checks that it's the same as expected_sig. */
-void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg, const unsigned char *expected_sig) {
+void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg32, const unsigned char *expected_sig) {
unsigned char sig[64];
secp256k1_keypair keypair;
secp256k1_xonly_pubkey pk, pk_expected;
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
- CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg32, &keypair, aux_rand));
CHECK(secp256k1_memcmp_var(sig, expected_sig, 64) == 0);
CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized));
CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
CHECK(secp256k1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0);
- CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &pk));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg32, 32, &pk));
}
/* Helper function for schnorrsig_bip_vectors
@@ -211,7 +251,7 @@ void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized
secp256k1_xonly_pubkey pk;
CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk, pk_serialized));
- CHECK(expected == secp256k1_schnorrsig_verify(ctx, sig, msg32, &pk));
+ CHECK(expected == secp256k1_schnorrsig_verify(ctx, sig, msg32, 32, &pk));
}
/* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). See
@@ -634,22 +674,26 @@ void test_schnorrsig_bip_vectors(void) {
}
/* Nonce function that returns constant 0 */
-static int nonce_function_failing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
- (void) msg32;
+static int nonce_function_failing(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) {
+ (void) msg;
+ (void) msglen;
(void) key32;
(void) xonly_pk32;
- (void) algo16;
+ (void) algo;
+ (void) algolen;
(void) data;
(void) nonce32;
return 0;
}
/* Nonce function that sets nonce to 0 */
-static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
- (void) msg32;
+static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) {
+ (void) msg;
+ (void) msglen;
(void) key32;
(void) xonly_pk32;
- (void) algo16;
+ (void) algo;
+ (void) algolen;
(void) data;
memset(nonce32, 0, 32);
@@ -657,11 +701,13 @@ static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg32,
}
/* Nonce function that sets nonce to 0xFF...0xFF */
-static int nonce_function_overflowing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) {
- (void) msg32;
+static int nonce_function_overflowing(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) {
+ (void) msg;
+ (void) msglen;
(void) key32;
(void) xonly_pk32;
- (void) algo16;
+ (void) algo;
+ (void) algolen;
(void) data;
memset(nonce32, 0xFF, 32);
@@ -670,24 +716,45 @@ static int nonce_function_overflowing(unsigned char *nonce32, const unsigned cha
void test_schnorrsig_sign(void) {
unsigned char sk[32];
+ secp256k1_xonly_pubkey pk;
secp256k1_keypair keypair;
const unsigned char msg[32] = "this is a msg for a schnorrsig..";
unsigned char sig[64];
+ unsigned char sig2[64];
unsigned char zeros64[64] = { 0 };
+ secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
+ unsigned char aux_rand[32];
secp256k1_testrand256(sk);
+ secp256k1_testrand256(aux_rand);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
- CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk));
/* Test different nonce functions */
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk));
memset(sig, 1, sizeof(sig));
- CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0);
+ extraparams.noncefp = nonce_function_failing;
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0);
CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
memset(&sig, 1, sizeof(sig));
- CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0);
+ extraparams.noncefp = nonce_function_0;
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0);
CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
- CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1);
- CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) != 0);
+ memset(&sig, 1, sizeof(sig));
+ extraparams.noncefp = nonce_function_overflowing;
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk));
+
+ /* When using the default nonce function, schnorrsig_sign_custom produces
+ * the same result as schnorrsig_sign with aux_rand = extraparams.ndata */
+ extraparams.noncefp = NULL;
+ extraparams.ndata = aux_rand;
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1);
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig2, msg, &keypair, extraparams.ndata) == 1);
+ CHECK(secp256k1_memcmp_var(sig, sig2, sizeof(sig)) == 0);
}
#define N_SIGS 3
@@ -709,8 +776,8 @@ void test_schnorrsig_sign_verify(void) {
for (i = 0; i < N_SIGS; i++) {
secp256k1_testrand256(msg[i]);
- CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL));
- CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], &pk));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], sizeof(msg[i]), &pk));
}
{
@@ -720,36 +787,54 @@ void test_schnorrsig_sign_verify(void) {
size_t byte_idx = secp256k1_testrand_int(32);
unsigned char xorbyte = secp256k1_testrand_int(254)+1;
sig[sig_idx][byte_idx] ^= xorbyte;
- CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk));
sig[sig_idx][byte_idx] ^= xorbyte;
byte_idx = secp256k1_testrand_int(32);
sig[sig_idx][32+byte_idx] ^= xorbyte;
- CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk));
sig[sig_idx][32+byte_idx] ^= xorbyte;
byte_idx = secp256k1_testrand_int(32);
msg[sig_idx][byte_idx] ^= xorbyte;
- CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk));
msg[sig_idx][byte_idx] ^= xorbyte;
/* Check that above bitflips have been reversed correctly */
- CHECK(secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk));
}
/* Test overflowing s */
- CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
- CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk));
memset(&sig[0][32], 0xFF, 32);
- CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk));
/* Test negative s */
- CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL));
- CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL));
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk));
secp256k1_scalar_set_b32(&s, &sig[0][32], NULL);
secp256k1_scalar_negate(&s, &s);
secp256k1_scalar_get_b32(&sig[0][32], &s);
- CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk));
+ CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk));
+
+ /* The empty message can be signed & verified */
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig[0], NULL, 0, &keypair, NULL) == 1);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], NULL, 0, &pk) == 1);
+
+ {
+ /* Test varying message lengths */
+ unsigned char msg_large[32 * 8];
+ uint32_t msglen = secp256k1_testrand_int(sizeof(msg_large));
+ for (i = 0; i < sizeof(msg_large); i += 32) {
+ secp256k1_testrand256(&msg_large[i]);
+ }
+ CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig[0], msg_large, msglen, &keypair, NULL) == 1);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 1);
+ /* Verification for a random wrong message length fails */
+ msglen = (msglen + (sizeof(msg_large) - 1)) % sizeof(msg_large);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 0);
+ }
}
#undef N_SIGS
@@ -777,10 +862,10 @@ void test_schnorrsig_taproot(void) {
/* Key spend */
secp256k1_testrand256(msg);
- CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1);
/* Verify key spend */
CHECK(secp256k1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1);
- CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1);
+ CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &output_pk) == 1);
/* Script spend */
CHECK(secp256k1_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1);
diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c
index aef3f99ac3..9908cab864 100644
--- a/src/secp256k1/src/secp256k1.c
+++ b/src/secp256k1/src/secp256k1.c
@@ -4,8 +4,10 @@
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
-#include "include/secp256k1.h"
-#include "include/secp256k1_preallocated.h"
+#define SECP256K1_BUILD
+
+#include "../include/secp256k1.h"
+#include "../include/secp256k1_preallocated.h"
#include "assumptions.h"
#include "util.h"
@@ -21,6 +23,10 @@
#include "scratch_impl.h"
#include "selftest.h"
+#ifdef SECP256K1_NO_BUILD
+# error "secp256k1.h processed without SECP256K1_BUILD defined while building secp256k1.c"
+#endif
+
#if defined(VALGRIND)
# include <valgrind/memcheck.h>
#endif
@@ -316,6 +322,32 @@ int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *o
return ret;
}
+int secp256k1_ec_pubkey_cmp(const secp256k1_context* ctx, const secp256k1_pubkey* pubkey0, const secp256k1_pubkey* pubkey1) {
+ unsigned char out[2][33];
+ const secp256k1_pubkey* pk[2];
+ int i;
+
+ VERIFY_CHECK(ctx != NULL);
+ pk[0] = pubkey0; pk[1] = pubkey1;
+ for (i = 0; i < 2; i++) {
+ size_t out_size = sizeof(out[i]);
+ /* If the public key is NULL or invalid, ec_pubkey_serialize will call
+ * the illegal_callback and return 0. In that case we will serialize the
+ * key as all zeros which is less than any valid public key. This
+ * results in consistent comparisons even if NULL or invalid pubkeys are
+ * involved and prevents edge cases such as sorting algorithms that use
+ * this function and do not terminate as a result. */
+ if (!secp256k1_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) {
+ /* Note that ec_pubkey_serialize should already set the output to
+ * zero in that case, but it's not guaranteed by the API, we can't
+ * test it and writing a VERIFY_CHECK is more complex than
+ * explicitly memsetting (again). */
+ memset(out[i], 0, sizeof(out[i]));
+ }
+ }
+ return secp256k1_memcmp_var(out[0], out[1], sizeof(out[0]));
+}
+
static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) {
(void)ctx;
if (sizeof(secp256k1_scalar) == 32) {
@@ -758,6 +790,19 @@ int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *
return 1;
}
+int secp256k1_tagged_sha256(const secp256k1_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) {
+ secp256k1_sha256 sha;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(hash32 != NULL);
+ ARG_CHECK(tag != NULL);
+ ARG_CHECK(msg != NULL);
+
+ secp256k1_sha256_initialize_tagged(&sha, tag, taglen);
+ secp256k1_sha256_write(&sha, msg, msglen);
+ secp256k1_sha256_finalize(&sha, hash32);
+ return 1;
+}
+
#ifdef ENABLE_MODULE_ECDH
# include "modules/ecdh/main_impl.h"
#endif
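
secp256k1.c gains two new public functions above: secp256k1_ec_pubkey_cmp, which gives a total ordering over public keys by comparing their 33-byte compressed serializations (NULL or invalid keys compare as all zeros, so comparison-based sorts cannot misbehave), and secp256k1_tagged_sha256, which computes the BIP340-style tagged hash SHA256(SHA256(tag) || SHA256(tag) || msg). A hedged usage sketch (the secret keys and tag below are arbitrary placeholders, not taken from this diff):

    #include <secp256k1.h>

    #include <algorithm>
    #include <cassert>
    #include <cstring>
    #include <vector>

    int main()
    {
        secp256k1_context* ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);

        // Derive a few public keys from placeholder secret keys.
        std::vector<secp256k1_pubkey> pubkeys(3);
        for (int i = 0; i < 3; ++i) {
            unsigned char seckey[32];
            std::memset(seckey, 0x11 * (i + 1), sizeof(seckey)); // placeholder; use random keys in real code
            assert(secp256k1_ec_pubkey_create(ctx, &pubkeys[i], seckey) == 1);
        }

        // The comparator is a strict weak ordering, so it can back std::sort directly.
        std::sort(pubkeys.begin(), pubkeys.end(),
                  [ctx](const secp256k1_pubkey& a, const secp256k1_pubkey& b) {
                      return secp256k1_ec_pubkey_cmp(ctx, &a, &b) < 0;
                  });

        // Tagged hash with an application-chosen tag ("demo/tag" is arbitrary here).
        const unsigned char tag[] = "demo/tag";
        const unsigned char msg[] = "message";
        unsigned char hash[32];
        assert(secp256k1_tagged_sha256(ctx, hash, tag, sizeof(tag) - 1, msg, sizeof(msg) - 1) == 1);

        secp256k1_context_destroy(ctx);
        return 0;
    }
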
diff --git a/src/secp256k1/src/testrand_impl.h b/src/secp256k1/src/testrand_impl.h
index e643778f36..c8d30ef6a8 100644
--- a/src/secp256k1/src/testrand_impl.h
+++ b/src/secp256k1/src/testrand_impl.h
@@ -127,7 +127,7 @@ static void secp256k1_testrand_init(const char* hexseed) {
pos++;
}
} else {
- FILE *frand = fopen("/dev/urandom", "r");
+ FILE *frand = fopen("/dev/urandom", "rb");
if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
uint64_t t = time(NULL) * (uint64_t)1337;
fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c
index a146394305..99d9468e29 100644
--- a/src/secp256k1/src/tests.c
+++ b/src/secp256k1/src/tests.c
@@ -15,8 +15,8 @@
#include <time.h>
#include "secp256k1.c"
-#include "include/secp256k1.h"
-#include "include/secp256k1_preallocated.h"
+#include "../include/secp256k1.h"
+#include "../include/secp256k1_preallocated.h"
#include "testrand_impl.h"
#include "util.h"
@@ -30,8 +30,8 @@ void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps)
# endif
#endif
-#include "contrib/lax_der_parsing.c"
-#include "contrib/lax_der_privatekey_parsing.c"
+#include "../contrib/lax_der_parsing.c"
+#include "../contrib/lax_der_privatekey_parsing.c"
#include "modinv32_impl.h"
#ifdef SECP256K1_WIDEMUL_INT128
@@ -564,6 +564,38 @@ void run_rfc6979_hmac_sha256_tests(void) {
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
}
+void run_tagged_sha256_tests(void) {
+ int ecount = 0;
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ unsigned char tag[32] = { 0 };
+ unsigned char msg[32] = { 0 };
+ unsigned char hash32[32];
+ unsigned char hash_expected[32] = {
+ 0x04, 0x7A, 0x5E, 0x17, 0xB5, 0x86, 0x47, 0xC1,
+ 0x3C, 0xC6, 0xEB, 0xC0, 0xAA, 0x58, 0x3B, 0x62,
+ 0xFB, 0x16, 0x43, 0x32, 0x68, 0x77, 0x40, 0x6C,
+ 0xE2, 0x76, 0x55, 0x9A, 0x3B, 0xDE, 0x55, 0xB3
+ };
+
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+
+ /* API test */
+ CHECK(secp256k1_tagged_sha256(none, hash32, tag, sizeof(tag), msg, sizeof(msg)) == 1);
+ CHECK(secp256k1_tagged_sha256(none, NULL, tag, sizeof(tag), msg, sizeof(msg)) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_tagged_sha256(none, hash32, NULL, 0, msg, sizeof(msg)) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_tagged_sha256(none, hash32, tag, sizeof(tag), NULL, 0) == 0);
+ CHECK(ecount == 3);
+
+ /* Static test vector */
+ memcpy(tag, "tag", 3);
+ memcpy(msg, "msg", 3);
+ CHECK(secp256k1_tagged_sha256(none, hash32, tag, 3, msg, 3) == 1);
+ CHECK(secp256k1_memcmp_var(hash32, hash_expected, sizeof(hash32)) == 0);
+ secp256k1_context_destroy(none);
+}
+
/***** RANDOM TESTS *****/
void test_rand_bits(int rand32, int bits) {
@@ -2508,6 +2540,70 @@ void run_field_misc(void) {
}
}
+void test_fe_mul(const secp256k1_fe* a, const secp256k1_fe* b, int use_sqr)
+{
+ secp256k1_fe c, an, bn;
+ /* Variables in BE 32-byte format. */
+ unsigned char a32[32], b32[32], c32[32];
+ /* Variables in LE 16x uint16_t format. */
+ uint16_t a16[16], b16[16], c16[16];
+ /* Field modulus in LE 16x uint16_t format. */
+ static const uint16_t m16[16] = {
+ 0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ };
+ uint16_t t16[32];
+ int i;
+
+ /* Compute C = A * B in fe format. */
+ c = *a;
+ if (use_sqr) {
+ secp256k1_fe_sqr(&c, &c);
+ } else {
+ secp256k1_fe_mul(&c, &c, b);
+ }
+
+ /* Convert A, B, C into LE 16x uint16_t format. */
+ an = *a;
+ bn = *b;
+ secp256k1_fe_normalize_var(&c);
+ secp256k1_fe_normalize_var(&an);
+ secp256k1_fe_normalize_var(&bn);
+ secp256k1_fe_get_b32(a32, &an);
+ secp256k1_fe_get_b32(b32, &bn);
+ secp256k1_fe_get_b32(c32, &c);
+ for (i = 0; i < 16; ++i) {
+ a16[i] = a32[31 - 2*i] + ((uint16_t)a32[30 - 2*i] << 8);
+ b16[i] = b32[31 - 2*i] + ((uint16_t)b32[30 - 2*i] << 8);
+ c16[i] = c32[31 - 2*i] + ((uint16_t)c32[30 - 2*i] << 8);
+ }
+ /* Compute T = A * B in LE 16x uint16_t format. */
+ mulmod256(t16, a16, b16, m16);
+ /* Compare */
+ CHECK(secp256k1_memcmp_var(t16, c16, 32) == 0);
+}
+
+void run_fe_mul(void) {
+ int i;
+ for (i = 0; i < 100 * count; ++i) {
+ secp256k1_fe a, b, c, d;
+ random_fe(&a);
+ random_field_element_magnitude(&a);
+ random_fe(&b);
+ random_field_element_magnitude(&b);
+ random_fe_test(&c);
+ random_field_element_magnitude(&c);
+ random_fe_test(&d);
+ random_field_element_magnitude(&d);
+ test_fe_mul(&a, &a, 1);
+ test_fe_mul(&c, &c, 1);
+ test_fe_mul(&a, &b, 0);
+ test_fe_mul(&a, &c, 0);
+ test_fe_mul(&c, &b, 0);
+ test_fe_mul(&c, &d, 0);
+ }
+}
+
void run_sqr(void) {
secp256k1_fe x, s;
@@ -2595,7 +2691,7 @@ void test_inverse_scalar(secp256k1_scalar* out, const secp256k1_scalar* x, int v
{
secp256k1_scalar l, r, t;
- (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse_var)(&l, x); /* l = 1/x */
+ (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse)(&l, x); /* l = 1/x */
if (out) *out = l;
if (secp256k1_scalar_is_zero(x)) {
CHECK(secp256k1_scalar_is_zero(&l));
@@ -2605,9 +2701,9 @@ void test_inverse_scalar(secp256k1_scalar* out, const secp256k1_scalar* x, int v
CHECK(secp256k1_scalar_is_one(&t)); /* x*(1/x) == 1 */
secp256k1_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */
if (secp256k1_scalar_is_zero(&r)) return;
- (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse_var)(&r, &r); /* r = 1/(x-1) */
+ (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse)(&r, &r); /* r = 1/(x-1) */
secp256k1_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */
- (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse_var)(&l, &l); /* l = 1/(1/x-1) */
+ (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */
secp256k1_scalar_add(&l, &l, &secp256k1_scalar_one); /* l = 1/(1/x-1)+1 */
secp256k1_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */
CHECK(secp256k1_scalar_is_zero(&l)); /* l == 0 */
@@ -3101,20 +3197,34 @@ void test_ge(void) {
/* Test batch gej -> ge conversion with many infinities. */
for (i = 0; i < 4 * runs + 1; i++) {
+ int odd;
random_group_element_test(&ge[i]);
+ odd = secp256k1_fe_is_odd(&ge[i].x);
+ CHECK(odd == 0 || odd == 1);
/* randomly set half the points to infinity */
- if(secp256k1_fe_is_odd(&ge[i].x)) {
+ if (odd == i % 2) {
secp256k1_ge_set_infinity(&ge[i]);
}
secp256k1_gej_set_ge(&gej[i], &ge[i]);
}
- /* batch invert */
+ /* batch convert */
secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1);
/* check result */
for (i = 0; i < 4 * runs + 1; i++) {
ge_equals_gej(&ge[i], &gej[i]);
}
+ /* Test batch gej -> ge conversion with all infinities. */
+ for (i = 0; i < 4 * runs + 1; i++) {
+ secp256k1_gej_set_infinity(&gej[i]);
+ }
+ /* batch convert */
+ secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1);
+ /* check result */
+ for (i = 0; i < 4 * runs + 1; i++) {
+ CHECK(secp256k1_ge_is_infinity(&ge[i]));
+ }
+
free(ge);
free(gej);
}
@@ -5434,6 +5544,55 @@ void test_random_pubkeys(void) {
}
}
+void run_pubkey_comparison(void) {
+ unsigned char pk1_ser[33] = {
+ 0x02,
+ 0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11,
+ 0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23
+ };
+ const unsigned char pk2_ser[33] = {
+ 0x02,
+ 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d,
+ 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c
+ };
+ secp256k1_pubkey pk1;
+ secp256k1_pubkey pk2;
+ int32_t ecount = 0;
+
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pk1, pk1_ser, sizeof(pk1_ser)) == 1);
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pk2, pk2_ser, sizeof(pk2_ser)) == 1);
+
+ secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, NULL, &pk2) < 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, NULL) > 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, &pk1) == 0);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk2) == 0);
+ CHECK(ecount == 2);
+ {
+ secp256k1_pubkey pk_tmp;
+ memset(&pk_tmp, 0, sizeof(pk_tmp)); /* illegal pubkey */
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk_tmp, &pk2) < 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk_tmp, &pk_tmp) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk_tmp) > 0);
+ CHECK(ecount == 6);
+ }
+
+ secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
+
+ /* Make pk2 the same as pk1 but with 3 rather than 2. Note that in
+ * an uncompressed encoding, these would have the opposite ordering */
+ pk1_ser[0] = 3;
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pk2, pk1_ser, sizeof(pk1_ser)) == 1);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0);
+ CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0);
+}
+
void run_random_pubkeys(void) {
int i;
for (i = 0; i < 10*count; i++) {
@@ -6408,7 +6567,7 @@ int main(int argc, char **argv) {
count = strtol(argv[1], NULL, 0);
} else {
const char* env = getenv("SECP256K1_TEST_ITERS");
- if (env) {
+ if (env && strlen(env) > 0) {
count = strtol(env, NULL, 0);
}
}
@@ -6442,6 +6601,7 @@ int main(int argc, char **argv) {
run_sha256_tests();
run_hmac_sha256_tests();
run_rfc6979_hmac_sha256_tests();
+ run_tagged_sha256_tests();
/* scalar tests */
run_scalar_tests();
@@ -6449,6 +6609,7 @@ int main(int argc, char **argv) {
/* field tests */
run_field_misc();
run_field_convert();
+ run_fe_mul();
run_sqr();
run_sqrt();
@@ -6485,6 +6646,7 @@ int main(int argc, char **argv) {
#endif
/* ecdsa tests */
+ run_pubkey_comparison();
run_random_pubkeys();
run_ecdsa_der_parse();
run_ecdsa_sign_verify();
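
For reference when reading the test_fe_mul/run_fe_mul hunk in this file: the m16 constant is the secp256k1 field modulus written as sixteen little-endian 16-bit limbs. Concretely, p = 2^256 - 2^32 - 977 = 0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F, whose lowest limbs are 0xFC2F, 0xFFFF, 0xFFFE and whose remaining limbs are all 0xFFFF, matching the initializer. mulmod256 is a test-only reference multiplier over that limb representation, so the new test checks the optimized field multiplication and squaring code against an independent, straightforward implementation.
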
diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c
index 2bb5381446..5b9a3035d9 100644
--- a/src/secp256k1/src/tests_exhaustive.c
+++ b/src/secp256k1/src/tests_exhaustive.c
@@ -10,7 +10,6 @@
#include <stdio.h>
#include <stdlib.h>
-
#include <time.h>
#undef USE_ECMULT_STATIC_PRECOMPUTATION
@@ -20,10 +19,10 @@
#define EXHAUSTIVE_TEST_ORDER 13
#endif
-#include "include/secp256k1.h"
+#include "secp256k1.c"
+#include "../include/secp256k1.h"
#include "assumptions.h"
#include "group.h"
-#include "secp256k1.c"
#include "testrand_impl.h"
static int count = 2;
@@ -303,6 +302,7 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
if (skip_section(&iter)) continue;
for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
const int starting_k = k;
+ int ret;
secp256k1_ecdsa_signature sig;
secp256k1_scalar sk, msg, r, s, expected_r;
unsigned char sk32[32], msg32[32];
@@ -311,7 +311,8 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
secp256k1_scalar_get_b32(sk32, &sk);
secp256k1_scalar_get_b32(msg32, &msg);
- secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+ ret = secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+ CHECK(ret == 1);
secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
/* Note that we compute expected_r *after* signing -- this is important
diff --git a/src/secp256k1/src/valgrind_ctime_test.c b/src/secp256k1/src/valgrind_ctime_test.c
index cfca5a196e..ea6d4b3deb 100644
--- a/src/secp256k1/src/valgrind_ctime_test.c
+++ b/src/secp256k1/src/valgrind_ctime_test.c
@@ -7,24 +7,24 @@
#include <valgrind/memcheck.h>
#include <stdio.h>
-#include "include/secp256k1.h"
+#include "../include/secp256k1.h"
#include "assumptions.h"
#include "util.h"
#ifdef ENABLE_MODULE_ECDH
-# include "include/secp256k1_ecdh.h"
+# include "../include/secp256k1_ecdh.h"
#endif
#ifdef ENABLE_MODULE_RECOVERY
-# include "include/secp256k1_recovery.h"
+# include "../include/secp256k1_recovery.h"
#endif
#ifdef ENABLE_MODULE_EXTRAKEYS
-# include "include/secp256k1_extrakeys.h"
+# include "../include/secp256k1_extrakeys.h"
#endif
#ifdef ENABLE_MODULE_SCHNORRSIG
-#include "include/secp256k1_schnorrsig.h"
+#include "../include/secp256k1_schnorrsig.h"
#endif
void run_tests(secp256k1_context *ctx, unsigned char *key);
@@ -166,7 +166,7 @@ void run_tests(secp256k1_context *ctx, unsigned char *key) {
ret = secp256k1_keypair_create(ctx, &keypair, key);
VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
CHECK(ret == 1);
- ret = secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL);
+ ret = secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL);
VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
CHECK(ret == 1);
#endif
diff --git a/src/signet.cpp b/src/signet.cpp
index 1ba8502287..aafd1999ee 100644
--- a/src/signet.cpp
+++ b/src/signet.cpp
@@ -141,7 +141,7 @@ bool CheckSignetBlockSolution(const CBlock& block, const Consensus::Params& cons
PrecomputedTransactionData txdata;
txdata.Init(signet_txs->m_to_sign, {signet_txs->m_to_spend.vout[0]});
- TransactionSignatureChecker sigcheck(&signet_txs->m_to_sign, /*nIn=*/ 0, /*amount=*/ signet_txs->m_to_spend.vout[0].nValue, txdata, MissingDataBehavior::ASSERT_FAIL);
+ TransactionSignatureChecker sigcheck(&signet_txs->m_to_sign, /* nInIn= */ 0, /* amountIn= */ signet_txs->m_to_spend.vout[0].nValue, txdata, MissingDataBehavior::ASSERT_FAIL);
if (!VerifyScript(scriptSig, signet_txs->m_to_spend.vout[0].scriptPubKey, &witness, BLOCK_SCRIPT_VERIFY_FLAGS, sigcheck)) {
LogPrint(BCLog::VALIDATION, "CheckSignetBlockSolution: Errors in block (block solution invalid)\n");
diff --git a/src/sync.cpp b/src/sync.cpp
index a2b62c2286..eace86d9dd 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -9,6 +9,7 @@
#include <sync.h>
#include <logging.h>
+#include <logging/timer.h>
#include <tinyformat.h>
#include <util/strencodings.h>
#include <util/threadnames.h>
@@ -23,16 +24,10 @@
#include <utility>
#include <vector>
-#ifdef DEBUG_LOCKCONTENTION
-#if !defined(HAVE_THREAD_LOCAL)
-static_assert(false, "thread_local is not supported");
-#endif
-void PrintLockContention(const char* pszName, const char* pszFile, int nLine)
+void LockContention(const char* pszName, const char* pszFile, int nLine)
{
- LogPrintf("LOCKCONTENTION: %s\n", pszName);
- LogPrintf("Locker: %s:%d\n", pszFile, nLine);
+ LOG_TIME_MICROS_WITH_CATEGORY(strprintf("%s, %s:%d", pszName, pszFile, nLine), BCLog::LOCK);
}
-#endif /* DEBUG_LOCKCONTENTION */
#ifdef DEBUG_LOCKORDER
//
diff --git a/src/sync.h b/src/sync.h
index 146c228592..bf15c0b4eb 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -126,9 +126,8 @@ using RecursiveMutex = AnnotatedMixin<std::recursive_mutex>;
/** Wrapped mutex: supports waiting but not recursive locking */
typedef AnnotatedMixin<std::mutex> Mutex;
-#ifdef DEBUG_LOCKCONTENTION
-void PrintLockContention(const char* pszName, const char* pszFile, int nLine);
-#endif
+/** Prints a lock contention to the log */
+void LockContention(const char* pszName, const char* pszFile, int nLine);
/** Wrapper around std::unique_lock style lock for Mutex. */
template <typename Mutex, typename Base = typename Mutex::UniqueLock>
@@ -138,22 +137,18 @@ private:
void Enter(const char* pszName, const char* pszFile, int nLine)
{
EnterCritical(pszName, pszFile, nLine, Base::mutex());
-#ifdef DEBUG_LOCKCONTENTION
- if (!Base::try_lock()) {
- PrintLockContention(pszName, pszFile, nLine);
-#endif
- Base::lock();
-#ifdef DEBUG_LOCKCONTENTION
- }
-#endif
+ if (Base::try_lock()) return;
+ LockContention(pszName, pszFile, nLine); // log the contention
+ Base::lock();
}
bool TryEnter(const char* pszName, const char* pszFile, int nLine)
{
EnterCritical(pszName, pszFile, nLine, Base::mutex(), true);
Base::try_lock();
- if (!Base::owns_lock())
+ if (!Base::owns_lock()) {
LeaveCritical();
+ }
return Base::owns_lock();
}
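
The sync.cpp/sync.h hunks above make contention logging unconditional: Enter() first attempts a non-blocking try_lock(), calls LockContention() (which emits a timed entry to the BCLog::LOCK category) only when that fails, and then falls back to the blocking lock(). A stripped-down sketch of the same pattern on a plain std::mutex; the names here are illustrative and not Bitcoin Core's:

    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex g_mutex;

    // Stand-in for LockContention(); the real function logs with
    // LOG_TIME_MICROS_WITH_CATEGORY instead of printing to stderr.
    static void ReportContention(const char* name, const char* file, int line)
    {
        std::fprintf(stderr, "contention on %s at %s:%d\n", name, file, line);
    }

    static void EnterWithContentionLogging(std::mutex& m, const char* name, const char* file, int line)
    {
        if (m.try_lock()) return;           // fast path: acquired without blocking
        ReportContention(name, file, line); // slow path: another thread holds the lock
        m.lock();                           // block until it is released
    }

    int main()
    {
        std::thread holder([] {
            std::lock_guard<std::mutex> lock(g_mutex);
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
        });
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        EnterWithContentionLogging(g_mutex, "g_mutex", __FILE__, __LINE__);
        g_mutex.unlock();
        holder.join();
        return 0;
    }
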
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index eb5c37b34d..e1b5df9502 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -1,7 +1,10 @@
// Copyright (c) 2012-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <addrdb.h>
#include <addrman.h>
+#include <chainparams.h>
#include <test/data/asmap.raw.h>
#include <test/util/setup_common.h>
#include <util/asmap.h>
@@ -15,27 +18,73 @@
#include <optional>
#include <string>
+using namespace std::literals;
+
+class CAddrManSerializationMock : public CAddrMan
+{
+public:
+ virtual void Serialize(CDataStream& s) const = 0;
+
+ CAddrManSerializationMock()
+ : CAddrMan(/* asmap */ std::vector<bool>(), /* deterministic */ true, /* consistency_check_ratio */ 100)
+ {}
+};
+
+class CAddrManUncorrupted : public CAddrManSerializationMock
+{
+public:
+ void Serialize(CDataStream& s) const override
+ {
+ CAddrMan::Serialize(s);
+ }
+};
+
+class CAddrManCorrupted : public CAddrManSerializationMock
+{
+public:
+ void Serialize(CDataStream& s) const override
+ {
+ // Produces corrupt output that claims addrman has 20 addrs when it only has one addr.
+ unsigned char nVersion = 1;
+ s << nVersion;
+ s << ((unsigned char)32);
+ s << uint256::ONE;
+ s << 10; // nNew
+ s << 10; // nTried
+
+ int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
+ s << nUBuckets;
+
+ CService serv;
+ BOOST_CHECK(Lookup("252.1.1.1", serv, 7777, false));
+ CAddress addr = CAddress(serv, NODE_NONE);
+ CNetAddr resolved;
+ BOOST_CHECK(LookupHost("252.2.2.2", resolved, false));
+ CAddrInfo info = CAddrInfo(addr, resolved);
+ s << info;
+ }
+};
+
+static CDataStream AddrmanToStream(const CAddrManSerializationMock& _addrman)
+{
+ CDataStream ssPeersIn(SER_DISK, CLIENT_VERSION);
+ ssPeersIn << Params().MessageStart();
+ ssPeersIn << _addrman;
+ std::string str = ssPeersIn.str();
+ std::vector<unsigned char> vchData(str.begin(), str.end());
+ return CDataStream(vchData, SER_DISK, CLIENT_VERSION);
+}
+
class CAddrManTest : public CAddrMan
{
private:
bool deterministic;
public:
explicit CAddrManTest(bool makeDeterministic = true,
- std::vector<bool> asmap = std::vector<bool>())
+ std::vector<bool> asmap = std::vector<bool>())
+ : CAddrMan(asmap, makeDeterministic, /* consistency_check_ratio */ 100)
{
- if (makeDeterministic) {
- // Set addrman addr placement to be deterministic.
- MakeDeterministic();
- }
deterministic = makeDeterministic;
- m_asmap = asmap;
- }
-
- //! Ensure that bucket placement is always the same for testing purposes.
- void MakeDeterministic()
- {
- nKey.SetNull();
- insecure_rand = FastRandomContext(true);
}
CAddrInfo* Find(const CNetAddr& addr, int* pnId = nullptr)
@@ -76,22 +125,12 @@ public:
{
int64_t nLastSuccess = 1;
// Set last good connection in the deep past.
- Good(addr, true, nLastSuccess);
+ Good(addr, nLastSuccess);
bool count_failure = false;
int64_t nLastTry = GetAdjustedTime()-61;
Attempt(addr, count_failure, nLastTry);
}
-
- void Clear()
- {
- CAddrMan::Clear();
- if (deterministic) {
- nKey.SetNull();
- insecure_rand = FastRandomContext(true);
- }
- }
-
};
static CNetAddr ResolveIP(const std::string& ip)
@@ -125,27 +164,27 @@ BOOST_FIXTURE_TEST_SUITE(addrman_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(addrman_simple)
{
- CAddrManTest addrman;
+ auto addrman = std::make_unique<CAddrManTest>();
CNetAddr source = ResolveIP("252.2.2.2");
// Test: Does Addrman respond correctly when empty.
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
- CAddrInfo addr_null = addrman.Select();
+ BOOST_CHECK_EQUAL(addrman->size(), 0U);
+ CAddrInfo addr_null = addrman->Select();
BOOST_CHECK_EQUAL(addr_null.ToString(), "[::]:0");
// Test: Does Addrman::Add work as expected.
CService addr1 = ResolveService("250.1.1.1", 8333);
- BOOST_CHECK(addrman.Add(CAddress(addr1, NODE_NONE), source));
- BOOST_CHECK_EQUAL(addrman.size(), 1U);
- CAddrInfo addr_ret1 = addrman.Select();
+ BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ CAddrInfo addr_ret1 = addrman->Select();
BOOST_CHECK_EQUAL(addr_ret1.ToString(), "250.1.1.1:8333");
// Test: Does IP address deduplication work correctly.
// Expected dup IP should not be added.
CService addr1_dup = ResolveService("250.1.1.1", 8333);
- BOOST_CHECK(!addrman.Add(CAddress(addr1_dup, NODE_NONE), source));
- BOOST_CHECK_EQUAL(addrman.size(), 1U);
+ BOOST_CHECK(!addrman->Add({CAddress(addr1_dup, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
// Test: New table has one addr and we add a diff addr we should
@@ -155,21 +194,16 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
// success.
CService addr2 = ResolveService("250.1.1.2", 8333);
- BOOST_CHECK(addrman.Add(CAddress(addr2, NODE_NONE), source));
- BOOST_CHECK(addrman.size() >= 1);
+ BOOST_CHECK(addrman->Add({CAddress(addr2, NODE_NONE)}, source));
+ BOOST_CHECK(addrman->size() >= 1);
- // Test: AddrMan::Clear() should empty the new table.
- addrman.Clear();
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
- CAddrInfo addr_null2 = addrman.Select();
- BOOST_CHECK_EQUAL(addr_null2.ToString(), "[::]:0");
-
- // Test: AddrMan::Add multiple addresses works as expected
+ // Test: reset addrman and test that AddrMan::Add with multiple addresses works as expected
+ addrman = std::make_unique<CAddrManTest>();
std::vector<CAddress> vAddr;
vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE));
vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE));
- BOOST_CHECK(addrman.Add(vAddr, source));
- BOOST_CHECK(addrman.size() >= 1);
+ BOOST_CHECK(addrman->Add(vAddr, source));
+ BOOST_CHECK(addrman->size() >= 1);
}
BOOST_AUTO_TEST_CASE(addrman_ports)
@@ -182,11 +216,11 @@ BOOST_AUTO_TEST_CASE(addrman_ports)
// Test 7; Addr with same IP but diff port does not replace existing addr.
CService addr1 = ResolveService("250.1.1.1", 8333);
- BOOST_CHECK(addrman.Add(CAddress(addr1, NODE_NONE), source));
+ BOOST_CHECK(addrman.Add({CAddress(addr1, NODE_NONE)}, source));
BOOST_CHECK_EQUAL(addrman.size(), 1U);
CService addr1_port = ResolveService("250.1.1.1", 8334);
- BOOST_CHECK(!addrman.Add(CAddress(addr1_port, NODE_NONE), source));
+ BOOST_CHECK(!addrman.Add({CAddress(addr1_port, NODE_NONE)}, source));
BOOST_CHECK_EQUAL(addrman.size(), 1U);
CAddrInfo addr_ret2 = addrman.Select();
BOOST_CHECK_EQUAL(addr_ret2.ToString(), "250.1.1.1:8333");
@@ -209,7 +243,7 @@ BOOST_AUTO_TEST_CASE(addrman_select)
// Test: Select from new with 1 addr in new.
CService addr1 = ResolveService("250.1.1.1", 8333);
- BOOST_CHECK(addrman.Add(CAddress(addr1, NODE_NONE), source));
+ BOOST_CHECK(addrman.Add({CAddress(addr1, NODE_NONE)}, source));
BOOST_CHECK_EQUAL(addrman.size(), 1U);
bool newOnly = true;
@@ -233,20 +267,20 @@ BOOST_AUTO_TEST_CASE(addrman_select)
CService addr3 = ResolveService("250.3.2.2", 9999);
CService addr4 = ResolveService("250.3.3.3", 9999);
- BOOST_CHECK(addrman.Add(CAddress(addr2, NODE_NONE), ResolveService("250.3.1.1", 8333)));
- BOOST_CHECK(addrman.Add(CAddress(addr3, NODE_NONE), ResolveService("250.3.1.1", 8333)));
- BOOST_CHECK(addrman.Add(CAddress(addr4, NODE_NONE), ResolveService("250.4.1.1", 8333)));
+ BOOST_CHECK(addrman.Add({CAddress(addr2, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman.Add({CAddress(addr3, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman.Add({CAddress(addr4, NODE_NONE)}, ResolveService("250.4.1.1", 8333)));
// Add three addresses to tried table.
CService addr5 = ResolveService("250.4.4.4", 8333);
CService addr6 = ResolveService("250.4.5.5", 7777);
CService addr7 = ResolveService("250.4.6.6", 8333);
- BOOST_CHECK(addrman.Add(CAddress(addr5, NODE_NONE), ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman.Add({CAddress(addr5, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
addrman.Good(CAddress(addr5, NODE_NONE));
- BOOST_CHECK(addrman.Add(CAddress(addr6, NODE_NONE), ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman.Add({CAddress(addr6, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
addrman.Good(CAddress(addr6, NODE_NONE));
- BOOST_CHECK(addrman.Add(CAddress(addr7, NODE_NONE), ResolveService("250.1.1.3", 8333)));
+ BOOST_CHECK(addrman.Add({CAddress(addr7, NODE_NONE)}, ResolveService("250.1.1.3", 8333)));
addrman.Good(CAddress(addr7, NODE_NONE));
// Test: 6 addrs + 1 addr from last test = 7.
@@ -266,24 +300,27 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
CNetAddr source = ResolveIP("252.2.2.2");
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
+ uint32_t num_addrs{0};
- for (unsigned int i = 1; i < 18; i++) {
- CService addr = ResolveService("250.1.1." + ToString(i));
- BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs);
+
+ while (num_addrs < 22) { // Magic number! 250.1.1.1 - 250.1.1.22 do not collide with deterministic key = 1
+ CService addr = ResolveService("250.1.1." + ToString(++num_addrs));
+ BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
//Test: No collision in new table yet.
- BOOST_CHECK_EQUAL(addrman.size(), i);
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs);
}
//Test: new table collision!
- CService addr1 = ResolveService("250.1.1.18");
- BOOST_CHECK(addrman.Add(CAddress(addr1, NODE_NONE), source));
- BOOST_CHECK_EQUAL(addrman.size(), 17U);
-
- CService addr2 = ResolveService("250.1.1.19");
- BOOST_CHECK(addrman.Add(CAddress(addr2, NODE_NONE), source));
- BOOST_CHECK_EQUAL(addrman.size(), 18U);
+ CService addr1 = ResolveService("250.1.1." + ToString(++num_addrs));
+ uint32_t collisions{1};
+ BOOST_CHECK(addrman.Add({CAddress(addr1, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs - collisions);
+
+ CService addr2 = ResolveService("250.1.1." + ToString(++num_addrs));
+ BOOST_CHECK(addrman.Add({CAddress(addr2, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs - collisions);
}
BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
@@ -292,25 +329,28 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
CNetAddr source = ResolveIP("252.2.2.2");
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
+ uint32_t num_addrs{0};
+
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs);
- for (unsigned int i = 1; i < 80; i++) {
- CService addr = ResolveService("250.1.1." + ToString(i));
- BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
+ while (num_addrs < 64) { // Magic number! 250.1.1.1 - 250.1.1.64 do not collide with deterministic key = 1
+ CService addr = ResolveService("250.1.1." + ToString(++num_addrs));
+ BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(CAddress(addr, NODE_NONE));
//Test: No collision in tried table yet.
- BOOST_CHECK_EQUAL(addrman.size(), i);
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs);
}
//Test: tried table collision!
- CService addr1 = ResolveService("250.1.1.80");
- BOOST_CHECK(addrman.Add(CAddress(addr1, NODE_NONE), source));
- BOOST_CHECK_EQUAL(addrman.size(), 79U);
-
- CService addr2 = ResolveService("250.1.1.81");
- BOOST_CHECK(addrman.Add(CAddress(addr2, NODE_NONE), source));
- BOOST_CHECK_EQUAL(addrman.size(), 80U);
+ CService addr1 = ResolveService("250.1.1." + ToString(++num_addrs));
+ uint32_t collisions{1};
+ BOOST_CHECK(addrman.Add({CAddress(addr1, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs - collisions);
+
+ CService addr2 = ResolveService("250.1.1." + ToString(++num_addrs));
+ BOOST_CHECK(addrman.Add({CAddress(addr2, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman.size(), num_addrs - collisions);
}
BOOST_AUTO_TEST_CASE(addrman_find)
@@ -326,9 +366,9 @@ BOOST_AUTO_TEST_CASE(addrman_find)
CNetAddr source1 = ResolveIP("250.1.2.1");
CNetAddr source2 = ResolveIP("250.1.2.2");
- BOOST_CHECK(addrman.Add(addr1, source1));
- BOOST_CHECK(!addrman.Add(addr2, source2));
- BOOST_CHECK(addrman.Add(addr3, source1));
+ BOOST_CHECK(addrman.Add({addr1}, source1));
+ BOOST_CHECK(!addrman.Add({addr2}, source2));
+ BOOST_CHECK(addrman.Add({addr3}, source1));
// Test: ensure Find returns an IP matching what we searched on.
CAddrInfo* info1 = addrman.Find(addr1);
@@ -410,11 +450,8 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
CNetAddr source2 = ResolveIP("250.2.3.3");
// Test: Ensure GetAddr works with new addresses.
- BOOST_CHECK(addrman.Add(addr1, source1));
- BOOST_CHECK(addrman.Add(addr2, source2));
- BOOST_CHECK(addrman.Add(addr3, source1));
- BOOST_CHECK(addrman.Add(addr4, source2));
- BOOST_CHECK(addrman.Add(addr5, source1));
+ BOOST_CHECK(addrman.Add({addr1, addr3, addr5}, source1));
+ BOOST_CHECK(addrman.Add({addr2, addr4}, source2));
BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 0, /* max_pct */ 0, /* network */ std::nullopt).size(), 5U);
// Net processing asks for 23% of addresses. 23% of 5 is 1 rounded down.
@@ -435,7 +472,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
// Ensure that for all addrs in addrman, isTerrible == false.
addr.nTime = GetAdjustedTime();
- addrman.Add(addr, ResolveIP(strAddr));
+ addrman.Add({addr}, ResolveIP(strAddr));
if (i % 8 == 0)
addrman.Good(addr);
}
@@ -721,23 +758,23 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
{
std::vector<bool> asmap1 = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
- CAddrManTest addrman_asmap1(true, asmap1);
- CAddrManTest addrman_asmap1_dup(true, asmap1);
- CAddrManTest addrman_noasmap;
+ auto addrman_asmap1 = std::make_unique<CAddrManTest>(true, asmap1);
+ auto addrman_asmap1_dup = std::make_unique<CAddrManTest>(true, asmap1);
+ auto addrman_noasmap = std::make_unique<CAddrManTest>();
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
CAddress addr = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
CNetAddr default_source;
- addrman_asmap1.Add(addr, default_source);
+ addrman_asmap1->Add({addr}, default_source);
- stream << addrman_asmap1;
+ stream << *addrman_asmap1;
// serializing/deserializing addrman with the same asmap
- stream >> addrman_asmap1_dup;
+ stream >> *addrman_asmap1_dup;
- std::pair<int, int> bucketAndEntry_asmap1 = addrman_asmap1.GetBucketAndEntry(addr);
- std::pair<int, int> bucketAndEntry_asmap1_dup = addrman_asmap1_dup.GetBucketAndEntry(addr);
+ std::pair<int, int> bucketAndEntry_asmap1 = addrman_asmap1->GetBucketAndEntry(addr);
+ std::pair<int, int> bucketAndEntry_asmap1_dup = addrman_asmap1_dup->GetBucketAndEntry(addr);
BOOST_CHECK(bucketAndEntry_asmap1.second != -1);
BOOST_CHECK(bucketAndEntry_asmap1_dup.second != -1);
@@ -745,44 +782,83 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
BOOST_CHECK(bucketAndEntry_asmap1.second == bucketAndEntry_asmap1_dup.second);
// deserializing asmaped peers.dat to non-asmaped addrman
- stream << addrman_asmap1;
- stream >> addrman_noasmap;
- std::pair<int, int> bucketAndEntry_noasmap = addrman_noasmap.GetBucketAndEntry(addr);
+ stream << *addrman_asmap1;
+ stream >> *addrman_noasmap;
+ std::pair<int, int> bucketAndEntry_noasmap = addrman_noasmap->GetBucketAndEntry(addr);
BOOST_CHECK(bucketAndEntry_noasmap.second != -1);
BOOST_CHECK(bucketAndEntry_asmap1.first != bucketAndEntry_noasmap.first);
BOOST_CHECK(bucketAndEntry_asmap1.second != bucketAndEntry_noasmap.second);
// deserializing non-asmaped peers.dat to asmaped addrman
- addrman_asmap1.Clear();
- addrman_noasmap.Clear();
- addrman_noasmap.Add(addr, default_source);
- stream << addrman_noasmap;
- stream >> addrman_asmap1;
- std::pair<int, int> bucketAndEntry_asmap1_deser = addrman_asmap1.GetBucketAndEntry(addr);
+ addrman_asmap1 = std::make_unique<CAddrManTest>(true, asmap1);
+ addrman_noasmap = std::make_unique<CAddrManTest>();
+ addrman_noasmap->Add({addr}, default_source);
+ stream << *addrman_noasmap;
+ stream >> *addrman_asmap1;
+ std::pair<int, int> bucketAndEntry_asmap1_deser = addrman_asmap1->GetBucketAndEntry(addr);
BOOST_CHECK(bucketAndEntry_asmap1_deser.second != -1);
BOOST_CHECK(bucketAndEntry_asmap1_deser.first != bucketAndEntry_noasmap.first);
BOOST_CHECK(bucketAndEntry_asmap1_deser.first == bucketAndEntry_asmap1_dup.first);
BOOST_CHECK(bucketAndEntry_asmap1_deser.second == bucketAndEntry_asmap1_dup.second);
// used to map to different buckets, now maps to the same bucket.
- addrman_asmap1.Clear();
- addrman_noasmap.Clear();
+ addrman_asmap1 = std::make_unique<CAddrManTest>(true, asmap1);
+ addrman_noasmap = std::make_unique<CAddrManTest>();
CAddress addr1 = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.2.1.1"), NODE_NONE);
- addrman_noasmap.Add(addr, default_source);
- addrman_noasmap.Add(addr2, default_source);
- std::pair<int, int> bucketAndEntry_noasmap_addr1 = addrman_noasmap.GetBucketAndEntry(addr1);
- std::pair<int, int> bucketAndEntry_noasmap_addr2 = addrman_noasmap.GetBucketAndEntry(addr2);
+ addrman_noasmap->Add({addr, addr2}, default_source);
+ std::pair<int, int> bucketAndEntry_noasmap_addr1 = addrman_noasmap->GetBucketAndEntry(addr1);
+ std::pair<int, int> bucketAndEntry_noasmap_addr2 = addrman_noasmap->GetBucketAndEntry(addr2);
BOOST_CHECK(bucketAndEntry_noasmap_addr1.first != bucketAndEntry_noasmap_addr2.first);
BOOST_CHECK(bucketAndEntry_noasmap_addr1.second != bucketAndEntry_noasmap_addr2.second);
- stream << addrman_noasmap;
- stream >> addrman_asmap1;
- std::pair<int, int> bucketAndEntry_asmap1_deser_addr1 = addrman_asmap1.GetBucketAndEntry(addr1);
- std::pair<int, int> bucketAndEntry_asmap1_deser_addr2 = addrman_asmap1.GetBucketAndEntry(addr2);
+ stream << *addrman_noasmap;
+ stream >> *addrman_asmap1;
+ std::pair<int, int> bucketAndEntry_asmap1_deser_addr1 = addrman_asmap1->GetBucketAndEntry(addr1);
+ std::pair<int, int> bucketAndEntry_asmap1_deser_addr2 = addrman_asmap1->GetBucketAndEntry(addr2);
BOOST_CHECK(bucketAndEntry_asmap1_deser_addr1.first == bucketAndEntry_asmap1_deser_addr2.first);
BOOST_CHECK(bucketAndEntry_asmap1_deser_addr1.second != bucketAndEntry_asmap1_deser_addr2.second);
}
+BOOST_AUTO_TEST_CASE(remove_invalid)
+{
+ // Confirm that invalid addresses are ignored in unserialization.
+
+ auto addrman = std::make_unique<CAddrManTest>();
+ CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+
+ const CAddress new1{ResolveService("5.5.5.5"), NODE_NONE};
+ const CAddress new2{ResolveService("6.6.6.6"), NODE_NONE};
+ const CAddress tried1{ResolveService("7.7.7.7"), NODE_NONE};
+ const CAddress tried2{ResolveService("8.8.8.8"), NODE_NONE};
+
+ addrman->Add({new1, tried1, new2, tried2}, CNetAddr{});
+ addrman->Good(tried1);
+ addrman->Good(tried2);
+ BOOST_REQUIRE_EQUAL(addrman->size(), 4);
+
+ stream << *addrman;
+
+ const std::string str{stream.str()};
+ size_t pos;
+
+ const char new2_raw[]{6, 6, 6, 6};
+ const uint8_t new2_raw_replacement[]{0, 0, 0, 0}; // 0.0.0.0 is !IsValid()
+ pos = str.find(new2_raw, 0, sizeof(new2_raw));
+ BOOST_REQUIRE(pos != std::string::npos);
+ BOOST_REQUIRE(pos + sizeof(new2_raw_replacement) <= stream.size());
+ memcpy(stream.data() + pos, new2_raw_replacement, sizeof(new2_raw_replacement));
+
+ const char tried2_raw[]{8, 8, 8, 8};
+ const uint8_t tried2_raw_replacement[]{255, 255, 255, 255}; // 255.255.255.255 is !IsValid()
+ pos = str.find(tried2_raw, 0, sizeof(tried2_raw));
+ BOOST_REQUIRE(pos != std::string::npos);
+ BOOST_REQUIRE(pos + sizeof(tried2_raw_replacement) <= stream.size());
+ memcpy(stream.data() + pos, tried2_raw_replacement, sizeof(tried2_raw_replacement));
+
+ addrman = std::make_unique<CAddrManTest>();
+ stream >> *addrman;
+ BOOST_CHECK_EQUAL(addrman->size(), 2);
+}
BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
{
@@ -797,7 +873,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
CNetAddr source = ResolveIP("252.2.2.2");
for (unsigned int i = 1; i < 23; i++) {
CService addr = ResolveService("250.1.1."+ToString(i));
- BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
+ BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(addr);
// No collisions yet.
@@ -820,11 +896,11 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
{
CAddrManTest addrman;
- // Add twenty two addresses.
+ // Add 35 addresses.
CNetAddr source = ResolveIP("252.2.2.2");
- for (unsigned int i = 1; i < 23; i++) {
+ for (unsigned int i = 1; i < 36; i++) {
CService addr = ResolveService("250.1.1."+ToString(i));
- BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
+ BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(addr);
// No collision yet.
@@ -832,22 +908,22 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
}
- // Collision between 23 and 19.
- CService addr23 = ResolveService("250.1.1.23");
- BOOST_CHECK(addrman.Add(CAddress(addr23, NODE_NONE), source));
- addrman.Good(addr23);
+ // Collision between 36 and 19.
+ CService addr36 = ResolveService("250.1.1.36");
+ BOOST_CHECK(addrman.Add({CAddress(addr36, NODE_NONE)}, source));
+ addrman.Good(addr36);
- BOOST_CHECK(addrman.size() == 23);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "250.1.1.19:0");
+ BOOST_CHECK(addrman.size() == 36);
+ BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().ToString(), "250.1.1.19:0");
- // 23 should be discarded and 19 not evicted.
+ // 36 should be discarded and 19 not evicted.
addrman.ResolveCollisions();
BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
// Lets create two collisions.
- for (unsigned int i = 24; i < 33; i++) {
+ for (unsigned int i = 37; i < 59; i++) {
CService addr = ResolveService("250.1.1."+ToString(i));
- BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
+ BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(addr);
BOOST_CHECK(addrman.size() == i);
@@ -855,17 +931,17 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
}
// Cause a collision.
- CService addr33 = ResolveService("250.1.1.33");
- BOOST_CHECK(addrman.Add(CAddress(addr33, NODE_NONE), source));
- addrman.Good(addr33);
- BOOST_CHECK(addrman.size() == 33);
+ CService addr59 = ResolveService("250.1.1.59");
+ BOOST_CHECK(addrman.Add({CAddress(addr59, NODE_NONE)}, source));
+ addrman.Good(addr59);
+ BOOST_CHECK(addrman.size() == 59);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "250.1.1.27:0");
+ BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().ToString(), "250.1.1.10:0");
// Cause a second collision.
- BOOST_CHECK(!addrman.Add(CAddress(addr23, NODE_NONE), source));
- addrman.Good(addr23);
- BOOST_CHECK(addrman.size() == 33);
+ BOOST_CHECK(!addrman.Add({CAddress(addr36, NODE_NONE)}, source));
+ addrman.Good(addr36);
+ BOOST_CHECK(addrman.size() == 59);
BOOST_CHECK(addrman.SelectTriedCollision().ToString() != "[::]:0");
addrman.ResolveCollisions();
@@ -881,11 +957,11 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
// Empty addrman should return blank addrman info.
BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
- // Add twenty two addresses.
+ // Add 35 addresses
CNetAddr source = ResolveIP("252.2.2.2");
- for (unsigned int i = 1; i < 23; i++) {
+ for (unsigned int i = 1; i < 36; i++) {
CService addr = ResolveService("250.1.1."+ToString(i));
- BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
+ BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(addr);
// No collision yet.
@@ -893,38 +969,111 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
}
- // Collision between 23 and 19.
- CService addr = ResolveService("250.1.1.23");
- BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
+ // Collision between 36 and 19.
+ CService addr = ResolveService("250.1.1.36");
+ BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(addr);
- BOOST_CHECK(addrman.size() == 23);
+ BOOST_CHECK_EQUAL(addrman.size(), 36);
CAddrInfo info = addrman.SelectTriedCollision();
- BOOST_CHECK(info.ToString() == "250.1.1.19:0");
+ BOOST_CHECK_EQUAL(info.ToString(), "250.1.1.19:0");
// Ensure test of address fails, so that it is evicted.
addrman.SimConnFail(info);
- // Should swap 23 for 19.
+ // Should swap 36 for 19.
addrman.ResolveCollisions();
BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
- // If 23 was swapped for 19, then this should cause no collisions.
- BOOST_CHECK(!addrman.Add(CAddress(addr, NODE_NONE), source));
+ // If 36 was swapped for 19, then this should cause no collisions.
+ BOOST_CHECK(!addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(addr);
BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
- // If we insert 19 is should collide with 23.
+ // If we insert 19 it should collide with 36
CService addr19 = ResolveService("250.1.1.19");
- BOOST_CHECK(!addrman.Add(CAddress(addr19, NODE_NONE), source));
+ BOOST_CHECK(!addrman.Add({CAddress(addr19, NODE_NONE)}, source));
addrman.Good(addr19);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "250.1.1.23:0");
+ BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().ToString(), "250.1.1.36:0");
addrman.ResolveCollisions();
BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
}
+BOOST_AUTO_TEST_CASE(caddrdb_read)
+{
+ CAddrManUncorrupted addrmanUncorrupted;
+
+ CService addr1, addr2, addr3;
+ BOOST_CHECK(Lookup("250.7.1.1", addr1, 8333, false));
+ BOOST_CHECK(Lookup("250.7.2.2", addr2, 9999, false));
+ BOOST_CHECK(Lookup("250.7.3.3", addr3, 9999, false));
+ BOOST_CHECK(Lookup("250.7.3.3"s, addr3, 9999, false));
+ BOOST_CHECK(!Lookup("250.7.3.3\0example.com"s, addr3, 9999, false));
+
+ // Add three addresses to new table.
+ CService source;
+ BOOST_CHECK(Lookup("252.5.1.1", source, 8333, false));
+ std::vector<CAddress> addresses{CAddress(addr1, NODE_NONE), CAddress(addr2, NODE_NONE), CAddress(addr3, NODE_NONE)};
+ BOOST_CHECK(addrmanUncorrupted.Add(addresses, source));
+ BOOST_CHECK(addrmanUncorrupted.size() == 3);
+
+ // Test that the de-serialization does not throw an exception.
+ CDataStream ssPeers1 = AddrmanToStream(addrmanUncorrupted);
+ bool exceptionThrown = false;
+ CAddrMan addrman1(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+
+ BOOST_CHECK(addrman1.size() == 0);
+ try {
+ unsigned char pchMsgTmp[4];
+ ssPeers1 >> pchMsgTmp;
+ ssPeers1 >> addrman1;
+ } catch (const std::exception&) {
+ exceptionThrown = true;
+ }
+
+ BOOST_CHECK(addrman1.size() == 3);
+ BOOST_CHECK(exceptionThrown == false);
+
+ // Test that CAddrDB::Read creates an addrman with the correct number of addrs.
+ CDataStream ssPeers2 = AddrmanToStream(addrmanUncorrupted);
+
+ CAddrMan addrman2(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+ BOOST_CHECK(addrman2.size() == 0);
+ BOOST_CHECK(CAddrDB::Read(addrman2, ssPeers2));
+ BOOST_CHECK(addrman2.size() == 3);
+}
+
+
+BOOST_AUTO_TEST_CASE(caddrdb_read_corrupted)
+{
+ CAddrManCorrupted addrmanCorrupted;
+
+ // Test that the de-serialization of corrupted addrman throws an exception.
+ CDataStream ssPeers1 = AddrmanToStream(addrmanCorrupted);
+ bool exceptionThrown = false;
+ CAddrMan addrman1(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+ BOOST_CHECK(addrman1.size() == 0);
+ try {
+ unsigned char pchMsgTmp[4];
+ ssPeers1 >> pchMsgTmp;
+ ssPeers1 >> addrman1;
+ } catch (const std::exception&) {
+ exceptionThrown = true;
+ }
+ // Even though de-serialization failed, addrman is not left in a clean state.
+ BOOST_CHECK(addrman1.size() == 1);
+ BOOST_CHECK(exceptionThrown);
+
+ // Test that CAddrDB::Read fails if peers.dat is corrupt
+ CDataStream ssPeers2 = AddrmanToStream(addrmanCorrupted);
+
+ CAddrMan addrman2(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+ BOOST_CHECK(addrman2.size() == 0);
+ BOOST_CHECK(!CAddrDB::Read(addrman2, ssPeers2));
+}
+
BOOST_AUTO_TEST_SUITE_END()
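
All of the addrman test churn above follows from two interface changes: CAddrMan now takes the asmap, a deterministic flag, and a consistency-check ratio as constructor arguments (so the old MakeDeterministic()/Clear() helpers are gone), and Add() accepts only a vector of addresses per source. A sketch of the new call pattern, assuming it is compiled inside the Bitcoin Core tree at this revision (the addresses below are placeholders):

    #include <addrman.h>
    #include <netbase.h>
    #include <protocol.h>

    #include <cassert>
    #include <vector>

    void AddrManAddExample()
    {
        // Determinism and the consistency-check ratio are constructor arguments now;
        // the unit tests above use a ratio of 100, the fuzzer uses 0.
        CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);

        CService source, peer1, peer2;
        assert(Lookup("252.2.2.2", source, 8333, false));
        assert(Lookup("250.1.1.1", peer1, 8333, false));
        assert(Lookup("250.1.1.2", peer2, 8333, false));

        // Add() now takes a vector of addresses from one source in a single call.
        assert(addrman.Add({CAddress(peer1, NODE_NONE), CAddress(peer2, NODE_NONE)}, source));
        addrman.Good(CAddress(peer1, NODE_NONE));
    }
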
diff --git a/src/test/bip32_tests.cpp b/src/test/bip32_tests.cpp
index fb16c92647..a89868e1ef 100644
--- a/src/test/bip32_tests.cpp
+++ b/src/test/bip32_tests.cpp
@@ -14,6 +14,8 @@
#include <string>
#include <vector>
+namespace {
+
struct TestDerivation {
std::string pub;
std::string prv;
@@ -99,7 +101,26 @@ TestVector test4 =
"xprv9xJocDuwtYCMNAo3Zw76WENQeAS6WGXQ55RCy7tDJ8oALr4FWkuVoHJeHVAcAqiZLE7Je3vZJHxspZdFHfnBEjHqU5hG1Jaj32dVoS6XLT1",
0);
-static void RunTest(const TestVector &test) {
+const std::vector<std::string> TEST5 = {
+ "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6LBpB85b3D2yc8sfvZU521AAwdZafEz7mnzBBsz4wKY5fTtTQBm",
+ "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzFGTQQD3dC4H2D5GBj7vWvSQaaBv5cxi9gafk7NF3pnBju6dwKvH",
+ "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6Txnt3siSujt9RCVYsx4qHZGc62TG4McvMGcAUjeuwZdduYEvFn",
+ "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzFGpWnsj83BHtEy5Zt8CcDr1UiRXuWCmTQLxEK9vbz5gPstX92JQ",
+ "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6N8ZMMXctdiCjxTNq964yKkwrkBJJwpzZS4HS2fxvyYUA4q2Xe4",
+ "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzFAzHGBP2UuGCqWLTAPLcMtD9y5gkZ6Eq3Rjuahrv17fEQ3Qen6J",
+ "xprv9s2SPatNQ9Vc6GTbVMFPFo7jsaZySyzk7L8n2uqKXJen3KUmvQNTuLh3fhZMBoG3G4ZW1N2kZuHEPY53qmbZzCHshoQnNf4GvELZfqTUrcv",
+ "xpub661no6RGEX3uJkY4bNnPcw4URcQTrSibUZ4NqJEw5eBkv7ovTwgiT91XX27VbEXGENhYRCf7hyEbWrR3FewATdCEebj6znwMfQkhRYHRLpJ",
+ "xprv9s21ZrQH4r4TsiLvyLXqM9P7k1K3EYhA1kkD6xuquB5i39AU8KF42acDyL3qsDbU9NmZn6MsGSUYZEsuoePmjzsB3eFKSUEh3Gu1N3cqVUN",
+ "xpub661MyMwAuDcm6CRQ5N4qiHKrJ39Xe1R1NyfouMKTTWcguwVcfrZJaNvhpebzGerh7gucBvzEQWRugZDuDXjNDRmXzSZe4c7mnTK97pTvGS8",
+ "DMwo58pR1QLEFihHiXPVykYB6fJmsTeHvyTp7hRThAtCX8CvYzgPcn8XnmdfHGMQzT7ayAmfo4z3gY5KfbrZWZ6St24UVf2Qgo6oujFktLHdHY4",
+ "DMwo58pR1QLEFihHiXPVykYB6fJmsTeHvyTp7hRThAtCX8CvYzgPcn8XnmdfHPmHJiEDXkTiJTVV9rHEBUem2mwVbbNfvT2MTcAqj3nesx8uBf9",
+ "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzF93Y5wvzdUayhgkkFoicQZcP3y52uPPxFnfoLZB21Teqt1VvEHx",
+ "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzFAzHGBP2UuGCqWLTAPLcMtD5SDKr24z3aiUvKr9bJpdrcLg1y3G",
+ "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6Q5JXayek4PRsn35jii4veMimro1xefsM58PgBMrvdYre8QyULY",
+ "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHL"
+};
+
+void RunTest(const TestVector &test) {
std::vector<unsigned char> seed = ParseHex(test.strHexMaster);
CExtKey key;
CExtPubKey pubkey;
@@ -133,6 +154,8 @@ static void RunTest(const TestVector &test) {
}
}
+} // namespace
+
BOOST_FIXTURE_TEST_SUITE(bip32_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(bip32_test1) {
@@ -151,4 +174,13 @@ BOOST_AUTO_TEST_CASE(bip32_test4) {
RunTest(test4);
}
+BOOST_AUTO_TEST_CASE(bip32_test5) {
+ for (const auto& str : TEST5) {
+ auto dec_extkey = DecodeExtKey(str);
+ auto dec_extpubkey = DecodeExtPubKey(str);
+ BOOST_CHECK_MESSAGE(!dec_extkey.key.IsValid(), "Decoding '" + str + "' as xprv should fail");
+ BOOST_CHECK_MESSAGE(!dec_extpubkey.pubkey.IsValid(), "Decoding '" + str + "' as xpub should fail");
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index edec5f0a31..5b3b39fdb8 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -617,7 +617,7 @@ static void TestChaCha20Poly1305AEAD(bool must_succeed, unsigned int expected_aa
ChaCha20Poly1305AEAD aead(aead_K_1.data(), aead_K_1.size(), aead_K_2.data(), aead_K_2.size());
// create a chacha20 instance to compare against
- ChaCha20 cmp_ctx(aead_K_2.data(), 32);
+ ChaCha20 cmp_ctx(aead_K_1.data(), 32);
// encipher
bool res = aead.Crypt(seqnr_payload, seqnr_aad, aad_pos, ciphertext_buf.data(), ciphertext_buf.size(), plaintext_buf.data(), plaintext_buf.size(), true);
@@ -708,8 +708,8 @@ BOOST_AUTO_TEST_CASE(chacha20_poly1305_aead_testvector)
"b1a03d5bd2855d60699e7d3a3133fa47be740fe4e4c1f967555e2d9271f31c3a8bd94d54b5ecabbc41ffbb0c90924080");
TestChaCha20Poly1305AEAD(true, 255,
"ff0000f195e66982105ffb640bb7757f579da31602fc93ec01ac56f85ac3c134a4547b733b46413042c9440049176905d3be59ea1c53f15916155c2be8241a38008b9a26bc35941e2444177c8ade6689de95264986d95889fb60e84629c9bd9a5acb1cc118be563eb9b3a4a472f82e09a7e778492b562ef7130e88dfe031c79db9d4f7c7a899151b9a475032b63fc385245fe054e3dd5a97a5f576fe064025d3ce042c566ab2c507b138db853e3d6959660996546cc9c4a6eafdc777c040d70eaf46f76dad3979e5c5360c3317166a1c894c94a371876a94df7628fe4eaaf2ccb27d5aaae0ad7ad0f9d4b6ad3b54098746d4524d38407a6deb3ab78fab78c9",
- "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
"ff0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
"c640c1711e3ee904ac35c57ab9791c8a1c408603a90b77a83b54f6c844cb4b06d94e7fc6c800e165acd66147e80ec45a567f6ce66d05ec0cae679dceeb890017",
"3940c1e92da4582ff6f92a776aeb14d014d384eeb30f660dacf70a14a23fd31e91212701334e2ce1acf5199dc84f4d61ddbe6571bca5af874b4c9226c26e650995d157644e1848b96ed6c2102d5489a050e71d29a5a66ece11de5fb5c9558d54da28fe45b0bc4db4e5b88030bfc4a352b4b7068eccf656bae7ad6a35615315fc7c49d4200388d5eca67c2e822e069336c69b40db67e0f3c81209c50f3216a4b89fb3ae1b984b7851a2ec6f68ab12b101ab120e1ea7313bb93b5a0f71185c7fea017ddb92769861c29dba4fbc432280d5dff21b36d1c4c790128b22699950bb18bf74c448cdfe547d8ed4f657d8005fdc0cd7a050c2d46050a44c4376355858981fbe8b184288276e7a93eabc899c4a",
"f039c6689eaeef0456685200feaab9d54bbd9acde4410a3b6f4321296f4a8ca2604b49727d8892c57e005d799b2a38e85e809f20146e08eec75169691c8d4f54a0d51a1e1c7b381e0474eb02f994be9415ef3ffcbd2343f0601e1f3b172a1d494f838824e4df570f8e3b0c04e27966e36c82abd352d07054ef7bd36b84c63f9369afe7ed79b94f953873006b920c3fa251a771de1b63da927058ade119aa898b8c97e42a606b2f6df1e2d957c22f7593c1e2002f4252f4c9ae4bf773499e5cfcfe14dfc1ede26508953f88553bf4a76a802f6a0068d59295b01503fd9a600067624203e880fdf53933b96e1f4d9eb3f4e363dd8165a278ff667a41ee42b9892b077cefff92b93441f7be74cf10e6cd");
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 5668ead1fb..0bfe6eecd9 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
const CChainParams& chainparams = Params();
auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr,
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
const CChainParams& chainparams = Params();
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr,
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS;
CConnman::Options options;
@@ -194,7 +194,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(),
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
CNetAddr tor_netaddr;
BOOST_REQUIRE(
@@ -288,7 +288,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(),
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
banman->ClearBanned();
int64_t nStartTime = GetTime();
diff --git a/src/test/fuzz/addrdb.cpp b/src/test/fuzz/addrdb.cpp
deleted file mode 100644
index d15c785673..0000000000
--- a/src/test/fuzz/addrdb.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2020 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <addrdb.h>
-#include <test/fuzz/FuzzedDataProvider.h>
-#include <test/fuzz/fuzz.h>
-#include <test/fuzz/util.h>
-
-#include <cassert>
-#include <cstdint>
-#include <optional>
-#include <string>
-#include <vector>
-
-FUZZ_TARGET(addrdb)
-{
- FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
-
- // The point of this code is to exercise all CBanEntry constructors.
- const CBanEntry ban_entry = [&] {
- switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 2)) {
- case 0:
- return CBanEntry{fuzzed_data_provider.ConsumeIntegral<int64_t>()};
- break;
- case 1: {
- const std::optional<CBanEntry> ban_entry = ConsumeDeserializable<CBanEntry>(fuzzed_data_provider);
- if (ban_entry) {
- return *ban_entry;
- }
- break;
- }
- }
- return CBanEntry{};
- }();
- (void)ban_entry; // currently unused
-}
diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp
index db0b461873..e95126a80f 100644
--- a/src/test/fuzz/addrman.cpp
+++ b/src/test/fuzz/addrman.cpp
@@ -12,6 +12,7 @@
#include <time.h>
#include <util/asmap.h>
+#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
@@ -25,45 +26,233 @@ void initialize_addrman()
class CAddrManDeterministic : public CAddrMan
{
public:
- void MakeDeterministic(const uint256& random_seed)
+ FuzzedDataProvider& m_fuzzed_data_provider;
+
+ explicit CAddrManDeterministic(std::vector<bool> asmap, FuzzedDataProvider& fuzzed_data_provider)
+ : CAddrMan(std::move(asmap), /* deterministic */ true, /* consistency_check_ratio */ 0)
+ , m_fuzzed_data_provider(fuzzed_data_provider)
{
- insecure_rand = FastRandomContext{random_seed};
- Clear();
+ WITH_LOCK(cs, insecure_rand = FastRandomContext{ConsumeUInt256(fuzzed_data_provider)});
+ }
+
+ /**
+ * Generate a random address. Always returns a valid address.
+ */
+ CNetAddr RandAddr() EXCLUSIVE_LOCKS_REQUIRED(cs)
+ {
+ CNetAddr addr;
+ if (m_fuzzed_data_provider.remaining_bytes() > 1 && m_fuzzed_data_provider.ConsumeBool()) {
+ addr = ConsumeNetAddr(m_fuzzed_data_provider);
+ } else {
+ // The networks [1..6] correspond to CNetAddr::BIP155Network (private).
+ static const std::map<uint8_t, uint8_t> net_len_map = {{1, ADDR_IPV4_SIZE},
+ {2, ADDR_IPV6_SIZE},
+ {4, ADDR_TORV3_SIZE},
+ {5, ADDR_I2P_SIZE},
+ {6, ADDR_CJDNS_SIZE}};
+ uint8_t net = insecure_rand.randrange(5) + 1; // [1..5]
+ if (net == 3) {
+ net = 6;
+ }
+
+ CDataStream s(SER_NETWORK, PROTOCOL_VERSION | ADDRV2_FORMAT);
+
+ s << net;
+ s << insecure_rand.randbytes(net_len_map.at(net));
+
+ s >> addr;
+ }
+
+ // Return a dummy IPv4 5.5.5.5 if we generated an invalid address.
+ if (!addr.IsValid()) {
+ in_addr v4_addr = {};
+ v4_addr.s_addr = 0x05050505;
+ addr = CNetAddr{v4_addr};
+ }
+
+ return addr;
+ }
+
+ /**
+ * Fill this addrman with lots of addresses from lots of sources.
+ */
+ void Fill()
+ {
+ LOCK(cs);
+
+ // Add some of the addresses directly to the "tried" table.
+
+ // 0, 1, 2, 3 corresponding to 0%, 100%, 50%, 33%
+ const size_t n = m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 3);
+
+ const size_t num_sources = m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(10, 50);
+ CNetAddr prev_source;
+ // Use insecure_rand inside the loops instead of m_fuzzed_data_provider because when
+ // the latter is exhausted it just returns 0.
+ for (size_t i = 0; i < num_sources; ++i) {
+ const auto source = RandAddr();
+ const size_t num_addresses = insecure_rand.randrange(500) + 1; // [1..500]
+
+ for (size_t j = 0; j < num_addresses; ++j) {
+ const auto addr = CAddress{CService{RandAddr(), 8333}, NODE_NETWORK};
+ const auto time_penalty = insecure_rand.randrange(100000001);
+#if 1
+ // 2.83 sec to fill.
+ if (n > 0 && mapInfo.size() % n == 0 && mapAddr.find(addr) == mapAddr.end()) {
+ // Add to the "tried" table (if the bucket slot is free).
+ const CAddrInfo dummy{addr, source};
+ const int bucket = dummy.GetTriedBucket(nKey, m_asmap);
+ const int bucket_pos = dummy.GetBucketPosition(nKey, false, bucket);
+ if (vvTried[bucket][bucket_pos] == -1) {
+ int id;
+ CAddrInfo* addr_info = Create(addr, source, &id);
+ vvTried[bucket][bucket_pos] = id;
+ addr_info->fInTried = true;
+ ++nTried;
+ }
+ } else {
+ // Add to the "new" table.
+ Add_(addr, source, time_penalty);
+ }
+#else
+ // 261.91 sec to fill.
+ Add_(addr, source, time_penalty);
+ if (n > 0 && mapInfo.size() % n == 0) {
+ Good_(addr, false, GetTime());
+ }
+#endif
+ // Add 10% of the addresses from more than one source.
+ if (insecure_rand.randrange(10) == 0 && prev_source.IsValid()) {
+ Add_(addr, prev_source, time_penalty);
+ }
+ }
+ prev_source = source;
+ }
+ }
+
+ /**
+ * Compare with another AddrMan.
+ * This checks that:
+ * - the values in `mapInfo` are equal (the keys aka ids are ignored)
+ * - the `vvNew` entries refer to the same addresses
+ * - the `vvTried` entries refer to the same addresses
+ */
+ bool operator==(const CAddrManDeterministic& other)
+ {
+ LOCK2(cs, other.cs);
+
+ if (mapInfo.size() != other.mapInfo.size() || nNew != other.nNew ||
+ nTried != other.nTried) {
+ return false;
+ }
+
+ // Check that all values in `mapInfo` are equal to all values in `other.mapInfo`.
+ // Keys may be different.
+
+ using CAddrInfoHasher = std::function<size_t(const CAddrInfo&)>;
+ using CAddrInfoEq = std::function<bool(const CAddrInfo&, const CAddrInfo&)>;
+
+ CNetAddrHash netaddr_hasher;
+
+ CAddrInfoHasher addrinfo_hasher = [&netaddr_hasher](const CAddrInfo& a) {
+ return netaddr_hasher(static_cast<CNetAddr>(a)) ^ netaddr_hasher(a.source) ^
+ a.nLastSuccess ^ a.nAttempts ^ a.nRefCount ^ a.fInTried;
+ };
+
+ CAddrInfoEq addrinfo_eq = [](const CAddrInfo& lhs, const CAddrInfo& rhs) {
+ return static_cast<CNetAddr>(lhs) == static_cast<CNetAddr>(rhs) &&
+ lhs.source == rhs.source && lhs.nLastSuccess == rhs.nLastSuccess &&
+ lhs.nAttempts == rhs.nAttempts && lhs.nRefCount == rhs.nRefCount &&
+ lhs.fInTried == rhs.fInTried;
+ };
+
+ using Addresses = std::unordered_set<CAddrInfo, CAddrInfoHasher, CAddrInfoEq>;
+
+ const size_t num_addresses{mapInfo.size()};
+
+ Addresses addresses{num_addresses, addrinfo_hasher, addrinfo_eq};
+ for (const auto& [id, addr] : mapInfo) {
+ addresses.insert(addr);
+ }
+
+ Addresses other_addresses{num_addresses, addrinfo_hasher, addrinfo_eq};
+ for (const auto& [id, addr] : other.mapInfo) {
+ other_addresses.insert(addr);
+ }
+
+ if (addresses != other_addresses) {
+ return false;
+ }
+
+ auto IdsReferToSameAddress = [&](int id, int other_id) EXCLUSIVE_LOCKS_REQUIRED(cs, other.cs) {
+ if (id == -1 && other_id == -1) {
+ return true;
+ }
+ if ((id == -1 && other_id != -1) || (id != -1 && other_id == -1)) {
+ return false;
+ }
+ return mapInfo.at(id) == other.mapInfo.at(other_id);
+ };
+
+ // Check that `vvNew` contains the same addresses as `other.vvNew`. Note that `vvNew[i][j]`
+ // holds just an id; the address itself is found in `mapInfo.at(id)`. The ids
+ // themselves may differ between `vvNew` and `other.vvNew`.
+ for (size_t i = 0; i < ADDRMAN_NEW_BUCKET_COUNT; ++i) {
+ for (size_t j = 0; j < ADDRMAN_BUCKET_SIZE; ++j) {
+ if (!IdsReferToSameAddress(vvNew[i][j], other.vvNew[i][j])) {
+ return false;
+ }
+ }
+ }
+
+ // Same for `vvTried`.
+ for (size_t i = 0; i < ADDRMAN_TRIED_BUCKET_COUNT; ++i) {
+ for (size_t j = 0; j < ADDRMAN_BUCKET_SIZE; ++j) {
+ if (!IdsReferToSameAddress(vvTried[i][j], other.vvTried[i][j])) {
+ return false;
+ }
+ }
+ }
+
+ return true;
}
};
+[[nodiscard]] inline std::vector<bool> ConsumeAsmap(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ std::vector<bool> asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider);
+ if (!SanityCheckASMap(asmap)) asmap.clear();
+ return asmap;
+}
+
FUZZ_TARGET_INIT(addrman, initialize_addrman)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
SetMockTime(ConsumeTime(fuzzed_data_provider));
- CAddrManDeterministic addr_man;
- addr_man.MakeDeterministic(ConsumeUInt256(fuzzed_data_provider));
+ std::vector<bool> asmap = ConsumeAsmap(fuzzed_data_provider);
+ auto addr_man_ptr = std::make_unique<CAddrManDeterministic>(asmap, fuzzed_data_provider);
if (fuzzed_data_provider.ConsumeBool()) {
- addr_man.m_asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider);
- if (!SanityCheckASMap(addr_man.m_asmap)) {
- addr_man.m_asmap.clear();
+ const std::vector<uint8_t> serialized_data{ConsumeRandomLengthByteVector(fuzzed_data_provider)};
+ CDataStream ds(serialized_data, SER_DISK, INIT_PROTO_VERSION);
+ const auto ser_version{fuzzed_data_provider.ConsumeIntegral<int32_t>()};
+ ds.SetVersion(ser_version);
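+ // Fuzz CAddrMan::Unserialize with arbitrary bytes; if it throws, start over with a fresh addrman rather than keep a partially-deserialized one.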
+ try {
+ ds >> *addr_man_ptr;
+ } catch (const std::ios_base::failure&) {
+ addr_man_ptr = std::make_unique<CAddrManDeterministic>(asmap, fuzzed_data_provider);
}
}
+ CAddrManDeterministic& addr_man = *addr_man_ptr;
while (fuzzed_data_provider.ConsumeBool()) {
CallOneOf(
fuzzed_data_provider,
[&] {
- addr_man.Clear();
- },
- [&] {
addr_man.ResolveCollisions();
},
[&] {
(void)addr_man.SelectTriedCollision();
},
[&] {
- const std::optional<CAddress> opt_address = ConsumeDeserializable<CAddress>(fuzzed_data_provider);
- const std::optional<CNetAddr> opt_net_addr = ConsumeDeserializable<CNetAddr>(fuzzed_data_provider);
- if (opt_address && opt_net_addr) {
- addr_man.Add(*opt_address, *opt_net_addr, fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 100000000));
- }
- },
- [&] {
std::vector<CAddress> addresses;
while (fuzzed_data_provider.ConsumeBool()) {
const std::optional<CAddress> opt_address = ConsumeDeserializable<CAddress>(fuzzed_data_provider);
@@ -80,7 +269,7 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman)
[&] {
const std::optional<CService> opt_service = ConsumeDeserializable<CService>(fuzzed_data_provider);
if (opt_service) {
- addr_man.Good(*opt_service, fuzzed_data_provider.ConsumeBool(), ConsumeTime(fuzzed_data_provider));
+ addr_man.Good(*opt_service, ConsumeTime(fuzzed_data_provider));
}
},
[&] {
@@ -103,12 +292,30 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman)
});
}
const CAddrMan& const_addr_man{addr_man};
- (void)/*const_*/addr_man.GetAddr(
+ (void)const_addr_man.GetAddr(
/* max_addresses */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096),
/* max_pct */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096),
/* network */ std::nullopt);
- (void)/*const_*/addr_man.Select(fuzzed_data_provider.ConsumeBool());
+ (void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool());
(void)const_addr_man.size();
CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION);
data_stream << const_addr_man;
}
+
+// Check that serialize followed by unserialize produces the same addrman.
+FUZZ_TARGET_INIT(addrman_serdeser, initialize_addrman)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ SetMockTime(ConsumeTime(fuzzed_data_provider));
+
+ std::vector<bool> asmap = ConsumeAsmap(fuzzed_data_provider);
+ CAddrManDeterministic addr_man1{asmap, fuzzed_data_provider};
+ CAddrManDeterministic addr_man2{asmap, fuzzed_data_provider};
+
+ CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION);
+
+ addr_man1.Fill();
+ data_stream << addr_man1;
+ data_stream >> addr_man2;
+ assert(addr_man1 == addr_man2);
+}
diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp
index 182aabc79b..561cc83c72 100644
--- a/src/test/fuzz/banman.cpp
+++ b/src/test/fuzz/banman.cpp
@@ -41,10 +41,6 @@ static bool operator==(const CBanEntry& lhs, const CBanEntry& rhs)
FUZZ_TARGET_INIT(banman, initialize_banman)
{
- // The complexity is O(N^2), where N is the input size, because each call
- // might call DumpBanlist (or other methods that are at least linear
- // complexity of the input size).
- int limit_max_ops{300};
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
SetMockTime(ConsumeTime(fuzzed_data_provider));
fs::path banlist_file = gArgs.GetDataDirNet() / "fuzzed_banlist";
@@ -52,8 +48,7 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
const bool start_with_corrupted_banlist{fuzzed_data_provider.ConsumeBool()};
bool force_read_and_write_to_err{false};
if (start_with_corrupted_banlist) {
- const std::string sfx{fuzzed_data_provider.ConsumeBool() ? ".dat" : ".json"};
- assert(WriteBinaryFile(banlist_file.string() + sfx,
+ assert(WriteBinaryFile(banlist_file.string() + ".json",
fuzzed_data_provider.ConsumeRandomLengthString()));
} else {
force_read_and_write_to_err = fuzzed_data_provider.ConsumeBool();
@@ -64,7 +59,11 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
{
BanMan ban_man{banlist_file, /* client_interface */ nullptr, /* default_ban_time */ ConsumeBanTimeOffset(fuzzed_data_provider)};
- while (--limit_max_ops >= 0 && fuzzed_data_provider.ConsumeBool()) {
+ // The complexity is O(N^2), where N is the input size, because each call
+ // might call DumpBanlist (or other methods that are at least linear
+ // complexity of the input size).
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
+ {
CallOneOf(
fuzzed_data_provider,
[&] {
@@ -112,6 +111,5 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
assert(banmap == banmap_read);
}
}
- fs::remove(banlist_file.string() + ".dat");
fs::remove(banlist_file.string() + ".json");
}
diff --git a/src/test/fuzz/blockfilter.cpp b/src/test/fuzz/blockfilter.cpp
index 7fa06085f8..96f049625d 100644
--- a/src/test/fuzz/blockfilter.cpp
+++ b/src/test/fuzz/blockfilter.cpp
@@ -36,9 +36,10 @@ FUZZ_TARGET(blockfilter)
(void)gcs_filter.GetEncoded();
(void)gcs_filter.Match(ConsumeRandomLengthByteVector(fuzzed_data_provider));
GCSFilter::ElementSet element_set;
- while (fuzzed_data_provider.ConsumeBool()) {
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 30000)
+ {
element_set.insert(ConsumeRandomLengthByteVector(fuzzed_data_provider));
- gcs_filter.MatchAny(element_set);
}
+ gcs_filter.MatchAny(element_set);
}
}
diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp
index f452696689..bbdb2c6917 100644
--- a/src/test/fuzz/coins_view.cpp
+++ b/src/test/fuzz/coins_view.cpp
@@ -258,7 +258,7 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view)
// consensus/tx_verify.cpp:130: unsigned int GetP2SHSigOpCount(const CTransaction &, const CCoinsViewCache &): Assertion `!coin.IsSpent()' failed.
return;
}
- const int flags = fuzzed_data_provider.ConsumeIntegral<int>();
+ const auto flags{fuzzed_data_provider.ConsumeIntegral<uint32_t>()};
if (!transaction.vin.empty() && (flags & SCRIPT_VERIFY_WITNESS) != 0 && (flags & SCRIPT_VERIFY_P2SH) == 0) {
// Avoid:
// script/interpreter.cpp:1705: size_t CountWitnessSigOps(const CScript &, const CScript &, const CScriptWitness *, unsigned int): Assertion `(flags & SCRIPT_VERIFY_P2SH) != 0' failed.
diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp
index bbec5943af..01741103e4 100644
--- a/src/test/fuzz/connman.cpp
+++ b/src/test/fuzz/connman.cpp
@@ -25,7 +25,7 @@ FUZZ_TARGET_INIT(connman, initialize_connman)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
SetMockTime(ConsumeTime(fuzzed_data_provider));
- CAddrMan addrman;
+ CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
CConnman connman{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>(), addrman, fuzzed_data_provider.ConsumeBool()};
CNetAddr random_netaddr;
CNode random_node = ConsumeNode(fuzzed_data_provider);
@@ -104,12 +104,6 @@ FUZZ_TARGET_INIT(connman, initialize_connman)
connman.RemoveAddedNode(random_string);
},
[&] {
- const std::vector<bool> asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider);
- if (SanityCheckASMap(asmap)) {
- connman.SetAsmap(asmap);
- }
- },
- [&] {
connman.SetNetworkActive(fuzzed_data_provider.ConsumeBool());
},
[&] {
diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp
index f83747e424..84b95117e2 100644
--- a/src/test/fuzz/crypto.cpp
+++ b/src/test/fuzz/crypto.cpp
@@ -19,10 +19,6 @@
FUZZ_TARGET(crypto)
{
- // Hashing is expensive with sanitizers enabled, so limit the number of
- // calls
- int limit_max_ops{30};
-
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
std::vector<uint8_t> data = ConsumeRandomLengthByteVector(fuzzed_data_provider);
if (data.empty()) {
@@ -40,7 +36,8 @@ FUZZ_TARGET(crypto)
SHA3_256 sha3;
CSipHasher sip_hasher{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>()};
- while (--limit_max_ops >= 0 && fuzzed_data_provider.ConsumeBool()) {
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 30)
+ {
CallOneOf(
fuzzed_data_provider,
[&] {
diff --git a/src/test/fuzz/data_stream.cpp b/src/test/fuzz/data_stream.cpp
index 473caec6ff..8178878c30 100644
--- a/src/test/fuzz/data_stream.cpp
+++ b/src/test/fuzz/data_stream.cpp
@@ -21,6 +21,6 @@ FUZZ_TARGET_INIT(data_stream_addr_man, initialize_data_stream_addr_man)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
CDataStream data_stream = ConsumeDataStream(fuzzed_data_provider);
- CAddrMan addr_man;
+ CAddrMan addr_man(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
CAddrDB::Read(addr_man, data_stream);
}
diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp
index 721e4360d0..83ae1680e3 100644
--- a/src/test/fuzz/deserialize.cpp
+++ b/src/test/fuzz/deserialize.cpp
@@ -100,228 +100,213 @@ void AssertEqualAfterSerializeDeserialize(const T& obj, const int version = INIT
} // namespace
FUZZ_TARGET_DESERIALIZE(block_filter_deserialize, {
- BlockFilter block_filter;
- DeserializeFromFuzzingInput(buffer, block_filter);
+ BlockFilter block_filter;
+ DeserializeFromFuzzingInput(buffer, block_filter);
})
FUZZ_TARGET_DESERIALIZE(addr_info_deserialize, {
- CAddrInfo addr_info;
- DeserializeFromFuzzingInput(buffer, addr_info);
+ CAddrInfo addr_info;
+ DeserializeFromFuzzingInput(buffer, addr_info);
})
FUZZ_TARGET_DESERIALIZE(block_file_info_deserialize, {
- CBlockFileInfo block_file_info;
- DeserializeFromFuzzingInput(buffer, block_file_info);
+ CBlockFileInfo block_file_info;
+ DeserializeFromFuzzingInput(buffer, block_file_info);
})
FUZZ_TARGET_DESERIALIZE(block_header_and_short_txids_deserialize, {
- CBlockHeaderAndShortTxIDs block_header_and_short_txids;
- DeserializeFromFuzzingInput(buffer, block_header_and_short_txids);
+ CBlockHeaderAndShortTxIDs block_header_and_short_txids;
+ DeserializeFromFuzzingInput(buffer, block_header_and_short_txids);
})
FUZZ_TARGET_DESERIALIZE(fee_rate_deserialize, {
- CFeeRate fee_rate;
- DeserializeFromFuzzingInput(buffer, fee_rate);
- AssertEqualAfterSerializeDeserialize(fee_rate);
+ CFeeRate fee_rate;
+ DeserializeFromFuzzingInput(buffer, fee_rate);
+ AssertEqualAfterSerializeDeserialize(fee_rate);
})
FUZZ_TARGET_DESERIALIZE(merkle_block_deserialize, {
- CMerkleBlock merkle_block;
- DeserializeFromFuzzingInput(buffer, merkle_block);
+ CMerkleBlock merkle_block;
+ DeserializeFromFuzzingInput(buffer, merkle_block);
})
FUZZ_TARGET_DESERIALIZE(out_point_deserialize, {
- COutPoint out_point;
- DeserializeFromFuzzingInput(buffer, out_point);
- AssertEqualAfterSerializeDeserialize(out_point);
+ COutPoint out_point;
+ DeserializeFromFuzzingInput(buffer, out_point);
+ AssertEqualAfterSerializeDeserialize(out_point);
})
FUZZ_TARGET_DESERIALIZE(partial_merkle_tree_deserialize, {
- CPartialMerkleTree partial_merkle_tree;
- DeserializeFromFuzzingInput(buffer, partial_merkle_tree);
+ CPartialMerkleTree partial_merkle_tree;
+ DeserializeFromFuzzingInput(buffer, partial_merkle_tree);
})
FUZZ_TARGET_DESERIALIZE(pub_key_deserialize, {
- CPubKey pub_key;
- DeserializeFromFuzzingInput(buffer, pub_key);
- AssertEqualAfterSerializeDeserialize(pub_key);
+ CPubKey pub_key;
+ DeserializeFromFuzzingInput(buffer, pub_key);
+ AssertEqualAfterSerializeDeserialize(pub_key);
})
FUZZ_TARGET_DESERIALIZE(script_deserialize, {
- CScript script;
- DeserializeFromFuzzingInput(buffer, script);
-})
-FUZZ_TARGET_DESERIALIZE(sub_net_deserialize, {
- CSubNet sub_net_1;
- DeserializeFromFuzzingInput(buffer, sub_net_1, INIT_PROTO_VERSION);
- AssertEqualAfterSerializeDeserialize(sub_net_1, INIT_PROTO_VERSION);
- CSubNet sub_net_2;
- DeserializeFromFuzzingInput(buffer, sub_net_2, INIT_PROTO_VERSION | ADDRV2_FORMAT);
- AssertEqualAfterSerializeDeserialize(sub_net_2, INIT_PROTO_VERSION | ADDRV2_FORMAT);
- CSubNet sub_net_3;
- DeserializeFromFuzzingInput(buffer, sub_net_3);
- AssertEqualAfterSerializeDeserialize(sub_net_3, INIT_PROTO_VERSION | ADDRV2_FORMAT);
+ CScript script;
+ DeserializeFromFuzzingInput(buffer, script);
})
FUZZ_TARGET_DESERIALIZE(tx_in_deserialize, {
- CTxIn tx_in;
- DeserializeFromFuzzingInput(buffer, tx_in);
- AssertEqualAfterSerializeDeserialize(tx_in);
+ CTxIn tx_in;
+ DeserializeFromFuzzingInput(buffer, tx_in);
+ AssertEqualAfterSerializeDeserialize(tx_in);
})
FUZZ_TARGET_DESERIALIZE(flat_file_pos_deserialize, {
- FlatFilePos flat_file_pos;
- DeserializeFromFuzzingInput(buffer, flat_file_pos);
- AssertEqualAfterSerializeDeserialize(flat_file_pos);
+ FlatFilePos flat_file_pos;
+ DeserializeFromFuzzingInput(buffer, flat_file_pos);
+ AssertEqualAfterSerializeDeserialize(flat_file_pos);
})
FUZZ_TARGET_DESERIALIZE(key_origin_info_deserialize, {
- KeyOriginInfo key_origin_info;
- DeserializeFromFuzzingInput(buffer, key_origin_info);
- AssertEqualAfterSerializeDeserialize(key_origin_info);
+ KeyOriginInfo key_origin_info;
+ DeserializeFromFuzzingInput(buffer, key_origin_info);
+ AssertEqualAfterSerializeDeserialize(key_origin_info);
})
FUZZ_TARGET_DESERIALIZE(partially_signed_transaction_deserialize, {
- PartiallySignedTransaction partially_signed_transaction;
- DeserializeFromFuzzingInput(buffer, partially_signed_transaction);
+ PartiallySignedTransaction partially_signed_transaction;
+ DeserializeFromFuzzingInput(buffer, partially_signed_transaction);
})
FUZZ_TARGET_DESERIALIZE(prefilled_transaction_deserialize, {
- PrefilledTransaction prefilled_transaction;
- DeserializeFromFuzzingInput(buffer, prefilled_transaction);
+ PrefilledTransaction prefilled_transaction;
+ DeserializeFromFuzzingInput(buffer, prefilled_transaction);
})
FUZZ_TARGET_DESERIALIZE(psbt_input_deserialize, {
- PSBTInput psbt_input;
- DeserializeFromFuzzingInput(buffer, psbt_input);
+ PSBTInput psbt_input;
+ DeserializeFromFuzzingInput(buffer, psbt_input);
})
FUZZ_TARGET_DESERIALIZE(psbt_output_deserialize, {
- PSBTOutput psbt_output;
- DeserializeFromFuzzingInput(buffer, psbt_output);
+ PSBTOutput psbt_output;
+ DeserializeFromFuzzingInput(buffer, psbt_output);
})
FUZZ_TARGET_DESERIALIZE(block_deserialize, {
- CBlock block;
- DeserializeFromFuzzingInput(buffer, block);
+ CBlock block;
+ DeserializeFromFuzzingInput(buffer, block);
})
FUZZ_TARGET_DESERIALIZE(blocklocator_deserialize, {
- CBlockLocator bl;
- DeserializeFromFuzzingInput(buffer, bl);
+ CBlockLocator bl;
+ DeserializeFromFuzzingInput(buffer, bl);
})
FUZZ_TARGET_DESERIALIZE(blockmerkleroot, {
- CBlock block;
- DeserializeFromFuzzingInput(buffer, block);
- bool mutated;
- BlockMerkleRoot(block, &mutated);
+ CBlock block;
+ DeserializeFromFuzzingInput(buffer, block);
+ bool mutated;
+ BlockMerkleRoot(block, &mutated);
})
FUZZ_TARGET_DESERIALIZE(addrman_deserialize, {
- CAddrMan am;
- DeserializeFromFuzzingInput(buffer, am);
+ CAddrMan am(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ DeserializeFromFuzzingInput(buffer, am);
})
FUZZ_TARGET_DESERIALIZE(blockheader_deserialize, {
- CBlockHeader bh;
- DeserializeFromFuzzingInput(buffer, bh);
-})
-FUZZ_TARGET_DESERIALIZE(banentry_deserialize, {
- CBanEntry be;
- DeserializeFromFuzzingInput(buffer, be);
+ CBlockHeader bh;
+ DeserializeFromFuzzingInput(buffer, bh);
})
FUZZ_TARGET_DESERIALIZE(txundo_deserialize, {
- CTxUndo tu;
- DeserializeFromFuzzingInput(buffer, tu);
+ CTxUndo tu;
+ DeserializeFromFuzzingInput(buffer, tu);
})
FUZZ_TARGET_DESERIALIZE(blockundo_deserialize, {
- CBlockUndo bu;
- DeserializeFromFuzzingInput(buffer, bu);
+ CBlockUndo bu;
+ DeserializeFromFuzzingInput(buffer, bu);
})
FUZZ_TARGET_DESERIALIZE(coins_deserialize, {
- Coin coin;
- DeserializeFromFuzzingInput(buffer, coin);
+ Coin coin;
+ DeserializeFromFuzzingInput(buffer, coin);
})
FUZZ_TARGET_DESERIALIZE(netaddr_deserialize, {
- CNetAddr na;
- DeserializeFromFuzzingInput(buffer, na);
- if (na.IsAddrV1Compatible()) {
- AssertEqualAfterSerializeDeserialize(na);
- }
- AssertEqualAfterSerializeDeserialize(na, INIT_PROTO_VERSION | ADDRV2_FORMAT);
+ CNetAddr na;
+ DeserializeFromFuzzingInput(buffer, na);
+ if (na.IsAddrV1Compatible()) {
+ AssertEqualAfterSerializeDeserialize(na);
+ }
+ AssertEqualAfterSerializeDeserialize(na, INIT_PROTO_VERSION | ADDRV2_FORMAT);
})
FUZZ_TARGET_DESERIALIZE(service_deserialize, {
- CService s;
- DeserializeFromFuzzingInput(buffer, s);
- if (s.IsAddrV1Compatible()) {
- AssertEqualAfterSerializeDeserialize(s);
- }
- AssertEqualAfterSerializeDeserialize(s, INIT_PROTO_VERSION | ADDRV2_FORMAT);
- CService s1;
- DeserializeFromFuzzingInput(buffer, s1, INIT_PROTO_VERSION);
- AssertEqualAfterSerializeDeserialize(s1, INIT_PROTO_VERSION);
- assert(s1.IsAddrV1Compatible());
- CService s2;
- DeserializeFromFuzzingInput(buffer, s2, INIT_PROTO_VERSION | ADDRV2_FORMAT);
- AssertEqualAfterSerializeDeserialize(s2, INIT_PROTO_VERSION | ADDRV2_FORMAT);
+ CService s;
+ DeserializeFromFuzzingInput(buffer, s);
+ if (s.IsAddrV1Compatible()) {
+ AssertEqualAfterSerializeDeserialize(s);
+ }
+ AssertEqualAfterSerializeDeserialize(s, INIT_PROTO_VERSION | ADDRV2_FORMAT);
+ CService s1;
+ DeserializeFromFuzzingInput(buffer, s1, INIT_PROTO_VERSION);
+ AssertEqualAfterSerializeDeserialize(s1, INIT_PROTO_VERSION);
+ assert(s1.IsAddrV1Compatible());
+ CService s2;
+ DeserializeFromFuzzingInput(buffer, s2, INIT_PROTO_VERSION | ADDRV2_FORMAT);
+ AssertEqualAfterSerializeDeserialize(s2, INIT_PROTO_VERSION | ADDRV2_FORMAT);
})
FUZZ_TARGET_DESERIALIZE(messageheader_deserialize, {
- CMessageHeader mh;
- DeserializeFromFuzzingInput(buffer, mh);
- (void)mh.IsCommandValid();
+ CMessageHeader mh;
+ DeserializeFromFuzzingInput(buffer, mh);
+ (void)mh.IsCommandValid();
})
FUZZ_TARGET_DESERIALIZE(address_deserialize_v1_notime, {
- CAddress a;
- DeserializeFromFuzzingInput(buffer, a, INIT_PROTO_VERSION);
- // A CAddress without nTime (as is expected under INIT_PROTO_VERSION) will roundtrip
- // in all 5 formats (with/without nTime, v1/v2, network/disk)
- AssertEqualAfterSerializeDeserialize(a, INIT_PROTO_VERSION);
- AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION);
- AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK);
- AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT);
- AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK);
+ CAddress a;
+ DeserializeFromFuzzingInput(buffer, a, INIT_PROTO_VERSION);
+ // A CAddress without nTime (as is expected under INIT_PROTO_VERSION) will roundtrip
+ // in all 5 formats (with/without nTime, v1/v2, network/disk)
+ AssertEqualAfterSerializeDeserialize(a, INIT_PROTO_VERSION);
+ AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION);
+ AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK);
+ AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT);
+ AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK);
})
FUZZ_TARGET_DESERIALIZE(address_deserialize_v1_withtime, {
- CAddress a;
- DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION);
- // A CAddress in V1 mode will roundtrip in all 4 formats that have nTime.
- AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION);
- AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK);
- AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT);
- AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK);
+ CAddress a;
+ DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION);
+ // A CAddress in V1 mode will roundtrip in all 4 formats that have nTime.
+ AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION);
+ AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK);
+ AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT);
+ AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK);
})
FUZZ_TARGET_DESERIALIZE(address_deserialize_v2, {
- CAddress a;
- DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION | ADDRV2_FORMAT);
- // A CAddress in V2 mode will roundtrip in both V2 formats, and also in the V1 formats
- // with time if it's V1 compatible.
- if (a.IsAddrV1Compatible()) {
- AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION);
- AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK);
- }
- AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT);
- AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK);
+ CAddress a;
+ DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION | ADDRV2_FORMAT);
+ // A CAddress in V2 mode will roundtrip in both V2 formats, and also in the V1 formats
+ // with time if it's V1 compatible.
+ if (a.IsAddrV1Compatible()) {
+ AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION);
+ AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK);
+ }
+ AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT);
+ AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK);
})
FUZZ_TARGET_DESERIALIZE(inv_deserialize, {
- CInv i;
- DeserializeFromFuzzingInput(buffer, i);
+ CInv i;
+ DeserializeFromFuzzingInput(buffer, i);
})
FUZZ_TARGET_DESERIALIZE(bloomfilter_deserialize, {
- CBloomFilter bf;
- DeserializeFromFuzzingInput(buffer, bf);
+ CBloomFilter bf;
+ DeserializeFromFuzzingInput(buffer, bf);
})
FUZZ_TARGET_DESERIALIZE(diskblockindex_deserialize, {
- CDiskBlockIndex dbi;
- DeserializeFromFuzzingInput(buffer, dbi);
+ CDiskBlockIndex dbi;
+ DeserializeFromFuzzingInput(buffer, dbi);
})
FUZZ_TARGET_DESERIALIZE(txoutcompressor_deserialize, {
- CTxOut to;
- auto toc = Using<TxOutCompression>(to);
- DeserializeFromFuzzingInput(buffer, toc);
+ CTxOut to;
+ auto toc = Using<TxOutCompression>(to);
+ DeserializeFromFuzzingInput(buffer, toc);
})
FUZZ_TARGET_DESERIALIZE(blocktransactions_deserialize, {
- BlockTransactions bt;
- DeserializeFromFuzzingInput(buffer, bt);
+ BlockTransactions bt;
+ DeserializeFromFuzzingInput(buffer, bt);
})
FUZZ_TARGET_DESERIALIZE(blocktransactionsrequest_deserialize, {
- BlockTransactionsRequest btr;
- DeserializeFromFuzzingInput(buffer, btr);
+ BlockTransactionsRequest btr;
+ DeserializeFromFuzzingInput(buffer, btr);
})
FUZZ_TARGET_DESERIALIZE(snapshotmetadata_deserialize, {
- SnapshotMetadata snapshot_metadata;
- DeserializeFromFuzzingInput(buffer, snapshot_metadata);
+ SnapshotMetadata snapshot_metadata;
+ DeserializeFromFuzzingInput(buffer, snapshot_metadata);
})
FUZZ_TARGET_DESERIALIZE(uint160_deserialize, {
- uint160 u160;
- DeserializeFromFuzzingInput(buffer, u160);
- AssertEqualAfterSerializeDeserialize(u160);
+ uint160 u160;
+ DeserializeFromFuzzingInput(buffer, u160);
+ AssertEqualAfterSerializeDeserialize(u160);
})
FUZZ_TARGET_DESERIALIZE(uint256_deserialize, {
- uint256 u256;
- DeserializeFromFuzzingInput(buffer, u256);
- AssertEqualAfterSerializeDeserialize(u256);
-})
- // Classes intentionally not covered in this file since their deserialization code is
- // fuzzed elsewhere:
- // * Deserialization of CTxOut is fuzzed in test/fuzz/tx_out.cpp
- // * Deserialization of CMutableTransaction is fuzzed in src/test/fuzz/transaction.cpp
+ uint256 u256;
+ DeserializeFromFuzzingInput(buffer, u256);
+ AssertEqualAfterSerializeDeserialize(u256);
+})
+// Classes intentionally not covered in this file since their deserialization code is
+// fuzzed elsewhere:
+// * Deserialization of CTxOut is fuzzed in test/fuzz/tx_out.cpp
+// * Deserialization of CMutableTransaction is fuzzed in src/test/fuzz/transaction.cpp
diff --git a/src/test/fuzz/fuzz.h b/src/test/fuzz/fuzz.h
index 2bad77bdc1..c91c33da67 100644
--- a/src/test/fuzz/fuzz.h
+++ b/src/test/fuzz/fuzz.h
@@ -11,6 +11,13 @@
#include <functional>
#include <string_view>
+/**
+ * Can be used to limit a theoretically unbounded loop. This caps the runtime
+ * to avoid timeouts or OOMs.
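+ * Example: LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) { ... } executes the loop body at most 10000 times.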
+ */
+#define LIMITED_WHILE(condition, limit) \
+ for (unsigned _count{limit}; (condition) && _count; --_count)
+
using FuzzBufferType = Span<const uint8_t>;
using TypeTestOneInput = std::function<void(FuzzBufferType)>;
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index e9fa343896..5d26529837 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -16,8 +16,6 @@
#include <pow.h>
#include <protocol.h>
#include <pubkey.h>
-#include <rpc/util.h>
-#include <script/signingprovider.h>
#include <script/standard.h>
#include <serialize.h>
#include <streams.h>
@@ -85,9 +83,8 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
(void)FormatISO8601Date(i64);
(void)FormatISO8601DateTime(i64);
{
- int64_t parsed_money;
- if (ParseMoney(FormatMoney(i64), parsed_money)) {
- assert(parsed_money == i64);
+ if (std::optional<CAmount> parsed = ParseMoney(FormatMoney(i64))) {
+ assert(parsed.value() == i64);
}
}
(void)GetSizeOfCompactSize(u64);
@@ -128,9 +125,8 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
(void)ToLower(ch);
(void)ToUpper(ch);
{
- int64_t parsed_money;
- if (ParseMoney(ValueFromAmount(i64).getValStr(), parsed_money)) {
- assert(parsed_money == i64);
+ if (std::optional<CAmount> parsed = ParseMoney(ValueFromAmount(i64).getValStr())) {
+ assert(parsed.value() == i64);
}
}
if (i32 >= 0 && i32 <= 16) {
@@ -158,20 +154,6 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
const CKeyID key_id{u160};
const CScriptID script_id{u160};
- // CTxDestination = CNoDestination ∪ PKHash ∪ ScriptHash ∪ WitnessV0ScriptHash ∪ WitnessV0KeyHash ∪ WitnessUnknown
- const PKHash pk_hash{u160};
- const ScriptHash script_hash{u160};
- const WitnessV0KeyHash witness_v0_key_hash{u160};
- const WitnessV0ScriptHash witness_v0_script_hash{u256};
- const std::vector<CTxDestination> destinations{pk_hash, script_hash, witness_v0_key_hash, witness_v0_script_hash};
- const SigningProvider store;
- for (const CTxDestination& destination : destinations) {
- (void)DescribeAddress(destination);
- (void)EncodeDestination(destination);
- (void)GetKeyForDestination(store, destination);
- (void)GetScriptForDestination(destination);
- (void)IsValidDestination(destination);
- }
{
CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
diff --git a/src/test/fuzz/key_io.cpp b/src/test/fuzz/key_io.cpp
index 665ca01fa1..f58bf8b316 100644
--- a/src/test/fuzz/key_io.cpp
+++ b/src/test/fuzz/key_io.cpp
@@ -4,9 +4,6 @@
#include <chainparams.h>
#include <key_io.h>
-#include <rpc/util.h>
-#include <script/signingprovider.h>
-#include <script/standard.h>
#include <test/fuzz/fuzz.h>
#include <cassert>
@@ -39,12 +36,4 @@ FUZZ_TARGET_INIT(key_io, initialize_key_io)
if (ext_pub_key.pubkey.size() == CPubKey::COMPRESSED_SIZE) {
assert(ext_pub_key == DecodeExtPubKey(EncodeExtPubKey(ext_pub_key)));
}
-
- const CTxDestination tx_destination = DecodeDestination(random_string);
- (void)DescribeAddress(tx_destination);
- (void)GetKeyForDestination(/* store */ {}, tx_destination);
- (void)GetScriptForDestination(tx_destination);
- (void)IsValidDestination(tx_destination);
-
- (void)IsValidDestinationString(random_string);
}
diff --git a/src/test/fuzz/kitchen_sink.cpp b/src/test/fuzz/kitchen_sink.cpp
index 908e9a1c83..82f3a306c5 100644
--- a/src/test/fuzz/kitchen_sink.cpp
+++ b/src/test/fuzz/kitchen_sink.cpp
@@ -13,6 +13,7 @@
#include <array>
#include <cstdint>
+#include <optional>
#include <vector>
namespace {
@@ -46,11 +47,10 @@ FUZZ_TARGET(kitchen_sink)
const OutputType output_type = fuzzed_data_provider.PickValueInArray(OUTPUT_TYPES);
const std::string& output_type_string = FormatOutputType(output_type);
- OutputType output_type_parsed;
- const bool parsed = ParseOutputType(output_type_string, output_type_parsed);
+ const std::optional<OutputType> parsed = ParseOutputType(output_type_string);
assert(parsed);
- assert(output_type == output_type_parsed);
- (void)ParseOutputType(fuzzed_data_provider.ConsumeRandomLengthString(64), output_type_parsed);
+ assert(output_type == parsed.value());
+ (void)ParseOutputType(fuzzed_data_provider.ConsumeRandomLengthString(64));
const std::vector<uint8_t> bytes = ConsumeRandomLengthByteVector(fuzzed_data_provider);
const std::vector<bool> bits = BytesToBits(bytes);
diff --git a/src/test/fuzz/multiplication_overflow.cpp b/src/test/fuzz/multiplication_overflow.cpp
index 0f054529a6..c7251650c2 100644
--- a/src/test/fuzz/multiplication_overflow.cpp
+++ b/src/test/fuzz/multiplication_overflow.cpp
@@ -2,6 +2,10 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#if defined(HAVE_CONFIG_H)
+#include <config/bitcoin-config.h>
+#endif
+
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -10,14 +14,6 @@
#include <string>
#include <vector>
-#if defined(__has_builtin)
-#if __has_builtin(__builtin_mul_overflow)
-#define HAVE_BUILTIN_MUL_OVERFLOW
-#endif
-#elif defined(__GNUC__)
-#define HAVE_BUILTIN_MUL_OVERFLOW
-#endif
-
namespace {
template <typename T>
void TestMultiplicationOverflow(FuzzedDataProvider& fuzzed_data_provider)
diff --git a/src/test/fuzz/net.cpp b/src/test/fuzz/net.cpp
index 20d8581312..ff0259c182 100644
--- a/src/test/fuzz/net.cpp
+++ b/src/test/fuzz/net.cpp
@@ -38,15 +38,12 @@ FUZZ_TARGET_INIT(net, initialize_net)
node.CloseSocketDisconnect();
},
[&] {
- node.MaybeSetAddrName(fuzzed_data_provider.ConsumeRandomLengthString(32));
- },
- [&] {
const std::vector<bool> asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider);
if (!SanityCheckASMap(asmap)) {
return;
}
CNodeStats stats;
- node.copyStats(stats, asmap);
+ node.CopyStats(stats, asmap);
},
[&] {
const CNode* add_ref_node = node.AddRef();
@@ -82,7 +79,6 @@ FUZZ_TARGET_INIT(net, initialize_net)
}
(void)node.GetAddrLocal();
- (void)node.GetAddrName();
(void)node.GetId();
(void)node.GetLocalNonce();
(void)node.GetLocalServices();
diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp
index f9d8129ca9..6cb81901cb 100644
--- a/src/test/fuzz/netaddress.cpp
+++ b/src/test/fuzz/netaddress.cpp
@@ -54,7 +54,7 @@ FUZZ_TARGET(netaddress)
(void)net_addr.IsRFC3927();
(void)net_addr.IsRFC3964();
if (net_addr.IsRFC4193()) {
- assert(net_addr.GetNetwork() == Network::NET_ONION || net_addr.GetNetwork() == Network::NET_INTERNAL || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
+ assert(net_addr.GetNetwork() == Network::NET_INTERNAL || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
}
(void)net_addr.IsRFC4380();
(void)net_addr.IsRFC4843();
diff --git a/src/test/fuzz/parse_numbers.cpp b/src/test/fuzz/parse_numbers.cpp
index 2c546e9b4a..69e58c3f63 100644
--- a/src/test/fuzz/parse_numbers.cpp
+++ b/src/test/fuzz/parse_numbers.cpp
@@ -12,8 +12,7 @@ FUZZ_TARGET(parse_numbers)
{
const std::string random_string(buffer.begin(), buffer.end());
- CAmount amount;
- (void)ParseMoney(random_string, amount);
+ (void)ParseMoney(random_string);
double d;
(void)ParseDouble(random_string, &d);
diff --git a/src/test/fuzz/prevector.cpp b/src/test/fuzz/prevector.cpp
index 51956bbe9e..d4b3ed501f 100644
--- a/src/test/fuzz/prevector.cpp
+++ b/src/test/fuzz/prevector.cpp
@@ -209,7 +209,8 @@ FUZZ_TARGET(prevector)
FuzzedDataProvider prov(buffer.data(), buffer.size());
prevector_tester<8, int> test;
- while (prov.remaining_bytes()) {
+ LIMITED_WHILE(prov.remaining_bytes(), 3000)
+ {
switch (prov.ConsumeIntegralInRange<int>(0, 13 + 3 * (test.size() > 0))) {
case 0:
test.insert(prov.ConsumeIntegralInRange<size_t>(0, test.size()), prov.ConsumeIntegral<int>());
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
index c4e4d4c785..7b99193ad0 100644
--- a/src/test/fuzz/process_message.cpp
+++ b/src/test/fuzz/process_message.cpp
@@ -58,19 +58,7 @@ void initialize_process_message()
static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>();
g_setup = testing_setup.get();
-
- // Temporary debug for https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=35027
- {
- LOCK(::cs_main);
- assert(CheckDiskSpace(gArgs.GetDataDirNet()));
- assert(CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * g_setup->m_node.chainman->ActiveChainstate().CoinsTip().GetCacheSize()));
- }
for (int i = 0; i < 2 * COINBASE_MATURITY; i++) {
- {
- LOCK(::cs_main);
- assert(CheckDiskSpace(gArgs.GetDataDirNet()));
- assert(CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * g_setup->m_node.chainman->ActiveChainstate().CoinsTip().GetCacheSize()));
- }
MineBlock(g_setup->m_node, CScript() << OP_TRUE);
}
SyncWithValidationInterfaceQueue();
diff --git a/src/test/fuzz/rolling_bloom_filter.cpp b/src/test/fuzz/rolling_bloom_filter.cpp
index 07059cce76..b9ed497e68 100644
--- a/src/test/fuzz/rolling_bloom_filter.cpp
+++ b/src/test/fuzz/rolling_bloom_filter.cpp
@@ -21,7 +21,8 @@ FUZZ_TARGET(rolling_bloom_filter)
CRollingBloomFilter rolling_bloom_filter{
fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, 1000),
0.999 / fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, std::numeric_limits<unsigned int>::max())};
- while (fuzzed_data_provider.remaining_bytes() > 0) {
+ LIMITED_WHILE(fuzzed_data_provider.remaining_bytes() > 0, 3000)
+ {
CallOneOf(
fuzzed_data_provider,
[&] {
@@ -32,13 +33,10 @@ FUZZ_TARGET(rolling_bloom_filter)
assert(present);
},
[&] {
- const std::optional<uint256> u256 = ConsumeDeserializable<uint256>(fuzzed_data_provider);
- if (!u256) {
- return;
- }
- (void)rolling_bloom_filter.contains(*u256);
- rolling_bloom_filter.insert(*u256);
- const bool present = rolling_bloom_filter.contains(*u256);
+ const uint256 u256{ConsumeUInt256(fuzzed_data_provider)};
+ (void)rolling_bloom_filter.contains(u256);
+ rolling_bloom_filter.insert(u256);
+ const bool present = rolling_bloom_filter.contains(u256);
assert(present);
},
[&] {
diff --git a/src/test/fuzz/script.cpp b/src/test/fuzz/script.cpp
index b87bcf2ef5..950ee45d1d 100644
--- a/src/test/fuzz/script.cpp
+++ b/src/test/fuzz/script.cpp
@@ -6,8 +6,10 @@
#include <compressor.h>
#include <core_io.h>
#include <core_memusage.h>
+#include <key_io.h>
#include <policy/policy.h>
#include <pubkey.h>
+#include <rpc/util.h>
#include <script/descriptor.h>
#include <script/interpreter.h>
#include <script/script.h>
@@ -184,26 +186,26 @@ FUZZ_TARGET_INIT(script, initialize_script)
}
{
- WitnessUnknown witness_unknown_1{};
- witness_unknown_1.version = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
- const std::vector<uint8_t> witness_unknown_program_1 = fuzzed_data_provider.ConsumeBytes<uint8_t>(40);
- witness_unknown_1.length = witness_unknown_program_1.size();
- std::copy(witness_unknown_program_1.begin(), witness_unknown_program_1.end(), witness_unknown_1.program);
-
- WitnessUnknown witness_unknown_2{};
- witness_unknown_2.version = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
- const std::vector<uint8_t> witness_unknown_program_2 = fuzzed_data_provider.ConsumeBytes<uint8_t>(40);
- witness_unknown_2.length = witness_unknown_program_2.size();
- std::copy(witness_unknown_program_2.begin(), witness_unknown_program_2.end(), witness_unknown_2.program);
-
- (void)(witness_unknown_1 == witness_unknown_2);
- (void)(witness_unknown_1 < witness_unknown_2);
- }
+ const CTxDestination tx_destination_1{
+ fuzzed_data_provider.ConsumeBool() ?
+ DecodeDestination(fuzzed_data_provider.ConsumeRandomLengthString()) :
+ ConsumeTxDestination(fuzzed_data_provider)};
+ const CTxDestination tx_destination_2{ConsumeTxDestination(fuzzed_data_provider)};
+ const std::string encoded_dest{EncodeDestination(tx_destination_1)};
+ const UniValue json_dest{DescribeAddress(tx_destination_1)};
+ Assert(tx_destination_1 == DecodeDestination(encoded_dest));
+ (void)GetKeyForDestination(/* store */ {}, tx_destination_1);
+ const CScript dest{GetScriptForDestination(tx_destination_1)};
+ const bool valid{IsValidDestination(tx_destination_1)};
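+ // GetScriptForDestination() returns a non-empty script exactly for valid destinations.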
+ Assert(dest.empty() != valid);
+
+ Assert(valid == IsValidDestinationString(encoded_dest));
- {
- const CTxDestination tx_destination_1 = ConsumeTxDestination(fuzzed_data_provider);
- const CTxDestination tx_destination_2 = ConsumeTxDestination(fuzzed_data_provider);
- (void)(tx_destination_1 == tx_destination_2);
(void)(tx_destination_1 < tx_destination_2);
+ if (tx_destination_1 == tx_destination_2) {
+ Assert(encoded_dest == EncodeDestination(tx_destination_2));
+ Assert(json_dest.write() == DescribeAddress(tx_destination_2).write());
+ Assert(dest == GetScriptForDestination(tx_destination_2));
+ }
}
}
diff --git a/src/test/fuzz/script_sign.cpp b/src/test/fuzz/script_sign.cpp
index fe850a6959..684324c36e 100644
--- a/src/test/fuzz/script_sign.cpp
+++ b/src/test/fuzz/script_sign.cpp
@@ -13,6 +13,7 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <util/translation.h>
#include <cassert>
#include <cstdint>
@@ -135,7 +136,7 @@ FUZZ_TARGET_INIT(script_sign, initialize_script_sign)
}
coins[*outpoint] = *coin;
}
- std::map<int, std::string> input_errors;
+ std::map<int, bilingual_str> input_errors;
(void)SignTransaction(sign_transaction_tx_to, &provider, coins, fuzzed_data_provider.ConsumeIntegral<int>(), input_errors);
}
}
diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp
index 286375f7ae..0c1b45b86c 100644
--- a/src/test/fuzz/string.cpp
+++ b/src/test/fuzz/string.cpp
@@ -66,8 +66,7 @@ FUZZ_TARGET(string)
(void)ParseNonRFCJSONValue(random_string_1);
} catch (const std::runtime_error&) {
}
- OutputType output_type;
- (void)ParseOutputType(random_string_1, output_type);
+ (void)ParseOutputType(random_string_1);
(void)RemovePrefix(random_string_1, random_string_2);
(void)ResolveErrMsg(random_string_1, random_string_2);
try {
diff --git a/src/test/fuzz/system.cpp b/src/test/fuzz/system.cpp
index b25dcfcd3b..0f53939eac 100644
--- a/src/test/fuzz/system.cpp
+++ b/src/test/fuzz/system.cpp
@@ -31,7 +31,8 @@ FUZZ_TARGET(system)
SetupHelpOptions(args_manager);
}
- while (fuzzed_data_provider.ConsumeBool()) {
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 3000)
+ {
CallOneOf(
fuzzed_data_provider,
[&] {
diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp
index bab34ea340..6201cc813c 100644
--- a/src/test/fuzz/tx_pool.cpp
+++ b/src/test/fuzz/tx_pool.cpp
@@ -142,7 +142,8 @@ FUZZ_TARGET_INIT(tx_pool_standard, initialize_tx_pool)
return c.out.nValue;
};
- while (fuzzed_data_provider.ConsumeBool()) {
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
+ {
{
// Total supply is the mempool fee + all outpoints
CAmount supply_now{WITH_LOCK(tx_pool.cs, return tx_pool.GetTotalFee())};
@@ -305,7 +306,8 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
CTxMemPool tx_pool_{/* estimator */ nullptr, /* check_ratio */ 1};
MockedTxPool& tx_pool = *static_cast<MockedTxPool*>(&tx_pool_);
- while (fuzzed_data_provider.ConsumeBool()) {
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
+ {
const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids);
if (fuzzed_data_provider.ConsumeBool()) {
diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp
index bcf0b0ce72..0d87f687d3 100644
--- a/src/test/fuzz/util.cpp
+++ b/src/test/fuzz/util.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <pubkey.h>
#include <test/fuzz/util.h>
#include <test/util/script.h>
#include <util/rbf.h>
@@ -304,3 +305,196 @@ uint32_t ConsumeSequence(FuzzedDataProvider& fuzzed_data_provider) noexcept
}) :
fuzzed_data_provider.ConsumeIntegral<uint32_t>();
}
+
+CTxDestination ConsumeTxDestination(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ CTxDestination tx_destination;
+ const size_t call_size{CallOneOf(
+ fuzzed_data_provider,
+ [&] {
+ tx_destination = CNoDestination{};
+ },
+ [&] {
+ tx_destination = PKHash{ConsumeUInt160(fuzzed_data_provider)};
+ },
+ [&] {
+ tx_destination = ScriptHash{ConsumeUInt160(fuzzed_data_provider)};
+ },
+ [&] {
+ tx_destination = WitnessV0ScriptHash{ConsumeUInt256(fuzzed_data_provider)};
+ },
+ [&] {
+ tx_destination = WitnessV0KeyHash{ConsumeUInt160(fuzzed_data_provider)};
+ },
+ [&] {
+ tx_destination = WitnessV1Taproot{XOnlyPubKey{ConsumeUInt256(fuzzed_data_provider)}};
+ },
+ [&] {
+ WitnessUnknown witness_unknown{};
+ witness_unknown.version = fuzzed_data_provider.ConsumeIntegralInRange(2, 16);
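+ // Versions 0 and 1 are produced by the dedicated types above; unknown witness programs must be 2-40 bytes (BIP141).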
+ std::vector<uint8_t> witness_unknown_program_1{fuzzed_data_provider.ConsumeBytes<uint8_t>(40)};
+ if (witness_unknown_program_1.size() < 2) {
+ witness_unknown_program_1 = {0, 0};
+ }
+ witness_unknown.length = witness_unknown_program_1.size();
+ std::copy(witness_unknown_program_1.begin(), witness_unknown_program_1.end(), witness_unknown.program);
+ tx_destination = witness_unknown;
+ })};
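+ // CallOneOf() returns the number of callables, so the Assert below verifies that every CTxDestination alternative is handled.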
+ Assert(call_size == std::variant_size_v<CTxDestination>);
+ return tx_destination;
+}
+
+CTxMemPoolEntry ConsumeTxMemPoolEntry(FuzzedDataProvider& fuzzed_data_provider, const CTransaction& tx) noexcept
+{
+ // Avoid:
+ // policy/feerate.cpp:28:34: runtime error: signed integer overflow: 34873208148477500 * 1000 cannot be represented in type 'long'
+ //
+ // Reproduce using CFeeRate(348732081484775, 10).GetFeePerK()
+ const CAmount fee = std::min<CAmount>(ConsumeMoney(fuzzed_data_provider), std::numeric_limits<CAmount>::max() / static_cast<CAmount>(100000));
+ assert(MoneyRange(fee));
+ const int64_t time = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ const unsigned int entry_height = fuzzed_data_provider.ConsumeIntegral<unsigned int>();
+ const bool spends_coinbase = fuzzed_data_provider.ConsumeBool();
+ const unsigned int sig_op_cost = fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(0, MAX_BLOCK_SIGOPS_COST);
+ return CTxMemPoolEntry{MakeTransactionRef(tx), fee, time, entry_height, spends_coinbase, sig_op_cost, {}};
+}
+
+bool ContainsSpentInput(const CTransaction& tx, const CCoinsViewCache& inputs) noexcept
+{
+ for (const CTxIn& tx_in : tx.vin) {
+ const Coin& coin = inputs.AccessCoin(tx_in.prevout);
+ if (coin.IsSpent()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ const Network network = fuzzed_data_provider.PickValueInArray({Network::NET_IPV4, Network::NET_IPV6, Network::NET_INTERNAL, Network::NET_ONION});
+ CNetAddr net_addr;
+ if (network == Network::NET_IPV4) {
+ in_addr v4_addr = {};
+ v4_addr.s_addr = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
+ net_addr = CNetAddr{v4_addr};
+ } else if (network == Network::NET_IPV6) {
+ if (fuzzed_data_provider.remaining_bytes() >= 16) {
+ in6_addr v6_addr = {};
+ memcpy(v6_addr.s6_addr, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data(), 16);
+ net_addr = CNetAddr{v6_addr, fuzzed_data_provider.ConsumeIntegral<uint32_t>()};
+ }
+ } else if (network == Network::NET_INTERNAL) {
+ net_addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32));
+ } else if (network == Network::NET_ONION) {
+ net_addr.SetSpecial(fuzzed_data_provider.ConsumeBytesAsString(32));
+ }
+ return net_addr;
+}
+
+FILE* FuzzedFileProvider::open()
+{
+ SetFuzzedErrNo(m_fuzzed_data_provider);
+ if (m_fuzzed_data_provider.ConsumeBool()) {
+ return nullptr;
+ }
+ std::string mode;
+ CallOneOf(
+ m_fuzzed_data_provider,
+ [&] {
+ mode = "r";
+ },
+ [&] {
+ mode = "r+";
+ },
+ [&] {
+ mode = "w";
+ },
+ [&] {
+ mode = "w+";
+ },
+ [&] {
+ mode = "a";
+ },
+ [&] {
+ mode = "a+";
+ });
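+ // fopencookie() is a GNU/glibc extension; on other platforms (and Android) act as if the open failed.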
+#if defined _GNU_SOURCE && !defined __ANDROID__
+ const cookie_io_functions_t io_hooks = {
+ FuzzedFileProvider::read,
+ FuzzedFileProvider::write,
+ FuzzedFileProvider::seek,
+ FuzzedFileProvider::close,
+ };
+ return fopencookie(this, mode.c_str(), io_hooks);
+#else
+ (void)mode;
+ return nullptr;
+#endif
+}
+
+ssize_t FuzzedFileProvider::read(void* cookie, char* buf, size_t size)
+{
+ FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
+ SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
+ if (buf == nullptr || size == 0 || fuzzed_file->m_fuzzed_data_provider.ConsumeBool()) {
+ return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
+ }
+ const std::vector<uint8_t> random_bytes = fuzzed_file->m_fuzzed_data_provider.ConsumeBytes<uint8_t>(size);
+ if (random_bytes.empty()) {
+ return 0;
+ }
+ std::memcpy(buf, random_bytes.data(), random_bytes.size());
+ if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)random_bytes.size())) {
+ return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
+ }
+ fuzzed_file->m_offset += random_bytes.size();
+ return random_bytes.size();
+}
+
+ssize_t FuzzedFileProvider::write(void* cookie, const char* buf, size_t size)
+{
+ FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
+ SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
+ const ssize_t n = fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(0, size);
+ if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)n)) {
+ return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
+ }
+ fuzzed_file->m_offset += n;
+ return n;
+}
+
+int FuzzedFileProvider::seek(void* cookie, int64_t* offset, int whence)
+{
+ assert(whence == SEEK_SET || whence == SEEK_CUR || whence == SEEK_END);
+ FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
+ SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
+ int64_t new_offset = 0;
+ if (whence == SEEK_SET) {
+ new_offset = *offset;
+ } else if (whence == SEEK_CUR) {
+ if (AdditionOverflow(fuzzed_file->m_offset, *offset)) {
+ return -1;
+ }
+ new_offset = fuzzed_file->m_offset + *offset;
+ } else if (whence == SEEK_END) {
+ const int64_t n = fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 4096);
+ if (AdditionOverflow(n, *offset)) {
+ return -1;
+ }
+ new_offset = n + *offset;
+ }
+ if (new_offset < 0) {
+ return -1;
+ }
+ fuzzed_file->m_offset = new_offset;
+ *offset = new_offset;
+ return fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int>(-1, 0);
+}
+
+int FuzzedFileProvider::close(void* cookie)
+{
+ FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
+ SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
+ return fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int>(-1, 0);
+}
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index 023dcdb3e5..bb017b3497 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -37,7 +37,7 @@
#include <vector>
template <typename... Callables>
-void CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... callables)
+size_t CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... callables)
{
constexpr size_t call_size{sizeof...(callables)};
static_assert(call_size >= 1);
@@ -45,6 +45,7 @@ void CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... callables)
size_t i{0};
((i++ == call_index ? callables() : void()), ...);
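+ // Return the number of callables so callers can assert that all alternatives (e.g. of a std::variant) are covered.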
+ return call_size;
}
template <typename Collection>
@@ -163,51 +164,9 @@ template <typename WeakEnumType, size_t size>
return UintToArith256(ConsumeUInt256(fuzzed_data_provider));
}
-[[nodiscard]] inline CTxMemPoolEntry ConsumeTxMemPoolEntry(FuzzedDataProvider& fuzzed_data_provider, const CTransaction& tx) noexcept
-{
- // Avoid:
- // policy/feerate.cpp:28:34: runtime error: signed integer overflow: 34873208148477500 * 1000 cannot be represented in type 'long'
- //
- // Reproduce using CFeeRate(348732081484775, 10).GetFeePerK()
- const CAmount fee = std::min<CAmount>(ConsumeMoney(fuzzed_data_provider), std::numeric_limits<CAmount>::max() / static_cast<CAmount>(100000));
- assert(MoneyRange(fee));
- const int64_t time = fuzzed_data_provider.ConsumeIntegral<int64_t>();
- const unsigned int entry_height = fuzzed_data_provider.ConsumeIntegral<unsigned int>();
- const bool spends_coinbase = fuzzed_data_provider.ConsumeBool();
- const unsigned int sig_op_cost = fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(0, MAX_BLOCK_SIGOPS_COST);
- return CTxMemPoolEntry{MakeTransactionRef(tx), fee, time, entry_height, spends_coinbase, sig_op_cost, {}};
-}
+[[nodiscard]] CTxMemPoolEntry ConsumeTxMemPoolEntry(FuzzedDataProvider& fuzzed_data_provider, const CTransaction& tx) noexcept;
-[[nodiscard]] inline CTxDestination ConsumeTxDestination(FuzzedDataProvider& fuzzed_data_provider) noexcept
-{
- CTxDestination tx_destination;
- CallOneOf(
- fuzzed_data_provider,
- [&] {
- tx_destination = CNoDestination{};
- },
- [&] {
- tx_destination = PKHash{ConsumeUInt160(fuzzed_data_provider)};
- },
- [&] {
- tx_destination = ScriptHash{ConsumeUInt160(fuzzed_data_provider)};
- },
- [&] {
- tx_destination = WitnessV0ScriptHash{ConsumeUInt256(fuzzed_data_provider)};
- },
- [&] {
- tx_destination = WitnessV0KeyHash{ConsumeUInt160(fuzzed_data_provider)};
- },
- [&] {
- WitnessUnknown witness_unknown{};
- witness_unknown.version = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
- const std::vector<uint8_t> witness_unknown_program_1 = fuzzed_data_provider.ConsumeBytes<uint8_t>(40);
- witness_unknown.length = witness_unknown_program_1.size();
- std::copy(witness_unknown_program_1.begin(), witness_unknown_program_1.end(), witness_unknown.program);
- tx_destination = witness_unknown;
- });
- return tx_destination;
-}
+[[nodiscard]] CTxDestination ConsumeTxDestination(FuzzedDataProvider& fuzzed_data_provider) noexcept;
template <typename T>
[[nodiscard]] bool MultiplicationOverflow(const T i, const T j) noexcept
@@ -243,16 +202,7 @@ template <class T>
return std::numeric_limits<T>::max() - i < j;
}
-[[nodiscard]] inline bool ContainsSpentInput(const CTransaction& tx, const CCoinsViewCache& inputs) noexcept
-{
- for (const CTxIn& tx_in : tx.vin) {
- const Coin& coin = inputs.AccessCoin(tx_in.prevout);
- if (coin.IsSpent()) {
- return true;
- }
- }
- return false;
-}
+[[nodiscard]] bool ContainsSpentInput(const CTransaction& tx, const CCoinsViewCache& inputs) noexcept;
/**
* Sets errno to a value selected from the given std::array `errnos`.
@@ -287,27 +237,7 @@ inline void SetFuzzedErrNo(FuzzedDataProvider& fuzzed_data_provider) noexcept
return result;
}
-inline CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept
-{
- const Network network = fuzzed_data_provider.PickValueInArray({Network::NET_IPV4, Network::NET_IPV6, Network::NET_INTERNAL, Network::NET_ONION});
- CNetAddr net_addr;
- if (network == Network::NET_IPV4) {
- in_addr v4_addr = {};
- v4_addr.s_addr = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
- net_addr = CNetAddr{v4_addr};
- } else if (network == Network::NET_IPV6) {
- if (fuzzed_data_provider.remaining_bytes() >= 16) {
- in6_addr v6_addr = {};
- memcpy(v6_addr.s6_addr, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data(), 16);
- net_addr = CNetAddr{v6_addr, fuzzed_data_provider.ConsumeIntegral<uint32_t>()};
- }
- } else if (network == Network::NET_INTERNAL) {
- net_addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32));
- } else if (network == Network::NET_ONION) {
- net_addr.SetSpecial(fuzzed_data_provider.ConsumeBytesAsString(32));
- }
- return net_addr;
-}
+CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept;
inline CSubNet ConsumeSubNet(FuzzedDataProvider& fuzzed_data_provider) noexcept
{
@@ -357,112 +287,15 @@ public:
{
}
- FILE* open()
- {
- SetFuzzedErrNo(m_fuzzed_data_provider);
- if (m_fuzzed_data_provider.ConsumeBool()) {
- return nullptr;
- }
- std::string mode;
- CallOneOf(
- m_fuzzed_data_provider,
- [&] {
- mode = "r";
- },
- [&] {
- mode = "r+";
- },
- [&] {
- mode = "w";
- },
- [&] {
- mode = "w+";
- },
- [&] {
- mode = "a";
- },
- [&] {
- mode = "a+";
- });
-#if defined _GNU_SOURCE && !defined __ANDROID__
- const cookie_io_functions_t io_hooks = {
- FuzzedFileProvider::read,
- FuzzedFileProvider::write,
- FuzzedFileProvider::seek,
- FuzzedFileProvider::close,
- };
- return fopencookie(this, mode.c_str(), io_hooks);
-#else
- (void)mode;
- return nullptr;
-#endif
- }
+ FILE* open();
- static ssize_t read(void* cookie, char* buf, size_t size)
- {
- FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
- SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
- if (buf == nullptr || size == 0 || fuzzed_file->m_fuzzed_data_provider.ConsumeBool()) {
- return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
- }
- const std::vector<uint8_t> random_bytes = fuzzed_file->m_fuzzed_data_provider.ConsumeBytes<uint8_t>(size);
- if (random_bytes.empty()) {
- return 0;
- }
- std::memcpy(buf, random_bytes.data(), random_bytes.size());
- if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)random_bytes.size())) {
- return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
- }
- fuzzed_file->m_offset += random_bytes.size();
- return random_bytes.size();
- }
+ static ssize_t read(void* cookie, char* buf, size_t size);
- static ssize_t write(void* cookie, const char* buf, size_t size)
- {
- FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
- SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
- const ssize_t n = fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(0, size);
- if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)n)) {
- return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
- }
- fuzzed_file->m_offset += n;
- return n;
- }
+ static ssize_t write(void* cookie, const char* buf, size_t size);
- static int seek(void* cookie, int64_t* offset, int whence)
- {
- assert(whence == SEEK_SET || whence == SEEK_CUR || whence == SEEK_END);
- FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
- SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
- int64_t new_offset = 0;
- if (whence == SEEK_SET) {
- new_offset = *offset;
- } else if (whence == SEEK_CUR) {
- if (AdditionOverflow(fuzzed_file->m_offset, *offset)) {
- return -1;
- }
- new_offset = fuzzed_file->m_offset + *offset;
- } else if (whence == SEEK_END) {
- const int64_t n = fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 4096);
- if (AdditionOverflow(n, *offset)) {
- return -1;
- }
- new_offset = n + *offset;
- }
- if (new_offset < 0) {
- return -1;
- }
- fuzzed_file->m_offset = new_offset;
- *offset = new_offset;
- return fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int>(-1, 0);
- }
+ static int seek(void* cookie, int64_t* offset, int whence);
- static int close(void* cookie)
- {
- FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
- SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
- return fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<int>(-1, 0);
- }
+ static int close(void* cookie);
};
[[nodiscard]] inline FuzzedFileProvider ConsumeFile(FuzzedDataProvider& fuzzed_data_provider) noexcept
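The FuzzedFileProvider hooks above are now only declared in the header and defined out of line; open() builds a FILE* from these callbacks with glibc's fopencookie. A standalone sketch of that mechanism, using an illustrative StringCookie in place of the fuzz provider (glibc-specific; platforms without fopencookie are not handled here):

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    #endif
    #include <algorithm>
    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <sys/types.h>

    // Illustrative cookie type; the real code passes the FuzzedFileProvider itself.
    struct StringCookie {
        std::string data;
        size_t pos{0};
    };

    static ssize_t CookieRead(void* cookie, char* buf, size_t size)
    {
        auto* c = static_cast<StringCookie*>(cookie);
        const size_t n = std::min(size, c->data.size() - c->pos);
        std::memcpy(buf, c->data.data() + c->pos, n);
        c->pos += n;
        return static_cast<ssize_t>(n);
    }

    static int CookieClose(void* cookie)
    {
        delete static_cast<StringCookie*>(cookie); // fclose() ends up here
        return 0;
    }

    int main()
    {
        cookie_io_functions_t hooks{}; // unset write/seek hooks mean "unsupported"
        hooks.read = CookieRead;
        hooks.close = CookieClose;
        FILE* file = fopencookie(new StringCookie{"hello cookie\n"}, "r", hooks);
        char buf[64]{};
        if (file && std::fgets(buf, sizeof(buf), file)) std::fputs(buf, stdout);
        if (file) std::fclose(file);
        return 0;
    }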
diff --git a/src/test/logging_tests.cpp b/src/test/logging_tests.cpp
index e99c6e0fc8..e2e31c62d7 100644
--- a/src/test/logging_tests.cpp
+++ b/src/test/logging_tests.cpp
@@ -27,7 +27,7 @@ BOOST_AUTO_TEST_CASE(logging_timer)
SetMockTime(1);
auto micro_timer = BCLog::Timer<std::chrono::microseconds>("tests", "end_msg");
SetMockTime(2);
- BOOST_CHECK_EQUAL(micro_timer.LogMsg("test micros"), "tests: test micros (1000000.00μs)");
+ BOOST_CHECK_EQUAL(micro_timer.LogMsg("test micros"), "tests: test micros (1000000μs)");
}
BOOST_AUTO_TEST_SUITE_END()
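The expected log string above loses its ".00" because the timer now reports an integral microsecond count rather than a formatted floating-point value. A minimal chrono sketch of why the integral count prints without fractional digits (mock times are illustrative, one second apart as in the test):

    #include <chrono>
    #include <cstdio>

    int main()
    {
        using namespace std::chrono;
        const auto start = microseconds{1'000'000};
        const auto end = microseconds{2'000'000};
        // Integral count, so no fractional digits: prints "1000000μs".
        std::printf("%lldμs\n", static_cast<long long>((end - start).count()));
        return 0;
    }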
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index e20c5e4e8f..7f44dcf20e 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2020 The Bitcoin Core developers
+// Copyright (c) 2011-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -16,6 +16,7 @@
#include <util/system.h>
#include <util/time.h>
#include <validation.h>
+#include <versionbits.h>
#include <test/util/setup_common.h>
@@ -51,36 +52,25 @@ BlockAssembler MinerTestingSetup::AssemblerForTest(const CChainParams& params)
constexpr static struct {
unsigned char extranonce;
unsigned int nonce;
-} blockinfo[] = {
- {4, 0xa4a3e223}, {2, 0x15c32f9e}, {1, 0x0375b547}, {1, 0x7004a8a5},
- {2, 0xce440296}, {2, 0x52cfe198}, {1, 0x77a72cd0}, {2, 0xbb5d6f84},
- {2, 0x83f30c2c}, {1, 0x48a73d5b}, {1, 0xef7dcd01}, {2, 0x6809c6c4},
- {2, 0x0883ab3c}, {1, 0x087bbbe2}, {2, 0x2104a814}, {2, 0xdffb6daa},
- {1, 0xee8a0a08}, {2, 0xba4237c1}, {1, 0xa70349dc}, {1, 0x344722bb},
- {3, 0xd6294733}, {2, 0xec9f5c94}, {2, 0xca2fbc28}, {1, 0x6ba4f406},
- {2, 0x015d4532}, {1, 0x6e119b7c}, {2, 0x43e8f314}, {2, 0x27962f38},
- {2, 0xb571b51b}, {2, 0xb36bee23}, {2, 0xd17924a8}, {2, 0x6bc212d9},
- {1, 0x630d4948}, {2, 0x9a4c4ebb}, {2, 0x554be537}, {1, 0xd63ddfc7},
- {2, 0xa10acc11}, {1, 0x759a8363}, {2, 0xfb73090d}, {1, 0xe82c6a34},
- {1, 0xe33e92d7}, {3, 0x658ef5cb}, {2, 0xba32ff22}, {5, 0x0227a10c},
- {1, 0xa9a70155}, {5, 0xd096d809}, {1, 0x37176174}, {1, 0x830b8d0f},
- {1, 0xc6e3910e}, {2, 0x823f3ca8}, {1, 0x99850849}, {1, 0x7521fb81},
- {1, 0xaacaabab}, {1, 0xd645a2eb}, {5, 0x7aea1781}, {5, 0x9d6e4b78},
- {1, 0x4ce90fd8}, {1, 0xabdc832d}, {6, 0x4a34f32a}, {2, 0xf2524c1c},
- {2, 0x1bbeb08a}, {1, 0xad47f480}, {1, 0x9f026aeb}, {1, 0x15a95049},
- {2, 0xd1cb95b2}, {2, 0xf84bbda5}, {1, 0x0fa62cd1}, {1, 0xe05f9169},
- {1, 0x78d194a9}, {5, 0x3e38147b}, {5, 0x737ba0d4}, {1, 0x63378e10},
- {1, 0x6d5f91cf}, {2, 0x88612eb8}, {2, 0xe9639484}, {1, 0xb7fabc9d},
- {2, 0x19b01592}, {1, 0x5a90dd31}, {2, 0x5bd7e028}, {2, 0x94d00323},
- {1, 0xa9b9c01a}, {1, 0x3a40de61}, {1, 0x56e7eec7}, {5, 0x859f7ef6},
- {1, 0xfd8e5630}, {1, 0x2b0c9f7f}, {1, 0xba700e26}, {1, 0x7170a408},
- {1, 0x70de86a8}, {1, 0x74d64cd5}, {1, 0x49e738a1}, {2, 0x6910b602},
- {0, 0x643c565f}, {1, 0x54264b3f}, {2, 0x97ea6396}, {2, 0x55174459},
- {2, 0x03e8779a}, {1, 0x98f34d8f}, {1, 0xc07b2b07}, {1, 0xdfe29668},
- {1, 0x3141c7c1}, {1, 0xb3b595f4}, {1, 0x735abf08}, {5, 0x623bfbce},
- {2, 0xd351e722}, {1, 0xf4ca48c9}, {1, 0x5b19c670}, {1, 0xa164bf0e},
- {2, 0xbbbeb305}, {2, 0xfe1c810a},
-};
+} BLOCKINFO[]{{8, 582909131}, {0, 971462344}, {2, 1169481553}, {6, 66147495}, {7, 427785981}, {8, 80538907},
+ {8, 207348013}, {2, 1951240923}, {4, 215054351}, {1, 491520534}, {8, 1282281282}, {4, 639565734},
+ {3, 248274685}, {8, 1160085976}, {6, 396349768}, {5, 393780549}, {5, 1096899528}, {4, 965381630},
+ {0, 728758712}, {5, 318638310}, {3, 164591898}, {2, 274234550}, {2, 254411237}, {7, 561761812},
+ {2, 268342573}, {0, 402816691}, {1, 221006382}, {6, 538872455}, {7, 393315655}, {4, 814555937},
+ {7, 504879194}, {6, 467769648}, {3, 925972193}, {2, 200581872}, {3, 168915404}, {8, 430446262},
+ {5, 773507406}, {3, 1195366164}, {0, 433361157}, {3, 297051771}, {0, 558856551}, {2, 501614039},
+ {3, 528488272}, {2, 473587734}, {8, 230125274}, {2, 494084400}, {4, 357314010}, {8, 60361686},
+ {7, 640624687}, {3, 480441695}, {8, 1424447925}, {4, 752745419}, {1, 288532283}, {6, 669170574},
+ {5, 1900907591}, {3, 555326037}, {3, 1121014051}, {0, 545835650}, {8, 189196651}, {5, 252371575},
+ {0, 199163095}, {6, 558895874}, {6, 1656839784}, {6, 815175452}, {6, 718677851}, {5, 544000334},
+ {0, 340113484}, {6, 850744437}, {4, 496721063}, {8, 524715182}, {6, 574361898}, {6, 1642305743},
+ {6, 355110149}, {5, 1647379658}, {8, 1103005356}, {7, 556460625}, {3, 1139533992}, {5, 304736030},
+ {2, 361539446}, {2, 143720360}, {6, 201939025}, {7, 423141476}, {4, 574633709}, {3, 1412254823},
+ {4, 873254135}, {0, 341817335}, {6, 53501687}, {3, 179755410}, {5, 172209688}, {8, 516810279},
+ {4, 1228391489}, {8, 325372589}, {6, 550367589}, {0, 876291812}, {7, 412454120}, {7, 717202854},
+ {2, 222677843}, {6, 251778867}, {7, 842004420}, {7, 194762829}, {4, 96668841}, {1, 925485796},
+ {0, 792342903}, {6, 678455063}, {6, 773251385}, {5, 186617471}, {6, 883189502}, {7, 396077336},
+ {8, 254702874}, {0, 455592851}};
static CBlockIndex CreateBlockIndex(int nHeight, CBlockIndex* active_chain_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
@@ -220,20 +210,18 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
// We can't make transactions until we have inputs
// Therefore, load 110 blocks :)
- static_assert(std::size(blockinfo) == 110, "Should have 110 blocks to import");
+ static_assert(std::size(BLOCKINFO) == 110, "Should have 110 blocks to import");
int baseheight = 0;
std::vector<CTransactionRef> txFirst;
- for (const auto& bi : blockinfo) {
+ for (const auto& bi : BLOCKINFO) {
CBlock *pblock = &pblocktemplate->block; // pointer for convenience
{
LOCK(cs_main);
- pblock->nVersion = 1;
+ pblock->nVersion = VERSIONBITS_TOP_BITS;
pblock->nTime = m_node.chainman->ActiveChain().Tip()->GetMedianTimePast()+1;
CMutableTransaction txCoinbase(*pblock->vtx[0]);
txCoinbase.nVersion = 1;
- txCoinbase.vin[0].scriptSig = CScript();
- txCoinbase.vin[0].scriptSig.push_back(bi.extranonce);
- txCoinbase.vin[0].scriptSig.push_back(m_node.chainman->ActiveChain().Height());
+ txCoinbase.vin[0].scriptSig = CScript{} << (m_node.chainman->ActiveChain().Height() + 1) << bi.extranonce;
txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
txCoinbase.vout[0].scriptPubKey = CScript();
pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
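The rewritten coinbase above is built as CScript{} << (height + 1) << extranonce, so its first push is the block height in the minimal script-number encoding (the BIP34 convention). A standalone sketch of that little-endian encoding, assuming the usual CScriptNum serialization rules (function name is illustrative):

    #include <cstdint>
    #include <vector>

    // Minimal script-number encoding: least-significant byte first, sign bit
    // carried in the top bit of the last byte, zero encoded as an empty vector.
    std::vector<unsigned char> EncodeScriptNum(int64_t value)
    {
        std::vector<unsigned char> out;
        const bool neg = value < 0;
        uint64_t abs_value = neg ? 0 - static_cast<uint64_t>(value) : static_cast<uint64_t>(value);
        while (abs_value != 0) {
            out.push_back(abs_value & 0xff);
            abs_value >>= 8;
        }
        if (!out.empty()) {
            if (out.back() & 0x80) {
                out.push_back(neg ? 0x80 : 0x00);
            } else if (neg) {
                out.back() |= 0x80;
            }
        }
        return out;
    }

    int main()
    {
        // Height 110 (the last block imported above) fits in one byte, 0x6e.
        return EncodeScriptNum(110).size() == 1 ? 0 : 1;
    }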
diff --git a/src/test/net_peer_eviction_tests.cpp b/src/test/net_peer_eviction_tests.cpp
index 4bfd487b86..5eb280b498 100644
--- a/src/test/net_peer_eviction_tests.cpp
+++ b/src/test/net_peer_eviction_tests.cpp
@@ -17,28 +17,6 @@
BOOST_FIXTURE_TEST_SUITE(net_peer_eviction_tests, BasicTestingSetup)
-std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_candidates, FastRandomContext& random_context)
-{
- std::vector<NodeEvictionCandidate> candidates;
- for (int id = 0; id < n_candidates; ++id) {
- candidates.push_back({
- /* id */ id,
- /* nTimeConnected */ static_cast<int64_t>(random_context.randrange(100)),
- /* m_min_ping_time */ std::chrono::microseconds{random_context.randrange(100)},
- /* nLastBlockTime */ static_cast<int64_t>(random_context.randrange(100)),
- /* nLastTXTime */ static_cast<int64_t>(random_context.randrange(100)),
- /* fRelevantServices */ random_context.randbool(),
- /* fRelayTxes */ random_context.randbool(),
- /* fBloomFilter */ random_context.randbool(),
- /* nKeyedNetGroup */ random_context.randrange(100),
- /* prefer_evict */ random_context.randbool(),
- /* m_is_local */ random_context.randbool(),
- /* m_network */ ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())],
- });
- }
- return candidates;
-}
-
// Create `num_peers` random nodes, apply setup function `candidate_setup_fn`,
// call ProtectEvictionCandidatesByRatio() to apply protection logic, and then
// return true if all of `protected_peer_ids` and none of `unprotected_peer_ids`
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index 46f88c1282..29938d4ede 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -2,8 +2,6 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <addrdb.h>
-#include <addrman.h>
#include <chainparams.h>
#include <clientversion.h>
#include <cstdint>
@@ -29,64 +27,6 @@
using namespace std::literals;
-class CAddrManSerializationMock : public CAddrMan
-{
-public:
- virtual void Serialize(CDataStream& s) const = 0;
-
- //! Ensure that bucket placement is always the same for testing purposes.
- void MakeDeterministic()
- {
- nKey.SetNull();
- insecure_rand = FastRandomContext(true);
- }
-};
-
-class CAddrManUncorrupted : public CAddrManSerializationMock
-{
-public:
- void Serialize(CDataStream& s) const override
- {
- CAddrMan::Serialize(s);
- }
-};
-
-class CAddrManCorrupted : public CAddrManSerializationMock
-{
-public:
- void Serialize(CDataStream& s) const override
- {
- // Produces corrupt output that claims addrman has 20 addrs when it only has one addr.
- unsigned char nVersion = 1;
- s << nVersion;
- s << ((unsigned char)32);
- s << nKey;
- s << 10; // nNew
- s << 10; // nTried
-
- int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30);
- s << nUBuckets;
-
- CService serv;
- BOOST_CHECK(Lookup("252.1.1.1", serv, 7777, false));
- CAddress addr = CAddress(serv, NODE_NONE);
- CNetAddr resolved;
- BOOST_CHECK(LookupHost("252.2.2.2", resolved, false));
- CAddrInfo info = CAddrInfo(addr, resolved);
- s << info;
- }
-};
-
-static CDataStream AddrmanToStream(const CAddrManSerializationMock& _addrman)
-{
- CDataStream ssPeersIn(SER_DISK, CLIENT_VERSION);
- ssPeersIn << Params().MessageStart();
- ssPeersIn << _addrman;
- std::string str = ssPeersIn.str();
- std::vector<unsigned char> vchData(str.begin(), str.end());
- return CDataStream(vchData, SER_DISK, CLIENT_VERSION);
-}
-
BOOST_FIXTURE_TEST_SUITE(net_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(cnode_listen_port)
@@ -101,82 +41,6 @@ BOOST_AUTO_TEST_CASE(cnode_listen_port)
BOOST_CHECK(port == altPort);
}
-BOOST_AUTO_TEST_CASE(caddrdb_read)
-{
- CAddrManUncorrupted addrmanUncorrupted;
- addrmanUncorrupted.MakeDeterministic();
-
- CService addr1, addr2, addr3;
- BOOST_CHECK(Lookup("250.7.1.1", addr1, 8333, false));
- BOOST_CHECK(Lookup("250.7.2.2", addr2, 9999, false));
- BOOST_CHECK(Lookup("250.7.3.3", addr3, 9999, false));
- BOOST_CHECK(Lookup("250.7.3.3"s, addr3, 9999, false));
- BOOST_CHECK(!Lookup("250.7.3.3\0example.com"s, addr3, 9999, false));
-
- // Add three addresses to new table.
- CService source;
- BOOST_CHECK(Lookup("252.5.1.1", source, 8333, false));
- BOOST_CHECK(addrmanUncorrupted.Add(CAddress(addr1, NODE_NONE), source));
- BOOST_CHECK(addrmanUncorrupted.Add(CAddress(addr2, NODE_NONE), source));
- BOOST_CHECK(addrmanUncorrupted.Add(CAddress(addr3, NODE_NONE), source));
-
- // Test that the de-serialization does not throw an exception.
- CDataStream ssPeers1 = AddrmanToStream(addrmanUncorrupted);
- bool exceptionThrown = false;
- CAddrMan addrman1;
-
- BOOST_CHECK(addrman1.size() == 0);
- try {
- unsigned char pchMsgTmp[4];
- ssPeers1 >> pchMsgTmp;
- ssPeers1 >> addrman1;
- } catch (const std::exception&) {
- exceptionThrown = true;
- }
-
- BOOST_CHECK(addrman1.size() == 3);
- BOOST_CHECK(exceptionThrown == false);
-
- // Test that CAddrDB::Read creates an addrman with the correct number of addrs.
- CDataStream ssPeers2 = AddrmanToStream(addrmanUncorrupted);
-
- CAddrMan addrman2;
- BOOST_CHECK(addrman2.size() == 0);
- BOOST_CHECK(CAddrDB::Read(addrman2, ssPeers2));
- BOOST_CHECK(addrman2.size() == 3);
-}
-
-
-BOOST_AUTO_TEST_CASE(caddrdb_read_corrupted)
-{
- CAddrManCorrupted addrmanCorrupted;
- addrmanCorrupted.MakeDeterministic();
-
- // Test that the de-serialization of corrupted addrman throws an exception.
- CDataStream ssPeers1 = AddrmanToStream(addrmanCorrupted);
- bool exceptionThrown = false;
- CAddrMan addrman1;
- BOOST_CHECK(addrman1.size() == 0);
- try {
- unsigned char pchMsgTmp[4];
- ssPeers1 >> pchMsgTmp;
- ssPeers1 >> addrman1;
- } catch (const std::exception&) {
- exceptionThrown = true;
- }
- // Even through de-serialization failed addrman is not left in a clean state.
- BOOST_CHECK(addrman1.size() == 1);
- BOOST_CHECK(exceptionThrown);
-
- // Test that CAddrDB::Read leaves addrman in a clean state if de-serialization fails.
- CDataStream ssPeers2 = AddrmanToStream(addrmanCorrupted);
-
- CAddrMan addrman2;
- BOOST_CHECK(addrman2.size() == 0);
- BOOST_CHECK(!CAddrDB::Read(addrman2, ssPeers2));
- BOOST_CHECK(addrman2.size() == 0);
-}
-
BOOST_AUTO_TEST_CASE(cnode_simple_test)
{
SOCKET hSocket = INVALID_SOCKET;
@@ -763,37 +627,42 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test)
BOOST_AUTO_TEST_CASE(LimitedAndReachable_Network)
{
- BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), true);
- BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), true);
- BOOST_CHECK_EQUAL(IsReachable(NET_ONION), true);
+ BOOST_CHECK(IsReachable(NET_IPV4));
+ BOOST_CHECK(IsReachable(NET_IPV6));
+ BOOST_CHECK(IsReachable(NET_ONION));
+ BOOST_CHECK(IsReachable(NET_I2P));
SetReachable(NET_IPV4, false);
SetReachable(NET_IPV6, false);
SetReachable(NET_ONION, false);
+ SetReachable(NET_I2P, false);
- BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), false);
- BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), false);
- BOOST_CHECK_EQUAL(IsReachable(NET_ONION), false);
+ BOOST_CHECK(!IsReachable(NET_IPV4));
+ BOOST_CHECK(!IsReachable(NET_IPV6));
+ BOOST_CHECK(!IsReachable(NET_ONION));
+ BOOST_CHECK(!IsReachable(NET_I2P));
SetReachable(NET_IPV4, true);
SetReachable(NET_IPV6, true);
SetReachable(NET_ONION, true);
+ SetReachable(NET_I2P, true);
- BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), true);
- BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), true);
- BOOST_CHECK_EQUAL(IsReachable(NET_ONION), true);
+ BOOST_CHECK(IsReachable(NET_IPV4));
+ BOOST_CHECK(IsReachable(NET_IPV6));
+ BOOST_CHECK(IsReachable(NET_ONION));
+ BOOST_CHECK(IsReachable(NET_I2P));
}
BOOST_AUTO_TEST_CASE(LimitedAndReachable_NetworkCaseUnroutableAndInternal)
{
- BOOST_CHECK_EQUAL(IsReachable(NET_UNROUTABLE), true);
- BOOST_CHECK_EQUAL(IsReachable(NET_INTERNAL), true);
+ BOOST_CHECK(IsReachable(NET_UNROUTABLE));
+ BOOST_CHECK(IsReachable(NET_INTERNAL));
SetReachable(NET_UNROUTABLE, false);
SetReachable(NET_INTERNAL, false);
- BOOST_CHECK_EQUAL(IsReachable(NET_UNROUTABLE), true); // Ignored for both networks
- BOOST_CHECK_EQUAL(IsReachable(NET_INTERNAL), true);
+ BOOST_CHECK(IsReachable(NET_UNROUTABLE)); // Ignored for both networks
+ BOOST_CHECK(IsReachable(NET_INTERNAL));
}
CNetAddr UtilBuildAddress(unsigned char p1, unsigned char p2, unsigned char p3, unsigned char p4)
@@ -812,10 +681,10 @@ BOOST_AUTO_TEST_CASE(LimitedAndReachable_CNetAddr)
CNetAddr addr = UtilBuildAddress(0x001, 0x001, 0x001, 0x001); // 1.1.1.1
SetReachable(NET_IPV4, true);
- BOOST_CHECK_EQUAL(IsReachable(addr), true);
+ BOOST_CHECK(IsReachable(addr));
SetReachable(NET_IPV4, false);
- BOOST_CHECK_EQUAL(IsReachable(addr), false);
+ BOOST_CHECK(!IsReachable(addr));
SetReachable(NET_IPV4, true); // have to reset this, because this is stateful.
}
@@ -827,12 +696,12 @@ BOOST_AUTO_TEST_CASE(LocalAddress_BasicLifecycle)
SetReachable(NET_IPV4, true);
- BOOST_CHECK_EQUAL(IsLocal(addr), false);
- BOOST_CHECK_EQUAL(AddLocal(addr, 1000), true);
- BOOST_CHECK_EQUAL(IsLocal(addr), true);
+ BOOST_CHECK(!IsLocal(addr));
+ BOOST_CHECK(AddLocal(addr, 1000));
+ BOOST_CHECK(IsLocal(addr));
RemoveLocal(addr);
- BOOST_CHECK_EQUAL(IsLocal(addr), false);
+ BOOST_CHECK(!IsLocal(addr));
}
BOOST_AUTO_TEST_SUITE_END()
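The reachability hunks above consistently replace BOOST_CHECK_EQUAL(x, true) and BOOST_CHECK_EQUAL(x, false) with BOOST_CHECK(x) and BOOST_CHECK(!x). A minimal Boost.Test sketch of that assertion style, assuming the header-only runner:

    #define BOOST_TEST_MODULE reachability_style
    #include <boost/test/included/unit_test.hpp>

    BOOST_AUTO_TEST_CASE(boolean_assertions)
    {
        const bool reachable = true;
        const bool limited = false;
        BOOST_CHECK(reachable); // instead of BOOST_CHECK_EQUAL(reachable, true)
        BOOST_CHECK(!limited);  // instead of BOOST_CHECK_EQUAL(limited, false)
    }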
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 62fd81673d..2c39cbffb9 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -123,7 +123,7 @@ static ScriptError_t ParseScriptError(const std::string& name)
BOOST_FIXTURE_TEST_SUITE(script_tests, BasicTestingSetup)
-void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScriptWitness& scriptWitness, int flags, const std::string& message, int scriptError, CAmount nValue = 0)
+void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScriptWitness& scriptWitness, uint32_t flags, const std::string& message, int scriptError, CAmount nValue = 0)
{
bool expect = (scriptError == SCRIPT_ERR_OK);
if (flags & SCRIPT_VERIFY_CLEANSTACK) {
@@ -139,8 +139,8 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
// Verify that removing flags from a passing test or adding flags to a failing test does not change the result.
for (int i = 0; i < 16; ++i) {
- int extra_flags = InsecureRandBits(16);
- int combined_flags = expect ? (flags & ~extra_flags) : (flags | extra_flags);
+ uint32_t extra_flags(InsecureRandBits(16));
+ uint32_t combined_flags{expect ? (flags & ~extra_flags) : (flags | extra_flags)};
// Weed out some invalid flag combinations.
if (combined_flags & SCRIPT_VERIFY_CLEANSTACK && ~combined_flags & (SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS)) continue;
if (combined_flags & SCRIPT_VERIFY_WITNESS && ~combined_flags & SCRIPT_VERIFY_P2SH) continue;
@@ -150,7 +150,7 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
#if defined(HAVE_CONSENSUS_LIB)
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << tx2;
- int libconsensus_flags = flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL;
+ uint32_t libconsensus_flags{flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL};
if (libconsensus_flags == flags) {
int expectedSuccessCode = expect ? 1 : 0;
if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
@@ -258,7 +258,7 @@ private:
bool havePush;
std::vector<unsigned char> push;
std::string comment;
- int flags;
+ uint32_t flags;
int scriptError;
CAmount nValue;
@@ -278,7 +278,7 @@ private:
}
public:
- TestBuilder(const CScript& script_, const std::string& comment_, int flags_, bool P2SH = false, WitnessMode wm = WitnessMode::NONE, int witnessversion = 0, CAmount nValue_ = 0) : script(script_), havePush(false), comment(comment_), flags(flags_), scriptError(SCRIPT_ERR_OK), nValue(nValue_)
+ TestBuilder(const CScript& script_, const std::string& comment_, uint32_t flags_, bool P2SH = false, WitnessMode wm = WitnessMode::NONE, int witnessversion = 0, CAmount nValue_ = 0) : script(script_), havePush(false), comment(comment_), flags(flags_), scriptError(SCRIPT_ERR_OK), nValue(nValue_)
{
CScript scriptPubKey = script;
if (wm == WitnessMode::PKH) {
@@ -1160,7 +1160,7 @@ SignatureData CombineSignatures(const CTxOut& txout, const CMutableTransaction&
SignatureData data;
data.MergeSignatureData(scriptSig1);
data.MergeSignatureData(scriptSig2);
- ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(&tx, 0, txout.nValue), txout.scriptPubKey, data);
+ ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(&tx, 0, txout.nValue, SIGHASH_DEFAULT), txout.scriptPubKey, data);
return data;
}
@@ -1677,7 +1677,7 @@ static void AssetTest(const UniValue& test)
const std::vector<CTxOut> prevouts = TxOutsFromJSON(test["prevouts"]);
BOOST_CHECK(prevouts.size() == mtx.vin.size());
size_t idx = test["index"].get_int64();
- unsigned int test_flags = ParseScriptFlags(test["flags"].get_str());
+ uint32_t test_flags{ParseScriptFlags(test["flags"].get_str())};
bool fin = test.exists("final") && test["final"].get_bool();
if (test.exists("success")) {
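The script_tests hunks above move the verification flags from int to uint32_t: the flags are a bitmask, and keeping expressions like flags & ~extra_flags on an unsigned type avoids sign-related surprises and signedness warnings. A small sketch of the masking pattern (flag values are placeholders, not Core's):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t FLAG_P2SH = 1u << 0;
        const uint32_t FLAG_CLEANSTACK = 1u << 8;

        const uint32_t flags = FLAG_P2SH | FLAG_CLEANSTACK;
        const uint32_t extra_flags = FLAG_CLEANSTACK;
        // "Removing flags from a passing test": clear the extra bits.
        const uint32_t combined = flags & ~extra_flags;
        std::printf("%08x\n", combined); // prints 00000001
        return 0;
    }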
diff --git a/src/test/serfloat_tests.cpp b/src/test/serfloat_tests.cpp
index 54e07b0f61..15612e2950 100644
--- a/src/test/serfloat_tests.cpp
+++ b/src/test/serfloat_tests.cpp
@@ -36,9 +36,9 @@ uint64_t TestDouble(double f) {
} // namespace
BOOST_AUTO_TEST_CASE(double_serfloat_tests) {
- BOOST_CHECK_EQUAL(TestDouble(0.0), 0);
+ BOOST_CHECK_EQUAL(TestDouble(0.0), 0U);
BOOST_CHECK_EQUAL(TestDouble(-0.0), 0x8000000000000000);
- BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::infinity()), 0x7ff0000000000000);
+ BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits<double>::infinity()), 0x7ff0000000000000U);
BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits<double>::infinity()), 0xfff0000000000000);
BOOST_CHECK_EQUAL(TestDouble(0.5), 0x3fe0000000000000ULL);
BOOST_CHECK_EQUAL(TestDouble(1.0), 0x3ff0000000000000ULL);
@@ -48,8 +48,8 @@ BOOST_AUTO_TEST_CASE(double_serfloat_tests) {
// Roundtrip test on IEC559-compatible systems
if (std::numeric_limits<double>::is_iec559) {
- BOOST_CHECK_EQUAL(sizeof(double), 8);
- BOOST_CHECK_EQUAL(sizeof(uint64_t), 8);
+ BOOST_CHECK_EQUAL(sizeof(double), 8U);
+ BOOST_CHECK_EQUAL(sizeof(uint64_t), 8U);
// Test extreme values
TestDouble(std::numeric_limits<double>::min());
TestDouble(-std::numeric_limits<double>::min());
@@ -102,11 +102,12 @@ BOOST_AUTO_TEST_CASE(double_serfloat_tests) {
Python code to generate the below hashes:
def reversed_hex(x):
- return binascii.hexlify(''.join(reversed(x)))
+ return bytes(reversed(x)).hex()
+
def dsha256(x):
return hashlib.sha256(hashlib.sha256(x).digest()).digest()
- reversed_hex(dsha256(''.join(struct.pack('<d', x) for x in range(0,1000)))) == '43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96'
+ reversed_hex(dsha256(b''.join(struct.pack('<d', x) for x in range(0,1000)))) == '43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96'
*/
BOOST_AUTO_TEST_CASE(doubles)
{
diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp
index 12fc575c1e..db96fd4940 100644
--- a/src/test/sigopcount_tests.cpp
+++ b/src/test/sigopcount_tests.cpp
@@ -67,7 +67,7 @@ BOOST_AUTO_TEST_CASE(GetSigOpCount)
* Verifies script execution of the zeroth scriptPubKey of tx output and
* zeroth scriptSig and witness of tx input.
*/
-static ScriptError VerifyWithFlag(const CTransaction& output, const CMutableTransaction& input, int flags)
+static ScriptError VerifyWithFlag(const CTransaction& output, const CMutableTransaction& input, uint32_t flags)
{
ScriptError error;
CTransaction inputi(input);
@@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(GetTxSigOpCost)
key.MakeNewKey(true);
CPubKey pubkey = key.GetPubKey();
// Default flags
- int flags = SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH;
+ const uint32_t flags{SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH};
// Multisig script (legacy counting)
{
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 7af2b79f37..acd0151e1a 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -119,7 +119,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader_rvalue)
uint32_t varint = 0;
// Deserialize into r-value
reader >> VARINT(varint);
- BOOST_CHECK_EQUAL(varint, 54321);
+ BOOST_CHECK_EQUAL(varint, 54321U);
BOOST_CHECK(reader.empty());
}
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 40c53cb2ec..24029ea02e 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -446,7 +446,7 @@ static void CreateCreditAndSpend(const FillableSigningProvider& keystore, const
assert(input.vin[0].scriptWitness.stack == inputm.vin[0].scriptWitness.stack);
}
-static void CheckWithFlag(const CTransactionRef& output, const CMutableTransaction& input, int flags, bool success)
+static void CheckWithFlag(const CTransactionRef& output, const CMutableTransaction& input, uint32_t flags, bool success)
{
ScriptError error;
CTransaction inputi(input);
@@ -561,7 +561,7 @@ SignatureData CombineSignatures(const CMutableTransaction& input1, const CMutabl
SignatureData sigdata;
sigdata = DataFromTransaction(input1, 0, tx->vout[0]);
sigdata.MergeSignatureData(DataFromTransaction(input2, 0, tx->vout[0]));
- ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(&input1, 0, tx->vout[0].nValue), tx->vout[0].scriptPubKey, sigdata);
+ ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(&input1, 0, tx->vout[0].nValue, SIGHASH_ALL), tx->vout[0].scriptPubKey, sigdata);
return sigdata;
}
@@ -765,95 +765,89 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
key.MakeNewKey(true);
t.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));
- std::string reason;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ constexpr auto CheckIsStandard = [](const auto& t) {
+ std::string reason;
+ BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ BOOST_CHECK(reason.empty());
+ };
+ constexpr auto CheckIsNotStandard = [](const auto& t, const std::string& reason_in) {
+ std::string reason;
+ BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
+ BOOST_CHECK_EQUAL(reason_in, reason);
+ };
+
+ CheckIsStandard(t);
// Check dust with default relay fee:
- CAmount nDustThreshold = 182 * dustRelayFee.GetFeePerK()/1000;
+ CAmount nDustThreshold = 182 * dustRelayFee.GetFeePerK() / 1000;
BOOST_CHECK_EQUAL(nDustThreshold, 546);
// dust:
t.vout[0].nValue = nDustThreshold - 1;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "dust");
+ CheckIsNotStandard(t, "dust");
// not dust:
t.vout[0].nValue = nDustThreshold;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
// Disallowed nVersion
t.nVersion = -1;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "version");
+ CheckIsNotStandard(t, "version");
t.nVersion = 0;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "version");
+ CheckIsNotStandard(t, "version");
t.nVersion = 3;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "version");
+ CheckIsNotStandard(t, "version");
// Allowed nVersion
t.nVersion = 1;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
t.nVersion = 2;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
// Check dust with odd relay fee to verify rounding:
// nDustThreshold = 182 * 3702 / 1000
dustRelayFee = CFeeRate(3702);
// dust:
t.vout[0].nValue = 673 - 1;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "dust");
+ CheckIsNotStandard(t, "dust");
// not dust:
t.vout[0].nValue = 673;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
dustRelayFee = CFeeRate(DUST_RELAY_TX_FEE);
t.vout[0].scriptPubKey = CScript() << OP_1;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "scriptpubkey");
+ CheckIsNotStandard(t, "scriptpubkey");
// MAX_OP_RETURN_RELAY-byte TxoutType::NULL_DATA (standard)
t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38");
BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY, t.vout[0].scriptPubKey.size());
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
// MAX_OP_RETURN_RELAY+1-byte TxoutType::NULL_DATA (non-standard)
t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3804678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3800");
BOOST_CHECK_EQUAL(MAX_OP_RETURN_RELAY + 1, t.vout[0].scriptPubKey.size());
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "scriptpubkey");
+ CheckIsNotStandard(t, "scriptpubkey");
// Data payload can be encoded in any way...
t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("");
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("00") << ParseHex("01");
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
// OP_RESERVED *is* considered to be a PUSHDATA type opcode by IsPushOnly()!
t.vout[0].scriptPubKey = CScript() << OP_RETURN << OP_RESERVED << -1 << 0 << ParseHex("01") << 2 << 3 << 4 << 5 << 6 << 7 << 8 << 9 << 10 << 11 << 12 << 13 << 14 << 15 << 16;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
t.vout[0].scriptPubKey = CScript() << OP_RETURN << 0 << ParseHex("01") << 2 << ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
// ...so long as it only contains PUSHDATA's
t.vout[0].scriptPubKey = CScript() << OP_RETURN << OP_RETURN;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "scriptpubkey");
+ CheckIsNotStandard(t, "scriptpubkey");
// TxoutType::NULL_DATA w/o PUSHDATA
t.vout.resize(1);
t.vout[0].scriptPubKey = CScript() << OP_RETURN;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
// Only one TxoutType::NULL_DATA permitted in all cases
t.vout.resize(2);
@@ -861,21 +855,15 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
t.vout[0].nValue = 0;
t.vout[1].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38");
t.vout[1].nValue = 0;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "multi-op-return");
+ CheckIsNotStandard(t, "multi-op-return");
t.vout[0].scriptPubKey = CScript() << OP_RETURN << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38");
t.vout[1].scriptPubKey = CScript() << OP_RETURN;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "multi-op-return");
+ CheckIsNotStandard(t, "multi-op-return");
t.vout[0].scriptPubKey = CScript() << OP_RETURN;
t.vout[1].scriptPubKey = CScript() << OP_RETURN;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "multi-op-return");
+ CheckIsNotStandard(t, "multi-op-return");
// Check large scriptSig (non-standard if size is >1650 bytes)
t.vout.resize(1);
@@ -883,12 +871,10 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
t.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));
// OP_PUSHDATA2 with len (3 bytes) + data (1647 bytes) = 1650 bytes
t.vin[0].scriptSig = CScript() << std::vector<unsigned char>(1647, 0); // 1650
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
t.vin[0].scriptSig = CScript() << std::vector<unsigned char>(1648, 0); // 1651
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "scriptsig-size");
+ CheckIsNotStandard(t, "scriptsig-size");
// Check scriptSig format (non-standard if there are any other ops than just PUSHs)
t.vin[0].scriptSig = CScript()
@@ -897,7 +883,7 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
<< std::vector<unsigned char>(235, 0) // OP_PUSHDATA1 x [...x bytes...]
<< std::vector<unsigned char>(1234, 0) // OP_PUSHDATA2 x [...x bytes...]
<< OP_9;
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
const std::vector<unsigned char> non_push_ops = { // arbitrary set of non-push operations
OP_NOP, OP_VERIFY, OP_IF, OP_ROT, OP_3DUP, OP_SIZE, OP_EQUAL, OP_ADD, OP_SUB,
@@ -917,11 +903,10 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
// replace current push-op with each non-push-op
for (auto op : non_push_ops) {
t.vin[0].scriptSig[index] = op;
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "scriptsig-not-pushonly");
+ CheckIsNotStandard(t, "scriptsig-not-pushonly");
}
t.vin[0].scriptSig[index] = orig_op; // restore op
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
}
// Check tx-size (non-standard if transaction weight is > MAX_STANDARD_TX_WEIGHT)
@@ -934,27 +919,47 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
// ===============================
// total: 400000 vbytes
BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400000);
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
// increase output size by one byte, so we end up with 400004 vbytes
t.vout[0].scriptPubKey = CScript() << OP_RETURN << std::vector<unsigned char>(20, 0); // output size: 31 bytes
BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400004);
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "tx-size");
+ CheckIsNotStandard(t, "tx-size");
// Check bare multisig (standard if policy flag fIsBareMultisigStd is set)
fIsBareMultisigStd = true;
t.vout[0].scriptPubKey = GetScriptForMultisig(1, {key.GetPubKey()}); // simple 1-of-1
t.vin.resize(1);
t.vin[0].scriptSig = CScript() << std::vector<unsigned char>(65, 0);
- BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ CheckIsStandard(t);
fIsBareMultisigStd = false;
- reason.clear();
- BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
- BOOST_CHECK_EQUAL(reason, "bare-multisig");
+ CheckIsNotStandard(t, "bare-multisig");
fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
+
+ // Check P2WPKH outputs dust threshold
+ t.vout[0].scriptPubKey = CScript() << OP_0 << ParseHex("ffffffffffffffffffffffffffffffffffffffff");
+ t.vout[0].nValue = 294;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 293;
+ CheckIsNotStandard(t, "dust");
+
+ // Check P2WSH outputs dust threshold
+ t.vout[0].scriptPubKey = CScript() << OP_0 << ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
+ t.vout[0].nValue = 330;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 329;
+ CheckIsNotStandard(t, "dust");
+
+ // Check future Witness Program versions dust threshold
+ for (int op = OP_2; op <= OP_16; op += 1) {
+ t.vout[0].scriptPubKey = CScript() << (opcodetype)op << ParseHex("ffff");
+ t.vout[0].nValue = 240;
+ CheckIsStandard(t);
+
+ t.vout[0].nValue = 239;
+ CheckIsNotStandard(t, "dust");
+ }
}
BOOST_AUTO_TEST_SUITE_END()
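The dust values checked above (546, 294, 330, 240) all follow from the same rule: charge the default 3000 sat/kvB dust relay rate on the output size plus an assumed size for the input that would later spend it, 148 bytes for a legacy spend and 67 after the witness scale-factor discount. The nDustThreshold = 182 * dustRelayFee / 1000 line earlier in the hunk is the same computation with the sizes pre-summed. An illustrative recomputation (helper name and signature are not Core's GetDustThreshold):

    #include <cstdint>

    constexpr int64_t DustThreshold(int64_t output_size, bool segwit_spend)
    {
        return (output_size + (segwit_spend ? 67 : 148)) * 3000 / 1000;
    }

    static_assert(DustThreshold(34, false) == 546, "P2PKH output (8+1+25 bytes)");
    static_assert(DustThreshold(31, true) == 294, "P2WPKH output (8+1+22 bytes)");
    static_assert(DustThreshold(43, true) == 330, "P2WSH output (8+1+34 bytes)");
    static_assert(DustThreshold(13, true) == 240, "future witness version, 2-byte program");

    int main() { return 0; }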
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index 23195c0a26..1924ea55b1 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -112,10 +112,15 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache, CCoinsViewCache& active_coins_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
PrecomputedTransactionData txdata;
- // If we add many more flags, this loop can get too expensive, but we can
- // rewrite in the future to randomly pick a set of flags to evaluate.
- for (uint32_t test_flags=0; test_flags < (1U << 16); test_flags += 1) {
+
+ FastRandomContext insecure_rand(true);
+
+ for (int count = 0; count < 10000; ++count) {
TxValidationState state;
+
+ // Randomly select a combination of script verification flags
+ uint32_t test_flags = (uint32_t) insecure_rand.randrange((SCRIPT_VERIFY_END_MARKER - 1) << 1);
+
// Filter out incompatible flag choices
if ((test_flags & SCRIPT_VERIFY_CLEANSTACK)) {
// CLEANSTACK requires P2SH and WITNESS, see VerifyScript() in
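The loop above no longer enumerates all 2^16 flag words; it draws 10000 masks from a deterministic FastRandomContext, with the range bound derived from SCRIPT_VERIFY_END_MARKER so that every combination of defined flag bits remains reachable. A sketch of that sampling idea, with std::mt19937 standing in for the deterministic RNG and an assumed 16-bit flag space:

    #include <cstdint>
    #include <random>

    int main()
    {
        std::mt19937 rng{1}; // fixed seed, like FastRandomContext(true)
        const uint32_t flag_space = 1u << 16; // assume 16 defined flag bits
        std::uniform_int_distribution<uint32_t> pick{0, flag_space - 1};
        for (int count = 0; count < 10000; ++count) {
            const uint32_t test_flags = pick(rng);
            (void)test_flags; // feed into the check under test
        }
        return 0;
    }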
diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp
index 847a490e03..28d7967078 100644
--- a/src/test/util/net.cpp
+++ b/src/test/util/net.cpp
@@ -6,6 +6,9 @@
#include <chainparams.h>
#include <net.h>
+#include <span.h>
+
+#include <vector>
void ConnmanTestMsg::NodeReceiveMsgBytes(CNode& node, Span<const uint8_t> msg_bytes, bool& complete) const
{
@@ -37,3 +40,25 @@ bool ConnmanTestMsg::ReceiveMsgFrom(CNode& node, CSerializedNetMsg& ser_msg) con
NodeReceiveMsgBytes(node, ser_msg.data, complete);
return complete;
}
+
+std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candidates, FastRandomContext& random_context)
+{
+ std::vector<NodeEvictionCandidate> candidates;
+ for (int id = 0; id < n_candidates; ++id) {
+ candidates.push_back({
+ /* id */ id,
+ /* nTimeConnected */ static_cast<int64_t>(random_context.randrange(100)),
+ /* m_min_ping_time */ std::chrono::microseconds{random_context.randrange(100)},
+ /* nLastBlockTime */ static_cast<int64_t>(random_context.randrange(100)),
+ /* nLastTXTime */ static_cast<int64_t>(random_context.randrange(100)),
+ /* fRelevantServices */ random_context.randbool(),
+ /* fRelayTxes */ random_context.randbool(),
+ /* fBloomFilter */ random_context.randbool(),
+ /* nKeyedNetGroup */ random_context.randrange(100),
+ /* prefer_evict */ random_context.randbool(),
+ /* m_is_local */ random_context.randbool(),
+ /* m_network */ ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())],
+ });
+ }
+ return candidates;
+}
diff --git a/src/test/util/net.h b/src/test/util/net.h
index 1b49a671bd..939ec322ed 100644
--- a/src/test/util/net.h
+++ b/src/test/util/net.h
@@ -141,4 +141,6 @@ private:
mutable size_t m_consumed;
};
+std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candidates, FastRandomContext& random_context);
+
#endif // BITCOIN_TEST_UTIL_NET_H
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 748272bb1d..ba6b3e32ea 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -141,12 +141,11 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve
m_node.scheduler->m_service_thread = std::thread(util::TraceThread, "scheduler", [&] { m_node.scheduler->serviceQueue(); });
GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler);
- pblocktree.reset(new CBlockTreeDB(1 << 20, true));
-
m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>();
m_node.mempool = std::make_unique<CTxMemPool>(m_node.fee_estimator.get(), 1);
m_node.chainman = std::make_unique<ChainstateManager>();
+ m_node.chainman->m_blockman.m_block_tree_db = std::make_unique<CBlockTreeDB>(1 << 20, true);
// Start script-checking threads. Set g_parallel_script_checks to true so they are used.
constexpr int script_check_threads = 2;
@@ -169,7 +168,6 @@ ChainTestingSetup::~ChainTestingSetup()
m_node.scheduler.reset();
m_node.chainman->Reset();
m_node.chainman.reset();
- pblocktree.reset();
}
TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const char*>& extra_args)
@@ -180,7 +178,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
// instead of unit tests, but for now we need these here.
RegisterAllCoreRPCCommands(tableRPC);
- m_node.chainman->InitializeChainstate(*m_node.mempool);
+ m_node.chainman->InitializeChainstate(m_node.mempool.get());
m_node.chainman->ActiveChainstate().InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
assert(!m_node.chainman->ActiveChainstate().CanFlushToDisk());
@@ -195,11 +193,11 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
}
- m_node.addrman = std::make_unique<CAddrMan>();
+ m_node.addrman = std::make_unique<CAddrMan>(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
m_node.banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); // Deterministic randomness for tests.
m_node.peerman = PeerManager::make(chainparams, *m_node.connman, *m_node.addrman,
- m_node.banman.get(), *m_node.scheduler, *m_node.chainman,
+ m_node.banman.get(), *m_node.chainman,
*m_node.mempool, false);
{
CConnman::Options options;
@@ -294,7 +292,7 @@ CMutableTransaction TestChain100Setup::CreateValidMempoolTransaction(CTransactio
input_coins.insert({outpoint_to_spend, utxo_to_spend});
// - Default signature hashing type
int nHashType = SIGHASH_ALL;
- std::map<int, std::string> input_errors;
+ std::map<int, bilingual_str> input_errors;
assert(SignTransaction(mempool_txn, &keystore, input_coins, nHashType, input_errors));
// If submit=true, add transaction to the mempool.
diff --git a/src/test/util/wallet.cpp b/src/test/util/wallet.cpp
index fd6012e9fe..061659818f 100644
--- a/src/test/util/wallet.cpp
+++ b/src/test/util/wallet.cpp
@@ -8,6 +8,7 @@
#include <outputtype.h>
#include <script/standard.h>
#ifdef ENABLE_WALLET
+#include <util/translation.h>
#include <wallet/wallet.h>
#endif
@@ -18,7 +19,7 @@ std::string getnewaddress(CWallet& w)
{
constexpr auto output_type = OutputType::BECH32;
CTxDestination dest;
- std::string error;
+ bilingual_str error;
if (!w.GetNewDestination(output_type, "", dest, error)) assert(false);
return EncodeDestination(dest);
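The wallet test helper above now collects errors as bilingual_str (hence the new util/translation.h include) rather than std::string. Roughly, that type pairs an untranslated message with its translated form; the sketch below only shows the shape, and the field names and strings are illustrative:

    #include <string>

    struct BilingualStr {
        std::string original;   // untranslated text, e.g. for logs and RPC
        std::string translated; // text shown in the GUI
    };

    int main()
    {
        const BilingualStr error{"Invalid amount", "Invalid amount"};
        return error.original.empty() ? 1 : 0;
    }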
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 7ce38519cf..a62abf9b9c 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -173,6 +173,22 @@ BOOST_AUTO_TEST_CASE(util_Join)
BOOST_CHECK_EQUAL(Join<std::string>({"foo", "bar"}, ", ", op_upper), "FOO, BAR");
}
+BOOST_AUTO_TEST_CASE(util_TrimString)
+{
+ BOOST_CHECK_EQUAL(TrimString(" foo bar "), "foo bar");
+ BOOST_CHECK_EQUAL(TrimString("\t \n \n \f\n\r\t\v\tfoo \n \f\n\r\t\v\tbar\t \n \f\n\r\t\v\t\n "), "foo \n \f\n\r\t\v\tbar");
+ BOOST_CHECK_EQUAL(TrimString("\t \n foo \n\tbar\t \n "), "foo \n\tbar");
+ BOOST_CHECK_EQUAL(TrimString("\t \n foo \n\tbar\t \n ", "fobar"), "\t \n foo \n\tbar\t \n ");
+ BOOST_CHECK_EQUAL(TrimString("foo bar"), "foo bar");
+ BOOST_CHECK_EQUAL(TrimString("foo bar", "fobar"), " ");
+ BOOST_CHECK_EQUAL(TrimString(std::string("\0 foo \0 ", 8)), std::string("\0 foo \0", 7));
+ BOOST_CHECK_EQUAL(TrimString(std::string(" foo ", 5)), std::string("foo", 3));
+ BOOST_CHECK_EQUAL(TrimString(std::string("\t\t\0\0\n\n", 6)), std::string("\0\0", 2));
+ BOOST_CHECK_EQUAL(TrimString(std::string("\x05\x04\x03\x02\x01\x00", 6)), std::string("\x05\x04\x03\x02\x01\x00", 6));
+ BOOST_CHECK_EQUAL(TrimString(std::string("\x05\x04\x03\x02\x01\x00", 6), std::string("\x05\x04\x03\x02\x01", 5)), std::string("\0", 1));
+ BOOST_CHECK_EQUAL(TrimString(std::string("\x05\x04\x03\x02\x01\x00", 6), std::string("\x05\x04\x03\x02\x01\x00", 6)), "");
+}
+
BOOST_AUTO_TEST_CASE(util_FormatParseISO8601DateTime)
{
BOOST_CHECK_EQUAL(FormatISO8601DateTime(1317425777), "2011-09-30T23:36:17Z");
@@ -1222,86 +1238,59 @@ BOOST_AUTO_TEST_CASE(util_FormatMoney)
BOOST_AUTO_TEST_CASE(util_ParseMoney)
{
- CAmount ret = 0;
- BOOST_CHECK(ParseMoney("0.0", ret));
- BOOST_CHECK_EQUAL(ret, 0);
-
- BOOST_CHECK(ParseMoney("12345.6789", ret));
- BOOST_CHECK_EQUAL(ret, (COIN/10000)*123456789);
-
- BOOST_CHECK(ParseMoney("100000000.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*100000000);
- BOOST_CHECK(ParseMoney("10000000.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*10000000);
- BOOST_CHECK(ParseMoney("1000000.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*1000000);
- BOOST_CHECK(ParseMoney("100000.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*100000);
- BOOST_CHECK(ParseMoney("10000.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*10000);
- BOOST_CHECK(ParseMoney("1000.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*1000);
- BOOST_CHECK(ParseMoney("100.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*100);
- BOOST_CHECK(ParseMoney("10.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN*10);
- BOOST_CHECK(ParseMoney("1.00", ret));
- BOOST_CHECK_EQUAL(ret, COIN);
- BOOST_CHECK(ParseMoney("1", ret));
- BOOST_CHECK_EQUAL(ret, COIN);
- BOOST_CHECK(ParseMoney(" 1", ret));
- BOOST_CHECK_EQUAL(ret, COIN);
- BOOST_CHECK(ParseMoney("1 ", ret));
- BOOST_CHECK_EQUAL(ret, COIN);
- BOOST_CHECK(ParseMoney(" 1 ", ret));
- BOOST_CHECK_EQUAL(ret, COIN);
- BOOST_CHECK(ParseMoney("0.1", ret));
- BOOST_CHECK_EQUAL(ret, COIN/10);
- BOOST_CHECK(ParseMoney("0.01", ret));
- BOOST_CHECK_EQUAL(ret, COIN/100);
- BOOST_CHECK(ParseMoney("0.001", ret));
- BOOST_CHECK_EQUAL(ret, COIN/1000);
- BOOST_CHECK(ParseMoney("0.0001", ret));
- BOOST_CHECK_EQUAL(ret, COIN/10000);
- BOOST_CHECK(ParseMoney("0.00001", ret));
- BOOST_CHECK_EQUAL(ret, COIN/100000);
- BOOST_CHECK(ParseMoney("0.000001", ret));
- BOOST_CHECK_EQUAL(ret, COIN/1000000);
- BOOST_CHECK(ParseMoney("0.0000001", ret));
- BOOST_CHECK_EQUAL(ret, COIN/10000000);
- BOOST_CHECK(ParseMoney("0.00000001", ret));
- BOOST_CHECK_EQUAL(ret, COIN/100000000);
- BOOST_CHECK(ParseMoney(" 0.00000001 ", ret));
- BOOST_CHECK_EQUAL(ret, COIN/100000000);
- BOOST_CHECK(ParseMoney("0.00000001 ", ret));
- BOOST_CHECK_EQUAL(ret, COIN/100000000);
- BOOST_CHECK(ParseMoney(" 0.00000001", ret));
- BOOST_CHECK_EQUAL(ret, COIN/100000000);
-
- // Parsing amount that can not be represented in ret should fail
- BOOST_CHECK(!ParseMoney("0.000000001", ret));
+ BOOST_CHECK_EQUAL(ParseMoney("0.0").value(), 0);
+
+ BOOST_CHECK_EQUAL(ParseMoney("12345.6789").value(), (COIN/10000)*123456789);
+
+ BOOST_CHECK_EQUAL(ParseMoney("10000000.00").value(), COIN*10000000);
+ BOOST_CHECK_EQUAL(ParseMoney("1000000.00").value(), COIN*1000000);
+ BOOST_CHECK_EQUAL(ParseMoney("100000.00").value(), COIN*100000);
+ BOOST_CHECK_EQUAL(ParseMoney("10000.00").value(), COIN*10000);
+ BOOST_CHECK_EQUAL(ParseMoney("1000.00").value(), COIN*1000);
+ BOOST_CHECK_EQUAL(ParseMoney("100.00").value(), COIN*100);
+ BOOST_CHECK_EQUAL(ParseMoney("10.00").value(), COIN*10);
+ BOOST_CHECK_EQUAL(ParseMoney("1.00").value(), COIN);
+ BOOST_CHECK_EQUAL(ParseMoney("1").value(), COIN);
+ BOOST_CHECK_EQUAL(ParseMoney(" 1").value(), COIN);
+ BOOST_CHECK_EQUAL(ParseMoney("1 ").value(), COIN);
+ BOOST_CHECK_EQUAL(ParseMoney(" 1 ").value(), COIN);
+ BOOST_CHECK_EQUAL(ParseMoney("0.1").value(), COIN/10);
+ BOOST_CHECK_EQUAL(ParseMoney("0.01").value(), COIN/100);
+ BOOST_CHECK_EQUAL(ParseMoney("0.001").value(), COIN/1000);
+ BOOST_CHECK_EQUAL(ParseMoney("0.0001").value(), COIN/10000);
+ BOOST_CHECK_EQUAL(ParseMoney("0.00001").value(), COIN/100000);
+ BOOST_CHECK_EQUAL(ParseMoney("0.000001").value(), COIN/1000000);
+ BOOST_CHECK_EQUAL(ParseMoney("0.0000001").value(), COIN/10000000);
+ BOOST_CHECK_EQUAL(ParseMoney("0.00000001").value(), COIN/100000000);
+ BOOST_CHECK_EQUAL(ParseMoney(" 0.00000001 ").value(), COIN/100000000);
+ BOOST_CHECK_EQUAL(ParseMoney("0.00000001 ").value(), COIN/100000000);
+ BOOST_CHECK_EQUAL(ParseMoney(" 0.00000001").value(), COIN/100000000);
+
+ // Parsing an amount that cannot be represented should fail
+ BOOST_CHECK(!ParseMoney("100000000.00"));
+ BOOST_CHECK(!ParseMoney("0.000000001"));
// Parsing empty string should fail
- BOOST_CHECK(!ParseMoney("", ret));
- BOOST_CHECK(!ParseMoney(" ", ret));
- BOOST_CHECK(!ParseMoney(" ", ret));
+ BOOST_CHECK(!ParseMoney(""));
+ BOOST_CHECK(!ParseMoney(" "));
+ BOOST_CHECK(!ParseMoney(" "));
// Parsing two numbers should fail
- BOOST_CHECK(!ParseMoney("1 2", ret));
- BOOST_CHECK(!ParseMoney(" 1 2 ", ret));
- BOOST_CHECK(!ParseMoney(" 1.2 3 ", ret));
- BOOST_CHECK(!ParseMoney(" 1 2.3 ", ret));
+ BOOST_CHECK(!ParseMoney("1 2"));
+ BOOST_CHECK(!ParseMoney(" 1 2 "));
+ BOOST_CHECK(!ParseMoney(" 1.2 3 "));
+ BOOST_CHECK(!ParseMoney(" 1 2.3 "));
// Attempted 63 bit overflow should fail
- BOOST_CHECK(!ParseMoney("92233720368.54775808", ret));
+ BOOST_CHECK(!ParseMoney("92233720368.54775808"));
// Parsing negative amounts must fail
- BOOST_CHECK(!ParseMoney("-1", ret));
+ BOOST_CHECK(!ParseMoney("-1"));
// Parsing strings with embedded NUL characters should fail
- BOOST_CHECK(!ParseMoney("\0-1"s, ret));
- BOOST_CHECK(!ParseMoney(STRING_WITH_EMBEDDED_NULL_CHAR, ret));
- BOOST_CHECK(!ParseMoney("1\0"s, ret));
+ BOOST_CHECK(!ParseMoney("\0-1"s));
+ BOOST_CHECK(!ParseMoney(STRING_WITH_EMBEDDED_NULL_CHAR));
+ BOOST_CHECK(!ParseMoney("1\0"s));
}
BOOST_AUTO_TEST_CASE(util_IsHex)
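ParseMoney now returns its result directly instead of filling a bool-plus-out-parameter pair, which is why the tests above call .value() on success and treat an empty optional as failure. A simplified sketch of that pattern (not Core's ParseMoney: no whitespace, decimals, or overflow handling):

    #include <cstdint>
    #include <optional>
    #include <string>

    std::optional<int64_t> ParseWholeCoins(const std::string& str)
    {
        if (str.empty()) return std::nullopt;
        int64_t units = 0;
        for (const char c : str) {
            if (c < '0' || c > '9') return std::nullopt; // reject signs, spaces, decimals
            units = units * 10 + (c - '0');
        }
        return units * 100000000; // one COIN = 100,000,000 satoshis
    }

    int main()
    {
        return ParseWholeCoins("10").value() == 1000000000 && !ParseWholeCoins("1 2") ? 0 : 1;
    }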
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index e0bc10d660..8f4ff6815b 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -77,6 +77,8 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash)
txCoinbase.vout[1].nValue = txCoinbase.vout[0].nValue;
txCoinbase.vout[0].nValue = 0;
txCoinbase.vin[0].scriptWitness.SetNull();
+ // Always pad with OP_0 at the end to avoid a bad-cb-length error
+ txCoinbase.vin[0].scriptSig = CScript{} << WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(prev_hash)->nHeight + 1) << OP_0;
pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
return pblock;
@@ -84,8 +86,8 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash)
std::shared_ptr<CBlock> MinerTestingSetup::FinalizeBlock(std::shared_ptr<CBlock> pblock)
{
- LOCK(cs_main); // For m_node.chainman->m_blockman.LookupBlockIndex
- GenerateCoinbaseCommitment(*pblock, m_node.chainman->m_blockman.LookupBlockIndex(pblock->hashPrevBlock), Params().GetConsensus());
+ const CBlockIndex* prev_block{WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};
+ GenerateCoinbaseCommitment(*pblock, prev_block, Params().GetConsensus());
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
@@ -93,6 +95,11 @@ std::shared_ptr<CBlock> MinerTestingSetup::FinalizeBlock(std::shared_ptr<CBlock>
++(pblock->nNonce);
}
+ // Submit the block header so that the miner can get the block height from
+ // the global state and the node learns the topology of the chain
+ BlockValidationState ignored;
+ BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders({pblock->GetBlockHeader()}, ignored, Params()));
+
return pblock;
}
@@ -147,13 +154,6 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
}
bool ignored;
- BlockValidationState state;
- std::vector<CBlockHeader> headers;
- std::transform(blocks.begin(), blocks.end(), std::back_inserter(headers), [](std::shared_ptr<const CBlock> b) { return b->GetBlockHeader(); });
-
- // Process all the headers so we understand the toplogy of the chain
- BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders(headers, state, Params()));
-
// Connect the genesis block and drain any outstanding events
BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlock(Params(), std::make_shared<CBlock>(Params().GenesisBlock()), true, &ignored));
SyncWithValidationInterfaceQueue();
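FinalizeBlock above now looks up the previous block index with WITH_LOCK, which evaluates an expression while holding cs_main and hands the result back with the lock already released. A function-form stand-in for that pattern (names are illustrative, not Core's macro):

    #include <mutex>

    template <typename Mutex, typename Fn>
    auto WithLock(Mutex& mutex, Fn&& fn)
    {
        std::lock_guard<Mutex> lock{mutex};
        return fn(); // computed under the lock, used after it is released
    }

    std::mutex g_main_mutex;
    int g_tip_height{100};

    int NextBlockHeight()
    {
        // Analogous to: WITH_LOCK(::cs_main, return LookupBlockIndex(...)->nHeight + 1)
        return WithLock(g_main_mutex, [] { return g_tip_height + 1; });
    }

    int main() { return NextBlockHeight() == 101 ? 0 : 1; }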
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
index 92d8cf2e7d..315ef22599 100644
--- a/src/test/validation_chainstate_tests.cpp
+++ b/src/test/validation_chainstate_tests.cpp
@@ -20,6 +20,7 @@ BOOST_FIXTURE_TEST_SUITE(validation_chainstate_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
{
ChainstateManager manager;
+ WITH_LOCK(::cs_main, manager.m_blockman.m_block_tree_db = std::make_unique<CBlockTreeDB>(1 << 20, true));
CTxMemPool mempool;
//! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view.
@@ -35,7 +36,7 @@ BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
return outp;
};
- CChainState& c1 = WITH_LOCK(cs_main, return manager.InitializeChainstate(mempool));
+ CChainState& c1 = WITH_LOCK(cs_main, return manager.InitializeChainstate(&mempool));
c1.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23));
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 7c1db9d4b9..0bd378631b 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
// Create a legacy (IBD) chainstate.
//
- CChainState& c1 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(mempool));
+ CChainState& c1 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(&mempool));
chainstates.push_back(&c1);
c1.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
@@ -66,7 +66,7 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
//
const uint256 snapshot_blockhash = GetRandHash();
CChainState& c2 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(
- mempool, snapshot_blockhash));
+ &mempool, snapshot_blockhash));
chainstates.push_back(&c2);
BOOST_CHECK_EQUAL(manager.SnapshotBlockhash().value(), snapshot_blockhash);
@@ -129,7 +129,7 @@ BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches)
// Create a legacy (IBD) chainstate.
//
- CChainState& c1 = WITH_LOCK(cs_main, return manager.InitializeChainstate(mempool));
+ CChainState& c1 = WITH_LOCK(cs_main, return manager.InitializeChainstate(&mempool));
chainstates.push_back(&c1);
c1.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
@@ -147,7 +147,7 @@ BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches)
// Create a snapshot-based chainstate.
//
- CChainState& c2 = WITH_LOCK(cs_main, return manager.InitializeChainstate(mempool, GetRandHash()));
+ CChainState& c2 = WITH_LOCK(cs_main, return manager.InitializeChainstate(&mempool, GetRandHash()));
chainstates.push_back(&c2);
c2.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
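InitializeChainstate now takes the mempool as a pointer rather than a reference, so "no mempool attached" is expressible as nullptr. A minimal shape of that nullable-dependency pattern (class and member names are illustrative):

    class Mempool;

    class Chainstate
    {
    public:
        explicit Chainstate(Mempool* mempool) : m_mempool{mempool} {}
        bool HasMempool() const { return m_mempool != nullptr; }

    private:
        Mempool* m_mempool; // may be null: the chainstate works without a mempool
    };

    int main()
    {
        Chainstate without_pool{nullptr};
        return without_pool.HasMempool() ? 1 : 0;
    }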
diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp
index a3b344d2c9..22aafcaa6c 100644
--- a/src/test/validation_flush_tests.cpp
+++ b/src/test/validation_flush_tests.cpp
@@ -20,10 +20,9 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
{
CTxMemPool mempool;
BlockManager blockman{};
- CChainState chainstate{mempool, blockman};
+ CChainState chainstate{&mempool, blockman};
chainstate.InitCoinsDB(/*cache_size_bytes*/ 1 << 10, /*in_memory*/ true, /*should_wipe*/ false);
WITH_LOCK(::cs_main, chainstate.InitCoinsCache(1 << 10));
- CTxMemPool tx_pool{};
constexpr bool is_64_bit = sizeof(void*) == 8;
@@ -57,7 +56,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// Without any coins in the cache, we shouldn't need to flush.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::OK);
// If the initial memory allocations of cacheCoins don't match these common
@@ -72,7 +71,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
}
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::CRITICAL);
BOOST_TEST_MESSAGE("Exiting cache flush tests early due to unsupported arch");
@@ -93,7 +92,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::OK);
}
@@ -101,26 +100,26 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
for (int i{0}; i < 4; ++i) {
add_coin(view);
print_view_mem_usage(view);
- if (chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
+ if (chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
CoinsCacheSizeState::CRITICAL) {
break;
}
}
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::CRITICAL);
// Passing non-zero max mempool usage should allow us more headroom.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
CoinsCacheSizeState::OK);
for (int i{0}; i < 3; ++i) {
add_coin(view);
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
CoinsCacheSizeState::OK);
}
@@ -136,7 +135,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
BOOST_CHECK(usage_percentage >= 0.9);
BOOST_CHECK(usage_percentage < 1);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, 1 << 10),
CoinsCacheSizeState::LARGE);
}
@@ -144,7 +143,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
for (int i{0}; i < 1000; ++i) {
add_coin(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool),
+ chainstate.GetCoinsCacheSizeState(),
CoinsCacheSizeState::OK);
}
@@ -152,7 +151,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// preallocated memory that doesn't get reclaimed even after flush.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, 0),
CoinsCacheSizeState::CRITICAL);
view.SetBestBlock(InsecureRand256());
@@ -160,7 +159,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ chainstate.GetCoinsCacheSizeState(MAX_COINS_CACHE_BYTES, 0),
CoinsCacheSizeState::CRITICAL);
}
diff --git a/src/tinyformat.h b/src/tinyformat.h
index bc893ccda5..bedaa14007 100644
--- a/src/tinyformat.h
+++ b/src/tinyformat.h
@@ -797,27 +797,27 @@ inline const char* streamStateFromFormat(std::ostream& out, bool& positionalMode
break;
case 'X':
out.setf(std::ios::uppercase);
- // Falls through
+ [[fallthrough]];
case 'x': case 'p':
out.setf(std::ios::hex, std::ios::basefield);
intConversion = true;
break;
case 'E':
out.setf(std::ios::uppercase);
- // Falls through
+ [[fallthrough]];
case 'e':
out.setf(std::ios::scientific, std::ios::floatfield);
out.setf(std::ios::dec, std::ios::basefield);
break;
case 'F':
out.setf(std::ios::uppercase);
- // Falls through
+ [[fallthrough]];
case 'f':
out.setf(std::ios::fixed, std::ios::floatfield);
break;
case 'A':
out.setf(std::ios::uppercase);
- // Falls through
+ [[fallthrough]];
case 'a':
# ifdef _MSC_VER
// Workaround https://developercommunity.visualstudio.com/content/problem/520472/hexfloat-stream-output-does-not-ignore-precision-a.html
@@ -829,7 +829,7 @@ inline const char* streamStateFromFormat(std::ostream& out, bool& positionalMode
break;
case 'G':
out.setf(std::ios::uppercase);
- // Falls through
+ [[fallthrough]];
case 'g':
out.setf(std::ios::dec, std::ios::basefield);
// As in boost::format, let stream decide float format.
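The hunks above swap tinyformat's "// Falls through" comments for the C++17 [[fallthrough]] attribute, which documents the intent to the compiler and keeps -Wimplicit-fallthrough quiet. A minimal standalone sketch of the pattern, not taken from tinyformat itself:

    // 'X' intentionally falls through to the lowercase-'x' handling after
    // recording that uppercase output was requested.
    void classify(char conversion, bool& uppercase, bool& hex)
    {
        switch (conversion) {
        case 'X':
            uppercase = true;
            [[fallthrough]];
        case 'x':
            hex = true;
            break;
        default:
            break;
        }
    }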
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index a0499fa51f..bb296456ba 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -132,28 +132,35 @@ void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ct
bool TorControlConnection::Connect(const std::string& tor_control_center, const ConnectionCB& _connected, const ConnectionCB& _disconnected)
{
- if (b_conn)
+ if (b_conn) {
Disconnect();
- // Parse tor_control_center address:port
- struct sockaddr_storage connect_to_addr;
- int connect_to_addrlen = sizeof(connect_to_addr);
- if (evutil_parse_sockaddr_port(tor_control_center.c_str(),
- (struct sockaddr*)&connect_to_addr, &connect_to_addrlen)<0) {
+ }
+
+ CService control_service;
+ if (!Lookup(tor_control_center, control_service, 9051, fNameLookup)) {
+ LogPrintf("tor: Failed to look up control center %s\n", tor_control_center);
+ return false;
+ }
+
+ struct sockaddr_storage control_address;
+ socklen_t control_address_len = sizeof(control_address);
+ if (!control_service.GetSockAddr(reinterpret_cast<struct sockaddr*>(&control_address), &control_address_len)) {
LogPrintf("tor: Error parsing socket address %s\n", tor_control_center);
return false;
}
// Create a new socket, set up callbacks and enable notification bits
b_conn = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
- if (!b_conn)
+ if (!b_conn) {
return false;
+ }
bufferevent_setcb(b_conn, TorControlConnection::readcb, nullptr, TorControlConnection::eventcb, this);
bufferevent_enable(b_conn, EV_READ|EV_WRITE);
this->connected = _connected;
this->disconnected = _disconnected;
// Finally, connect to tor_control_center
- if (bufferevent_socket_connect(b_conn, (struct sockaddr*)&connect_to_addr, connect_to_addrlen) < 0) {
+ if (bufferevent_socket_connect(b_conn, reinterpret_cast<struct sockaddr*>(&control_address), control_address_len) < 0) {
LogPrintf("tor: Error connecting to address %s\n", tor_control_center);
return false;
}
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index c5a4bbf1b0..d5a888ac67 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -151,33 +151,17 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
}
}
-bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const
+bool CTxMemPool::CalculateAncestorsAndCheckLimits(size_t entry_size,
+ size_t entry_count,
+ setEntries& setAncestors,
+ CTxMemPoolEntry::Parents& staged_ancestors,
+ uint64_t limitAncestorCount,
+ uint64_t limitAncestorSize,
+ uint64_t limitDescendantCount,
+ uint64_t limitDescendantSize,
+ std::string &errString) const
{
- CTxMemPoolEntry::Parents staged_ancestors;
- const CTransaction &tx = entry.GetTx();
-
- if (fSearchForParents) {
- // Get parents of this transaction that are in the mempool
- // GetMemPoolParents() is only valid for entries in the mempool, so we
- // iterate mapTx to find parents.
- for (unsigned int i = 0; i < tx.vin.size(); i++) {
- std::optional<txiter> piter = GetIter(tx.vin[i].prevout.hash);
- if (piter) {
- staged_ancestors.insert(**piter);
- if (staged_ancestors.size() + 1 > limitAncestorCount) {
- errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount);
- return false;
- }
- }
- }
- } else {
- // If we're not searching for parents, we require this to be an
- // entry in the mempool already.
- txiter it = mapTx.iterator_to(entry);
- staged_ancestors = it->GetMemPoolParentsConst();
- }
-
- size_t totalSizeWithAncestors = entry.GetTxSize();
+ size_t totalSizeWithAncestors = entry_size;
while (!staged_ancestors.empty()) {
const CTxMemPoolEntry& stage = staged_ancestors.begin()->get();
@@ -187,10 +171,10 @@ bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntr
staged_ancestors.erase(stage);
totalSizeWithAncestors += stageit->GetTxSize();
- if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) {
+ if (stageit->GetSizeWithDescendants() + entry_size > limitDescendantSize) {
errString = strprintf("exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantSize);
return false;
- } else if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) {
+ } else if (stageit->GetCountWithDescendants() + entry_count > limitDescendantCount) {
errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantCount);
return false;
} else if (totalSizeWithAncestors > limitAncestorSize) {
@@ -206,7 +190,7 @@ bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntr
if (setAncestors.count(parent_it) == 0) {
staged_ancestors.insert(parent);
}
- if (staged_ancestors.size() + setAncestors.size() + 1 > limitAncestorCount) {
+ if (staged_ancestors.size() + setAncestors.size() + entry_count > limitAncestorCount) {
errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount);
return false;
}
@@ -216,6 +200,80 @@ bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntr
return true;
}
+bool CTxMemPool::CheckPackageLimits(const Package& package,
+ uint64_t limitAncestorCount,
+ uint64_t limitAncestorSize,
+ uint64_t limitDescendantCount,
+ uint64_t limitDescendantSize,
+ std::string &errString) const
+{
+ CTxMemPoolEntry::Parents staged_ancestors;
+ size_t total_size = 0;
+ for (const auto& tx : package) {
+ total_size += GetVirtualTransactionSize(*tx);
+ for (const auto& input : tx->vin) {
+ std::optional<txiter> piter = GetIter(input.prevout.hash);
+ if (piter) {
+ staged_ancestors.insert(**piter);
+ if (staged_ancestors.size() + package.size() > limitAncestorCount) {
+ errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount);
+ return false;
+ }
+ }
+ }
+ }
+ // When multiple transactions are passed in, the ancestors and descendants of all transactions
+ // considered together must be within limits even if they are not interdependent. This may be
+ // stricter than the limits for each individual transaction.
+ setEntries setAncestors;
+ const auto ret = CalculateAncestorsAndCheckLimits(total_size, package.size(),
+ setAncestors, staged_ancestors,
+ limitAncestorCount, limitAncestorSize,
+ limitDescendantCount, limitDescendantSize, errString);
+ // It's possible to overestimate the ancestor/descendant totals.
+ if (!ret) errString.insert(0, "possibly ");
+ return ret;
+}
+
+bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry,
+ setEntries &setAncestors,
+ uint64_t limitAncestorCount,
+ uint64_t limitAncestorSize,
+ uint64_t limitDescendantCount,
+ uint64_t limitDescendantSize,
+ std::string &errString,
+ bool fSearchForParents /* = true */) const
+{
+ CTxMemPoolEntry::Parents staged_ancestors;
+ const CTransaction &tx = entry.GetTx();
+
+ if (fSearchForParents) {
+ // Get parents of this transaction that are in the mempool
+ // GetMemPoolParents() is only valid for entries in the mempool, so we
+ // iterate mapTx to find parents.
+ for (unsigned int i = 0; i < tx.vin.size(); i++) {
+ std::optional<txiter> piter = GetIter(tx.vin[i].prevout.hash);
+ if (piter) {
+ staged_ancestors.insert(**piter);
+ if (staged_ancestors.size() + 1 > limitAncestorCount) {
+ errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount);
+ return false;
+ }
+ }
+ }
+ } else {
+ // If we're not searching for parents, we require this to already be an
+ // entry in the mempool and use the entry's cached parents.
+ txiter it = mapTx.iterator_to(entry);
+ staged_ancestors = it->GetMemPoolParentsConst();
+ }
+
+ return CalculateAncestorsAndCheckLimits(entry.GetTxSize(), /* entry_count */ 1,
+ setAncestors, staged_ancestors,
+ limitAncestorCount, limitAncestorSize,
+ limitDescendantCount, limitDescendantSize, errString);
+}
+
void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors)
{
CTxMemPoolEntry::Parents parents = it->GetMemPoolParents();
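To see what the new entry_count parameter buys, consider the descendant-count check in CalculateAncestorsAndCheckLimits() above with an assumed limit of 25 (the default policy value): an in-mempool ancestor that already has 23 transactions counted by GetCountWithDescendants() can still accept a single new child, but not a 3-transaction package whose members would all become its descendants. Illustrative arithmetic only:

    const uint64_t limitDescendantCount = 25; // assumed default policy limit
    const uint64_t with_descendants = 23;     // stageit->GetCountWithDescendants()

    const bool single_tx_ok = with_descendants + 1 <= limitDescendantCount; // 24 <= 25, passes
    const bool package_ok = with_descendants + 3 <= limitDescendantCount;   // 26 > 25, fails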
diff --git a/src/txmempool.h b/src/txmempool.h
index ae4b16d377..0a84a6e6b1 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -18,6 +18,7 @@
#include <coins.h>
#include <indirectmap.h>
#include <policy/feerate.h>
+#include <policy/packages.h>
#include <primitives/transaction.h>
#include <random.h>
#include <sync.h>
@@ -585,6 +586,25 @@ private:
*/
std::set<uint256> m_unbroadcast_txids GUARDED_BY(cs);
+
+ /**
+ * Helper function to calculate all in-mempool ancestors of staged_ancestors and apply ancestor
+ * and descendant limits (including staged_ancestors themselves, entry_size and entry_count).
+ * @param[in] entry_size Virtual size to include in the limits.
+ * @param[in] entry_count How many entries to include in the limits.
+ * @param[in] staged_ancestors Should contain entries in the mempool.
+ * @param[out] setAncestors Will be populated with all mempool ancestors.
+ */
+ bool CalculateAncestorsAndCheckLimits(size_t entry_size,
+ size_t entry_count,
+ setEntries& setAncestors,
+ CTxMemPoolEntry::Parents &staged_ancestors,
+ uint64_t limitAncestorCount,
+ uint64_t limitAncestorSize,
+ uint64_t limitDescendantCount,
+ uint64_t limitDescendantSize,
+ std::string &errString) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
public:
indirectmap<COutPoint, const CTransaction*> mapNextTx GUARDED_BY(cs);
std::map<uint256, CAmount> mapDeltas GUARDED_BY(cs);
@@ -681,6 +701,28 @@ public:
*/
bool CalculateMemPoolAncestors(const CTxMemPoolEntry& entry, setEntries& setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string& errString, bool fSearchForParents = true) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ /** Calculate all in-mempool ancestors of a set of transactions not already in the mempool and
+ * check ancestor and descendant limits. Heuristics are used to estimate the ancestor and
+ * descendant count of all entries if the package were to be added to the mempool. The limits
+ * are applied to the union of all package transactions. For example, if the package has 3
+ * transactions and limitAncestorCount = 25, the union of all 3 sets of ancestors (including the
+ * transactions themselves) must be <= 22.
+ * @param[in] package Transaction package being evaluated for acceptance
+ * to mempool. The transactions need not be direct
+ * ancestors/descendants of each other.
+ * @param[in] limitAncestorCount Max number of txns including ancestors.
+ * @param[in] limitAncestorSize Max virtual size including ancestors.
+ * @param[in] limitDescendantCount Max number of txns including descendants.
+ * @param[in] limitDescendantSize Max virtual size including descendants.
+ * @param[out] errString Populated with error reason if a limit is hit.
+ */
+ bool CheckPackageLimits(const Package& package,
+ uint64_t limitAncestorCount,
+ uint64_t limitAncestorSize,
+ uint64_t limitDescendantCount,
+ uint64_t limitDescendantSize,
+ std::string &errString) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
/** Populate setDescendants with all in-mempool descendants of hash.
* Assumes that setDescendants includes all in-mempool descendants of anything
* already in it. */
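A hedged sketch of how a caller already holding the mempool lock might invoke the CheckPackageLimits() declared above; the literal limits mirror the documented defaults (25 transactions and 101 kvB for both ancestors and descendants) and are illustrative assumptions rather than values taken from this patch:

    // `pool` is a cs-locked CTxMemPool and `package` a Package
    // (std::vector<CTransactionRef>, per <policy/packages.h>).
    std::string err_string;
    if (!pool.CheckPackageLimits(package,
                                 /* limitAncestorCount   */ 25,
                                 /* limitAncestorSize    */ 101'000,
                                 /* limitDescendantCount */ 25,
                                 /* limitDescendantSize  */ 101'000,
                                 err_string)) {
        // err_string is prefixed with "possibly " because the heuristic may overestimate.
        LogPrintf("package rejected: %s\n", err_string);
    }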
diff --git a/src/txorphanage.h b/src/txorphanage.h
index e4266e470a..24c8318f36 100644
--- a/src/txorphanage.h
+++ b/src/txorphanage.h
@@ -47,6 +47,13 @@ public:
* (ie orphans that may have found their final missing parent, and so should be reconsidered for the mempool) */
void AddChildrenToWorkSet(const CTransaction& tx, std::set<uint256>& orphan_work_set) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);
+ /** Return how many entries exist in the orphanage */
+ size_t Size() LOCKS_EXCLUDED(::g_cs_orphans)
+ {
+ LOCK(::g_cs_orphans);
+ return m_orphans.size();
+ }
+
protected:
struct OrphanTx {
CTransactionRef tx;
diff --git a/src/util/epochguard.h b/src/util/epochguard.h
index 1570ec4eb4..3e63e093da 100644
--- a/src/util/epochguard.h
+++ b/src/util/epochguard.h
@@ -40,6 +40,9 @@ public:
Epoch() = default;
Epoch(const Epoch&) = delete;
Epoch& operator=(const Epoch&) = delete;
+ Epoch(Epoch&&) = delete;
+ Epoch& operator=(Epoch&&) = delete;
+ ~Epoch() = default;
bool guarded() const { return m_guarded; }
@@ -51,6 +54,13 @@ public:
// only allow modification via Epoch member functions
friend class Epoch;
Marker& operator=(const Marker&) = delete;
+
+ public:
+ Marker() = default;
+ Marker(const Marker&) = default;
+ Marker(Marker&&) = delete;
+ Marker& operator=(Marker&&) = delete;
+ ~Marker() = default;
};
class SCOPED_LOCKABLE Guard
diff --git a/src/util/getuniquepath.cpp b/src/util/getuniquepath.cpp
index 9839d2f624..6776e7785b 100644
--- a/src/util/getuniquepath.cpp
+++ b/src/util/getuniquepath.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <random.h>
#include <fs.h>
#include <util/strencodings.h>
diff --git a/src/util/hasher.h b/src/util/hasher.h
index fa2fea30d8..9b79a1b5f1 100644
--- a/src/util/hasher.h
+++ b/src/util/hasher.h
@@ -33,10 +33,6 @@ public:
SaltedOutpointHasher();
/**
- * This *must* return size_t. With Boost 1.46 on 32-bit systems the
- * unordered_map will behave unpredictably if the custom hasher returns a
- * uint64_t, resulting in failures when syncing the chain (#4634).
- *
* Having the hash noexcept allows libstdc++'s unordered_map to recalculate
* the hash during rehash, so it does not have to cache the value. This
* reduces node's memory by sizeof(size_t). The required recalculation has
diff --git a/src/util/moneystr.cpp b/src/util/moneystr.cpp
index 3f9ce7dce4..d3f4029607 100644
--- a/src/util/moneystr.cpp
+++ b/src/util/moneystr.cpp
@@ -5,10 +5,13 @@
#include <util/moneystr.h>
+#include <amount.h>
#include <tinyformat.h>
#include <util/strencodings.h>
#include <util/string.h>
+#include <optional>
+
std::string FormatMoney(const CAmount n)
{
// Note: not using straight sprintf here because we do NOT want
@@ -35,14 +38,14 @@ std::string FormatMoney(const CAmount n)
}
-bool ParseMoney(const std::string& money_string, CAmount& nRet)
+std::optional<CAmount> ParseMoney(const std::string& money_string)
{
if (!ValidAsCString(money_string)) {
- return false;
+ return std::nullopt;
}
const std::string str = TrimString(money_string);
if (str.empty()) {
- return false;
+ return std::nullopt;
}
std::string strWhole;
@@ -62,21 +65,25 @@ bool ParseMoney(const std::string& money_string, CAmount& nRet)
break;
}
if (IsSpace(*p))
- return false;
+ return std::nullopt;
if (!IsDigit(*p))
- return false;
+ return std::nullopt;
strWhole.insert(strWhole.end(), *p);
}
if (*p) {
- return false;
+ return std::nullopt;
}
if (strWhole.size() > 10) // guard against 63 bit overflow
- return false;
+ return std::nullopt;
if (nUnits < 0 || nUnits > COIN)
- return false;
+ return std::nullopt;
int64_t nWhole = atoi64(strWhole);
- CAmount nValue = nWhole*COIN + nUnits;
- nRet = nValue;
- return true;
+ CAmount value = nWhole * COIN + nUnits;
+
+ if (!MoneyRange(value)) {
+ return std::nullopt;
+ }
+
+ return value;
}
diff --git a/src/util/moneystr.h b/src/util/moneystr.h
index 2aedbee358..b71dffd0db 100644
--- a/src/util/moneystr.h
+++ b/src/util/moneystr.h
@@ -12,6 +12,7 @@
#include <amount.h>
#include <attributes.h>
+#include <optional>
#include <string>
/* Do not use these functions to represent or parse monetary amounts to or from
@@ -19,6 +20,6 @@
*/
std::string FormatMoney(const CAmount n);
/** Parse an amount denoted in full coins. E.g. "0.0034" supplied on the command line. **/
-[[nodiscard]] bool ParseMoney(const std::string& str, CAmount& nRet);
+std::optional<CAmount> ParseMoney(const std::string& str);
#endif // BITCOIN_UTIL_MONEYSTR_H
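Callers now test the returned std::optional instead of passing an out-parameter. A minimal usage sketch; UseAmount() is a hypothetical consumer and the literal reuses the "0.0034" example from the doc comment above (0.0034 BTC is 340000 satoshis):

    if (const std::optional<CAmount> amount = ParseMoney("0.0034")) {
        UseAmount(*amount); // *amount == 340000
    } else {
        // Malformed input, too many decimal places, or a value outside MoneyRange().
    }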
diff --git a/src/util/rbf.h b/src/util/rbf.h
index 6a20b37de5..4eb44b904f 100644
--- a/src/util/rbf.h
+++ b/src/util/rbf.h
@@ -11,8 +11,15 @@ class CTransaction;
static const uint32_t MAX_BIP125_RBF_SEQUENCE = 0xfffffffd;
-// Check whether the sequence numbers on this transaction are signaling
-// opt-in to replace-by-fee, according to BIP 125
+/** Check whether the sequence numbers on this transaction are signaling
+* opt-in to replace-by-fee, according to BIP 125.
+* Allow opt-out of transaction replacement by setting
+* nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
+*
+* SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
+* non-replaceable transactions. Checking all inputs rather than just
+* one is for the sake of multi-party protocols, where we don't
+* want a single party to be able to disable replacement. */
bool SignalsOptInRBF(const CTransaction &tx);
#endif // BITCOIN_UTIL_RBF_H
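The rule spelled out above reduces to scanning the inputs for any nSequence at or below MAX_BIP125_RBF_SEQUENCE, the same per-input loop this patch deletes from validation.cpp in favor of calling SignalsOptInRBF(). A minimal sketch of that check, not necessarily identical to the implementation in src/util/rbf.cpp:

    // A transaction signals BIP125 replaceability if at least one input
    // carries nSequence <= MAX_BIP125_RBF_SEQUENCE (0xfffffffd).
    bool SignalsOptInRBFSketch(const CTransaction& tx)
    {
        for (const CTxIn& txin : tx.vin) {
            if (txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) return true;
        }
        return false;
    }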
diff --git a/src/util/settings.cpp b/src/util/settings.cpp
index b92b1d30c3..846b34089d 100644
--- a/src/util/settings.cpp
+++ b/src/util/settings.cpp
@@ -60,9 +60,15 @@ bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& va
values.clear();
errors.clear();
+ // Ok for file to not exist
+ if (!fs::exists(path)) return true;
+
fsbridge::ifstream file;
file.open(path);
- if (!file.is_open()) return true; // Ok for file not to exist.
+ if (!file.is_open()) {
+ errors.emplace_back(strprintf("%s. Please check permissions.", path.string()));
+ return false;
+ }
SettingsValue in;
if (!in.read(std::string{std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()})) {
diff --git a/src/util/string.h b/src/util/string.h
index b26facc502..5617e4acc1 100644
--- a/src/util/string.h
+++ b/src/util/string.h
@@ -65,6 +65,14 @@ inline std::string Join(const std::vector<std::string>& list, const std::string&
}
/**
+ * Create an unordered multi-line list of items.
+ */
+inline std::string MakeUnorderedList(const std::vector<std::string>& items)
+{
+ return Join(items, "\n", [](const std::string& item) { return "- " + item; });
+}
+
+/**
* Check if a string does not contain any embedded NUL (\0) characters
*/
[[nodiscard]] inline bool ValidAsCString(const std::string& str) noexcept
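For instance (hypothetical input), the helper added above turns a vector of strings into one dash-prefixed line per item, which is exactly how the settings-file errors are rendered in the util/system.cpp hunk that follows:

    // Produces "- first\n- second".
    const std::string listing = MakeUnorderedList({"first", "second"});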
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 65c16fcd97..08f62f1da7 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -502,11 +502,11 @@ bool ArgsManager::InitSettings(std::string& error)
std::vector<std::string> errors;
if (!ReadSettingsFile(&errors)) {
- error = strprintf("Failed loading settings file:\n- %s\n", Join(errors, "\n- "));
+ error = strprintf("Failed loading settings file:\n%s\n", MakeUnorderedList(errors));
return false;
}
if (!WriteSettingsFile(&errors)) {
- error = strprintf("Failed saving settings file:\n- %s\n", Join(errors, "\n- "));
+ error = strprintf("Failed saving settings file:\n%s\n", MakeUnorderedList(errors));
return false;
}
return true;
@@ -904,6 +904,11 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
const std::string confPath = GetArg("-conf", BITCOIN_CONF_FILENAME);
fsbridge::ifstream stream(GetConfigFile(confPath));
+ // not ok to have a config file specified that cannot be opened
+ if (IsArgSet("-conf") && !stream.good()) {
+ error = strprintf("specified config file \"%s\" could not be opened.", confPath);
+ return false;
+ }
// ok to not have a config file
if (stream.good()) {
if (!ReadConfigStream(stream, confPath, error, ignore_invalid_keys)) {
diff --git a/src/util/system.h b/src/util/system.h
index 3547bad585..3c1399629c 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -205,6 +205,7 @@ protected:
*/
bool UseDefaultSection(const std::string& arg) const EXCLUSIVE_LOCKS_REQUIRED(cs_args);
+ public:
/**
* Get setting value.
*
@@ -219,7 +220,6 @@ protected:
*/
std::vector<util::SettingsValue> GetSettingsList(const std::string& arg) const;
-public:
ArgsManager();
~ArgsManager();
diff --git a/src/util/translation.h b/src/util/translation.h
index 99899ef3c2..62388b568c 100644
--- a/src/util/translation.h
+++ b/src/util/translation.h
@@ -28,6 +28,12 @@ struct bilingual_str {
{
return original.empty();
}
+
+ void clear()
+ {
+ original.clear();
+ translated.clear();
+ }
};
inline bilingual_str operator+(bilingual_str lhs, const bilingual_str& rhs)
diff --git a/src/validation.cpp b/src/validation.cpp
index 65d2dfa3b7..7e3663c465 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -19,13 +19,13 @@
#include <flatfile.h>
#include <hash.h>
#include <index/blockfilterindex.h>
-#include <index/txindex.h>
#include <logging.h>
#include <logging/timer.h>
#include <node/blockstorage.h>
#include <node/coinstats.h>
#include <node/ui_interface.h>
#include <policy/policy.h>
+#include <policy/rbf.h>
#include <policy/settings.h>
#include <pow.h>
#include <primitives/block.h>
@@ -48,6 +48,7 @@
#include <util/rbf.h>
#include <util/strencodings.h>
#include <util/system.h>
+#include <util/trace.h>
#include <util/translation.h>
#include <validationinterface.h>
#include <warnings.h>
@@ -170,8 +171,6 @@ CBlockIndex* BlockManager::FindForkInGlobalIndex(const CChain& chain, const CBlo
return chain.Genesis();
}
-std::unique_ptr<CBlockTreeDB> pblocktree;
-
bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
@@ -329,23 +328,14 @@ static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_
return true;
}
-/* Make mempool consistent after a reorg, by re-adding or recursively erasing
- * disconnected block transactions from the mempool, and also removing any
- * other transactions from the mempool that are no longer valid given the new
- * tip/height.
- *
- * Note: we assume that disconnectpool only contains transactions that are NOT
- * confirmed in the current chain nor already in the mempool (otherwise,
- * in-mempool descendants of such transactions would be removed).
- *
- * Passing fAddToMempool=false will skip trying to add the transactions back,
- * and instead just erase from the mempool as needed.
- */
-
-static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs)
+void CChainState::MaybeUpdateMempoolForReorg(
+ DisconnectedBlockTransactions& disconnectpool,
+ bool fAddToMempool)
{
+ if (!m_mempool) return;
+
AssertLockHeld(cs_main);
- AssertLockHeld(mempool.cs);
+ AssertLockHeld(m_mempool->cs);
std::vector<uint256> vHashUpdate;
// disconnectpool's insertion_order index sorts the entries from
// oldest to newest, but the oldest entry will be the last tx from the
@@ -357,11 +347,13 @@ static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& me
while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
// ignore validation errors in resurrected transactions
if (!fAddToMempool || (*it)->IsCoinBase() ||
- AcceptToMemoryPool(active_chainstate, mempool, *it, true /* bypass_limits */).m_result_type != MempoolAcceptResult::ResultType::VALID) {
+ AcceptToMemoryPool(
+ *this, *m_mempool, *it, true /* bypass_limits */).m_result_type !=
+ MempoolAcceptResult::ResultType::VALID) {
// If the transaction doesn't make it in to the mempool, remove any
// transactions that depend on it (which would now be orphans).
- mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
- } else if (mempool.exists((*it)->GetHash())) {
+ m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
+ } else if (m_mempool->exists((*it)->GetHash())) {
vHashUpdate.push_back((*it)->GetHash());
}
++it;
@@ -372,12 +364,16 @@ static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& me
// previously-confirmed transactions back to the mempool.
// UpdateTransactionsFromBlock finds descendants of any transactions in
// the disconnectpool that were added back and cleans up the mempool state.
- mempool.UpdateTransactionsFromBlock(vHashUpdate);
+ m_mempool->UpdateTransactionsFromBlock(vHashUpdate);
// We also need to remove any now-immature transactions
- mempool.removeForReorg(active_chainstate, STANDARD_LOCKTIME_VERIFY_FLAGS);
+ m_mempool->removeForReorg(*this, STANDARD_LOCKTIME_VERIFY_FLAGS);
// Re-limit mempool size, in case we added any transactions
- LimitMempoolSize(mempool, active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
+ LimitMempoolSize(
+ *m_mempool,
+ this->CoinsTip(),
+ gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
+ std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
}
/**
@@ -419,7 +415,7 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationS
}
// Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
- return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullSciptStore = */ true, txdata);
+ return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata);
}
namespace {
@@ -479,8 +475,10 @@ private:
bool m_replacement_transaction;
CAmount m_base_fees;
CAmount m_modified_fees;
- CAmount m_conflicting_fees;
- size_t m_conflicting_size;
+ /** Total modified fees of all transactions being replaced. */
+ CAmount m_conflicting_fees{0};
+ /** Total virtual size of all transactions being replaced. */
+ size_t m_conflicting_size{0};
const CTransactionRef& m_ptx;
const uint256& m_hash;
@@ -587,9 +585,13 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
- // is it already in the memory pool?
- if (m_pool.exists(hash)) {
+ if (m_pool.exists(GenTxid(true, tx.GetWitnessHash()))) {
+ // Exact transaction already exists in the mempool.
return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
+ } else if (m_pool.exists(GenTxid(false, tx.GetHash()))) {
+ // Transaction with the same non-witness data but different witness (same txid, different
+ // wtxid) already exists in the mempool.
+ return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-same-nonwitness-data-in-mempool");
}
// Check for conflicts with in-memory transactions
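The two exists() lookups above distinguish an exact match (by wtxid) from a same-txid-different-witness match, using the GenTxid wrapper in the same way as the hunk itself; `pool` and `tx` are assumed to be a cs-locked CTxMemPool and a CTransaction in scope:

    // The txid commits only to the non-witness data; the wtxid also commits
    // to the witness, so it identifies one exact serialization.
    const bool exact_match = pool.exists(GenTxid(true, tx.GetWitnessHash()));
    const bool same_txid_other_witness =
        !exact_match && pool.exists(GenTxid(false, tx.GetHash()));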
@@ -603,14 +605,6 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
}
if (!setConflicts.count(ptxConflicting->GetHash()))
{
- // Allow opt-out of transaction replacement by setting
- // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
- //
- // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
- // non-replaceable transactions. All inputs rather than just one
- // is for the sake of multi-party protocols, where we don't
- // want a single party to be able to disable replacement.
- //
// Transactions that don't explicitly signal replaceability are
// *not* replaceable with the current logic, even if one of their
// unconfirmed ancestors signals replaceability. This diverges
@@ -618,16 +612,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
// Applications relying on first-seen mempool behavior should
// check all unconfirmed ancestors; otherwise an opt-in ancestor
// might be replaced, causing removal of this descendant.
- bool fReplacementOptOut = true;
- for (const CTxIn &_txin : ptxConflicting->vin)
- {
- if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
- {
- fReplacementOptOut = false;
- break;
- }
- }
- if (fReplacementOptOut) {
+ if (!SignalsOptInRBF(*ptxConflicting)) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
}
@@ -797,11 +782,6 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
}
}
- // Check if it's economically rational to mine this transaction rather
- // than the ones it replaces.
- nConflictingFees = 0;
- nConflictingSize = 0;
- uint64_t nConflictingCount = 0;
// If we don't hold the lock allConflicting might be incomplete; the
// subsequent RemoveStaged() and addUnchecked() calls don't guarantee
@@ -809,9 +789,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
fReplacementTransaction = setConflicts.size();
if (fReplacementTransaction)
{
+ std::string err_string;
CFeeRate newFeeRate(nModifiedFees, nSize);
- std::set<uint256> setConflictsParents;
- const int maxDescendantsToVisit = 100;
for (const auto& mi : setIterConflicting) {
// Don't allow the replacement to reduce the feerate of the
// mempool.
@@ -836,33 +815,26 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
newFeeRate.ToString(),
oldFeeRate.ToString()));
}
+ }
+
+ // Calculate all conflicting entries and enforce Rule #5.
+ if (!GetEntriesForConflicts(tx, m_pool, setIterConflicting, allConflicting, err_string)) {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements", err_string);
+ }
+
+ // Check if it's economically rational to mine this transaction rather
+ // than the ones it replaces.
+ for (CTxMemPool::txiter it : allConflicting) {
+ nConflictingFees += it->GetModifiedFee();
+ nConflictingSize += it->GetTxSize();
+ }
+ std::set<uint256> setConflictsParents;
+ for (const auto& mi : setIterConflicting) {
for (const CTxIn &txin : mi->GetTx().vin)
{
setConflictsParents.insert(txin.prevout.hash);
}
-
- nConflictingCount += mi->GetCountWithDescendants();
- }
- // This potentially overestimates the number of actual descendants
- // but we just want to be conservative to avoid doing too much
- // work.
- if (nConflictingCount <= maxDescendantsToVisit) {
- // If not too many to replace, then calculate the set of
- // transactions that would have to be evicted
- for (CTxMemPool::txiter it : setIterConflicting) {
- m_pool.CalculateDescendants(it, allConflicting);
- }
- for (CTxMemPool::txiter it : allConflicting) {
- nConflictingFees += it->GetModifiedFee();
- nConflictingSize += it->GetTxSize();
- }
- } else {
- return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
- strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
- hash.ToString(),
- nConflictingCount,
- maxDescendantsToVisit));
}
for (unsigned int j = 0; j < tx.vin.size(); j++)
@@ -1080,6 +1052,19 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
m_viewmempool.PackageAddTransaction(ws.m_ptx);
}
+ // Apply package mempool ancestor/descendant limits. Skip if there is only one transaction,
+ // because it's unnecessary. Also, CPFP carve out can increase the limit for individual
+ // transactions, but this exemption is not extended to packages in CheckPackageLimits().
+ std::string err_string;
+ if (txns.size() > 1 &&
+ !m_pool.CheckPackageLimits(txns, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants,
+ m_limit_descendant_size, err_string)) {
+ // All transactions must have individually passed mempool ancestor and descendant limits
+ // inside of PreChecks(), so this is separate from an individual transaction error.
+ package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string);
+ return PackageMempoolAcceptResult(package_state, std::move(results));
+ }
+
for (Workspace& ws : workspaces) {
PrecomputedTransactionData txdata;
if (!PolicyScriptChecks(args, ws, txdata)) {
@@ -1155,33 +1140,6 @@ PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTx
return result;
}
-CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
-{
- LOCK(cs_main);
-
- if (block_index) {
- CBlock block;
- if (ReadBlockFromDisk(block, block_index, consensusParams)) {
- for (const auto& tx : block.vtx) {
- if (tx->GetHash() == hash) {
- hashBlock = block_index->GetBlockHash();
- return tx;
- }
- }
- }
- return nullptr;
- }
- if (mempool) {
- CTransactionRef ptx = mempool->get(hash);
- if (ptx) return ptx;
- }
- if (g_txindex) {
- CTransactionRef tx;
- if (g_txindex->FindTx(hash, hashBlock, tx)) return tx;
- }
- return nullptr;
-}
-
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
@@ -1208,7 +1166,7 @@ void CoinsViews::InitCache()
m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
-CChainState::CChainState(CTxMemPool& mempool, BlockManager& blockman, std::optional<uint256> from_snapshot_blockhash)
+CChainState::CChainState(CTxMemPool* mempool, BlockManager& blockman, std::optional<uint256> from_snapshot_blockhash)
: m_mempool(mempool),
m_params(::Params()),
m_blockman(blockman),
@@ -1646,13 +1604,8 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens
pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
*pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
{
- flags |= SCRIPT_VERIFY_P2SH;
- }
-
- // Enforce WITNESS rules whenever P2SH is in effect (and the segwit
- // deployment is defined).
- if (flags & SCRIPT_VERIFY_P2SH && DeploymentEnabled(consensusparams, Consensus::DEPLOYMENT_SEGWIT)) {
- flags |= SCRIPT_VERIFY_WITNESS;
+ // Enforce WITNESS rules whenever P2SH is in effect
+ flags |= SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS;
}
// Enforce the DERSIG (BIP66) rule
@@ -1998,23 +1951,31 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
+ TRACE7(validation, block_connected,
+ block.GetHash().ToString().c_str(),
+ pindex->nHeight,
+ block.vtx.size(),
+ nInputs,
+ nSigOpsCost,
+ GetTimeMicros() - nTimeStart, // in microseconds (µs)
+ block.GetHash().data()
+ );
+
return true;
}
-CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
+CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
{
return this->GetCoinsCacheSizeState(
- tx_pool,
m_coinstip_cache_size_bytes,
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
}
CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
- const CTxMemPool* tx_pool,
size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes)
{
- const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0;
+ const int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
int64_t nTotalSpace =
max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
@@ -2053,7 +2014,7 @@ bool CChainState::FlushStateToDisk(
bool fFlushForPrune = false;
bool fDoFullFlush = false;
- CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool);
+ CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
LOCK(cs_LastBlockFile);
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
// make sure we don't prune above the blockfilterindexes bestblocks
@@ -2076,7 +2037,7 @@ bool CChainState::FlushStateToDisk(
if (!setFilesToPrune.empty()) {
fFlushForPrune = true;
if (!fHavePruned) {
- pblocktree->WriteFlag("prunedblockfiles", true);
+ m_blockman.m_block_tree_db->WriteFlag("prunedblockfiles", true);
fHavePruned = true;
}
}
@@ -2128,7 +2089,7 @@ bool CChainState::FlushStateToDisk(
vBlocks.push_back(*it);
setDirtyBlockIndex.erase(it++);
}
- if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
+ if (!m_blockman.m_block_tree_db->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
return AbortNode(state, "Failed to write to block index database");
}
}
@@ -2204,12 +2165,12 @@ static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
res += warn;
}
-/** Check warning conditions and do some notifications on new chain tip set. */
-static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const CChainParams& chainParams, CChainState& active_chainstate)
- EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
+void CChainState::UpdateTip(const CBlockIndex* pindexNew)
{
// New best block
- mempool.AddTransactionsUpdated(1);
+ if (m_mempool) {
+ m_mempool->AddTransactionsUpdated(1);
+ }
{
LOCK(g_best_block_mutex);
@@ -2218,11 +2179,11 @@ static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const C
}
bilingual_str warning_messages;
- if (!active_chainstate.IsInitialBlockDownload()) {
+ if (!this->IsInitialBlockDownload()) {
const CBlockIndex* pindex = pindexNew;
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
WarningBitsConditionChecker checker(bit);
- ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
+ ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache[bit]);
if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
if (state == ThresholdState::ACTIVE) {
@@ -2237,14 +2198,14 @@ static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const C
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
FormatISO8601DateTime(pindexNew->GetBlockTime()),
- GuessVerificationProgress(chainParams.TxData(), pindexNew), active_chainstate.CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), active_chainstate.CoinsTip().GetCacheSize(),
+ GuessVerificationProgress(m_params.TxData(), pindexNew), this->CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), this->CoinsTip().GetCacheSize(),
!warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");
}
/** Disconnect m_chain's tip.
* After calling, the mempool will be in an inconsistent state, with
* transactions from disconnected blocks being added to disconnectpool. You
- * should make the mempool consistent again by calling UpdateMempoolForReorg.
+ * should make the mempool consistent again by calling MaybeUpdateMempoolForReorg.
* with cs_main held.
*
* If disconnectpool is nullptr, then no disconnected transactions are added to
@@ -2254,7 +2215,7 @@ static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const C
bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool)
{
AssertLockHeld(cs_main);
- AssertLockHeld(m_mempool.cs);
+ if (m_mempool) AssertLockHeld(m_mempool->cs);
CBlockIndex *pindexDelete = m_chain.Tip();
assert(pindexDelete);
@@ -2280,7 +2241,7 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr
return false;
}
- if (disconnectpool) {
+ if (disconnectpool && m_mempool) {
// Save transactions to re-add to mempool at end of reorg
for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
disconnectpool->addTransaction(*it);
@@ -2288,14 +2249,14 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr
while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
// Drop the earliest entry, and remove its children from the mempool.
auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
- m_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
+ m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
disconnectpool->removeEntry(it);
}
}
m_chain.SetTip(pindexDelete->pprev);
- UpdateTip(m_mempool, pindexDelete->pprev, m_params, *this);
+ UpdateTip(pindexDelete->pprev);
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
GetMainSignals().BlockDisconnected(pblock, pindexDelete);
@@ -2357,7 +2318,7 @@ public:
bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
{
AssertLockHeld(cs_main);
- AssertLockHeld(m_mempool.cs);
+ if (m_mempool) AssertLockHeld(m_mempool->cs);
assert(pindexNew->pprev == m_chain.Tip());
// Read block from disk.
@@ -2401,11 +2362,13 @@ bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
// Remove conflicting transactions from the mempool.
- m_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
- disconnectpool.removeForBlock(blockConnecting.vtx);
+ if (m_mempool) {
+ m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
+ disconnectpool.removeForBlock(blockConnecting.vtx);
+ }
// Update m_chain & related variables.
m_chain.SetTip(pindexNew);
- UpdateTip(m_mempool, pindexNew, m_params, *this);
+ UpdateTip(pindexNew);
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
@@ -2495,7 +2458,7 @@ void CChainState::PruneBlockIndexCandidates() {
bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
AssertLockHeld(cs_main);
- AssertLockHeld(m_mempool.cs);
+ if (m_mempool) AssertLockHeld(m_mempool->cs);
const CBlockIndex* pindexOldTip = m_chain.Tip();
const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);
@@ -2507,7 +2470,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex
if (!DisconnectTip(state, &disconnectpool)) {
// This is likely a fatal error, but keep the mempool consistent,
// just in case. Only remove from the mempool in this case.
- UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false);
+ MaybeUpdateMempoolForReorg(disconnectpool, false);
// If we're unable to disconnect a block during normal operation,
// then that is a failure of our local system -- we should abort
@@ -2551,7 +2514,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex
// A system error occurred (disk space, database error, ...).
// Make the mempool consistent with the current tip, just in case
// any observers try to use it before shutdown.
- UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false);
+ MaybeUpdateMempoolForReorg(disconnectpool, false);
return false;
}
} else {
@@ -2568,9 +2531,9 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex
if (fBlocksDisconnected) {
// If any blocks were disconnected, disconnectpool may be non empty. Add
// any disconnected transactions back to the mempool.
- UpdateMempoolForReorg(*this, m_mempool, disconnectpool, true);
+ MaybeUpdateMempoolForReorg(disconnectpool, true);
}
- m_mempool.check(*this);
+ if (m_mempool) m_mempool->check(*this);
CheckForkWarningConditions();
@@ -2642,7 +2605,8 @@ bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr
{
LOCK(cs_main);
- LOCK(m_mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
+ // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
+ LOCK(MempoolMutex());
CBlockIndex* starting_tip = m_chain.Tip();
bool blocks_connected = false;
do {
@@ -2792,7 +2756,9 @@ bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pind
LimitValidationInterfaceQueue();
LOCK(cs_main);
- LOCK(m_mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between
+ // Lock for as long as disconnectpool is in scope to make sure MaybeUpdateMempoolForReorg is
+ // called after DisconnectTip without unlocking in between
+ LOCK(MempoolMutex());
if (!m_chain.Contains(pindex)) break;
pindex_was_in_chain = true;
CBlockIndex *invalid_walk_tip = m_chain.Tip();
@@ -2806,7 +2772,7 @@ bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pind
// transactions back to the mempool if disconnecting was successful,
// and we're not doing a very deep invalidation (in which case
// keeping the mempool up to date is probably futile anyway).
- UpdateMempoolForReorg(*this, m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
+ MaybeUpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
if (!ret) return false;
assert(invalid_walk_tip->pprev == m_chain.Tip());
@@ -2977,10 +2943,7 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
CBlockIndex *pindex = queue.front();
queue.pop_front();
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
- {
- LOCK(cs_nBlockSequenceId);
- pindex->nSequenceId = nBlockSequenceId++;
- }
+ pindex->nSequenceId = nBlockSequenceId++;
if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
@@ -3099,25 +3062,23 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
std::vector<unsigned char> commitment;
int commitpos = GetWitnessCommitmentIndex(block);
std::vector<unsigned char> ret(32, 0x00);
- if (DeploymentEnabled(consensusParams, Consensus::DEPLOYMENT_SEGWIT)) {
- if (commitpos == NO_WITNESS_COMMITMENT) {
- uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
- CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
- CTxOut out;
- out.nValue = 0;
- out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
- out.scriptPubKey[0] = OP_RETURN;
- out.scriptPubKey[1] = 0x24;
- out.scriptPubKey[2] = 0xaa;
- out.scriptPubKey[3] = 0x21;
- out.scriptPubKey[4] = 0xa9;
- out.scriptPubKey[5] = 0xed;
- memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
- commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
- CMutableTransaction tx(*block.vtx[0]);
- tx.vout.push_back(out);
- block.vtx[0] = MakeTransactionRef(std::move(tx));
- }
+ if (commitpos == NO_WITNESS_COMMITMENT) {
+ uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
+ CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
+ CTxOut out;
+ out.nValue = 0;
+ out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
+ out.scriptPubKey[0] = OP_RETURN;
+ out.scriptPubKey[1] = 0x24;
+ out.scriptPubKey[2] = 0xaa;
+ out.scriptPubKey[3] = 0x21;
+ out.scriptPubKey[4] = 0xa9;
+ out.scriptPubKey[5] = 0xed;
+ memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
+ commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
+ CMutableTransaction tx(*block.vtx[0]);
+ tx.vout.push_back(out);
+ block.vtx[0] = MakeTransactionRef(std::move(tx));
}
UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
return commitment;
@@ -3696,11 +3657,11 @@ CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
bool BlockManager::LoadBlockIndex(
const Consensus::Params& consensus_params,
- CBlockTreeDB& blocktree,
std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
{
- if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
+ if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
return false;
+ }
// Calculate nChainWork
std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
@@ -3760,25 +3721,25 @@ void BlockManager::Unload() {
m_block_index.clear();
}
-bool CChainState::LoadBlockIndexDB()
+bool BlockManager::LoadBlockIndexDB(std::set<CBlockIndex*, CBlockIndexWorkComparator>& setBlockIndexCandidates)
{
- if (!m_blockman.LoadBlockIndex(
- m_params.GetConsensus(), *pblocktree,
+ if (!LoadBlockIndex(
+ ::Params().GetConsensus(),
setBlockIndexCandidates)) {
return false;
}
// Load block file info
- pblocktree->ReadLastBlockFile(nLastBlockFile);
+ m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
vinfoBlockFile.resize(nLastBlockFile + 1);
LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
- pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
+ m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
}
LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
for (int nFile = nLastBlockFile + 1; true; nFile++) {
CBlockFileInfo info;
- if (pblocktree->ReadBlockFileInfo(nFile, info)) {
+ if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
vinfoBlockFile.push_back(info);
} else {
break;
@@ -3788,7 +3749,7 @@ bool CChainState::LoadBlockIndexDB()
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
std::set<int> setBlkDataFiles;
- for (const std::pair<const uint256, CBlockIndex*>& item : m_blockman.m_block_index) {
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
CBlockIndex* pindex = item.second;
if (pindex->nStatus & BLOCK_HAVE_DATA) {
setBlkDataFiles.insert(pindex->nFile);
@@ -3803,13 +3764,13 @@ bool CChainState::LoadBlockIndexDB()
}
// Check whether we have ever pruned block & undo files
- pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
+ m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
if (fHavePruned)
LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
// Check whether we need to continue reindexing
bool fReindexing = false;
- pblocktree->ReadReindexing(fReindexing);
+ m_block_tree_db->ReadReindexing(fReindexing);
if(fReindexing) fReindex = true;
return true;
@@ -3817,10 +3778,11 @@ bool CChainState::LoadBlockIndexDB()
void CChainState::LoadMempool(const ArgsManager& args)
{
+ if (!m_mempool) return;
if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
- ::LoadMempool(m_mempool, *this);
+ ::LoadMempool(*m_mempool, *this);
}
- m_mempool.SetIsLoaded(!ShutdownRequested());
+ m_mempool->SetIsLoaded(!ShutdownRequested());
}
bool CChainState::LoadChainTip()
@@ -4109,7 +4071,7 @@ bool ChainstateManager::LoadBlockIndex()
// Load block index from databases
bool needs_init = fReindex;
if (!fReindex) {
- bool ret = ActiveChainstate().LoadBlockIndexDB();
+ bool ret = m_blockman.LoadBlockIndexDB(ActiveChainstate().setBlockIndexCandidates);
if (!ret) return false;
needs_init = m_blockman.m_block_index.empty();
}
@@ -4684,7 +4646,8 @@ std::vector<CChainState*> ChainstateManager::GetAll()
return out;
}
-CChainState& ChainstateManager::InitializeChainstate(CTxMemPool& mempool, const std::optional<uint256>& snapshot_blockhash)
+CChainState& ChainstateManager::InitializeChainstate(
+ CTxMemPool* mempool, const std::optional<uint256>& snapshot_blockhash)
{
bool is_snapshot = snapshot_blockhash.has_value();
std::unique_ptr<CChainState>& to_modify =
@@ -4763,7 +4726,7 @@ bool ChainstateManager::ActivateSnapshot(
}
auto snapshot_chainstate = WITH_LOCK(::cs_main, return std::make_unique<CChainState>(
- this->ActiveChainstate().m_mempool, m_blockman, base_blockhash));
+ /* mempool */ nullptr, m_blockman, base_blockhash));
{
LOCK(::cs_main);
@@ -4879,7 +4842,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
}
const auto snapshot_cache_state = WITH_LOCK(::cs_main,
- return snapshot_chainstate.GetCoinsCacheSizeState(&snapshot_chainstate.m_mempool));
+ return snapshot_chainstate.GetCoinsCacheSizeState());
if (snapshot_cache_state >=
CoinsCacheSizeState::CRITICAL) {
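Taken together, the validation.cpp changes above make the mempool optional for a chainstate: CChainState stores a CTxMemPool* and every use of m_mempool is null-checked, so only the chainstate that actually serves the node's mempool needs to pass one. A hedged sketch of both call patterns, modeled on the updated unit tests earlier in this patch (chainman, mempool, and the use of InitializeChainstate for the snapshot case are assumptions of the sketch):

    // Active chainstate keeps the mempool in sync with the tip.
    CChainState& active = WITH_LOCK(::cs_main,
        return chainman.InitializeChainstate(&mempool));

    // A snapshot-based chainstate can run without a mempool entirely.
    CChainState& snapshot = WITH_LOCK(::cs_main,
        return chainman.InitializeChainstate(/* mempool */ nullptr, GetRandHash()));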
diff --git a/src/validation.h b/src/validation.h
index 3d66e3161d..d4fcac1d48 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -140,19 +140,7 @@ void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman);
void StartScriptCheckWorkerThreads(int threads_num);
/** Stop all of the script checking worker threads */
void StopScriptCheckWorkerThreads();
-/**
- * Return transaction from the block at block_index.
- * If block_index is not provided, fall back to mempool.
- * If mempool is not provided or the tx couldn't be found in mempool, fall back to g_txindex.
- *
- * @param[in] block_index The block to read from disk, or nullptr
- * @param[in] mempool If block_index is not provided, look in the mempool, if provided
- * @param[in] hash The txid
- * @param[in] consensusParams The params
- * @param[out] hashBlock The hash of block_index, if the tx was found via block_index
- * @returns The tx if found, otherwise nullptr
- */
-CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock);
+
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str{});
@@ -211,7 +199,8 @@ struct PackageMempoolAcceptResult
/**
* Map from wtxid to finished MempoolAcceptResults. The client is responsible
* for keeping track of the transaction objects themselves. If a result is not
- * present, it means validation was unfinished for that transaction.
+ * present, it means validation was unfinished for that transaction. If there
+ * was a package-wide error (see result in m_state), m_tx_results will be empty.
*/
std::map<const uint256, const MempoolAcceptResult> m_tx_results;
@@ -239,7 +228,8 @@ MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPoo
* @param[in] txns Group of transactions which may be independent or contain
* parent-child dependencies. The transactions must not conflict
* with each other, i.e., must not spend the same inputs. If any
-* dependencies exist, parents must appear before children.
+* dependencies exist, parents must appear anywhere in the list
+* before their children.
* @returns a PackageMempoolAcceptResult which includes a MempoolAcceptResult for each transaction.
* If a transaction fails, validation will exit early and some results may be missing.
*/
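A minimal sketch of how a caller might consume these result semantics. The helper name and reporting logic are assumptions for illustration; only Package, PackageMempoolAcceptResult and MempoolAcceptResult come from the tree:

// Inspect a package result: a package-wide failure leaves m_tx_results empty,
// otherwise per-transaction entries are keyed by wtxid.
void InspectPackageResult(const Package& txns, const PackageMempoolAcceptResult& result)
{
    if (!result.m_state.IsValid() && result.m_tx_results.empty()) {
        return; // package-wide error; no per-transaction results to report
    }
    for (const CTransactionRef& tx : txns) {
        const auto it = result.m_tx_results.find(tx->GetWitnessHash());
        if (it == result.m_tx_results.end()) continue; // validation unfinished for this tx
        const MempoolAcceptResult& tx_result{it->second};
        (void)tx_result; // tx_result.m_state carries the per-transaction outcome
    }
}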
@@ -446,6 +436,10 @@ public:
*/
std::multimap<CBlockIndex*, CBlockIndex*> m_blocks_unlinked;
+ std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
+
+ bool LoadBlockIndexDB(std::set<CBlockIndex*, CBlockIndexWorkComparator>& setBlockIndexCandidates) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
/**
* Load the blocktree off disk and into memory. Populate certain metadata
* per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
@@ -456,7 +450,6 @@ public:
*/
bool LoadBlockIndex(
const Consensus::Params& consensus_params,
- CBlockTreeDB& blocktree,
std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -565,9 +558,8 @@ protected:
* Every received block is assigned a unique and increasing identifier, so we
* know which one to give priority in case of a fork.
*/
- RecursiveMutex cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
- int32_t nBlockSequenceId = 1;
+ int32_t nBlockSequenceId GUARDED_BY(::cs_main) = 1;
/** Decreasing counter (used by subsequent preciousblock calls). */
int32_t nBlockReverseSequenceId = -1;
/** chainwork for the last block that preciousblock has been applied to. */
@@ -587,8 +579,9 @@ protected:
*/
mutable std::atomic<bool> m_cached_finished_ibd{false};
- //! mempool that is kept in sync with the chain
- CTxMemPool& m_mempool;
+ //! Optional mempool that is kept in sync with the chain.
+ //! Only the active chainstate has a mempool.
+ CTxMemPool* m_mempool;
const CChainParams& m_params;
@@ -600,7 +593,10 @@ public:
//! CChainState instances.
BlockManager& m_blockman;
- explicit CChainState(CTxMemPool& mempool, BlockManager& blockman, std::optional<uint256> from_snapshot_blockhash = std::nullopt);
+ explicit CChainState(
+ CTxMemPool* mempool,
+ BlockManager& blockman,
+ std::optional<uint256> from_snapshot_blockhash = std::nullopt);
/**
* Initialize the CoinsViews UTXO set database management data structures. The in-memory
@@ -729,7 +725,7 @@ public:
CCoinsViewCache& view, bool fJustCheck = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Apply the effects of a block disconnection on the UTXO set.
- bool DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool.cs);
+ bool DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
// Manual block validity manipulation:
/** Mark a block as precious and reorganize.
@@ -752,7 +748,7 @@ public:
void PruneBlockIndexCandidates();
- void UnloadBlockIndex();
+ void UnloadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/** Check whether we are doing an initial block download (synchronizing from disk or network) */
bool IsInitialBlockDownload() const;
@@ -773,19 +769,17 @@ public:
//! Dictates whether we need to flush the cache to disk or not.
//!
//! @return the state of the size of the coins cache.
- CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
- EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+ CoinsCacheSizeState GetCoinsCacheSizeState() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
CoinsCacheSizeState GetCoinsCacheSizeState(
- const CTxMemPool* tx_pool,
size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
std::string ToString() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
private:
- bool ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool.cs);
- bool ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool.cs);
+ bool ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
+ bool ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
void InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
CBlockIndex* FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -796,7 +790,32 @@ private:
void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
void InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- bool LoadBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ //! Indirection necessary to make lock annotations work with an optional mempool.
+ RecursiveMutex* MempoolMutex() const LOCK_RETURNED(m_mempool->cs)
+ {
+ return m_mempool ? &m_mempool->cs : nullptr;
+ }
+
+ /**
+ * Make mempool consistent after a reorg, by re-adding or recursively erasing
+ * disconnected block transactions from the mempool, and also removing any
+ * other transactions from the mempool that are no longer valid given the new
+ * tip/height.
+ *
+ * Note: we assume that disconnectpool only contains transactions that are NOT
+ * confirmed in the current chain nor already in the mempool (otherwise,
+ * in-mempool descendants of such transactions would be removed).
+ *
+ * Passing fAddToMempool=false will skip trying to add the transactions back,
+ * and instead just erase from the mempool as needed.
+ */
+ void MaybeUpdateMempoolForReorg(
+ DisconnectedBlockTransactions& disconnectpool,
+ bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
+
+ /** Check warning conditions and do some notifications on new chain tip set. */
+ void UpdateTip(const CBlockIndex* pindexNew)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
friend ChainstateManager;
};
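Because m_mempool is now a pointer, every CChainState member that touches the mempool has to tolerate it being null. A minimal sketch of the pattern, using a hypothetical member that does not exist in the tree:

// Only the active chainstate keeps a mempool in sync; chainstates constructed
// with a null mempool (as in ActivateSnapshot above) must skip mempool work.
void CChainState::ExampleMempoolMaintenance() // hypothetical, for illustration only
{
    if (!m_mempool) return;          // nothing to keep in sync for this chainstate
    LOCK2(::cs_main, m_mempool->cs); // the lock order the annotations above require
    // ... operate on *m_mempool here ...
}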
@@ -907,8 +926,10 @@ public:
// constructor
//! @param[in] snapshot_blockhash If given, signify that this chainstate
//! is based on a snapshot.
- CChainState& InitializeChainstate(CTxMemPool& mempool, const std::optional<uint256>& snapshot_blockhash = std::nullopt)
- EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+ CChainState& InitializeChainstate(
+ CTxMemPool* mempool,
+ const std::optional<uint256>& snapshot_blockhash = std::nullopt)
+ LIFETIMEBOUND EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! Get all chainstates currently being used.
std::vector<CChainState*> GetAll();
@@ -1016,9 +1037,6 @@ public:
}
};
-/** Global variable that points to the active block tree (protected by cs_main) */
-extern std::unique_ptr<CBlockTreeDB> pblocktree;
-
using FopenFn = std::function<FILE*(const fs::path&, const char*)>;
/** Dump the mempool to disk. */
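With the pblocktree global gone, call sites are expected to reach the block tree database through the owning BlockManager. A sketch under that assumption; the helper and the flag name are illustrative only:

// Hypothetical helper: read a status flag through the BlockManager-owned database
// instead of the removed ::pblocktree global.
bool ReadBlockTreeFlag(ChainstateManager& chainman, const std::string& flag_name)
{
    LOCK(::cs_main); // m_block_tree_db is GUARDED_BY(::cs_main)
    bool value{false};
    chainman.m_blockman.m_block_tree_db->ReadFlag(flag_name, value);
    return value;
}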
diff --git a/src/wallet/coinselection.cpp b/src/wallet/coinselection.cpp
index 6d502e1df1..1699424657 100644
--- a/src/wallet/coinselection.cpp
+++ b/src/wallet/coinselection.cpp
@@ -195,7 +195,7 @@ static void ApproximateBestSubset(const std::vector<OutputGroup>& groups, const
//the selection random.
if (nPass == 0 ? insecure_rand.randbool() : !vfIncluded[i])
{
- nTotal += groups[i].m_value;
+ nTotal += groups[i].GetSelectionAmount();
vfIncluded[i] = true;
if (nTotal >= nTargetValue)
{
@@ -205,7 +205,7 @@ static void ApproximateBestSubset(const std::vector<OutputGroup>& groups, const
nBest = nTotal;
vfBest = vfIncluded;
}
- nTotal -= groups[i].m_value;
+ nTotal -= groups[i].GetSelectionAmount();
vfIncluded[i] = false;
}
}
@@ -341,3 +341,30 @@ CAmount OutputGroup::GetSelectionAmount() const
{
return m_subtract_fee_outputs ? m_value : effective_value;
}
+
+CAmount GetSelectionWaste(const std::set<CInputCoin>& inputs, CAmount change_cost, CAmount target, bool use_effective_value)
+{
+ // This function should not be called with empty inputs as that would mean the selection failed
+ assert(!inputs.empty());
+
+ // Always consider the cost of spending an input now vs in the future.
+ CAmount waste = 0;
+ CAmount selected_effective_value = 0;
+ for (const CInputCoin& coin : inputs) {
+ waste += coin.m_fee - coin.m_long_term_fee;
+ selected_effective_value += use_effective_value ? coin.effective_value : coin.txout.nValue;
+ }
+
+ if (change_cost) {
+ // Consider the cost of making change and spending it in the future
+ // If we aren't making change, the caller should've set change_cost to 0
+ assert(change_cost > 0);
+ waste += change_cost;
+ } else {
+ // When we are not making change (change_cost == 0), consider the excess we are throwing away to fees
+ assert(selected_effective_value >= target);
+ waste += selected_effective_value - target;
+ }
+
+ return waste;
+}
diff --git a/src/wallet/coinselection.h b/src/wallet/coinselection.h
index 7a3fb82139..35617d455b 100644
--- a/src/wallet/coinselection.h
+++ b/src/wallet/coinselection.h
@@ -166,6 +166,21 @@ struct OutputGroup
CAmount GetSelectionAmount() const;
};
+/** Compute the waste for this result given the cost of change
+ * and the opportunity cost of spending these inputs now vs in the future.
+ * If change exists, waste = change_cost + inputs * (effective_feerate - long_term_feerate)
+ * If no change, waste = excess + inputs * (effective_feerate - long_term_feerate)
+ * where excess = selected_effective_value - target
+ * change_cost = effective_feerate * change_output_size + long_term_feerate * change_spend_size
+ *
+ * @param[in] inputs The selected inputs
+ * @param[in] change_cost The cost of creating change and spending it in the future. Only used if there is change. Must be 0 if there is no change.
+ * @param[in] target The amount targeted by the coin selection algorithm.
+ * @param[in] use_effective_value Whether to use the input's effective value (when true) or the real value (when false).
+ * @return The waste
+ */
+[[nodiscard]] CAmount GetSelectionWaste(const std::set<CInputCoin>& inputs, CAmount change_cost, CAmount target, bool use_effective_value = true);
+
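A worked example with hypothetical numbers may make the trade-off concrete. Take two selected inputs whose current fees are 300 and 200 sats and whose long-term fees are 250 and 150 sats, with selected_effective_value = 10500 and target = 10000:

    input term           = (300 - 250) + (200 - 150)    = 100
    waste with change    = 100 + change_cost (say 400)  = 500
    waste without change = 100 + excess (10500 - 10000) = 600

Under these assumed values the change-making selection wastes less; a cheaper change output or a smaller excess can flip the comparison.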
bool SelectCoinsBnB(std::vector<OutputGroup>& utxo_pool, const CAmount& selection_target, const CAmount& cost_of_change, std::set<CInputCoin>& out_set, CAmount& value_ret);
// Original coin selection algorithm as a fallback
diff --git a/src/wallet/context.h b/src/wallet/context.h
index a83591154f..a382fb9021 100644
--- a/src/wallet/context.h
+++ b/src/wallet/context.h
@@ -5,11 +5,22 @@
#ifndef BITCOIN_WALLET_CONTEXT_H
#define BITCOIN_WALLET_CONTEXT_H
+#include <sync.h>
+
+#include <functional>
+#include <list>
+#include <memory>
+#include <vector>
+
class ArgsManager;
+class CWallet;
namespace interfaces {
class Chain;
+class Wallet;
} // namespace interfaces
+using LoadWalletFn = std::function<void(std::unique_ptr<interfaces::Wallet> wallet)>;
+
//! WalletContext struct containing references to state shared between CWallet
//! instances, like the reference to the chain interface, and the list of opened
//! wallets.
@@ -22,7 +33,10 @@ class Chain;
//! behavior.
struct WalletContext {
interfaces::Chain* chain{nullptr};
- ArgsManager* args{nullptr};
+ ArgsManager* args{nullptr}; // Currently a raw pointer because the memory is not managed by this struct
+ Mutex wallets_mutex;
+ std::vector<std::shared_ptr<CWallet>> wallets GUARDED_BY(wallets_mutex);
+ std::list<LoadWalletFn> wallet_load_fns GUARDED_BY(wallets_mutex);
//! Declare default constructor and destructor that are not inline, so code
//! instantiating the WalletContext struct doesn't need to #include class
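A minimal sketch of how the new guarded members are meant to be used; the helper below is hypothetical, only WalletContext, wallets_mutex and wallets come from the patch:

// Look up an open wallet by name. Any access to context.wallets or
// context.wallet_load_fns must hold wallets_mutex.
std::shared_ptr<CWallet> FindWalletByName(WalletContext& context, const std::string& name)
{
    LOCK(context.wallets_mutex);
    for (const std::shared_ptr<CWallet>& wallet : context.wallets) {
        if (wallet->GetName() == name) return wallet;
    }
    return nullptr;
}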
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index 30fef50c3b..f2de68295e 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -12,6 +12,8 @@
#include <wallet/coincontrol.h>
#include <wallet/feebumper.h>
#include <wallet/fees.h>
+#include <wallet/receive.h>
+#include <wallet/spend.h>
#include <wallet/wallet.h>
//! Check whether transaction has descendant in wallet or mempool, or has been
@@ -30,7 +32,7 @@ static feebumper::Result PreconditionChecks(const CWallet& wallet, const CWallet
}
}
- if (wtx.GetDepthInMainChain() != 0) {
+ if (wallet.GetTxDepthInMainChain(wtx) != 0) {
errors.push_back(Untranslated("Transaction has been mined, or is conflicted with a mined transaction"));
return feebumper::Result::WALLET_ERROR;
}
@@ -48,7 +50,7 @@ static feebumper::Result PreconditionChecks(const CWallet& wallet, const CWallet
// check that original tx consists entirely of our inputs
// if not, we can't bump the fee, because the wallet has no way of knowing the value of the other inputs (thus the fee)
isminefilter filter = wallet.GetLegacyScriptPubKeyMan() && wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
- if (!wallet.IsAllFromMe(*wtx.tx, filter)) {
+ if (!AllInputsMine(wallet, *wtx.tx, filter)) {
errors.push_back(Untranslated("Transaction contains inputs that don't belong to this wallet"));
return feebumper::Result::WALLET_ERROR;
}
@@ -81,7 +83,7 @@ static feebumper::Result CheckFeeRate(const CWallet& wallet, const CWalletTx& wt
// Given old total fee and transaction size, calculate the old feeRate
isminefilter filter = wallet.GetLegacyScriptPubKeyMan() && wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
- CAmount old_fee = wtx.GetDebit(filter) - wtx.tx->GetValueOut();
+ CAmount old_fee = CachedTxGetDebit(wallet, wtx, filter) - wtx.tx->GetValueOut();
const int64_t txSize = GetVirtualTransactionSize(*(wtx.tx));
CFeeRate nOldFeeRate(old_fee, txSize);
// Min total fee is old fee + relay fee
@@ -174,7 +176,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
// Fill in recipients(and preserve a single change key if there is one)
std::vector<CRecipient> recipients;
for (const auto& output : wtx.tx->vout) {
- if (!wallet.IsChange(output)) {
+ if (!OutputIsChange(wallet, output)) {
CRecipient recipient = {output.scriptPubKey, output.nValue, false};
recipients.push_back(recipient);
} else {
@@ -185,7 +187,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
}
isminefilter filter = wallet.GetLegacyScriptPubKeyMan() && wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
- old_fee = wtx.GetDebit(filter) - wtx.tx->GetValueOut();
+ old_fee = CachedTxGetDebit(wallet, wtx, filter) - wtx.tx->GetValueOut();
if (coin_control.m_feerate) {
// The user provided a feeRate argument.
@@ -220,7 +222,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
int change_pos_in_out = -1; // No requested location for change
bilingual_str fail_reason;
FeeCalculation fee_calc_out;
- if (!wallet.CreateTransaction(recipients, tx_new, fee_ret, change_pos_in_out, fail_reason, new_coin_control, fee_calc_out, false)) {
+ if (!CreateTransaction(wallet, recipients, tx_new, fee_ret, change_pos_in_out, fail_reason, new_coin_control, fee_calc_out, false)) {
errors.push_back(Untranslated("Unable to create transaction.") + Untranslated(" ") + fail_reason);
return Result::WALLET_ERROR;
}
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index eb0d6316c0..bb5f0cceff 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -45,6 +45,7 @@ void WalletInit::AddWalletOptions(ArgsManager& argsman) const
argsman.AddArg("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
argsman.AddArg("-avoidpartialspends", strprintf("Group outputs by address, selecting many (possibly all) or none, instead of selecting on a per-output basis. Privacy is improved as addresses are mostly swept with fewer transactions and outputs are aggregated in clean change addresses. It may result in higher fees due to less optimal coin selection caused by this added limitation and possibly a larger-than-necessary number of inputs being used. Always enabled for wallets with \"avoid_reuse\" enabled, otherwise default: %u.", DEFAULT_AVOIDPARTIALSPENDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
argsman.AddArg("-changetype", "What type of change to use (\"legacy\", \"p2sh-segwit\", or \"bech32\"). Default is same as -addresstype, except when -addresstype=p2sh-segwit a native segwit output is used when sending to a native segwit address)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-consolidatefeerate=<amt>", strprintf("The maximum feerate (in %s/kvB) at which transaction building may use more inputs than strictly necessary so that the wallet's UTXO pool can be reduced (default: %s).", CURRENCY_UNIT, FormatMoney(DEFAULT_CONSOLIDATE_FEERATE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
argsman.AddArg("-disablewallet", "Do not load the wallet and disable wallet RPC calls", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
argsman.AddArg("-discardfee=<amt>", strprintf("The fee rate (in %s/kvB) that indicates your tolerance for discarding change by adding it to the fee (default: %s). "
"Note: An output is discarded if it is dust at this rate, but we will always discard up to the dust relay fee and a discard fee above that is limited by the fee estimate for the longest target",
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index e33adf94c9..9a8c1e3c02 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -16,13 +16,16 @@
#include <uint256.h>
#include <util/check.h>
#include <util/system.h>
+#include <util/translation.h>
#include <util/ui_change_type.h>
#include <wallet/context.h>
#include <wallet/feebumper.h>
#include <wallet/fees.h>
#include <wallet/ismine.h>
#include <wallet/load.h>
+#include <wallet/receive.h>
#include <wallet/rpcwallet.h>
+#include <wallet/spend.h>
#include <wallet/wallet.h>
#include <memory>
@@ -54,7 +57,7 @@ WalletTx MakeWalletTx(CWallet& wallet, const CWalletTx& wtx)
result.tx = wtx.tx;
result.txin_is_mine.reserve(wtx.tx->vin.size());
for (const auto& txin : wtx.tx->vin) {
- result.txin_is_mine.emplace_back(wallet.IsMine(txin));
+ result.txin_is_mine.emplace_back(InputIsMine(wallet, txin));
}
result.txout_is_mine.reserve(wtx.tx->vout.size());
result.txout_address.reserve(wtx.tx->vout.size());
@@ -66,9 +69,9 @@ WalletTx MakeWalletTx(CWallet& wallet, const CWalletTx& wtx)
wallet.IsMine(result.txout_address.back()) :
ISMINE_NO);
}
- result.credit = wtx.GetCredit(ISMINE_ALL);
- result.debit = wtx.GetDebit(ISMINE_ALL);
- result.change = wtx.GetChange();
+ result.credit = CachedTxGetCredit(wallet, wtx, ISMINE_ALL);
+ result.debit = CachedTxGetDebit(wallet, wtx, ISMINE_ALL);
+ result.change = CachedTxGetChange(wallet, wtx);
result.time = wtx.GetTxTime();
result.value_map = wtx.mapValue;
result.is_coinbase = wtx.IsCoinBase();
@@ -80,15 +83,15 @@ WalletTxStatus MakeWalletTxStatus(const CWallet& wallet, const CWalletTx& wtx)
{
WalletTxStatus result;
result.block_height = wtx.m_confirm.block_height > 0 ? wtx.m_confirm.block_height : std::numeric_limits<int>::max();
- result.blocks_to_maturity = wtx.GetBlocksToMaturity();
- result.depth_in_main_chain = wtx.GetDepthInMainChain();
+ result.blocks_to_maturity = wallet.GetTxBlocksToMaturity(wtx);
+ result.depth_in_main_chain = wallet.GetTxDepthInMainChain(wtx);
result.time_received = wtx.nTimeReceived;
result.lock_time = wtx.tx->nLockTime;
result.is_final = wallet.chain().checkFinalTx(*wtx.tx);
- result.is_trusted = wtx.IsTrusted();
+ result.is_trusted = CachedTxIsTrusted(wallet, wtx);
result.is_abandoned = wtx.isAbandoned();
result.is_coinbase = wtx.IsCoinBase();
- result.is_in_main_chain = wtx.IsInMainChain();
+ result.is_in_main_chain = wallet.IsTxInMainChain(wtx);
return result;
}
@@ -109,7 +112,7 @@ WalletTxOut MakeWalletTxOut(const CWallet& wallet,
class WalletImpl : public Wallet
{
public:
- explicit WalletImpl(const std::shared_ptr<CWallet>& wallet) : m_wallet(wallet) {}
+ explicit WalletImpl(WalletContext& context, const std::shared_ptr<CWallet>& wallet) : m_context(context), m_wallet(wallet) {}
bool encryptWallet(const SecureString& wallet_passphrase) override
{
@@ -130,7 +133,7 @@ public:
bool getNewDestination(const OutputType type, const std::string label, CTxDestination& dest) override
{
LOCK(m_wallet->cs_wallet);
- std::string error;
+ bilingual_str error;
return m_wallet->GetNewDestination(type, label, dest, error);
}
bool getPubKey(const CScript& script, const CKeyID& address, CPubKey& pub_key) override
@@ -241,7 +244,7 @@ public:
LOCK(m_wallet->cs_wallet);
CTransactionRef tx;
FeeCalculation fee_calc_out;
- if (!m_wallet->CreateTransaction(recipients, tx, fee, change_pos,
+ if (!CreateTransaction(*m_wallet, recipients, tx, fee, change_pos,
fail_reason, coin_control, fee_calc_out, sign)) {
return {};
}
@@ -357,7 +360,7 @@ public:
}
WalletBalances getBalances() override
{
- const auto bal = m_wallet->GetBalance();
+ const auto bal = GetBalance(*m_wallet);
WalletBalances result;
result.balance = bal.m_mine_trusted;
result.unconfirmed_balance = bal.m_mine_untrusted_pending;
@@ -380,15 +383,15 @@ public:
balances = getBalances();
return true;
}
- CAmount getBalance() override { return m_wallet->GetBalance().m_mine_trusted; }
+ CAmount getBalance() override { return GetBalance(*m_wallet).m_mine_trusted; }
CAmount getAvailableBalance(const CCoinControl& coin_control) override
{
- return m_wallet->GetAvailableBalance(&coin_control);
+ return GetAvailableBalance(*m_wallet, &coin_control);
}
isminetype txinIsMine(const CTxIn& txin) override
{
LOCK(m_wallet->cs_wallet);
- return m_wallet->IsMine(txin);
+ return InputIsMine(*m_wallet, txin);
}
isminetype txoutIsMine(const CTxOut& txout) override
{
@@ -403,13 +406,13 @@ public:
CAmount getCredit(const CTxOut& txout, isminefilter filter) override
{
LOCK(m_wallet->cs_wallet);
- return m_wallet->GetCredit(txout, filter);
+ return OutputGetCredit(*m_wallet, txout, filter);
}
CoinsList listCoins() override
{
LOCK(m_wallet->cs_wallet);
CoinsList result;
- for (const auto& entry : m_wallet->ListCoins()) {
+ for (const auto& entry : ListCoins(*m_wallet)) {
auto& group = result[entry.first];
for (const auto& coin : entry.second) {
group.emplace_back(COutPoint(coin.tx->GetHash(), coin.i),
@@ -427,7 +430,7 @@ public:
result.emplace_back();
auto it = m_wallet->mapWallet.find(output.hash);
if (it != m_wallet->mapWallet.end()) {
- int depth = it->second.GetDepthInMainChain();
+ int depth = m_wallet->GetTxDepthInMainChain(it->second);
if (depth >= 0) {
result.back() = MakeWalletTxOut(*m_wallet, it->second, output.n, depth);
}
@@ -457,7 +460,7 @@ public:
CAmount getDefaultMaxTxFee() override { return m_wallet->m_default_max_tx_fee; }
void remove() override
{
- RemoveWallet(m_wallet, false /* load_on_start */);
+ RemoveWallet(m_context, m_wallet, false /* load_on_start */);
}
bool isLegacy() override { return m_wallet->IsLegacy(); }
std::unique_ptr<Handler> handleUnload(UnloadFn fn) override
@@ -493,6 +496,7 @@ public:
}
CWallet* wallet() override { return m_wallet.get(); }
+ WalletContext& m_context;
std::shared_ptr<CWallet> m_wallet;
};
@@ -504,7 +508,7 @@ public:
m_context.chain = &chain;
m_context.args = &args;
}
- ~WalletClientImpl() override { UnloadWallets(); }
+ ~WalletClientImpl() override { UnloadWallets(m_context); }
//! ChainClient methods
void registerRpcs() override
@@ -518,11 +522,11 @@ public:
m_rpc_handlers.emplace_back(m_context.chain->handleRpc(m_rpc_commands.back()));
}
}
- bool verify() override { return VerifyWallets(*m_context.chain); }
- bool load() override { return LoadWallets(*m_context.chain); }
- void start(CScheduler& scheduler) override { return StartWallets(scheduler, *Assert(m_context.args)); }
- void flush() override { return FlushWallets(); }
- void stop() override { return StopWallets(); }
+ bool verify() override { return VerifyWallets(m_context); }
+ bool load() override { return LoadWallets(m_context); }
+ void start(CScheduler& scheduler) override { return StartWallets(m_context, scheduler); }
+ void flush() override { return FlushWallets(m_context); }
+ void stop() override { return StopWallets(m_context); }
void setMockTime(int64_t time) override { return SetMockTime(time); }
//! WalletClient methods
@@ -534,14 +538,14 @@ public:
options.require_create = true;
options.create_flags = wallet_creation_flags;
options.create_passphrase = passphrase;
- return MakeWallet(CreateWallet(*m_context.chain, name, true /* load_on_start */, options, status, error, warnings));
+ return MakeWallet(m_context, CreateWallet(m_context, name, true /* load_on_start */, options, status, error, warnings));
}
std::unique_ptr<Wallet> loadWallet(const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings) override
{
DatabaseOptions options;
DatabaseStatus status;
options.require_existing = true;
- return MakeWallet(LoadWallet(*m_context.chain, name, true /* load_on_start */, options, status, error, warnings));
+ return MakeWallet(m_context, LoadWallet(m_context, name, true /* load_on_start */, options, status, error, warnings));
}
std::string getWalletDir() override
{
@@ -558,15 +562,16 @@ public:
std::vector<std::unique_ptr<Wallet>> getWallets() override
{
std::vector<std::unique_ptr<Wallet>> wallets;
- for (const auto& wallet : GetWallets()) {
- wallets.emplace_back(MakeWallet(wallet));
+ for (const auto& wallet : GetWallets(m_context)) {
+ wallets.emplace_back(MakeWallet(m_context, wallet));
}
return wallets;
}
std::unique_ptr<Handler> handleLoadWallet(LoadWalletFn fn) override
{
- return HandleLoadWallet(std::move(fn));
+ return HandleLoadWallet(m_context, std::move(fn));
}
+ WalletContext* context() override { return &m_context; }
WalletContext m_context;
const std::vector<std::string> m_wallet_filenames;
@@ -577,7 +582,7 @@ public:
} // namespace wallet
namespace interfaces {
-std::unique_ptr<Wallet> MakeWallet(const std::shared_ptr<CWallet>& wallet) { return wallet ? std::make_unique<wallet::WalletImpl>(wallet) : nullptr; }
+std::unique_ptr<Wallet> MakeWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet) { return wallet ? std::make_unique<wallet::WalletImpl>(context, wallet) : nullptr; }
std::unique_ptr<WalletClient> MakeWalletClient(Chain& chain, ArgsManager& args)
{
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp
index dbf9fd46b6..1b841026b8 100644
--- a/src/wallet/load.cpp
+++ b/src/wallet/load.cpp
@@ -8,18 +8,24 @@
#include <fs.h>
#include <interfaces/chain.h>
#include <scheduler.h>
+#include <util/check.h>
#include <util/string.h>
#include <util/system.h>
#include <util/translation.h>
+#include <wallet/context.h>
+#include <wallet/spend.h>
#include <wallet/wallet.h>
#include <wallet/walletdb.h>
#include <univalue.h>
-bool VerifyWallets(interfaces::Chain& chain)
+bool VerifyWallets(WalletContext& context)
{
- if (gArgs.IsArgSet("-walletdir")) {
- fs::path wallet_dir = gArgs.GetArg("-walletdir", "");
+ interfaces::Chain& chain = *context.chain;
+ ArgsManager& args = *Assert(context.args);
+
+ if (args.IsArgSet("-walletdir")) {
+ fs::path wallet_dir = args.GetArg("-walletdir", "");
boost::system::error_code error;
// The canonical path cleans the path, preventing >1 Berkeley environment instances for the same directory
fs::path canonical_wallet_dir = fs::canonical(wallet_dir, error);
@@ -34,7 +40,7 @@ bool VerifyWallets(interfaces::Chain& chain)
chain.initError(strprintf(_("Specified -walletdir \"%s\" is a relative path"), wallet_dir.string()));
return false;
}
- gArgs.ForceSetArg("-walletdir", canonical_wallet_dir.string());
+ args.ForceSetArg("-walletdir", canonical_wallet_dir.string());
}
LogPrintf("Using wallet directory %s\n", GetWalletDir().string());
@@ -43,25 +49,27 @@ bool VerifyWallets(interfaces::Chain& chain)
// For backwards compatibility if an unnamed top level wallet exists in the
// wallets directory, include it in the default list of wallets to load.
- if (!gArgs.IsArgSet("wallet")) {
+ if (!args.IsArgSet("wallet")) {
DatabaseOptions options;
DatabaseStatus status;
bilingual_str error_string;
options.require_existing = true;
options.verify = false;
if (MakeWalletDatabase("", options, status, error_string)) {
- gArgs.LockSettings([&](util::Settings& settings) {
- util::SettingsValue wallets(util::SettingsValue::VARR);
- wallets.push_back(""); // Default wallet name is ""
- settings.rw_settings["wallet"] = wallets;
- });
+ util::SettingsValue wallets(util::SettingsValue::VARR);
+ wallets.push_back(""); // Default wallet name is ""
+ // Pass write=false because no need to write file and probably
+ // better not to. If unnamed wallet needs to be added next startup
+ // and the setting is empty, this code will just run again.
+ chain.updateRwSetting("wallet", wallets, /* write= */ false);
}
}
// Keep track of each wallet absolute path to detect duplicates.
std::set<fs::path> wallet_paths;
- for (const auto& wallet_file : gArgs.GetArgs("-wallet")) {
+ for (const auto& wallet : chain.getSettingsList("wallet")) {
+ const auto& wallet_file = wallet.get_str();
const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), wallet_file);
if (!wallet_paths.insert(path).second) {
@@ -87,11 +95,13 @@ bool VerifyWallets(interfaces::Chain& chain)
return true;
}
-bool LoadWallets(interfaces::Chain& chain)
+bool LoadWallets(WalletContext& context)
{
+ interfaces::Chain& chain = *context.chain;
try {
std::set<fs::path> wallet_paths;
- for (const std::string& name : gArgs.GetArgs("-wallet")) {
+ for (const auto& wallet : chain.getSettingsList("wallet")) {
+ const auto& name = wallet.get_str();
if (!wallet_paths.insert(name).second) {
continue;
}
@@ -106,13 +116,13 @@ bool LoadWallets(interfaces::Chain& chain)
continue;
}
chain.initMessage(_("Loading wallet…").translated);
- std::shared_ptr<CWallet> pwallet = database ? CWallet::Create(&chain, name, std::move(database), options.create_flags, error, warnings) : nullptr;
+ std::shared_ptr<CWallet> pwallet = database ? CWallet::Create(context, name, std::move(database), options.create_flags, error, warnings) : nullptr;
if (!warnings.empty()) chain.initWarning(Join(warnings, Untranslated("\n")));
if (!pwallet) {
chain.initError(error);
return false;
}
- AddWallet(pwallet);
+ AddWallet(context, pwallet);
}
return true;
} catch (const std::runtime_error& e) {
@@ -121,41 +131,41 @@ bool LoadWallets(interfaces::Chain& chain)
}
}
-void StartWallets(CScheduler& scheduler, const ArgsManager& args)
+void StartWallets(WalletContext& context, CScheduler& scheduler)
{
- for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) {
+ for (const std::shared_ptr<CWallet>& pwallet : GetWallets(context)) {
pwallet->postInitProcess();
}
// Schedule periodic wallet flushes and tx rebroadcasts
- if (args.GetBoolArg("-flushwallet", DEFAULT_FLUSHWALLET)) {
- scheduler.scheduleEvery(MaybeCompactWalletDB, std::chrono::milliseconds{500});
+ if (context.args->GetBoolArg("-flushwallet", DEFAULT_FLUSHWALLET)) {
+ scheduler.scheduleEvery([&context] { MaybeCompactWalletDB(context); }, std::chrono::milliseconds{500});
}
- scheduler.scheduleEvery(MaybeResendWalletTxs, std::chrono::milliseconds{1000});
+ scheduler.scheduleEvery([&context] { MaybeResendWalletTxs(context); }, std::chrono::milliseconds{1000});
}
-void FlushWallets()
+void FlushWallets(WalletContext& context)
{
- for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) {
+ for (const std::shared_ptr<CWallet>& pwallet : GetWallets(context)) {
pwallet->Flush();
}
}
-void StopWallets()
+void StopWallets(WalletContext& context)
{
- for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) {
+ for (const std::shared_ptr<CWallet>& pwallet : GetWallets(context)) {
pwallet->Close();
}
}
-void UnloadWallets()
+void UnloadWallets(WalletContext& context)
{
- auto wallets = GetWallets();
+ auto wallets = GetWallets(context);
while (!wallets.empty()) {
auto wallet = wallets.back();
wallets.pop_back();
std::vector<bilingual_str> warnings;
- RemoveWallet(wallet, std::nullopt, warnings);
+ RemoveWallet(context, wallet, /* load_on_start= */ std::nullopt, warnings);
UnloadWallet(std::move(wallet));
}
}
diff --git a/src/wallet/load.h b/src/wallet/load.h
index 7910f0d6e1..e207bc2e09 100644
--- a/src/wallet/load.h
+++ b/src/wallet/load.h
@@ -11,27 +11,28 @@
class ArgsManager;
class CScheduler;
+struct WalletContext;
namespace interfaces {
class Chain;
} // namespace interfaces
//! Responsible for reading and validating the -wallet arguments and verifying the wallet database.
-bool VerifyWallets(interfaces::Chain& chain);
+bool VerifyWallets(WalletContext& context);
//! Load wallet databases.
-bool LoadWallets(interfaces::Chain& chain);
+bool LoadWallets(WalletContext& context);
//! Complete startup of wallets.
-void StartWallets(CScheduler& scheduler, const ArgsManager& args);
+void StartWallets(WalletContext& context, CScheduler& scheduler);
//! Flush all wallets in preparation for shutdown.
-void FlushWallets();
+void FlushWallets(WalletContext& context);
//! Stop all wallets. Wallets will be flushed first.
-void StopWallets();
+void StopWallets(WalletContext& context);
//! Close all wallets.
-void UnloadWallets();
+void UnloadWallets(WalletContext& context);
#endif // BITCOIN_WALLET_LOAD_H
diff --git a/src/wallet/receive.cpp b/src/wallet/receive.cpp
index de81dbf324..98706dcdf8 100644
--- a/src/wallet/receive.cpp
+++ b/src/wallet/receive.cpp
@@ -7,27 +7,27 @@
#include <wallet/transaction.h>
#include <wallet/wallet.h>
-isminetype CWallet::IsMine(const CTxIn &txin) const
+isminetype InputIsMine(const CWallet& wallet, const CTxIn &txin)
{
- AssertLockHeld(cs_wallet);
- std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(txin.prevout.hash);
- if (mi != mapWallet.end())
+ AssertLockHeld(wallet.cs_wallet);
+ std::map<uint256, CWalletTx>::const_iterator mi = wallet.mapWallet.find(txin.prevout.hash);
+ if (mi != wallet.mapWallet.end())
{
const CWalletTx& prev = (*mi).second;
if (txin.prevout.n < prev.tx->vout.size())
- return IsMine(prev.tx->vout[txin.prevout.n]);
+ return wallet.IsMine(prev.tx->vout[txin.prevout.n]);
}
return ISMINE_NO;
}
-bool CWallet::IsAllFromMe(const CTransaction& tx, const isminefilter& filter) const
+bool AllInputsMine(const CWallet& wallet, const CTransaction& tx, const isminefilter& filter)
{
- LOCK(cs_wallet);
+ LOCK(wallet.cs_wallet);
for (const CTxIn& txin : tx.vin)
{
- auto mi = mapWallet.find(txin.prevout.hash);
- if (mi == mapWallet.end())
+ auto mi = wallet.mapWallet.find(txin.prevout.hash);
+ if (mi == wallet.mapWallet.end())
return false; // any unknown inputs can't be from us
const CWalletTx& prev = (*mi).second;
@@ -35,33 +35,33 @@ bool CWallet::IsAllFromMe(const CTransaction& tx, const isminefilter& filter) co
if (txin.prevout.n >= prev.tx->vout.size())
return false; // invalid input!
- if (!(IsMine(prev.tx->vout[txin.prevout.n]) & filter))
+ if (!(wallet.IsMine(prev.tx->vout[txin.prevout.n]) & filter))
return false;
}
return true;
}
-CAmount CWallet::GetCredit(const CTxOut& txout, const isminefilter& filter) const
+CAmount OutputGetCredit(const CWallet& wallet, const CTxOut& txout, const isminefilter& filter)
{
if (!MoneyRange(txout.nValue))
throw std::runtime_error(std::string(__func__) + ": value out of range");
- LOCK(cs_wallet);
- return ((IsMine(txout) & filter) ? txout.nValue : 0);
+ LOCK(wallet.cs_wallet);
+ return ((wallet.IsMine(txout) & filter) ? txout.nValue : 0);
}
-CAmount CWallet::GetCredit(const CTransaction& tx, const isminefilter& filter) const
+CAmount TxGetCredit(const CWallet& wallet, const CTransaction& tx, const isminefilter& filter)
{
CAmount nCredit = 0;
for (const CTxOut& txout : tx.vout)
{
- nCredit += GetCredit(txout, filter);
+ nCredit += OutputGetCredit(wallet, txout, filter);
if (!MoneyRange(nCredit))
throw std::runtime_error(std::string(__func__) + ": value out of range");
}
return nCredit;
}
-bool CWallet::IsChange(const CScript& script) const
+bool ScriptIsChange(const CWallet& wallet, const CScript& script)
{
// TODO: fix handling of 'change' outputs. The assumption is that any
// payment to a script that is ours, but is not in the address book
@@ -70,179 +70,177 @@ bool CWallet::IsChange(const CScript& script) const
// a better way of identifying which outputs are 'the send' and which are
// 'the change' will need to be implemented (maybe extend CWalletTx to remember
// which output, if any, was change).
- AssertLockHeld(cs_wallet);
- if (IsMine(script))
+ AssertLockHeld(wallet.cs_wallet);
+ if (wallet.IsMine(script))
{
CTxDestination address;
if (!ExtractDestination(script, address))
return true;
- if (!FindAddressBookEntry(address)) {
+ if (!wallet.FindAddressBookEntry(address)) {
return true;
}
}
return false;
}
-bool CWallet::IsChange(const CTxOut& txout) const
+bool OutputIsChange(const CWallet& wallet, const CTxOut& txout)
{
- return IsChange(txout.scriptPubKey);
+ return ScriptIsChange(wallet, txout.scriptPubKey);
}
-CAmount CWallet::GetChange(const CTxOut& txout) const
+CAmount OutputGetChange(const CWallet& wallet, const CTxOut& txout)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(wallet.cs_wallet);
if (!MoneyRange(txout.nValue))
throw std::runtime_error(std::string(__func__) + ": value out of range");
- return (IsChange(txout) ? txout.nValue : 0);
+ return (OutputIsChange(wallet, txout) ? txout.nValue : 0);
}
-CAmount CWallet::GetChange(const CTransaction& tx) const
+CAmount TxGetChange(const CWallet& wallet, const CTransaction& tx)
{
- LOCK(cs_wallet);
+ LOCK(wallet.cs_wallet);
CAmount nChange = 0;
for (const CTxOut& txout : tx.vout)
{
- nChange += GetChange(txout);
+ nChange += OutputGetChange(wallet, txout);
if (!MoneyRange(nChange))
throw std::runtime_error(std::string(__func__) + ": value out of range");
}
return nChange;
}
-CAmount CWalletTx::GetCachableAmount(AmountType type, const isminefilter& filter, bool recalculate) const
+static CAmount GetCachableAmount(const CWallet& wallet, const CWalletTx& wtx, CWalletTx::AmountType type, const isminefilter& filter, bool recalculate = false)
{
- auto& amount = m_amounts[type];
+ auto& amount = wtx.m_amounts[type];
if (recalculate || !amount.m_cached[filter]) {
- amount.Set(filter, type == DEBIT ? pwallet->GetDebit(*tx, filter) : pwallet->GetCredit(*tx, filter));
- m_is_cache_empty = false;
+ amount.Set(filter, type == CWalletTx::DEBIT ? wallet.GetDebit(*wtx.tx, filter) : TxGetCredit(wallet, *wtx.tx, filter));
+ wtx.m_is_cache_empty = false;
}
return amount.m_value[filter];
}
-CAmount CWalletTx::GetCredit(const isminefilter& filter) const
+CAmount CachedTxGetCredit(const CWallet& wallet, const CWalletTx& wtx, const isminefilter& filter)
{
// Must wait until coinbase is safely deep enough in the chain before valuing it
- if (IsImmatureCoinBase())
+ if (wallet.IsTxImmatureCoinBase(wtx))
return 0;
CAmount credit = 0;
if (filter & ISMINE_SPENDABLE) {
// GetBalance can assume transactions in mapWallet won't change
- credit += GetCachableAmount(CREDIT, ISMINE_SPENDABLE);
+ credit += GetCachableAmount(wallet, wtx, CWalletTx::CREDIT, ISMINE_SPENDABLE);
}
if (filter & ISMINE_WATCH_ONLY) {
- credit += GetCachableAmount(CREDIT, ISMINE_WATCH_ONLY);
+ credit += GetCachableAmount(wallet, wtx, CWalletTx::CREDIT, ISMINE_WATCH_ONLY);
}
return credit;
}
-CAmount CWalletTx::GetDebit(const isminefilter& filter) const
+CAmount CachedTxGetDebit(const CWallet& wallet, const CWalletTx& wtx, const isminefilter& filter)
{
- if (tx->vin.empty())
+ if (wtx.tx->vin.empty())
return 0;
CAmount debit = 0;
if (filter & ISMINE_SPENDABLE) {
- debit += GetCachableAmount(DEBIT, ISMINE_SPENDABLE);
+ debit += GetCachableAmount(wallet, wtx, CWalletTx::DEBIT, ISMINE_SPENDABLE);
}
if (filter & ISMINE_WATCH_ONLY) {
- debit += GetCachableAmount(DEBIT, ISMINE_WATCH_ONLY);
+ debit += GetCachableAmount(wallet, wtx, CWalletTx::DEBIT, ISMINE_WATCH_ONLY);
}
return debit;
}
-CAmount CWalletTx::GetChange() const
+CAmount CachedTxGetChange(const CWallet& wallet, const CWalletTx& wtx)
{
- if (fChangeCached)
- return nChangeCached;
- nChangeCached = pwallet->GetChange(*tx);
- fChangeCached = true;
- return nChangeCached;
+ if (wtx.fChangeCached)
+ return wtx.nChangeCached;
+ wtx.nChangeCached = TxGetChange(wallet, *wtx.tx);
+ wtx.fChangeCached = true;
+ return wtx.nChangeCached;
}
-CAmount CWalletTx::GetImmatureCredit(bool fUseCache) const
+CAmount CachedTxGetImmatureCredit(const CWallet& wallet, const CWalletTx& wtx, bool fUseCache)
{
- if (IsImmatureCoinBase() && IsInMainChain()) {
- return GetCachableAmount(IMMATURE_CREDIT, ISMINE_SPENDABLE, !fUseCache);
+ if (wallet.IsTxImmatureCoinBase(wtx) && wallet.IsTxInMainChain(wtx)) {
+ return GetCachableAmount(wallet, wtx, CWalletTx::IMMATURE_CREDIT, ISMINE_SPENDABLE, !fUseCache);
}
return 0;
}
-CAmount CWalletTx::GetImmatureWatchOnlyCredit(const bool fUseCache) const
+CAmount CachedTxGetImmatureWatchOnlyCredit(const CWallet& wallet, const CWalletTx& wtx, const bool fUseCache)
{
- if (IsImmatureCoinBase() && IsInMainChain()) {
- return GetCachableAmount(IMMATURE_CREDIT, ISMINE_WATCH_ONLY, !fUseCache);
+ if (wallet.IsTxImmatureCoinBase(wtx) && wallet.IsTxInMainChain(wtx)) {
+ return GetCachableAmount(wallet, wtx, CWalletTx::IMMATURE_CREDIT, ISMINE_WATCH_ONLY, !fUseCache);
}
return 0;
}
-CAmount CWalletTx::GetAvailableCredit(bool fUseCache, const isminefilter& filter) const
+CAmount CachedTxGetAvailableCredit(const CWallet& wallet, const CWalletTx& wtx, bool fUseCache, const isminefilter& filter)
{
- if (pwallet == nullptr)
- return 0;
-
// Avoid caching ismine for NO or ALL cases (could remove this check and simplify in the future).
bool allow_cache = (filter & ISMINE_ALL) && (filter & ISMINE_ALL) != ISMINE_ALL;
// Must wait until coinbase is safely deep enough in the chain before valuing it
- if (IsImmatureCoinBase())
+ if (wallet.IsTxImmatureCoinBase(wtx))
return 0;
- if (fUseCache && allow_cache && m_amounts[AVAILABLE_CREDIT].m_cached[filter]) {
- return m_amounts[AVAILABLE_CREDIT].m_value[filter];
+ if (fUseCache && allow_cache && wtx.m_amounts[CWalletTx::AVAILABLE_CREDIT].m_cached[filter]) {
+ return wtx.m_amounts[CWalletTx::AVAILABLE_CREDIT].m_value[filter];
}
- bool allow_used_addresses = (filter & ISMINE_USED) || !pwallet->IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE);
+ bool allow_used_addresses = (filter & ISMINE_USED) || !wallet.IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE);
CAmount nCredit = 0;
- uint256 hashTx = GetHash();
- for (unsigned int i = 0; i < tx->vout.size(); i++)
+ uint256 hashTx = wtx.GetHash();
+ for (unsigned int i = 0; i < wtx.tx->vout.size(); i++)
{
- if (!pwallet->IsSpent(hashTx, i) && (allow_used_addresses || !pwallet->IsSpentKey(hashTx, i))) {
- const CTxOut &txout = tx->vout[i];
- nCredit += pwallet->GetCredit(txout, filter);
+ if (!wallet.IsSpent(hashTx, i) && (allow_used_addresses || !wallet.IsSpentKey(hashTx, i))) {
+ const CTxOut &txout = wtx.tx->vout[i];
+ nCredit += OutputGetCredit(wallet, txout, filter);
if (!MoneyRange(nCredit))
throw std::runtime_error(std::string(__func__) + " : value out of range");
}
}
if (allow_cache) {
- m_amounts[AVAILABLE_CREDIT].Set(filter, nCredit);
- m_is_cache_empty = false;
+ wtx.m_amounts[CWalletTx::AVAILABLE_CREDIT].Set(filter, nCredit);
+ wtx.m_is_cache_empty = false;
}
return nCredit;
}
-void CWalletTx::GetAmounts(std::list<COutputEntry>& listReceived,
- std::list<COutputEntry>& listSent, CAmount& nFee, const isminefilter& filter) const
+void CachedTxGetAmounts(const CWallet& wallet, const CWalletTx& wtx,
+ std::list<COutputEntry>& listReceived,
+ std::list<COutputEntry>& listSent, CAmount& nFee, const isminefilter& filter)
{
nFee = 0;
listReceived.clear();
listSent.clear();
// Compute fee:
- CAmount nDebit = GetDebit(filter);
+ CAmount nDebit = CachedTxGetDebit(wallet, wtx, filter);
if (nDebit > 0) // debit>0 means we signed/sent this transaction
{
- CAmount nValueOut = tx->GetValueOut();
+ CAmount nValueOut = wtx.tx->GetValueOut();
nFee = nDebit - nValueOut;
}
- LOCK(pwallet->cs_wallet);
+ LOCK(wallet.cs_wallet);
// Sent/received.
- for (unsigned int i = 0; i < tx->vout.size(); ++i)
+ for (unsigned int i = 0; i < wtx.tx->vout.size(); ++i)
{
- const CTxOut& txout = tx->vout[i];
- isminetype fIsMine = pwallet->IsMine(txout);
+ const CTxOut& txout = wtx.tx->vout[i];
+ isminetype fIsMine = wallet.IsMine(txout);
// Only need to handle txouts if AT LEAST one of these is true:
// 1) they debit from us (sent)
// 2) the output is to us (received)
if (nDebit > 0)
{
// Don't report 'change' txouts
- if (pwallet->IsChange(txout))
+ if (OutputIsChange(wallet, txout))
continue;
}
else if (!(fIsMine & filter))
@@ -253,8 +251,8 @@ void CWalletTx::GetAmounts(std::list<COutputEntry>& listReceived,
if (!ExtractDestination(txout.scriptPubKey, address) && !txout.scriptPubKey.IsUnspendable())
{
- pwallet->WalletLogPrintf("CWalletTx::GetAmounts: Unknown transaction type found, txid %s\n",
- this->GetHash().ToString());
+ wallet.WalletLogPrintf("CWalletTx::GetAmounts: Unknown transaction type found, txid %s\n",
+ wtx.GetHash().ToString());
address = CNoDestination();
}
@@ -271,16 +269,21 @@ void CWalletTx::GetAmounts(std::list<COutputEntry>& listReceived,
}
-bool CWallet::IsTrusted(const CWalletTx& wtx, std::set<uint256>& trusted_parents) const
+bool CachedTxIsFromMe(const CWallet& wallet, const CWalletTx& wtx, const isminefilter& filter)
+{
+ return (CachedTxGetDebit(wallet, wtx, filter) > 0);
+}
+
+bool CachedTxIsTrusted(const CWallet& wallet, const CWalletTx& wtx, std::set<uint256>& trusted_parents)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(wallet.cs_wallet);
// Quick answer in most cases
- if (!chain().checkFinalTx(*wtx.tx)) return false;
- int nDepth = wtx.GetDepthInMainChain();
+ if (!wallet.chain().checkFinalTx(*wtx.tx)) return false;
+ int nDepth = wallet.GetTxDepthInMainChain(wtx);
if (nDepth >= 1) return true;
if (nDepth < 0) return false;
// using wtx's cached debit
- if (!m_spend_zero_conf_change || !wtx.IsFromMe(ISMINE_ALL)) return false;
+ if (!wallet.m_spend_zero_conf_change || !CachedTxIsFromMe(wallet, wtx, ISMINE_ALL)) return false;
// Don't trust unconfirmed transactions from us unless they are in the mempool.
if (!wtx.InMempool()) return false;
@@ -289,41 +292,41 @@ bool CWallet::IsTrusted(const CWalletTx& wtx, std::set<uint256>& trusted_parents
for (const CTxIn& txin : wtx.tx->vin)
{
// Transactions not sent by us: not trusted
- const CWalletTx* parent = GetWalletTx(txin.prevout.hash);
+ const CWalletTx* parent = wallet.GetWalletTx(txin.prevout.hash);
if (parent == nullptr) return false;
const CTxOut& parentOut = parent->tx->vout[txin.prevout.n];
// Check that this specific input being spent is trusted
- if (IsMine(parentOut) != ISMINE_SPENDABLE) return false;
+ if (wallet.IsMine(parentOut) != ISMINE_SPENDABLE) return false;
// If we've already trusted this parent, continue
if (trusted_parents.count(parent->GetHash())) continue;
// Recurse to check that the parent is also trusted
- if (!IsTrusted(*parent, trusted_parents)) return false;
+ if (!CachedTxIsTrusted(wallet, *parent, trusted_parents)) return false;
trusted_parents.insert(parent->GetHash());
}
return true;
}
-bool CWalletTx::IsTrusted() const
+bool CachedTxIsTrusted(const CWallet& wallet, const CWalletTx& wtx)
{
std::set<uint256> trusted_parents;
- LOCK(pwallet->cs_wallet);
- return pwallet->IsTrusted(*this, trusted_parents);
+ LOCK(wallet.cs_wallet);
+ return CachedTxIsTrusted(wallet, wtx, trusted_parents);
}
-CWallet::Balance CWallet::GetBalance(const int min_depth, bool avoid_reuse) const
+Balance GetBalance(const CWallet& wallet, const int min_depth, bool avoid_reuse)
{
Balance ret;
isminefilter reuse_filter = avoid_reuse ? ISMINE_NO : ISMINE_USED;
{
- LOCK(cs_wallet);
+ LOCK(wallet.cs_wallet);
std::set<uint256> trusted_parents;
- for (const auto& entry : mapWallet)
+ for (const auto& entry : wallet.mapWallet)
{
const CWalletTx& wtx = entry.second;
- const bool is_trusted{IsTrusted(wtx, trusted_parents)};
- const int tx_depth{wtx.GetDepthInMainChain()};
- const CAmount tx_credit_mine{wtx.GetAvailableCredit(/* fUseCache */ true, ISMINE_SPENDABLE | reuse_filter)};
- const CAmount tx_credit_watchonly{wtx.GetAvailableCredit(/* fUseCache */ true, ISMINE_WATCH_ONLY | reuse_filter)};
+ const bool is_trusted{CachedTxIsTrusted(wallet, wtx, trusted_parents)};
+ const int tx_depth{wallet.GetTxDepthInMainChain(wtx)};
+ const CAmount tx_credit_mine{CachedTxGetAvailableCredit(wallet, wtx, /* fUseCache */ true, ISMINE_SPENDABLE | reuse_filter)};
+ const CAmount tx_credit_watchonly{CachedTxGetAvailableCredit(wallet, wtx, /* fUseCache */ true, ISMINE_WATCH_ONLY | reuse_filter)};
if (is_trusted && tx_depth >= min_depth) {
ret.m_mine_trusted += tx_credit_mine;
ret.m_watchonly_trusted += tx_credit_watchonly;
@@ -332,43 +335,43 @@ CWallet::Balance CWallet::GetBalance(const int min_depth, bool avoid_reuse) cons
ret.m_mine_untrusted_pending += tx_credit_mine;
ret.m_watchonly_untrusted_pending += tx_credit_watchonly;
}
- ret.m_mine_immature += wtx.GetImmatureCredit();
- ret.m_watchonly_immature += wtx.GetImmatureWatchOnlyCredit();
+ ret.m_mine_immature += CachedTxGetImmatureCredit(wallet, wtx);
+ ret.m_watchonly_immature += CachedTxGetImmatureWatchOnlyCredit(wallet, wtx);
}
}
return ret;
}
-std::map<CTxDestination, CAmount> CWallet::GetAddressBalances() const
+std::map<CTxDestination, CAmount> GetAddressBalances(const CWallet& wallet)
{
std::map<CTxDestination, CAmount> balances;
{
- LOCK(cs_wallet);
+ LOCK(wallet.cs_wallet);
std::set<uint256> trusted_parents;
- for (const auto& walletEntry : mapWallet)
+ for (const auto& walletEntry : wallet.mapWallet)
{
const CWalletTx& wtx = walletEntry.second;
- if (!IsTrusted(wtx, trusted_parents))
+ if (!CachedTxIsTrusted(wallet, wtx, trusted_parents))
continue;
- if (wtx.IsImmatureCoinBase())
+ if (wallet.IsTxImmatureCoinBase(wtx))
continue;
- int nDepth = wtx.GetDepthInMainChain();
- if (nDepth < (wtx.IsFromMe(ISMINE_ALL) ? 0 : 1))
+ int nDepth = wallet.GetTxDepthInMainChain(wtx);
+ if (nDepth < (CachedTxIsFromMe(wallet, wtx, ISMINE_ALL) ? 0 : 1))
continue;
for (unsigned int i = 0; i < wtx.tx->vout.size(); i++)
{
CTxDestination addr;
- if (!IsMine(wtx.tx->vout[i]))
+ if (!wallet.IsMine(wtx.tx->vout[i]))
continue;
if(!ExtractDestination(wtx.tx->vout[i].scriptPubKey, addr))
continue;
- CAmount n = IsSpent(walletEntry.first, i) ? 0 : wtx.tx->vout[i].nValue;
+ CAmount n = wallet.IsSpent(walletEntry.first, i) ? 0 : wtx.tx->vout[i].nValue;
balances[addr] += n;
}
}
@@ -377,13 +380,13 @@ std::map<CTxDestination, CAmount> CWallet::GetAddressBalances() const
return balances;
}
-std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings() const
+std::set< std::set<CTxDestination> > GetAddressGroupings(const CWallet& wallet)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(wallet.cs_wallet);
std::set< std::set<CTxDestination> > groupings;
std::set<CTxDestination> grouping;
- for (const auto& walletEntry : mapWallet)
+ for (const auto& walletEntry : wallet.mapWallet)
{
const CWalletTx& wtx = walletEntry.second;
@@ -394,9 +397,9 @@ std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings() const
for (const CTxIn& txin : wtx.tx->vin)
{
CTxDestination address;
- if(!IsMine(txin)) /* If this input isn't mine, ignore it */
+ if(!InputIsMine(wallet, txin)) /* If this input isn't mine, ignore it */
continue;
- if(!ExtractDestination(mapWallet.at(txin.prevout.hash).tx->vout[txin.prevout.n].scriptPubKey, address))
+ if(!ExtractDestination(wallet.mapWallet.at(txin.prevout.hash).tx->vout[txin.prevout.n].scriptPubKey, address))
continue;
grouping.insert(address);
any_mine = true;
@@ -406,7 +409,7 @@ std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings() const
if (any_mine)
{
for (const CTxOut& txout : wtx.tx->vout)
- if (IsChange(txout))
+ if (OutputIsChange(wallet, txout))
{
CTxDestination txoutAddr;
if(!ExtractDestination(txout.scriptPubKey, txoutAddr))
@@ -423,7 +426,7 @@ std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings() const
// group lone addrs by themselves
for (const auto& txout : wtx.tx->vout)
- if (IsMine(txout))
+ if (wallet.IsMine(txout))
{
CTxDestination address;
if(!ExtractDestination(txout.scriptPubKey, address))
diff --git a/src/wallet/receive.h b/src/wallet/receive.h
index 8eead32413..b4b311636b 100644
--- a/src/wallet/receive.h
+++ b/src/wallet/receive.h
@@ -10,11 +10,55 @@
#include <wallet/transaction.h>
#include <wallet/wallet.h>
+isminetype InputIsMine(const CWallet& wallet, const CTxIn& txin) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+
+/** Returns whether all of the inputs match the filter */
+bool AllInputsMine(const CWallet& wallet, const CTransaction& tx, const isminefilter& filter);
+
+CAmount OutputGetCredit(const CWallet& wallet, const CTxOut& txout, const isminefilter& filter);
+CAmount TxGetCredit(const CWallet& wallet, const CTransaction& tx, const isminefilter& filter);
+
+bool ScriptIsChange(const CWallet& wallet, const CScript& script) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+bool OutputIsChange(const CWallet& wallet, const CTxOut& txout) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+CAmount OutputGetChange(const CWallet& wallet, const CTxOut& txout) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+CAmount TxGetChange(const CWallet& wallet, const CTransaction& tx);
+
+CAmount CachedTxGetCredit(const CWallet& wallet, const CWalletTx& wtx, const isminefilter& filter);
+//! filter decides which addresses will count towards the debit
+CAmount CachedTxGetDebit(const CWallet& wallet, const CWalletTx& wtx, const isminefilter& filter);
+CAmount CachedTxGetChange(const CWallet& wallet, const CWalletTx& wtx);
+CAmount CachedTxGetImmatureCredit(const CWallet& wallet, const CWalletTx& wtx, bool fUseCache = true);
+CAmount CachedTxGetImmatureWatchOnlyCredit(const CWallet& wallet, const CWalletTx& wtx, const bool fUseCache = true);
+// TODO: Remove "NO_THREAD_SAFETY_ANALYSIS" and replace it with the correct
+// annotation "EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)". The
+// annotation "NO_THREAD_SAFETY_ANALYSIS" was temporarily added to avoid
+// having to resolve the issue of member access into incomplete type CWallet.
+CAmount CachedTxGetAvailableCredit(const CWallet& wallet, const CWalletTx& wtx, bool fUseCache = true, const isminefilter& filter = ISMINE_SPENDABLE) NO_THREAD_SAFETY_ANALYSIS;
struct COutputEntry
{
CTxDestination destination;
CAmount amount;
int vout;
};
+void CachedTxGetAmounts(const CWallet& wallet, const CWalletTx& wtx,
+ std::list<COutputEntry>& listReceived,
+ std::list<COutputEntry>& listSent,
+ CAmount& nFee, const isminefilter& filter);
+bool CachedTxIsFromMe(const CWallet& wallet, const CWalletTx& wtx, const isminefilter& filter);
+bool CachedTxIsTrusted(const CWallet& wallet, const CWalletTx& wtx, std::set<uint256>& trusted_parents) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+bool CachedTxIsTrusted(const CWallet& wallet, const CWalletTx& wtx);
+
+struct Balance {
+ CAmount m_mine_trusted{0}; //!< Trusted, at depth=GetBalance.min_depth or more
+ CAmount m_mine_untrusted_pending{0}; //!< Untrusted, but in mempool (pending)
+ CAmount m_mine_immature{0}; //!< Immature coinbases in the main chain
+ CAmount m_watchonly_trusted{0};
+ CAmount m_watchonly_untrusted_pending{0};
+ CAmount m_watchonly_immature{0};
+};
+Balance GetBalance(const CWallet& wallet, int min_depth = 0, bool avoid_reuse = true);
+
+std::map<CTxDestination, CAmount> GetAddressBalances(const CWallet& wallet);
+std::set<std::set<CTxDestination>> GetAddressGroupings(const CWallet& wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
#endif // BITCOIN_WALLET_RECEIVE_H
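A minimal usage sketch of the free-function API that replaces the former CWallet/CWalletTx members; the helper, the log format, and the assumption that <util/moneystr.h> is available are illustrative only:

// Report spendable balances via the new receive.h interface.
void LogTrustedBalance(const CWallet& wallet)
{
    const Balance bal = GetBalance(wallet, /* min_depth */ 0, /* avoid_reuse */ true);
    wallet.WalletLogPrintf("trusted: %s, pending: %s, immature: %s\n",
                           FormatMoney(bal.m_mine_trusted),
                           FormatMoney(bal.m_mine_untrusted_pending),
                           FormatMoney(bal.m_mine_immature));
}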
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index ea97b339cf..72c60c8fe2 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <chain.h>
+#include <clientversion.h>
#include <core_io.h>
#include <interfaces/chain.h>
#include <key_io.h>
@@ -740,7 +741,7 @@ RPCHelpMan dumpwallet()
// the user could have gotten from another RPC command prior to now
wallet.BlockUntilSyncedToCurrentChain();
- LOCK2(wallet.cs_wallet, spk_man.cs_KeyStore);
+ LOCK(wallet.cs_wallet);
EnsureWalletIsUnlocked(wallet);
@@ -762,9 +763,16 @@ RPCHelpMan dumpwallet()
throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file");
std::map<CKeyID, int64_t> mapKeyBirth;
- const std::map<CKeyID, int64_t>& mapKeyPool = spk_man.GetAllReserveKeys();
wallet.GetKeyBirthTimes(mapKeyBirth);
+ int64_t block_time = 0;
+ CHECK_NONFATAL(wallet.chain().findBlock(wallet.GetLastBlockHash(), FoundBlock().time(block_time)));
+
+ // Note: To avoid a lock-order issue, cs_main must be locked before cs_KeyStore.
+ // The two calls in this function that lock cs_main, GetKeyBirthTimes and findBlock,
+ // are therefore made above, before cs_KeyStore is locked here.
+ LOCK(spk_man.cs_KeyStore);
+
+ const std::map<CKeyID, int64_t>& mapKeyPool = spk_man.GetAllReserveKeys();
std::set<CScriptID> scripts = spk_man.GetCScripts();
// sort time/key pairs
@@ -776,11 +784,9 @@ RPCHelpMan dumpwallet()
std::sort(vKeyBirth.begin(), vKeyBirth.end());
// produce output
- file << strprintf("# Wallet dump created by Bitcoin %s\n", CLIENT_BUILD);
+ file << strprintf("# Wallet dump created by %s %s\n", PACKAGE_NAME, FormatFullVersion());
file << strprintf("# * Created on %s\n", FormatISO8601DateTime(GetTime()));
file << strprintf("# * Best block at time of backup was %i (%s),\n", wallet.GetLastBlockHeight(), wallet.GetLastBlockHash().ToString());
- int64_t block_time = 0;
- CHECK_NONFATAL(wallet.chain().findBlock(wallet.GetLastBlockHash(), FoundBlock().time(block_time)));
file << strprintf("# mined on %s\n", FormatISO8601DateTime(block_time));
file << "\n";
@@ -1755,8 +1761,10 @@ RPCHelpMan listdescriptors()
{
return RPCHelpMan{
"listdescriptors",
- "\nList descriptors imported into a descriptor-enabled wallet.",
- {},
+ "\nList descriptors imported into a descriptor-enabled wallet.\n",
+ {
+ {"private", RPCArg::Type::BOOL, RPCArg::Default{false}, "Show private descriptors."}
+ },
RPCResult{RPCResult::Type::OBJ, "", "", {
{RPCResult::Type::STR, "wallet_name", "Name of wallet this operation was performed on"},
{RPCResult::Type::ARR, "descriptors", "Array of descriptor objects",
@@ -1776,6 +1784,7 @@ RPCHelpMan listdescriptors()
}},
RPCExamples{
HelpExampleCli("listdescriptors", "") + HelpExampleRpc("listdescriptors", "")
+ + HelpExampleCli("listdescriptors", "true") + HelpExampleRpc("listdescriptors", "true")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
@@ -1786,6 +1795,11 @@ RPCHelpMan listdescriptors()
throw JSONRPCError(RPC_WALLET_ERROR, "listdescriptors is not available for non-descriptor wallets");
}
+ const bool priv = !request.params[0].isNull() && request.params[0].get_bool();
+ if (priv) {
+ EnsureWalletIsUnlocked(*wallet);
+ }
+
LOCK(wallet->cs_wallet);
UniValue descriptors(UniValue::VARR);
@@ -1799,8 +1813,9 @@ RPCHelpMan listdescriptors()
LOCK(desc_spk_man->cs_desc_man);
const auto& wallet_descriptor = desc_spk_man->GetWalletDescriptor();
std::string descriptor;
- if (!desc_spk_man->GetDescriptorString(descriptor)) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Can't get normalized descriptor string.");
+
+ if (!desc_spk_man->GetDescriptorString(descriptor, priv)) {
+ throw JSONRPCError(RPC_WALLET_ERROR, "Can't get descriptor string.");
}
spk.pushKV("desc", descriptor);
spk.pushKV("timestamp", wallet_descriptor.creation_time);
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index f1d5117415..ff9e10c5ad 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -31,7 +31,9 @@
#include <wallet/context.h>
#include <wallet/feebumper.h>
#include <wallet/load.h>
+#include <wallet/receive.h>
#include <wallet/rpcwallet.h>
+#include <wallet/spend.h>
#include <wallet/wallet.h>
#include <wallet/walletdb.h>
#include <wallet/walletutil.h>
@@ -96,14 +98,16 @@ bool GetWalletNameFromJSONRPCRequest(const JSONRPCRequest& request, std::string&
std::shared_ptr<CWallet> GetWalletForJSONRPCRequest(const JSONRPCRequest& request)
{
CHECK_NONFATAL(request.mode == JSONRPCRequest::EXECUTE);
+ WalletContext& context = EnsureWalletContext(request.context);
+
std::string wallet_name;
if (GetWalletNameFromJSONRPCRequest(request, wallet_name)) {
- std::shared_ptr<CWallet> pwallet = GetWallet(wallet_name);
+ std::shared_ptr<CWallet> pwallet = GetWallet(context, wallet_name);
if (!pwallet) throw JSONRPCError(RPC_WALLET_NOT_FOUND, "Requested wallet does not exist or is not loaded");
return pwallet;
}
- std::vector<std::shared_ptr<CWallet>> wallets = GetWallets();
+ std::vector<std::shared_ptr<CWallet>> wallets = GetWallets(context);
if (wallets.size() == 1) {
return wallets[0];
}
@@ -145,9 +149,10 @@ LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet, bool also_cr
return *spk_man;
}
-static void WalletTxToJSON(interfaces::Chain& chain, const CWalletTx& wtx, UniValue& entry)
+static void WalletTxToJSON(const CWallet& wallet, const CWalletTx& wtx, UniValue& entry)
{
- int confirms = wtx.GetDepthInMainChain();
+ interfaces::Chain& chain = wallet.chain();
+ int confirms = wallet.GetTxDepthInMainChain(wtx);
entry.pushKV("confirmations", confirms);
if (wtx.IsCoinBase())
entry.pushKV("generated", true);
@@ -160,12 +165,12 @@ static void WalletTxToJSON(interfaces::Chain& chain, const CWalletTx& wtx, UniVa
CHECK_NONFATAL(chain.findBlock(wtx.m_confirm.hashBlock, FoundBlock().time(block_time)));
entry.pushKV("blocktime", block_time);
} else {
- entry.pushKV("trusted", wtx.IsTrusted());
+ entry.pushKV("trusted", CachedTxIsTrusted(wallet, wtx));
}
uint256 hash = wtx.GetHash();
entry.pushKV("txid", hash.GetHex());
UniValue conflicts(UniValue::VARR);
- for (const uint256& conflict : wtx.GetConflicts())
+ for (const uint256& conflict : wallet.GetTxConflicts(wtx))
conflicts.push_back(conflict.GetHex());
entry.pushKV("walletconflicts", conflicts);
entry.pushKV("time", wtx.GetTxTime());
@@ -266,18 +271,19 @@ static RPCHelpMan getnewaddress()
OutputType output_type = pwallet->m_default_address_type;
if (!request.params[1].isNull()) {
- if (!ParseOutputType(request.params[1].get_str(), output_type)) {
+ std::optional<OutputType> parsed = ParseOutputType(request.params[1].get_str());
+ if (!parsed) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[1].get_str()));
- }
- if (output_type == OutputType::BECH32M && pwallet->GetLegacyScriptPubKeyMan()) {
+ } else if (parsed.value() == OutputType::BECH32M && pwallet->GetLegacyScriptPubKeyMan()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Legacy wallets cannot provide bech32m addresses");
}
+ output_type = parsed.value();
}
CTxDestination dest;
- std::string error;
+ bilingual_str error;
if (!pwallet->GetNewDestination(output_type, label, dest, error)) {
- throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, error);
+ throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, error.original);
}
return EncodeDestination(dest);
@@ -313,18 +319,19 @@ static RPCHelpMan getrawchangeaddress()
OutputType output_type = pwallet->m_default_change_type.value_or(pwallet->m_default_address_type);
if (!request.params[0].isNull()) {
- if (!ParseOutputType(request.params[0].get_str(), output_type)) {
+ std::optional<OutputType> parsed = ParseOutputType(request.params[0].get_str());
+ if (!parsed) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[0].get_str()));
- }
- if (output_type == OutputType::BECH32M && pwallet->GetLegacyScriptPubKeyMan()) {
+ } else if (parsed.value() == OutputType::BECH32M && pwallet->GetLegacyScriptPubKeyMan()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Legacy wallets cannot provide bech32m addresses");
}
+ output_type = parsed.value();
}
CTxDestination dest;
- std::string error;
+ bilingual_str error;
if (!pwallet->GetNewChangeDestination(output_type, dest, error)) {
- throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, error);
+ throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, error.original);
}
return EncodeDestination(dest);
},
@@ -419,7 +426,7 @@ UniValue SendMoney(CWallet& wallet, const CCoinControl &coin_control, std::vecto
bilingual_str error;
CTransactionRef tx;
FeeCalculation fee_calc_out;
- const bool fCreated = wallet.CreateTransaction(recipients, tx, nFeeRequired, nChangePosRet, error, coin_control, fee_calc_out, true);
+ const bool fCreated = CreateTransaction(wallet, recipients, tx, nFeeRequired, nChangePosRet, error, coin_control, fee_calc_out, true);
if (!fCreated) {
throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, error.original);
}
@@ -572,8 +579,8 @@ static RPCHelpMan listaddressgroupings()
LOCK(pwallet->cs_wallet);
UniValue jsonGroupings(UniValue::VARR);
- std::map<CTxDestination, CAmount> balances = pwallet->GetAddressBalances();
- for (const std::set<CTxDestination>& grouping : pwallet->GetAddressGroupings()) {
+ std::map<CTxDestination, CAmount> balances = GetAddressBalances(*pwallet);
+ for (const std::set<CTxDestination>& grouping : GetAddressGroupings(*pwallet)) {
UniValue jsonGrouping(UniValue::VARR);
for (const CTxDestination& address : grouping)
{
@@ -682,7 +689,7 @@ static CAmount GetReceived(const CWallet& wallet, const UniValue& params, bool b
CAmount amount = 0;
for (const std::pair<const uint256, CWalletTx>& wtx_pair : wallet.mapWallet) {
const CWalletTx& wtx = wtx_pair.second;
- if (wtx.IsCoinBase() || !wallet.chain().checkFinalTx(*wtx.tx) || wtx.GetDepthInMainChain() < min_depth) {
+ if (wtx.IsCoinBase() || !wallet.chain().checkFinalTx(*wtx.tx) || wallet.GetTxDepthInMainChain(wtx) < min_depth) {
continue;
}
@@ -822,7 +829,7 @@ static RPCHelpMan getbalance()
bool avoid_reuse = GetAvoidReuseFlag(*pwallet, request.params[3]);
- const auto bal = pwallet->GetBalance(min_depth, avoid_reuse);
+ const auto bal = GetBalance(*pwallet, min_depth, avoid_reuse);
return ValueFromAmount(bal.m_mine_trusted + (include_watchonly ? bal.m_watchonly_trusted : 0));
},
@@ -847,7 +854,7 @@ static RPCHelpMan getunconfirmedbalance()
LOCK(pwallet->cs_wallet);
- return ValueFromAmount(pwallet->GetBalance().m_mine_untrusted_pending);
+ return ValueFromAmount(GetBalance(*pwallet).m_mine_untrusted_pending);
},
};
}
@@ -1007,12 +1014,13 @@ static RPCHelpMan addmultisigaddress()
OutputType output_type = pwallet->m_default_address_type;
if (!request.params[3].isNull()) {
- if (!ParseOutputType(request.params[3].get_str(), output_type)) {
+ std::optional<OutputType> parsed = ParseOutputType(request.params[3].get_str());
+ if (!parsed) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[3].get_str()));
- }
- if (output_type == OutputType::BECH32M) {
+ } else if (parsed.value() == OutputType::BECH32M) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Bech32m multisig addresses cannot be created with legacy wallets");
}
+ output_type = parsed.value();
}
// Construct using pay-to-script-hash:
@@ -1080,7 +1088,7 @@ static UniValue ListReceived(const CWallet& wallet, const UniValue& params, bool
continue;
}
- int nDepth = wtx.GetDepthInMainChain();
+ int nDepth = wallet.GetTxDepthInMainChain(wtx);
if (nDepth < nMinDepth)
continue;
@@ -1305,9 +1313,9 @@ static void ListTransactions(const CWallet& wallet, const CWalletTx& wtx, int nM
std::list<COutputEntry> listReceived;
std::list<COutputEntry> listSent;
- wtx.GetAmounts(listReceived, listSent, nFee, filter_ismine);
+ CachedTxGetAmounts(wallet, wtx, listReceived, listSent, nFee, filter_ismine);
- bool involvesWatchonly = wtx.IsFromMe(ISMINE_WATCH_ONLY);
+ bool involvesWatchonly = CachedTxIsFromMe(wallet, wtx, ISMINE_WATCH_ONLY);
// Sent
if (!filter_label)
@@ -1328,14 +1336,14 @@ static void ListTransactions(const CWallet& wallet, const CWalletTx& wtx, int nM
entry.pushKV("vout", s.vout);
entry.pushKV("fee", ValueFromAmount(-nFee));
if (fLong)
- WalletTxToJSON(wallet.chain(), wtx, entry);
+ WalletTxToJSON(wallet, wtx, entry);
entry.pushKV("abandoned", wtx.isAbandoned());
ret.push_back(entry);
}
}
// Received
- if (listReceived.size() > 0 && wtx.GetDepthInMainChain() >= nMinDepth) {
+ if (listReceived.size() > 0 && wallet.GetTxDepthInMainChain(wtx) >= nMinDepth) {
for (const COutputEntry& r : listReceived)
{
std::string label;
@@ -1353,9 +1361,9 @@ static void ListTransactions(const CWallet& wallet, const CWalletTx& wtx, int nM
MaybePushAddress(entry, r.destination);
if (wtx.IsCoinBase())
{
- if (wtx.GetDepthInMainChain() < 1)
+ if (wallet.GetTxDepthInMainChain(wtx) < 1)
entry.pushKV("category", "orphan");
- else if (wtx.IsImmatureCoinBase())
+ else if (wallet.IsTxImmatureCoinBase(wtx))
entry.pushKV("category", "immature");
else
entry.pushKV("category", "generate");
@@ -1370,7 +1378,7 @@ static void ListTransactions(const CWallet& wallet, const CWalletTx& wtx, int nM
}
entry.pushKV("vout", r.vout);
if (fLong)
- WalletTxToJSON(wallet.chain(), wtx, entry);
+ WalletTxToJSON(wallet, wtx, entry);
ret.push_back(entry);
}
}
@@ -1610,7 +1618,7 @@ static RPCHelpMan listsinceblock()
for (const std::pair<const uint256, CWalletTx>& pairWtx : wallet.mapWallet) {
const CWalletTx& tx = pairWtx.second;
- if (depth == -1 || abs(tx.GetDepthInMainChain()) < depth) {
+ if (depth == -1 || abs(wallet.GetTxDepthInMainChain(tx)) < depth) {
ListTransactions(wallet, tx, 0, true, transactions, filter, nullptr /* filter_label */);
}
}
@@ -1731,16 +1739,16 @@ static RPCHelpMan gettransaction()
}
const CWalletTx& wtx = it->second;
- CAmount nCredit = wtx.GetCredit(filter);
- CAmount nDebit = wtx.GetDebit(filter);
+ CAmount nCredit = CachedTxGetCredit(*pwallet, wtx, filter);
+ CAmount nDebit = CachedTxGetDebit(*pwallet, wtx, filter);
CAmount nNet = nCredit - nDebit;
- CAmount nFee = (wtx.IsFromMe(filter) ? wtx.tx->GetValueOut() - nDebit : 0);
+ CAmount nFee = (CachedTxIsFromMe(*pwallet, wtx, filter) ? wtx.tx->GetValueOut() - nDebit : 0);
entry.pushKV("amount", ValueFromAmount(nNet - nFee));
- if (wtx.IsFromMe(filter))
+ if (CachedTxIsFromMe(*pwallet, wtx, filter))
entry.pushKV("fee", ValueFromAmount(nFee));
- WalletTxToJSON(pwallet->chain(), wtx, entry);
+ WalletTxToJSON(*pwallet, wtx, entry);
UniValue details(UniValue::VARR);
ListTransactions(*pwallet, wtx, 0, false, details, filter, nullptr /* filter_label */);
@@ -2379,7 +2387,7 @@ static RPCHelpMan getbalances()
LOCK(wallet.cs_wallet);
- const auto bal = wallet.GetBalance();
+ const auto bal = GetBalance(wallet);
UniValue balances{UniValue::VOBJ};
{
UniValue balances_mine{UniValue::VOBJ};
@@ -2389,7 +2397,7 @@ static RPCHelpMan getbalances()
if (wallet.IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE)) {
// If the AVOID_REUSE flag is set, bal has been set to just the un-reused address balance. Get
// the total balance, and then subtract bal to get the reused address balance.
- const auto full_bal = wallet.GetBalance(0, false);
+ const auto full_bal = GetBalance(wallet, 0, false);
balances_mine.pushKV("used", ValueFromAmount(full_bal.m_mine_trusted + full_bal.m_mine_untrusted_pending - bal.m_mine_trusted - bal.m_mine_untrusted_pending));
}
balances.pushKV("mine", balances_mine);
@@ -2457,7 +2465,7 @@ static RPCHelpMan getwalletinfo()
UniValue obj(UniValue::VOBJ);
size_t kpExternalSize = pwallet->KeypoolCountExternalKeys();
- const auto bal = pwallet->GetBalance();
+ const auto bal = GetBalance(*pwallet);
int64_t kp_oldest = pwallet->GetOldestKeyPoolTime();
obj.pushKV("walletname", pwallet->GetName());
obj.pushKV("walletversion", pwallet->GetVersion());
@@ -2559,7 +2567,8 @@ static RPCHelpMan listwallets()
{
UniValue obj(UniValue::VARR);
- for (const std::shared_ptr<CWallet>& wallet : GetWallets()) {
+ WalletContext& context = EnsureWalletContext(request.context);
+ for (const std::shared_ptr<CWallet>& wallet : GetWallets(context)) {
LOCK(wallet->cs_wallet);
obj.push_back(wallet->GetName());
}
@@ -2569,6 +2578,37 @@ static RPCHelpMan listwallets()
};
}
+static std::tuple<std::shared_ptr<CWallet>, std::vector<bilingual_str>> LoadWalletHelper(WalletContext& context, UniValue load_on_start_param, const std::string wallet_name)
+{
+ DatabaseOptions options;
+ DatabaseStatus status;
+ options.require_existing = true;
+ bilingual_str error;
+ std::vector<bilingual_str> warnings;
+ std::optional<bool> load_on_start = load_on_start_param.isNull() ? std::nullopt : std::optional<bool>(load_on_start_param.get_bool());
+ std::shared_ptr<CWallet> const wallet = LoadWallet(context, wallet_name, load_on_start, options, status, error, warnings);
+
+ if (!wallet) {
+ // Map bad format to not found, since bad format is returned when the
+ // wallet directory exists, but doesn't contain a data file.
+ RPCErrorCode code = RPC_WALLET_ERROR;
+ switch (status) {
+ case DatabaseStatus::FAILED_NOT_FOUND:
+ case DatabaseStatus::FAILED_BAD_FORMAT:
+ code = RPC_WALLET_NOT_FOUND;
+ break;
+ case DatabaseStatus::FAILED_ALREADY_LOADED:
+ code = RPC_WALLET_ALREADY_LOADED;
+ break;
+ default: // RPC_WALLET_ERROR is returned for all other cases.
+ break;
+ }
+ throw JSONRPCError(code, error.original);
+ }
+
+ return { wallet, warnings };
+}
+
static RPCHelpMan loadwallet()
{
return RPCHelpMan{"loadwallet",
@@ -2595,30 +2635,7 @@ static RPCHelpMan loadwallet()
WalletContext& context = EnsureWalletContext(request.context);
const std::string name(request.params[0].get_str());
- DatabaseOptions options;
- DatabaseStatus status;
- options.require_existing = true;
- bilingual_str error;
- std::vector<bilingual_str> warnings;
- std::optional<bool> load_on_start = request.params[1].isNull() ? std::nullopt : std::optional<bool>(request.params[1].get_bool());
- std::shared_ptr<CWallet> const wallet = LoadWallet(*context.chain, name, load_on_start, options, status, error, warnings);
- if (!wallet) {
- // Map bad format to not found, since bad format is returned when the
- // wallet directory exists, but doesn't contain a data file.
- RPCErrorCode code = RPC_WALLET_ERROR;
- switch (status) {
- case DatabaseStatus::FAILED_NOT_FOUND:
- case DatabaseStatus::FAILED_BAD_FORMAT:
- code = RPC_WALLET_NOT_FOUND;
- break;
- case DatabaseStatus::FAILED_ALREADY_LOADED:
- code = RPC_WALLET_ALREADY_LOADED;
- break;
- default: // RPC_WALLET_ERROR is returned for all other cases.
- break;
- }
- throw JSONRPCError(code, error.original);
- }
+ auto [wallet, warnings] = LoadWalletHelper(context, request.params[1], name);
UniValue obj(UniValue::VOBJ);
obj.pushKV("name", wallet->GetName());
@@ -2777,7 +2794,7 @@ static RPCHelpMan createwallet()
options.create_passphrase = passphrase;
bilingual_str error;
std::optional<bool> load_on_start = request.params[6].isNull() ? std::nullopt : std::optional<bool>(request.params[6].get_bool());
- std::shared_ptr<CWallet> wallet = CreateWallet(*context.chain, request.params[0].get_str(), load_on_start, options, status, error, warnings);
+ std::shared_ptr<CWallet> wallet = CreateWallet(context, request.params[0].get_str(), load_on_start, options, status, error, warnings);
if (!wallet) {
RPCErrorCode code = status == DatabaseStatus::FAILED_ENCRYPT ? RPC_WALLET_ENCRYPTION_FAILED : RPC_WALLET_ERROR;
throw JSONRPCError(code, error.original);
@@ -2792,6 +2809,68 @@ static RPCHelpMan createwallet()
};
}
+static RPCHelpMan restorewallet()
+{
+ return RPCHelpMan{
+ "restorewallet",
+ "\nRestore and loads a wallet from backup.\n",
+ {
+ {"wallet_name", RPCArg::Type::STR, RPCArg::Optional::NO, "The name that will be applied to the restored wallet"},
+ {"backup_file", RPCArg::Type::STR, RPCArg::Optional::NO, "The backup file that will be used to restore the wallet."},
+ {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED_NAMED_ARG, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "The wallet name if restored successfully."},
+ {RPCResult::Type::STR, "warning", "Warning message if wallet was not loaded cleanly."},
+ }
+ },
+ RPCExamples{
+ HelpExampleCli("restorewallet", "\"testwallet\" \"home\\backups\\backup-file.bak\"")
+ + HelpExampleRpc("restorewallet", "\"testwallet\" \"home\\backups\\backup-file.bak\"")
+ + HelpExampleCliNamed("restorewallet", {{"wallet_name", "testwallet"}, {"backup_file", "home\\backups\\backup-file.bak"}, {"load_on_startup", true}})
+ + HelpExampleRpcNamed("restorewallet", {{"wallet_name", "testwallet"}, {"backup_file", "home\\backups\\backup-file.bak"}, {"load_on_startup", true}})
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
+
+ WalletContext& context = EnsureWalletContext(request.context);
+
+ std::string backup_file = request.params[1].get_str();
+
+ if (!fs::exists(backup_file)) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Backup file does not exist");
+ }
+
+ std::string wallet_name = request.params[0].get_str();
+
+ const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), wallet_name);
+
+ if (fs::exists(wallet_path)) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Wallet name already exists.");
+ }
+
+ if (!TryCreateDirectories(wallet_path)) {
+ throw JSONRPCError(RPC_WALLET_ERROR, strprintf("Failed to create database path '%s'. Database already exists.", wallet_path.string()));
+ }
+
+ auto wallet_file = wallet_path / "wallet.dat";
+
+ fs::copy_file(backup_file, wallet_file, fs::copy_option::fail_if_exists);
+
+ auto [wallet, warnings] = LoadWalletHelper(context, request.params[2], wallet_name);
+
+ UniValue obj(UniValue::VOBJ);
+ obj.pushKV("name", wallet->GetName());
+ obj.pushKV("warning", Join(warnings, Untranslated("\n")).original);
+
+ return obj;
+
+},
+ };
+}
+
static RPCHelpMan unloadwallet()
{
return RPCHelpMan{"unloadwallet",
@@ -2819,7 +2898,8 @@ static RPCHelpMan unloadwallet()
wallet_name = request.params[0].get_str();
}
- std::shared_ptr<CWallet> wallet = GetWallet(wallet_name);
+ WalletContext& context = EnsureWalletContext(request.context);
+ std::shared_ptr<CWallet> wallet = GetWallet(context, wallet_name);
if (!wallet) {
throw JSONRPCError(RPC_WALLET_NOT_FOUND, "Requested wallet does not exist or is not loaded");
}
@@ -2829,7 +2909,7 @@ static RPCHelpMan unloadwallet()
// is destroyed (see CheckUniqueFileid).
std::vector<bilingual_str> warnings;
std::optional<bool> load_on_start = request.params[1].isNull() ? std::nullopt : std::optional<bool>(request.params[1].get_bool());
- if (!RemoveWallet(wallet, load_on_start, warnings)) {
+ if (!RemoveWallet(context, wallet, load_on_start, warnings)) {
throw JSONRPCError(RPC_MISC_ERROR, "Requested wallet already unloaded");
}
@@ -2981,7 +3061,7 @@ static RPCHelpMan listunspent()
cctl.m_max_depth = nMaxDepth;
cctl.m_include_unsafe_inputs = include_unsafe;
LOCK(pwallet->cs_wallet);
- pwallet->AvailableCoins(vecOutputs, &cctl, nMinimumAmount, nMaximumAmount, nMinimumSumAmount, nMaximumCount);
+ AvailableCoins(*pwallet, vecOutputs, &cctl, nMinimumAmount, nMaximumAmount, nMinimumSumAmount, nMaximumCount);
}
LOCK(pwallet->cs_wallet);
@@ -3133,11 +3213,11 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
if (options.exists("changeAddress") || options.exists("change_address")) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot specify both change address and address type options");
}
- OutputType out_type;
- if (!ParseOutputType(options["change_type"].get_str(), out_type)) {
+ if (std::optional<OutputType> parsed = ParseOutputType(options["change_type"].get_str())) {
+ coinControl.m_change_type.emplace(parsed.value());
+ } else {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown change type '%s'", options["change_type"].get_str()));
}
- coinControl.m_change_type.emplace(out_type);
}
const UniValue include_watching_option = options.exists("include_watching") ? options["include_watching"] : options["includeWatching"];
@@ -3197,7 +3277,7 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
bilingual_str error;
- if (!wallet.FundTransaction(tx, fee_out, change_position, error, lockUnspents, setSubtractFeeFromOutputs, coinControl)) {
+ if (!FundTransaction(wallet, tx, fee_out, change_position, error, lockUnspents, setSubtractFeeFromOutputs, coinControl)) {
throw JSONRPCError(RPC_WALLET_ERROR, error.original);
}
}
@@ -3389,7 +3469,7 @@ RPCHelpMan signrawtransactionwithwallet()
int nHashType = ParseSighashString(request.params[2]);
// Script verification errors
- std::map<int, std::string> input_errors;
+ std::map<int, bilingual_str> input_errors;
bool complete = pwallet->SignTransaction(mtx, coins, nHashType, input_errors);
UniValue result(UniValue::VOBJ);
@@ -3872,7 +3952,7 @@ RPCHelpMan getaddressinfo()
DescriptorScriptPubKeyMan* desc_spk_man = dynamic_cast<DescriptorScriptPubKeyMan*>(pwallet->GetScriptPubKeyMan(scriptPubKey));
if (desc_spk_man) {
std::string desc_str;
- if (desc_spk_man->GetDescriptorString(desc_str)) {
+ if (desc_spk_man->GetDescriptorString(desc_str, /* priv */ false)) {
ret.pushKV("parent_desc", desc_str);
}
}
@@ -3882,7 +3962,7 @@ RPCHelpMan getaddressinfo()
UniValue detail = DescribeWalletAddress(*pwallet, dest);
ret.pushKVs(detail);
- ret.pushKV("ischange", pwallet->IsChange(scriptPubKey));
+ ret.pushKV("ischange", ScriptIsChange(*pwallet, scriptPubKey));
ScriptPubKeyMan* spk_man = pwallet->GetScriptPubKeyMan(scriptPubKey);
if (spk_man) {
@@ -4636,6 +4716,7 @@ static const CRPCCommand commands[] =
{ "wallet", &bumpfee, },
{ "wallet", &psbtbumpfee, },
{ "wallet", &createwallet, },
+ { "wallet", &restorewallet, },
{ "wallet", &dumpprivkey, },
{ "wallet", &dumpwallet, },
{ "wallet", &encryptwallet, },
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index 73433214f1..fe41f9b8cc 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -20,10 +20,10 @@
//! Value for the first BIP 32 hardened derivation. Can be used as a bit mask and as a value. See BIP 32 for more details.
const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;
-bool LegacyScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error)
+bool LegacyScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, bilingual_str& error)
{
if (LEGACY_OUTPUT_TYPES.count(type) == 0) {
- error = _("Error: Legacy wallets only support the \"legacy\", \"p2sh-segwit\", and \"bech32\" address types").translated;
+ error = _("Error: Legacy wallets only support the \"legacy\", \"p2sh-segwit\", and \"bech32\" address types");
return false;
}
assert(type != OutputType::BECH32M);
@@ -34,7 +34,7 @@ bool LegacyScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestinat
// Generate a new key that is added to wallet
CPubKey new_key;
if (!GetKeyFromPool(new_key, type)) {
- error = _("Error: Keypool ran out, please call keypoolrefill first").translated;
+ error = _("Error: Keypool ran out, please call keypoolrefill first");
return false;
}
LearnRelatedScripts(new_key, type);
@@ -295,22 +295,22 @@ bool LegacyScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, WalletBat
return true;
}
-bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error)
+bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, bilingual_str& error)
{
if (LEGACY_OUTPUT_TYPES.count(type) == 0) {
- error = _("Error: Legacy wallets only support the \"legacy\", \"p2sh-segwit\", and \"bech32\" address types").translated;
+ error = _("Error: Legacy wallets only support the \"legacy\", \"p2sh-segwit\", and \"bech32\" address types");
return false;
}
assert(type != OutputType::BECH32M);
LOCK(cs_KeyStore);
if (!CanGetAddresses(internal)) {
- error = _("Error: Keypool ran out, please call keypoolrefill first").translated;
+ error = _("Error: Keypool ran out, please call keypoolrefill first");
return false;
}
if (!ReserveKeyFromKeyPool(index, keypool, internal)) {
- error = _("Error: Keypool ran out, please call keypoolrefill first").translated;
+ error = _("Error: Keypool ran out, please call keypoolrefill first");
return false;
}
address = GetDestinationForKey(keypool.vchPubKey, type);
@@ -592,7 +592,7 @@ bool LegacyScriptPubKeyMan::CanProvide(const CScript& script, SignatureData& sig
}
}
-bool LegacyScriptPubKeyMan::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const
+bool LegacyScriptPubKeyMan::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const
{
return ::SignTransaction(tx, this, coins, sighash, input_errors);
}
@@ -1613,11 +1613,11 @@ std::set<CKeyID> LegacyScriptPubKeyMan::GetKeys() const
return set_address;
}
-bool DescriptorScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error)
+bool DescriptorScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, bilingual_str& error)
{
// Returns true if this descriptor supports getting new addresses. Conditions where we may be unable to fetch them (e.g. locked) are caught later
if (!CanGetAddresses()) {
- error = "No addresses available";
+ error = _("No addresses available");
return false;
}
{
@@ -1636,12 +1636,12 @@ bool DescriptorScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDest
std::vector<CScript> scripts_temp;
if (m_wallet_descriptor.range_end <= m_max_cached_index && !TopUp(1)) {
// We can't generate anymore keys
- error = "Error: Keypool ran out, please call keypoolrefill first";
+ error = _("Error: Keypool ran out, please call keypoolrefill first");
return false;
}
if (!m_wallet_descriptor.descriptor->ExpandFromCache(m_wallet_descriptor.next_index, m_wallet_descriptor.cache, scripts_temp, out_keys)) {
// We can't generate anymore keys
- error = "Error: Keypool ran out, please call keypoolrefill first";
+ error = _("Error: Keypool ran out, please call keypoolrefill first");
return false;
}
@@ -1721,7 +1721,7 @@ bool DescriptorScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, Walle
return true;
}
-bool DescriptorScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error)
+bool DescriptorScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, bilingual_str& error)
{
LOCK(cs_desc_man);
bool result = GetNewDestination(type, address, error);
@@ -2046,7 +2046,7 @@ bool DescriptorScriptPubKeyMan::CanProvide(const CScript& script, SignatureData&
return IsMine(script);
}
-bool DescriptorScriptPubKeyMan::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const
+bool DescriptorScriptPubKeyMan::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const
{
std::unique_ptr<FlatSigningProvider> keys = std::make_unique<FlatSigningProvider>();
for (const auto& coin_pair : coins) {
@@ -2258,13 +2258,20 @@ const std::vector<CScript> DescriptorScriptPubKeyMan::GetScriptPubKeys() const
return script_pub_keys;
}
-bool DescriptorScriptPubKeyMan::GetDescriptorString(std::string& out) const
+bool DescriptorScriptPubKeyMan::GetDescriptorString(std::string& out, const bool priv) const
{
LOCK(cs_desc_man);
FlatSigningProvider provider;
provider.keys = GetKeys();
+ if (priv) {
+ // For the private version, always return the master key to avoid
+ // exposing child private keys. The risk of exposing child private keys
+ // together with the parent xpub may not be obvious to users.
+ return m_wallet_descriptor.descriptor->ToPrivateString(provider, out);
+ }
+
return m_wallet_descriptor.descriptor->ToNormalizedString(provider, out, &m_wallet_descriptor.cache);
}
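Editor's note: GetDescriptorString now takes a priv flag, as shown in the hunk above. A minimal sketch of a caller requesting both forms of a descriptor; DumpDescriptorStrings is a hypothetical helper, and the private form is only available when the wallet is unlocked.

    #include <logging.h>
    #include <wallet/scriptpubkeyman.h>

    // Hypothetical helper: log the normalized public descriptor and, when
    // requested, the private form built from the master key.
    static void DumpDescriptorStrings(const DescriptorScriptPubKeyMan& spkm)
    {
        std::string pub_desc, priv_desc;
        if (spkm.GetDescriptorString(pub_desc, /* priv */ false)) {
            LogPrintf("public:  %s\n", pub_desc);
        }
        if (spkm.GetDescriptorString(priv_desc, /* priv */ true)) {
            LogPrintf("private: %s\n", priv_desc);
        }
    }
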
diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h
index 572a695662..ef74638751 100644
--- a/src/wallet/scriptpubkeyman.h
+++ b/src/wallet/scriptpubkeyman.h
@@ -148,17 +148,6 @@ public:
}
};
-class KeyIDHasher
-{
-public:
- KeyIDHasher() {}
-
- size_t operator()(const CKeyID& id) const
- {
- return id.GetUint64(0);
- }
-};
-
/*
* A class implementing ScriptPubKeyMan manages some (or all) scriptPubKeys used in a wallet.
* It contains the scripts and keys related to the scriptPubKeys it manages.
@@ -174,14 +163,14 @@ protected:
public:
explicit ScriptPubKeyMan(WalletStorage& storage) : m_storage(storage) {}
virtual ~ScriptPubKeyMan() {};
- virtual bool GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error) { return false; }
+ virtual bool GetNewDestination(const OutputType type, CTxDestination& dest, bilingual_str& error) { return false; }
virtual isminetype IsMine(const CScript& script) const { return ISMINE_NO; }
//! Check that the given decryption key is valid for this ScriptPubKeyMan, i.e. it decrypts all of the keys handled by it.
virtual bool CheckDecryptionKey(const CKeyingMaterial& master_key, bool accept_no_keys = false) { return false; }
virtual bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) { return false; }
- virtual bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) { return false; }
+ virtual bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, bilingual_str& error) { return false; }
virtual void KeepDestination(int64_t index, const OutputType& type) {}
virtual void ReturnDestination(int64_t index, bool internal, const CTxDestination& addr) {}
@@ -207,7 +196,7 @@ public:
virtual bool CanGetAddresses(bool internal = false) const { return false; }
/** Upgrades the wallet to the specified version */
- virtual bool Upgrade(int prev_version, int new_version, bilingual_str& error) { return false; }
+ virtual bool Upgrade(int prev_version, int new_version, bilingual_str& error) { return true; }
virtual bool HavePrivateKeys() const { return false; }
@@ -230,7 +219,7 @@ public:
virtual bool CanProvide(const CScript& script, SignatureData& sigdata) { return false; }
/** Creates new signatures and adds them to the transaction. Returns whether all inputs were signed */
- virtual bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const { return false; }
+ virtual bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const { return false; }
/** Sign a message with the given script */
virtual SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const { return SigningResult::SIGNING_FAILED; };
/** Adds script and derivation path information to a PSBT, and optionally signs it. */
@@ -355,13 +344,13 @@ private:
public:
using ScriptPubKeyMan::ScriptPubKeyMan;
- bool GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error) override;
+ bool GetNewDestination(const OutputType type, CTxDestination& dest, bilingual_str& error) override;
isminetype IsMine(const CScript& script) const override;
bool CheckDecryptionKey(const CKeyingMaterial& master_key, bool accept_no_keys = false) override;
bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) override;
- bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) override;
+ bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, bilingual_str& error) override;
void KeepDestination(int64_t index, const OutputType& type) override;
void ReturnDestination(int64_t index, bool internal, const CTxDestination&) override;
@@ -396,7 +385,7 @@ public:
bool CanProvide(const CScript& script, SignatureData& sigdata) override;
- bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const override;
+ bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const override;
SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const override;
TransactionError FillPSBT(PartiallySignedTransaction& psbt, const PrecomputedTransactionData& txdata, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override;
@@ -559,13 +548,13 @@ public:
mutable RecursiveMutex cs_desc_man;
- bool GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error) override;
+ bool GetNewDestination(const OutputType type, CTxDestination& dest, bilingual_str& error) override;
isminetype IsMine(const CScript& script) const override;
bool CheckDecryptionKey(const CKeyingMaterial& master_key, bool accept_no_keys = false) override;
bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) override;
- bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) override;
+ bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, bilingual_str& error) override;
void ReturnDestination(int64_t index, bool internal, const CTxDestination& addr) override;
// Tops up the descriptor cache and m_map_script_pub_keys. The cache is stored in the wallet file
@@ -601,7 +590,7 @@ public:
bool CanProvide(const CScript& script, SignatureData& sigdata) override;
- bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const override;
+ bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const override;
SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const override;
TransactionError FillPSBT(PartiallySignedTransaction& psbt, const PrecomputedTransactionData& txdata, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override;
@@ -621,7 +610,7 @@ public:
const WalletDescriptor GetWalletDescriptor() const EXCLUSIVE_LOCKS_REQUIRED(cs_desc_man);
const std::vector<CScript> GetScriptPubKeys() const;
- bool GetDescriptorString(std::string& out) const;
+ bool GetDescriptorString(std::string& out, const bool priv) const;
void UpgradeDescriptorCache();
};
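Editor's note: the header changes above switch the ScriptPubKeyMan error out-parameters from std::string to bilingual_str. A minimal sketch of the resulting calling convention, matching how the RPC code above reports error.original; RequestAddress is a hypothetical wrapper for illustration.

    #include <outputtype.h>
    #include <script/standard.h>
    #include <util/translation.h>
    #include <wallet/scriptpubkeyman.h>

    // Hypothetical wrapper: RPC-style callers surface the untranslated text,
    // while GUI code can display error.translated instead.
    static bool RequestAddress(ScriptPubKeyMan& spk_man, CTxDestination& dest, std::string& err_out)
    {
        bilingual_str error;
        if (!spk_man.GetNewDestination(OutputType::BECH32, dest, error)) {
            err_out = error.original;
            return false;
        }
        return true;
    }
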
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index 6a8df437ae..4a7a268982 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -21,6 +21,11 @@ using interfaces::FoundBlock;
static constexpr size_t OUTPUT_GROUP_MAX_ENTRIES{100};
+int GetTxSpendSize(const CWallet& wallet, const CWalletTx& wtx, unsigned int out, bool use_max_sig)
+{
+ return CalculateMaximumSignedInputSize(wtx.tx->vout[out], &wallet, use_max_sig);
+}
+
std::string COutput::ToString() const
{
return strprintf("COutput(%s, %d, %d) [%s]", tx->GetHash().ToString(), i, nDepth, FormatMoney(tx->tx->vout[i].nValue));
@@ -64,33 +69,33 @@ TxSize CalculateMaximumSignedTxSize(const CTransaction &tx, const CWallet *walle
return CalculateMaximumSignedTxSize(tx, wallet, txouts, use_max_sig);
}
-void CWallet::AvailableCoins(std::vector<COutput>& vCoins, const CCoinControl* coinControl, const CAmount& nMinimumAmount, const CAmount& nMaximumAmount, const CAmount& nMinimumSumAmount, const uint64_t nMaximumCount) const
+void AvailableCoins(const CWallet& wallet, std::vector<COutput>& vCoins, const CCoinControl* coinControl, const CAmount& nMinimumAmount, const CAmount& nMaximumAmount, const CAmount& nMinimumSumAmount, const uint64_t nMaximumCount)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(wallet.cs_wallet);
vCoins.clear();
CAmount nTotal = 0;
// Either the WALLET_FLAG_AVOID_REUSE flag is not set (in which case we always allow), or we default to avoiding, and only in the case where
// a coin control object is provided, and has the avoid address reuse flag set to false, do we allow already used addresses
- bool allow_used_addresses = !IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE) || (coinControl && !coinControl->m_avoid_address_reuse);
+ bool allow_used_addresses = !wallet.IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE) || (coinControl && !coinControl->m_avoid_address_reuse);
const int min_depth = {coinControl ? coinControl->m_min_depth : DEFAULT_MIN_DEPTH};
const int max_depth = {coinControl ? coinControl->m_max_depth : DEFAULT_MAX_DEPTH};
const bool only_safe = {coinControl ? !coinControl->m_include_unsafe_inputs : true};
std::set<uint256> trusted_parents;
- for (const auto& entry : mapWallet)
+ for (const auto& entry : wallet.mapWallet)
{
const uint256& wtxid = entry.first;
const CWalletTx& wtx = entry.second;
- if (!chain().checkFinalTx(*wtx.tx)) {
+ if (!wallet.chain().checkFinalTx(*wtx.tx)) {
continue;
}
- if (wtx.IsImmatureCoinBase())
+ if (wallet.IsTxImmatureCoinBase(wtx))
continue;
- int nDepth = wtx.GetDepthInMainChain();
+ int nDepth = wallet.GetTxDepthInMainChain(wtx);
if (nDepth < 0)
continue;
@@ -99,7 +104,7 @@ void CWallet::AvailableCoins(std::vector<COutput>& vCoins, const CCoinControl* c
if (nDepth == 0 && !wtx.InMempool())
continue;
- bool safeTx = IsTrusted(wtx, trusted_parents);
+ bool safeTx = CachedTxIsTrusted(wallet, wtx, trusted_parents);
// We should not consider coins from transactions that are replacing
// other transactions.
@@ -152,28 +157,28 @@ void CWallet::AvailableCoins(std::vector<COutput>& vCoins, const CCoinControl* c
if (coinControl && coinControl->HasSelected() && !coinControl->fAllowOtherInputs && !coinControl->IsSelected(COutPoint(entry.first, i)))
continue;
- if (IsLockedCoin(entry.first, i))
+ if (wallet.IsLockedCoin(entry.first, i))
continue;
- if (IsSpent(wtxid, i))
+ if (wallet.IsSpent(wtxid, i))
continue;
- isminetype mine = IsMine(wtx.tx->vout[i]);
+ isminetype mine = wallet.IsMine(wtx.tx->vout[i]);
if (mine == ISMINE_NO) {
continue;
}
- if (!allow_used_addresses && IsSpentKey(wtxid, i)) {
+ if (!allow_used_addresses && wallet.IsSpentKey(wtxid, i)) {
continue;
}
- std::unique_ptr<SigningProvider> provider = GetSolvingProvider(wtx.tx->vout[i].scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = wallet.GetSolvingProvider(wtx.tx->vout[i].scriptPubKey);
bool solvable = provider ? IsSolvable(*provider, wtx.tx->vout[i].scriptPubKey) : false;
bool spendable = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (((mine & ISMINE_WATCH_ONLY) != ISMINE_NO) && (coinControl && coinControl->fAllowWatchOnly && solvable));
- vCoins.push_back(COutput(&wtx, i, nDepth, spendable, solvable, safeTx, (coinControl && coinControl->fAllowWatchOnly)));
+ vCoins.push_back(COutput(wallet, wtx, i, nDepth, spendable, solvable, safeTx, (coinControl && coinControl->fAllowWatchOnly)));
// Checks the sum amount of all UTXO's.
if (nMinimumSumAmount != MAX_MONEY) {
@@ -192,13 +197,13 @@ void CWallet::AvailableCoins(std::vector<COutput>& vCoins, const CCoinControl* c
}
}
-CAmount CWallet::GetAvailableBalance(const CCoinControl* coinControl) const
+CAmount GetAvailableBalance(const CWallet& wallet, const CCoinControl* coinControl)
{
- LOCK(cs_wallet);
+ LOCK(wallet.cs_wallet);
CAmount balance = 0;
std::vector<COutput> vCoins;
- AvailableCoins(vCoins, coinControl);
+ AvailableCoins(wallet, vCoins, coinControl);
for (const COutput& out : vCoins) {
if (out.fSpendable) {
balance += out.tx->tx->vout[out.i].nValue;
@@ -207,16 +212,16 @@ CAmount CWallet::GetAvailableBalance(const CCoinControl* coinControl) const
return balance;
}
-const CTxOut& CWallet::FindNonChangeParentOutput(const CTransaction& tx, int output) const
+const CTxOut& FindNonChangeParentOutput(const CWallet& wallet, const CTransaction& tx, int output)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(wallet.cs_wallet);
const CTransaction* ptx = &tx;
int n = output;
- while (IsChange(ptx->vout[n]) && ptx->vin.size() > 0) {
+ while (OutputIsChange(wallet, ptx->vout[n]) && ptx->vin.size() > 0) {
const COutPoint& prevout = ptx->vin[0].prevout;
- auto it = mapWallet.find(prevout.hash);
- if (it == mapWallet.end() || it->second.tx->vout.size() <= prevout.n ||
- !IsMine(it->second.tx->vout[prevout.n])) {
+ auto it = wallet.mapWallet.find(prevout.hash);
+ if (it == wallet.mapWallet.end() || it->second.tx->vout.size() <= prevout.n ||
+ !wallet.IsMine(it->second.tx->vout[prevout.n])) {
break;
}
ptx = it->second.tx.get();
@@ -225,39 +230,39 @@ const CTxOut& CWallet::FindNonChangeParentOutput(const CTransaction& tx, int out
return ptx->vout[n];
}
-std::map<CTxDestination, std::vector<COutput>> CWallet::ListCoins() const
+std::map<CTxDestination, std::vector<COutput>> ListCoins(const CWallet& wallet)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(wallet.cs_wallet);
std::map<CTxDestination, std::vector<COutput>> result;
std::vector<COutput> availableCoins;
- AvailableCoins(availableCoins);
+ AvailableCoins(wallet, availableCoins);
for (const COutput& coin : availableCoins) {
CTxDestination address;
- if ((coin.fSpendable || (IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && coin.fSolvable)) &&
- ExtractDestination(FindNonChangeParentOutput(*coin.tx->tx, coin.i).scriptPubKey, address)) {
+ if ((coin.fSpendable || (wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && coin.fSolvable)) &&
+ ExtractDestination(FindNonChangeParentOutput(wallet, *coin.tx->tx, coin.i).scriptPubKey, address)) {
result[address].emplace_back(std::move(coin));
}
}
std::vector<COutPoint> lockedCoins;
- ListLockedCoins(lockedCoins);
+ wallet.ListLockedCoins(lockedCoins);
// Include watch-only for LegacyScriptPubKeyMan wallets without private keys
- const bool include_watch_only = GetLegacyScriptPubKeyMan() && IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
+ const bool include_watch_only = wallet.GetLegacyScriptPubKeyMan() && wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
const isminetype is_mine_filter = include_watch_only ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
for (const COutPoint& output : lockedCoins) {
- auto it = mapWallet.find(output.hash);
- if (it != mapWallet.end()) {
- int depth = it->second.GetDepthInMainChain();
+ auto it = wallet.mapWallet.find(output.hash);
+ if (it != wallet.mapWallet.end()) {
+ int depth = wallet.GetTxDepthInMainChain(it->second);
if (depth >= 0 && output.n < it->second.tx->vout.size() &&
- IsMine(it->second.tx->vout[output.n]) == is_mine_filter
+ wallet.IsMine(it->second.tx->vout[output.n]) == is_mine_filter
) {
CTxDestination address;
- if (ExtractDestination(FindNonChangeParentOutput(*it->second.tx, output.n).scriptPubKey, address)) {
+ if (ExtractDestination(FindNonChangeParentOutput(wallet, *it->second.tx, output.n).scriptPubKey, address)) {
result[address].emplace_back(
- &it->second, output.n, depth, true /* spendable */, true /* solvable */, false /* safe */);
+ wallet, it->second, output.n, depth, true /* spendable */, true /* solvable */, false /* safe */);
}
}
}
@@ -266,7 +271,7 @@ std::map<CTxDestination, std::vector<COutput>> CWallet::ListCoins() const
return result;
}
-std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outputs, const CoinSelectionParams& coin_sel_params, const CoinEligibilityFilter& filter, bool positive_only) const
+std::vector<OutputGroup> GroupOutputs(const CWallet& wallet, const std::vector<COutput>& outputs, const CoinSelectionParams& coin_sel_params, const CoinEligibilityFilter& filter, bool positive_only)
{
std::vector<OutputGroup> groups_out;
@@ -277,12 +282,12 @@ std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outpu
if (!output.fSpendable) continue;
size_t ancestors, descendants;
- chain().getTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
+ wallet.chain().getTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
CInputCoin input_coin = output.GetInputCoin();
// Make an OutputGroup containing just this output
OutputGroup group{coin_sel_params};
- group.Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants, positive_only);
+ group.Insert(input_coin, output.nDepth, CachedTxIsFromMe(wallet, *output.tx, ISMINE_ALL), ancestors, descendants, positive_only);
// Check the OutputGroup's eligibility. Only add the eligible ones.
if (positive_only && group.GetSelectionAmount() <= 0) continue;
@@ -303,7 +308,7 @@ std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outpu
if (!output.fSpendable) continue;
size_t ancestors, descendants;
- chain().getTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
+ wallet.chain().getTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
CInputCoin input_coin = output.GetInputCoin();
CScript spk = input_coin.txout.scriptPubKey;
@@ -327,7 +332,7 @@ std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outpu
}
// Add the input_coin to group
- group->Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants, positive_only);
+ group->Insert(input_coin, output.nDepth, CachedTxIsFromMe(wallet, *output.tx, ISMINE_ALL), ancestors, descendants, positive_only);
}
// Now we go through the entire map and pull out the OutputGroups
@@ -352,25 +357,52 @@ std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outpu
return groups_out;
}
-bool CWallet::AttemptSelection(const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<COutput> coins,
- std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CoinSelectionParams& coin_selection_params) const
+bool AttemptSelection(const CWallet& wallet, const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<COutput> coins,
+ std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CoinSelectionParams& coin_selection_params)
{
setCoinsRet.clear();
nValueRet = 0;
+ // Vector of results for use with waste calculation
+ // In order: calculated waste, selected inputs, selected input value (sum of input values)
+ // TODO: Use a struct representing the selection result
+ std::vector<std::tuple<CAmount, std::set<CInputCoin>, CAmount>> results;
// Note that unlike KnapsackSolver, we do not include the fee for creating a change output as BnB will not create a change output.
- std::vector<OutputGroup> positive_groups = GroupOutputs(coins, coin_selection_params, eligibility_filter, true /* positive_only */);
- if (SelectCoinsBnB(positive_groups, nTargetValue, coin_selection_params.m_cost_of_change, setCoinsRet, nValueRet)) {
- return true;
+ std::vector<OutputGroup> positive_groups = GroupOutputs(wallet, coins, coin_selection_params, eligibility_filter, true /* positive_only */);
+ std::set<CInputCoin> bnb_coins;
+ CAmount bnb_value;
+ if (SelectCoinsBnB(positive_groups, nTargetValue, coin_selection_params.m_cost_of_change, bnb_coins, bnb_value)) {
+ const auto waste = GetSelectionWaste(bnb_coins, /* cost of change */ CAmount(0), nTargetValue, !coin_selection_params.m_subtract_fee_outputs);
+ results.emplace_back(std::make_tuple(waste, std::move(bnb_coins), bnb_value));
}
+
// The knapsack solver has some legacy behavior where it will spend dust outputs. We retain this behavior, so don't filter for positive only here.
- std::vector<OutputGroup> all_groups = GroupOutputs(coins, coin_selection_params, eligibility_filter, false /* positive_only */);
+ std::vector<OutputGroup> all_groups = GroupOutputs(wallet, coins, coin_selection_params, eligibility_filter, false /* positive_only */);
// While nTargetValue includes the transaction fees for non-input things, it does not include the fee for creating a change output.
// So we need to include that for KnapsackSolver as well, as we are expecting to create a change output.
- return KnapsackSolver(nTargetValue + coin_selection_params.m_change_fee, all_groups, setCoinsRet, nValueRet);
+ std::set<CInputCoin> knapsack_coins;
+ CAmount knapsack_value;
+ if (KnapsackSolver(nTargetValue + coin_selection_params.m_change_fee, all_groups, knapsack_coins, knapsack_value)) {
+ const auto waste = GetSelectionWaste(knapsack_coins, coin_selection_params.m_cost_of_change, nTargetValue + coin_selection_params.m_change_fee, !coin_selection_params.m_subtract_fee_outputs);
+ results.emplace_back(std::make_tuple(waste, std::move(knapsack_coins), knapsack_value));
+ }
+
+ if (results.size() == 0) {
+ // No solution found
+ return false;
+ }
+
+ // Choose the result with the least waste
+ // If the waste is the same, choose the one which spends more inputs.
+ const auto& best_result = std::min_element(results.begin(), results.end(), [](const auto& a, const auto& b) {
+ return std::get<0>(a) < std::get<0>(b) || (std::get<0>(a) == std::get<0>(b) && std::get<1>(a).size() > std::get<1>(b).size());
+ });
+ setCoinsRet = std::get<1>(*best_result);
+ nValueRet = std::get<2>(*best_result);
+ return true;
}
-bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CCoinControl& coin_control, CoinSelectionParams& coin_selection_params) const
+bool SelectCoins(const CWallet& wallet, const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CCoinControl& coin_control, CoinSelectionParams& coin_selection_params)
{
std::vector<COutput> vCoins(vAvailableCoins);
CAmount value_to_select = nTargetValue;
@@ -396,8 +428,8 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
coin_control.ListSelected(vPresetInputs);
for (const COutPoint& outpoint : vPresetInputs)
{
- std::map<uint256, CWalletTx>::const_iterator it = mapWallet.find(outpoint.hash);
- if (it != mapWallet.end())
+ std::map<uint256, CWalletTx>::const_iterator it = wallet.mapWallet.find(outpoint.hash);
+ if (it != wallet.mapWallet.end())
{
const CWalletTx& wtx = it->second;
// Clearly invalid input, fail
@@ -405,7 +437,7 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
return false;
}
// Just to calculate the marginal byte size
- CInputCoin coin(wtx.tx, outpoint.n, wtx.GetSpendSize(outpoint.n, false));
+ CInputCoin coin(wtx.tx, outpoint.n, GetTxSpendSize(wallet, wtx, outpoint.n, false));
nValueFromPresetInputs += coin.txout.nValue;
if (coin.m_input_bytes <= 0) {
return false; // Not solvable, can't estimate size for fee
@@ -433,7 +465,7 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
unsigned int limit_ancestor_count = 0;
unsigned int limit_descendant_count = 0;
- chain().getPackageLimits(limit_ancestor_count, limit_descendant_count);
+ wallet.chain().getPackageLimits(limit_ancestor_count, limit_descendant_count);
const size_t max_ancestors = (size_t)std::max<int64_t>(1, limit_ancestor_count);
const size_t max_descendants = (size_t)std::max<int64_t>(1, limit_descendant_count);
const bool fRejectLongChains = gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS);
@@ -456,32 +488,32 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
// If possible, fund the transaction with confirmed UTXOs only. Prefer at least six
// confirmations on outputs received from other wallets and only spend confirmed change.
- if (AttemptSelection(value_to_select, CoinEligibilityFilter(1, 6, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params)) return true;
- if (AttemptSelection(value_to_select, CoinEligibilityFilter(1, 1, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params)) return true;
+ if (AttemptSelection(wallet, value_to_select, CoinEligibilityFilter(1, 6, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params)) return true;
+ if (AttemptSelection(wallet, value_to_select, CoinEligibilityFilter(1, 1, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params)) return true;
// Fall back to using zero confirmation change (but with as few ancestors in the mempool as
// possible) if we cannot fund the transaction otherwise.
- if (m_spend_zero_conf_change) {
- if (AttemptSelection(value_to_select, CoinEligibilityFilter(0, 1, 2), vCoins, setCoinsRet, nValueRet, coin_selection_params)) return true;
- if (AttemptSelection(value_to_select, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)),
+ if (wallet.m_spend_zero_conf_change) {
+ if (AttemptSelection(wallet, value_to_select, CoinEligibilityFilter(0, 1, 2), vCoins, setCoinsRet, nValueRet, coin_selection_params)) return true;
+ if (AttemptSelection(wallet, value_to_select, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)),
vCoins, setCoinsRet, nValueRet, coin_selection_params)) {
return true;
}
- if (AttemptSelection(value_to_select, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2),
+ if (AttemptSelection(wallet, value_to_select, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2),
vCoins, setCoinsRet, nValueRet, coin_selection_params)) {
return true;
}
// If partial groups are allowed, relax the requirement of spending OutputGroups (groups
// of UTXOs sent to the same address, which are obviously controlled by a single wallet)
// in their entirety.
- if (AttemptSelection(value_to_select, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1, true /* include_partial_groups */),
+ if (AttemptSelection(wallet, value_to_select, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1, true /* include_partial_groups */),
vCoins, setCoinsRet, nValueRet, coin_selection_params)) {
return true;
}
// Try with unsafe inputs if they are allowed. This may spend unconfirmed outputs
// received from other wallets.
if (coin_control.m_include_unsafe_inputs
- && AttemptSelection(value_to_select,
+ && AttemptSelection(wallet, value_to_select,
CoinEligibilityFilter(0 /* conf_mine */, 0 /* conf_theirs */, max_ancestors-1, max_descendants-1, true /* include_partial_groups */),
vCoins, setCoinsRet, nValueRet, coin_selection_params)) {
return true;
@@ -489,7 +521,7 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
// Try with unlimited ancestors/descendants. The transaction will still need to meet
// mempool ancestor/descendant policy to be accepted to mempool and broadcasted, but
// OutputGroups use heuristics that may overestimate ancestor/descendant counts.
- if (!fRejectLongChains && AttemptSelection(value_to_select,
+ if (!fRejectLongChains && AttemptSelection(wallet, value_to_select,
CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max(), true /* include_partial_groups */),
vCoins, setCoinsRet, nValueRet, coin_selection_params)) {
return true;
@@ -568,7 +600,8 @@ static uint32_t GetLocktimeForNewTransaction(interfaces::Chain& chain, const uin
return locktime;
}
-bool CWallet::CreateTransactionInternal(
+static bool CreateTransactionInternal(
+ CWallet& wallet,
const std::vector<CRecipient>& vecSend,
CTransactionRef& tx,
CAmount& nFeeRet,
@@ -576,19 +609,22 @@ bool CWallet::CreateTransactionInternal(
bilingual_str& error,
const CCoinControl& coin_control,
FeeCalculation& fee_calc_out,
- bool sign)
+ bool sign) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(wallet.cs_wallet);
CMutableTransaction txNew; // The resulting transaction that we make
- txNew.nLockTime = GetLocktimeForNewTransaction(chain(), GetLastBlockHash(), GetLastBlockHeight());
+ txNew.nLockTime = GetLocktimeForNewTransaction(wallet.chain(), wallet.GetLastBlockHash(), wallet.GetLastBlockHeight());
CoinSelectionParams coin_selection_params; // Parameters for coin selection, init with dummy
coin_selection_params.m_avoid_partial_spends = coin_control.m_avoid_partial_spends;
+ // Set the long term feerate estimate to the wallet's consolidate feerate
+ coin_selection_params.m_long_term_feerate = wallet.m_consolidate_feerate;
+
CAmount recipients_sum = 0;
- const OutputType change_type = TransactionChangeType(coin_control.m_change_type ? *coin_control.m_change_type : m_default_change_type, vecSend);
- ReserveDestination reservedest(this, change_type);
+ const OutputType change_type = wallet.TransactionChangeType(coin_control.m_change_type ? *coin_control.m_change_type : wallet.m_default_change_type, vecSend);
+ ReserveDestination reservedest(&wallet, change_type);
unsigned int outputs_to_subtract_fee_from = 0; // The number of outputs which we are subtracting the fee from
for (const auto& recipient : vecSend) {
recipients_sum += recipient.nAmount;
@@ -618,9 +654,9 @@ bool CWallet::CreateTransactionInternal(
// Reserve a new key pair from key pool. If it fails, provide a dummy
// destination in case we don't need change.
CTxDestination dest;
- std::string dest_err;
+ bilingual_str dest_err;
if (!reservedest.GetReservedDestination(dest, true, dest_err)) {
- error = strprintf(_("Transaction needs a change address, but we can't generate it. %s"), dest_err);
+ error = _("Transaction needs a change address, but we can't generate it.") + Untranslated(" ") + dest_err;
}
scriptChange = GetScriptForDestination(dest);
// A valid destination implies a change script (and
@@ -632,7 +668,7 @@ bool CWallet::CreateTransactionInternal(
coin_selection_params.change_output_size = GetSerializeSize(change_prototype_txout);
// Get size of spending the change output
- int change_spend_size = CalculateMaximumSignedInputSize(change_prototype_txout, this);
+ int change_spend_size = CalculateMaximumSignedInputSize(change_prototype_txout, &wallet);
// If the wallet doesn't know how to sign the change output, assume p2sh-p2wpkh
// as a lower bound to allow BnB to do its thing
if (change_spend_size == -1) {
@@ -642,28 +678,23 @@ bool CWallet::CreateTransactionInternal(
}
// Set discard feerate
- coin_selection_params.m_discard_feerate = GetDiscardRate(*this);
+ coin_selection_params.m_discard_feerate = GetDiscardRate(wallet);
// Get the fee rate to use effective values in coin selection
FeeCalculation feeCalc;
- coin_selection_params.m_effective_feerate = GetMinimumFeeRate(*this, coin_control, &feeCalc);
+ coin_selection_params.m_effective_feerate = GetMinimumFeeRate(wallet, coin_control, &feeCalc);
// Do not, ever, assume that it's fine to change the fee rate if the user has explicitly
// provided one
if (coin_control.m_feerate && coin_selection_params.m_effective_feerate > *coin_control.m_feerate) {
error = strprintf(_("Fee rate (%s) is lower than the minimum fee rate setting (%s)"), coin_control.m_feerate->ToString(FeeEstimateMode::SAT_VB), coin_selection_params.m_effective_feerate.ToString(FeeEstimateMode::SAT_VB));
return false;
}
- if (feeCalc.reason == FeeReason::FALLBACK && !m_allow_fallback_fee) {
+ if (feeCalc.reason == FeeReason::FALLBACK && !wallet.m_allow_fallback_fee) {
// eventually allow a fallback fee
error = _("Fee estimation failed. Fallbackfee is disabled. Wait a few blocks or enable -fallbackfee.");
return false;
}
- // Get long term estimate
- CCoinControl cc_temp;
- cc_temp.m_confirm_target = chain().estimateMaxBlocks();
- coin_selection_params.m_long_term_feerate = GetMinimumFeeRate(*this, cc_temp, nullptr);
-
// Calculate the cost of change
// Cost of change is the cost of creating the change output + cost of spending the change output in the future.
// For creating the change output now, we use the effective feerate.
@@ -685,7 +716,7 @@ bool CWallet::CreateTransactionInternal(
coin_selection_params.tx_noinputs_size += ::GetSerializeSize(txout, PROTOCOL_VERSION);
}
- if (IsDust(txout, chain().relayDustFee()))
+ if (IsDust(txout, wallet.chain().relayDustFee()))
{
error = _("Transaction amount too small");
return false;
@@ -699,12 +730,12 @@ bool CWallet::CreateTransactionInternal(
// Get available coins
std::vector<COutput> vAvailableCoins;
- AvailableCoins(vAvailableCoins, &coin_control, 1, MAX_MONEY, MAX_MONEY, 0);
+ AvailableCoins(wallet, vAvailableCoins, &coin_control, 1, MAX_MONEY, MAX_MONEY, 0);
// Choose coins to use
CAmount inputs_sum = 0;
std::set<CInputCoin> setCoins;
- if (!SelectCoins(vAvailableCoins, /* nTargetValue */ selection_target, setCoins, inputs_sum, coin_control, coin_selection_params))
+ if (!SelectCoins(wallet, vAvailableCoins, /* nTargetValue */ selection_target, setCoins, inputs_sum, coin_control, coin_selection_params))
{
error = _("Insufficient funds");
return false;
@@ -742,13 +773,13 @@ bool CWallet::CreateTransactionInternal(
// to avoid conflicting with other possible uses of nSequence,
// and in the spirit of "smallest possible change from prior
// behavior."
- const uint32_t nSequence = coin_control.m_signal_bip125_rbf.value_or(m_signal_rbf) ? MAX_BIP125_RBF_SEQUENCE : (CTxIn::SEQUENCE_FINAL - 1);
+ const uint32_t nSequence = coin_control.m_signal_bip125_rbf.value_or(wallet.m_signal_rbf) ? MAX_BIP125_RBF_SEQUENCE : (CTxIn::SEQUENCE_FINAL - 1);
for (const auto& coin : selected_coins) {
txNew.vin.push_back(CTxIn(coin.outpoint, CScript(), nSequence));
}
// Calculate the transaction fee
- TxSize tx_sizes = CalculateMaximumSignedTxSize(CTransaction(txNew), this, coin_control.fAllowWatchOnly);
+ TxSize tx_sizes = CalculateMaximumSignedTxSize(CTransaction(txNew), &wallet, coin_control.fAllowWatchOnly);
int nBytes = tx_sizes.vsize;
if (nBytes < 0) {
error = _("Signing transaction failed");
@@ -773,11 +804,15 @@ bool CWallet::CreateTransactionInternal(
txNew.vout.erase(change_position);
// Because we have dropped this change, the tx size and required fee will be different, so let's recalculate those
- tx_sizes = CalculateMaximumSignedTxSize(CTransaction(txNew), this, coin_control.fAllowWatchOnly);
+ tx_sizes = CalculateMaximumSignedTxSize(CTransaction(txNew), &wallet, coin_control.fAllowWatchOnly);
nBytes = tx_sizes.vsize;
fee_needed = coin_selection_params.m_effective_feerate.GetFee(nBytes);
}
+ // The only time that fee_needed should be greater than the amount available for fees (in change_and_fee - change_amount) is when
+ // we are subtracting the fee from the outputs. If this occurs at any other time, it is a bug.
+ assert(coin_selection_params.m_subtract_fee_outputs || fee_needed <= change_and_fee - change_amount);
+
// Update nFeeRet in case fee_needed changed due to dropping the change output
if (fee_needed <= change_and_fee - change_amount) {
nFeeRet = change_and_fee - change_amount;
@@ -806,7 +841,7 @@ bool CWallet::CreateTransactionInternal(
}
// Error if this output is reduced to be below dust
- if (IsDust(txout, chain().relayDustFee())) {
+ if (IsDust(txout, wallet.chain().relayDustFee())) {
if (txout.nValue < 0) {
error = _("The transaction amount is too small to pay the fee");
} else {
@@ -825,7 +860,7 @@ bool CWallet::CreateTransactionInternal(
return false;
}
- if (sign && !SignTransaction(txNew)) {
+ if (sign && !wallet.SignTransaction(txNew)) {
error = _("Signing transaction failed");
return false;
}
@@ -841,14 +876,14 @@ bool CWallet::CreateTransactionInternal(
return false;
}
- if (nFeeRet > m_default_max_tx_fee) {
+ if (nFeeRet > wallet.m_default_max_tx_fee) {
error = TransactionErrorString(TransactionError::MAX_FEE_EXCEEDED);
return false;
}
if (gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS)) {
// Lastly, ensure this tx will pass the mempool's chain limits
- if (!chain().checkChainLimits(tx)) {
+ if (!wallet.chain().checkChainLimits(tx)) {
error = _("Transaction has too long of a mempool chain");
return false;
}
@@ -859,7 +894,7 @@ bool CWallet::CreateTransactionInternal(
reservedest.KeepDestination();
fee_calc_out = feeCalc;
- WalletLogPrintf("Fee Calculation: Fee:%d Bytes:%u Tgt:%d (requested %d) Reason:\"%s\" Decay %.5f: Estimation: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
+ wallet.WalletLogPrintf("Fee Calculation: Fee:%d Bytes:%u Tgt:%d (requested %d) Reason:\"%s\" Decay %.5f: Estimation: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
nFeeRet, nBytes, feeCalc.returnedTarget, feeCalc.desiredTarget, StringForFeeReason(feeCalc.reason), feeCalc.est.decay,
feeCalc.est.pass.start, feeCalc.est.pass.end,
(feeCalc.est.pass.totalConfirmed + feeCalc.est.pass.inMempool + feeCalc.est.pass.leftMempool) > 0.0 ? 100 * feeCalc.est.pass.withinTarget / (feeCalc.est.pass.totalConfirmed + feeCalc.est.pass.inMempool + feeCalc.est.pass.leftMempool) : 0.0,
@@ -870,7 +905,8 @@ bool CWallet::CreateTransactionInternal(
return true;
}
-bool CWallet::CreateTransaction(
+bool CreateTransaction(
+ CWallet& wallet,
const std::vector<CRecipient>& vecSend,
CTransactionRef& tx,
CAmount& nFeeRet,
@@ -890,23 +926,23 @@ bool CWallet::CreateTransaction(
return false;
}
- LOCK(cs_wallet);
+ LOCK(wallet.cs_wallet);
int nChangePosIn = nChangePosInOut;
Assert(!tx); // tx is an out-param. TODO change the return type from bool to tx (or nullptr)
- bool res = CreateTransactionInternal(vecSend, tx, nFeeRet, nChangePosInOut, error, coin_control, fee_calc_out, sign);
+ bool res = CreateTransactionInternal(wallet, vecSend, tx, nFeeRet, nChangePosInOut, error, coin_control, fee_calc_out, sign);
// try with avoidpartialspends unless it's enabled already
- if (res && nFeeRet > 0 /* 0 means non-functional fee rate estimation */ && m_max_aps_fee > -1 && !coin_control.m_avoid_partial_spends) {
+ if (res && nFeeRet > 0 /* 0 means non-functional fee rate estimation */ && wallet.m_max_aps_fee > -1 && !coin_control.m_avoid_partial_spends) {
CCoinControl tmp_cc = coin_control;
tmp_cc.m_avoid_partial_spends = true;
CAmount nFeeRet2;
CTransactionRef tx2;
int nChangePosInOut2 = nChangePosIn;
bilingual_str error2; // fired and forgotten; if an error occurs, we discard the results
- if (CreateTransactionInternal(vecSend, tx2, nFeeRet2, nChangePosInOut2, error2, tmp_cc, fee_calc_out, sign)) {
+ if (CreateTransactionInternal(wallet, vecSend, tx2, nFeeRet2, nChangePosInOut2, error2, tmp_cc, fee_calc_out, sign)) {
// if fee of this alternative one is within the range of the max fee, we use this one
- const bool use_aps = nFeeRet2 <= nFeeRet + m_max_aps_fee;
- WalletLogPrintf("Fee non-grouped = %lld, grouped = %lld, using %s\n", nFeeRet, nFeeRet2, use_aps ? "grouped" : "non-grouped");
+ const bool use_aps = nFeeRet2 <= nFeeRet + wallet.m_max_aps_fee;
+ wallet.WalletLogPrintf("Fee non-grouped = %lld, grouped = %lld, using %s\n", nFeeRet, nFeeRet2, use_aps ? "grouped" : "non-grouped");
if (use_aps) {
tx = tx2;
nFeeRet = nFeeRet2;
@@ -917,7 +953,7 @@ bool CWallet::CreateTransaction(
return res;
}
-bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl coinControl)
+bool FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl coinControl)
{
std::vector<CRecipient> vecSend;
@@ -936,11 +972,11 @@ bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nC
// Acquire the locks to prevent races to the new locked unspents between the
// CreateTransaction call and LockCoin calls (when lockUnspents is true).
- LOCK(cs_wallet);
+ LOCK(wallet.cs_wallet);
CTransactionRef tx_new;
FeeCalculation fee_calc_out;
- if (!CreateTransaction(vecSend, tx_new, nFeeRet, nChangePosInOut, error, coinControl, fee_calc_out, false)) {
+ if (!CreateTransaction(wallet, vecSend, tx_new, nFeeRet, nChangePosInOut, error, coinControl, fee_calc_out, false)) {
return false;
}
@@ -961,7 +997,7 @@ bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nC
}
if (lockUnspents) {
- LockCoin(txin.prevout);
+ wallet.LockCoin(txin.prevout);
}
}
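
For illustration only (not part of the diff): the SelectCoins() changes above also show the selection strategy itself, a ladder of AttemptSelection() calls with progressively more permissive CoinEligibilityFilter arguments that stops at the first filter able to fund the transaction. The following minimal, self-contained C++ sketch captures that fallback-ladder idea; the EligibilityFilter struct and the attempt callback are simplified stand-ins, not the real wallet types.

// Sketch only: simplified stand-ins for CoinEligibilityFilter/AttemptSelection.
#include <cstdint>
#include <functional>
#include <vector>

struct EligibilityFilter {
    int conf_mine;          // min confirmations on coins we sent to ourselves
    int conf_theirs;        // min confirmations on coins received from others
    uint64_t max_ancestors; // mempool ancestor limit for unconfirmed coins
};

// Try each filter in order; the first attempt that succeeds funds the transaction,
// mirroring the (1, 6, 0) -> (1, 1, 0) -> zero-conf sequence in the diff above.
bool SelectWithFallbacks(const std::vector<EligibilityFilter>& ladder,
                         const std::function<bool(const EligibilityFilter&)>& attempt)
{
    for (const auto& filter : ladder) {
        if (attempt(filter)) return true;
    }
    return false; // insufficient funds under every filter
}
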
diff --git a/src/wallet/spend.h b/src/wallet/spend.h
index 03f9a7c2b5..e39f134dc3 100644
--- a/src/wallet/spend.h
+++ b/src/wallet/spend.h
@@ -9,6 +9,9 @@
#include <wallet/transaction.h>
#include <wallet/wallet.h>
+/** Get the marginal bytes of spending the specified output from this transaction */
+int GetTxSpendSize(const CWallet& wallet, const CWalletTx& wtx, unsigned int out, bool use_max_sig = false);
+
class COutput
{
public:
@@ -43,13 +46,13 @@ public:
*/
bool fSafe;
- COutput(const CWalletTx *txIn, int iIn, int nDepthIn, bool fSpendableIn, bool fSolvableIn, bool fSafeIn, bool use_max_sig_in = false)
+ COutput(const CWallet& wallet, const CWalletTx& wtx, int iIn, int nDepthIn, bool fSpendableIn, bool fSolvableIn, bool fSafeIn, bool use_max_sig_in = false)
{
- tx = txIn; i = iIn; nDepth = nDepthIn; fSpendable = fSpendableIn; fSolvable = fSolvableIn; fSafe = fSafeIn; nInputBytes = -1; use_max_sig = use_max_sig_in;
+ tx = &wtx; i = iIn; nDepth = nDepthIn; fSpendable = fSpendableIn; fSolvable = fSolvableIn; fSafe = fSafeIn; nInputBytes = -1; use_max_sig = use_max_sig_in;
// If known and signable by the given wallet, compute nInputBytes
// Failure will keep this value -1
- if (fSpendable && tx) {
- nInputBytes = tx->GetSpendSize(i, use_max_sig);
+ if (fSpendable) {
+ nInputBytes = GetTxSpendSize(wallet, wtx, i, use_max_sig);
}
}
@@ -61,4 +64,76 @@ public:
}
};
+// Get the marginal bytes of spending the specified output
+int CalculateMaximumSignedInputSize(const CTxOut& txout, const CWallet* pwallet, bool use_max_sig = false);
+
+struct TxSize {
+ int64_t vsize{-1};
+ int64_t weight{-1};
+};
+
+/** Calculate the size of the transaction assuming all signatures are max size.
+* Use DummySignatureCreator, which inserts 71-byte signatures everywhere.
+* NOTE: this requires that all inputs must be in mapWallet (e.g. the tx should
+* be AllInputsMine). */
+TxSize CalculateMaximumSignedTxSize(const CTransaction& tx, const CWallet* wallet, const std::vector<CTxOut>& txouts, bool use_max_sig = false);
+TxSize CalculateMaximumSignedTxSize(const CTransaction& tx, const CWallet* wallet, bool use_max_sig = false) EXCLUSIVE_LOCKS_REQUIRED(wallet->cs_wallet);
+
+/**
+ * Populate vCoins with a vector of available COutputs.
+ */
+void AvailableCoins(const CWallet& wallet, std::vector<COutput>& vCoins, const CCoinControl* coinControl = nullptr, const CAmount& nMinimumAmount = 1, const CAmount& nMaximumAmount = MAX_MONEY, const CAmount& nMinimumSumAmount = MAX_MONEY, const uint64_t nMaximumCount = 0) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+
+CAmount GetAvailableBalance(const CWallet& wallet, const CCoinControl* coinControl = nullptr);
+
+/**
+ * Find non-change parent output.
+ */
+const CTxOut& FindNonChangeParentOutput(const CWallet& wallet, const CTransaction& tx, int output) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+
+/**
+ * Return list of available coins and locked coins grouped by non-change output address.
+ */
+std::map<CTxDestination, std::vector<COutput>> ListCoins(const CWallet& wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+
+std::vector<OutputGroup> GroupOutputs(const CWallet& wallet, const std::vector<COutput>& outputs, const CoinSelectionParams& coin_sel_params, const CoinEligibilityFilter& filter, bool positive_only);
+
+/**
+ * Shuffle and select coins until nTargetValue is reached while avoiding
+ * small change; this method is stochastic for some inputs and upon
+ * completion the coin set and corresponding actual target value are
+ * assembled.
+ * @param[in] coins Set of UTXOs to consider. These will be categorized into
+ * OutputGroups and filtered using eligibility_filter before
+ * selecting coins.
+ * @param[out] setCoinsRet Populated with the coins selected if successful.
+ * @param[out] nValueRet Used to return the total value of selected coins.
+ */
+bool AttemptSelection(const CWallet& wallet, const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<COutput> coins,
+ std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CoinSelectionParams& coin_selection_params);
+
+/**
+ * Select a set of coins such that nValueRet >= nTargetValue and at least
+ * all coins from coin_control are selected; never select unconfirmed coins if they are not ours.
+ * @param[out] setCoinsRet Populated with inputs including pre-selected inputs from
+ * coin_control and Coin Selection if successful.
+ * @param[out] nValueRet Total value of selected coins including pre-selected ones
+ * from coin_control and Coin Selection if successful.
+ */
+bool SelectCoins(const CWallet& wallet, const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet,
+ const CCoinControl& coin_control, CoinSelectionParams& coin_selection_params) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet);
+
+/**
+ * Create a new transaction paying the recipients with a set of coins
+ * selected by SelectCoins(); also create the change output when needed.
+ * @note passing nChangePosInOut as -1 will result in setting a random position
+ */
+bool CreateTransaction(CWallet& wallet, const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, const CCoinControl& coin_control, FeeCalculation& fee_calc_out, bool sign = true);
+
+/**
+ * Insert additional inputs into the transaction by
+ * calling CreateTransaction();
+ */
+bool FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl);
+
#endif // BITCOIN_WALLET_SPEND_H
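
For orientation, here is a sketch of how a caller uses the free functions declared above once this change lands; it mirrors the pattern of the test added later in this diff (src/wallet/test/spend_tests.cpp). The surrounding helper function, the `dest` destination, and the exact include list are assumptions for illustration; only calls that appear in this diff or in the existing wallet headers are used.

#include <policy/fees.h>
#include <script/standard.h>
#include <util/translation.h>
#include <wallet/coincontrol.h>
#include <wallet/spend.h>
#include <wallet/wallet.h>

// Illustrative helper (not part of the diff): pay 1 BTC to `dest` from `wallet`.
bool SendOneCoin(CWallet& wallet, const CTxDestination& dest, bilingual_str& error)
{
    CRecipient recipient{GetScriptForDestination(dest), 1 * COIN, /* fSubtractFeeFromAmount= */ false};
    CTransactionRef tx;
    CAmount fee;
    int change_pos = -1; // -1 lets CreateTransaction pick a random change position
    CCoinControl coin_control;
    FeeCalculation fee_calc;
    if (!CreateTransaction(wallet, {recipient}, tx, fee, change_pos, error, coin_control, fee_calc)) {
        return false; // error holds the translated reason, e.g. "Insufficient funds"
    }
    // tx is now built and signed (sign defaults to true); fee is the fee it pays.
    return true;
}
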
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index c65ebad52f..5d51809241 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -7,8 +7,10 @@
#include <primitives/transaction.h>
#include <random.h>
#include <test/util/setup_common.h>
+#include <util/translation.h>
#include <wallet/coincontrol.h>
#include <wallet/coinselection.h>
+#include <wallet/spend.h>
#include <wallet/test/wallet_test_fixture.h>
#include <wallet/wallet.h>
@@ -38,7 +40,7 @@ CoinEligibilityFilter filter_standard_extra(6, 6, 0);
CoinSelectionParams coin_selection_params(/* change_output_size= */ 0,
/* change_spend_size= */ 0, /* effective_feerate= */ CFeeRate(0),
/* long_term_feerate= */ CFeeRate(0), /* discard_feerate= */ CFeeRate(0),
- /* tx_no_inputs_size= */ 0, /* avoid_partial= */ false);
+ /* tx_noinputs_size= */ 0, /* avoid_partial= */ false);
static void add_coin(const CAmount& nValue, int nInput, std::vector<CInputCoin>& set)
{
@@ -48,12 +50,16 @@ static void add_coin(const CAmount& nValue, int nInput, std::vector<CInputCoin>&
set.emplace_back(MakeTransactionRef(tx), nInput);
}
-static void add_coin(const CAmount& nValue, int nInput, CoinSet& set)
+static void add_coin(const CAmount& nValue, int nInput, CoinSet& set, CAmount fee = 0, CAmount long_term_fee = 0)
{
CMutableTransaction tx;
tx.vout.resize(nInput + 1);
tx.vout[nInput].nValue = nValue;
- set.emplace(MakeTransactionRef(tx), nInput);
+ CInputCoin coin(MakeTransactionRef(tx), nInput);
+ coin.effective_value = nValue - fee;
+ coin.m_fee = fee;
+ coin.m_long_term_fee = long_term_fee;
+ set.insert(coin);
}
static void add_coin(CWallet& wallet, const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = false, int nInput=0, bool spendable = false)
@@ -66,7 +72,7 @@ static void add_coin(CWallet& wallet, const CAmount& nValue, int nAge = 6*24, bo
tx.vout[nInput].nValue = nValue;
if (spendable) {
CTxDestination dest;
- std::string error;
+ bilingual_str error;
const bool destination_ok = wallet.GetNewDestination(OutputType::BECH32, "", dest, error);
assert(destination_ok);
tx.vout[nInput].scriptPubKey = GetScriptForDestination(dest);
@@ -82,7 +88,7 @@ static void add_coin(CWallet& wallet, const CAmount& nValue, int nAge = 6*24, bo
wtx->m_amounts[CWalletTx::DEBIT].Set(ISMINE_SPENDABLE, 1);
wtx->m_is_cache_empty = false;
}
- COutput output(wtx, nInput, nAge, true /* spendable */, true /* solvable */, true /* safe */);
+ COutput output(wallet, *wtx, nInput, nAge, true /* spendable */, true /* solvable */, true /* safe */);
vCoins.push_back(output);
}
static void add_coin(const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = false, int nInput=0, bool spendable = false)
@@ -136,6 +142,13 @@ inline std::vector<OutputGroup>& GroupCoins(const std::vector<COutput>& coins)
return static_groups;
}
+inline std::vector<OutputGroup>& KnapsackGroupOutputs(const CoinEligibilityFilter& filter)
+{
+ static std::vector<OutputGroup> static_groups;
+ static_groups = GroupOutputs(testWallet, vCoins, coin_selection_params, filter, /* positive_only */false);
+ return static_groups;
+}
+
// Branch and bound coin selection tests
BOOST_AUTO_TEST_CASE(bnb_search_test)
{
@@ -274,20 +287,20 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
CoinSelectionParams coin_selection_params_bnb(/* change_output_size= */ 0,
/* change_spend_size= */ 0, /* effective_feerate= */ CFeeRate(3000),
/* long_term_feerate= */ CFeeRate(1000), /* discard_feerate= */ CFeeRate(1000),
- /* tx_no_inputs_size= */ 0, /* avoid_partial= */ false);
+ /* tx_noinputs_size= */ 0, /* avoid_partial= */ false);
CoinSet setCoinsRet;
CAmount nValueRet;
empty_wallet();
add_coin(1);
vCoins.at(0).nInputBytes = 40; // Make sure that it has a negative effective value. The next check should assert if this somehow got through. Otherwise it will fail
- BOOST_CHECK(!testWallet.AttemptSelection( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params_bnb));
+ BOOST_CHECK(!SelectCoinsBnB(GroupCoins(vCoins), 1 * CENT, coin_selection_params_bnb.m_cost_of_change, setCoinsRet, nValueRet));
// Test fees subtracted from output:
empty_wallet();
add_coin(1 * CENT);
vCoins.at(0).nInputBytes = 40;
coin_selection_params_bnb.m_subtract_fee_outputs = true;
- BOOST_CHECK(testWallet.AttemptSelection( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params_bnb));
+ BOOST_CHECK(SelectCoinsBnB(GroupCoins(vCoins), 1 * CENT, coin_selection_params_bnb.m_cost_of_change, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * CENT);
// Make sure that we can use BnB when there are preset inputs
@@ -304,7 +317,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
coin_control.fAllowOtherInputs = true;
coin_control.Select(COutPoint(vCoins.at(0).tx->GetHash(), vCoins.at(0).i));
coin_selection_params_bnb.m_effective_feerate = CFeeRate(0);
- BOOST_CHECK(wallet->SelectCoins(vCoins, 10 * CENT, setCoinsRet, nValueRet, coin_control, coin_selection_params_bnb));
+ BOOST_CHECK(SelectCoins(*wallet, vCoins, 10 * CENT, setCoinsRet, nValueRet, coin_control, coin_selection_params_bnb));
}
}
@@ -322,24 +335,24 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
empty_wallet();
// with an empty wallet we can't even pay one cent
- BOOST_CHECK(!testWallet.AttemptSelection( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(!KnapsackSolver(1 * CENT, KnapsackGroupOutputs(filter_standard), setCoinsRet, nValueRet));
add_coin(1*CENT, 4); // add a new 1 cent coin
// with a new 1 cent coin, we still can't find a mature 1 cent
- BOOST_CHECK(!testWallet.AttemptSelection( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(!KnapsackSolver(1 * CENT, KnapsackGroupOutputs(filter_standard), setCoinsRet, nValueRet));
// but we can find a new 1 cent
- BOOST_CHECK( testWallet.AttemptSelection( 1 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(1 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * CENT);
add_coin(2*CENT); // add a mature 2 cent coin
// we can't make 3 cents of mature coins
- BOOST_CHECK(!testWallet.AttemptSelection( 3 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(!KnapsackSolver(3 * CENT, KnapsackGroupOutputs(filter_standard), setCoinsRet, nValueRet));
// we can make 3 cents of new coins
- BOOST_CHECK( testWallet.AttemptSelection( 3 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(3 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 3 * CENT);
add_coin(5*CENT); // add a mature 5 cent coin,
@@ -349,33 +362,33 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
// now we have new: 1+10=11 (of which 10 was self-sent), and mature: 2+5+20=27. total = 38
// we can't make 38 cents if we disallow new coins:
- BOOST_CHECK(!testWallet.AttemptSelection(38 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(!KnapsackSolver(38 * CENT, KnapsackGroupOutputs(filter_standard), setCoinsRet, nValueRet));
// we can't even make 37 cents if we don't allow new coins even if they're from us
- BOOST_CHECK(!testWallet.AttemptSelection(38 * CENT, filter_standard_extra, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(!KnapsackSolver(38 * CENT, KnapsackGroupOutputs(filter_standard_extra), setCoinsRet, nValueRet));
// but we can make 37 cents if we accept new coins from ourself
- BOOST_CHECK( testWallet.AttemptSelection(37 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(37 * CENT, KnapsackGroupOutputs(filter_standard), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 37 * CENT);
// and we can make 38 cents if we accept all new coins
- BOOST_CHECK( testWallet.AttemptSelection(38 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(38 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 38 * CENT);
// try making 34 cents from 1,2,5,10,20 - we can't do it exactly
- BOOST_CHECK( testWallet.AttemptSelection(34 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(34 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 35 * CENT); // but 35 cents is closest
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U); // the best should be 20+10+5. it's incredibly unlikely the 1 or 2 got included (but possible)
// when we try making 7 cents, the smaller coins (1,2,5) are enough. We should see just 2+5
- BOOST_CHECK( testWallet.AttemptSelection( 7 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(7 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 7 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
// when we try making 8 cents, the smaller coins (1,2,5) are exactly enough.
- BOOST_CHECK( testWallet.AttemptSelection( 8 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(8 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK(nValueRet == 8 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U);
// when we try making 9 cents, no subset of smaller coins is enough, and we get the next bigger coin (10)
- BOOST_CHECK( testWallet.AttemptSelection( 9 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(9 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 10 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
@@ -389,30 +402,30 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
add_coin(30*CENT); // now we have 6+7+8+20+30 = 71 cents total
// check that we have 71 and not 72
- BOOST_CHECK( testWallet.AttemptSelection(71 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
- BOOST_CHECK(!testWallet.AttemptSelection(72 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(71 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
+ BOOST_CHECK(!KnapsackSolver(72 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
// now try making 16 cents. the best smaller coins can do is 6+7+8 = 21; not as good as the next biggest coin, 20
- BOOST_CHECK( testWallet.AttemptSelection(16 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(16 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 20 * CENT); // we should get 20 in one coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
add_coin( 5*CENT); // now we have 5+6+7+8+20+30 = 75 cents total
// now if we try making 16 cents again, the smaller coins can make 5+6+7 = 18 cents, better than the next biggest coin, 20
- BOOST_CHECK( testWallet.AttemptSelection(16 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(16 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 18 * CENT); // we should get 18 in 3 coins
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U);
add_coin( 18*CENT); // now we have 5+6+7+8+18+20+30
// and now if we try making 16 cents again, the smaller coins can make 5+6+7 = 18 cents, the same as the next biggest coin, 18
- BOOST_CHECK( testWallet.AttemptSelection(16 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(16 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 18 * CENT); // we should get 18 in 1 coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); // because in the event of a tie, the biggest coin wins
// now try making 11 cents. we should get 5+6
- BOOST_CHECK( testWallet.AttemptSelection(11 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(11 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 11 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
@@ -421,11 +434,11 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
add_coin( 2*COIN);
add_coin( 3*COIN);
add_coin( 4*COIN); // now we have 5+6+7+8+18+20+30+100+200+300+400 = 1094 cents
- BOOST_CHECK( testWallet.AttemptSelection(95 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(95 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * COIN); // we should get 1 BTC in 1 coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
- BOOST_CHECK( testWallet.AttemptSelection(195 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(195 * CENT, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 2 * COIN); // we should get 2 BTC in 1 coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
@@ -440,14 +453,14 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
// try making 1 * MIN_CHANGE from the 1.5 * MIN_CHANGE
// we'll get change smaller than MIN_CHANGE whatever happens, so can expect MIN_CHANGE exactly
- BOOST_CHECK( testWallet.AttemptSelection(MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(MIN_CHANGE, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE);
// but if we add a bigger coin, small change is avoided
add_coin(1111*MIN_CHANGE);
// try making 1 from 0.1 + 0.2 + 0.3 + 0.4 + 0.5 + 1111 = 1112.5
- BOOST_CHECK( testWallet.AttemptSelection(1 * MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(1 * MIN_CHANGE, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * MIN_CHANGE); // we should get the exact amount
// if we add more small coins:
@@ -455,7 +468,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
add_coin(MIN_CHANGE * 7 / 10);
// and try again to make 1.0 * MIN_CHANGE
- BOOST_CHECK( testWallet.AttemptSelection(1 * MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(1 * MIN_CHANGE, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * MIN_CHANGE); // we should get the exact amount
// run the 'mtgox' test (see https://blockexplorer.com/tx/29a3efd3ef04f9153d47a990bd7b048a4b2d213daaa5fb8ed670fb85f13bdbcf)
@@ -464,7 +477,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
for (int j = 0; j < 20; j++)
add_coin(50000 * COIN);
- BOOST_CHECK( testWallet.AttemptSelection(500000 * COIN, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(500000 * COIN, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 500000 * COIN); // we should get the exact amount
BOOST_CHECK_EQUAL(setCoinsRet.size(), 10U); // in ten coins
@@ -477,7 +490,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
add_coin(MIN_CHANGE * 6 / 10);
add_coin(MIN_CHANGE * 7 / 10);
add_coin(1111 * MIN_CHANGE);
- BOOST_CHECK( testWallet.AttemptSelection(1 * MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(1 * MIN_CHANGE, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1111 * MIN_CHANGE); // we get the bigger coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
@@ -487,7 +500,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
add_coin(MIN_CHANGE * 6 / 10);
add_coin(MIN_CHANGE * 8 / 10);
add_coin(1111 * MIN_CHANGE);
- BOOST_CHECK( testWallet.AttemptSelection(MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(MIN_CHANGE, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE); // we should get the exact amount
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U); // in two coins 0.4+0.6
@@ -498,12 +511,12 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
add_coin(MIN_CHANGE * 100);
// trying to make 100.01 from these three coins
- BOOST_CHECK(testWallet.AttemptSelection(MIN_CHANGE * 10001 / 100, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(MIN_CHANGE * 10001 / 100, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE * 10105 / 100); // we should get all coins
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U);
// but if we try to make 99.9, we should take the bigger of the two small coins to avoid small change
- BOOST_CHECK(testWallet.AttemptSelection(MIN_CHANGE * 9990 / 100, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(MIN_CHANGE * 9990 / 100, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 101 * MIN_CHANGE);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
}
@@ -517,7 +530,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
// We only create the wallet once to save time, but we still run the coin selection RUN_TESTS times.
for (int i = 0; i < RUN_TESTS; i++) {
- BOOST_CHECK(testWallet.AttemptSelection(2000, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(2000, KnapsackGroupOutputs(filter_confirmed), setCoinsRet, nValueRet));
if (amt - 2000 < MIN_CHANGE) {
// needs more than one input:
@@ -602,7 +615,7 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset)
add_coin(1000 * COIN);
add_coin(3 * COIN);
- BOOST_CHECK(testWallet.AttemptSelection(1003 * COIN, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params));
+ BOOST_CHECK(KnapsackSolver(1003 * COIN, KnapsackGroupOutputs(filter_standard), setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1003 * COIN);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
@@ -641,13 +654,82 @@ BOOST_AUTO_TEST_CASE(SelectCoins_test)
CoinSelectionParams cs_params(/* change_output_size= */ 34,
/* change_spend_size= */ 148, /* effective_feerate= */ CFeeRate(0),
/* long_term_feerate= */ CFeeRate(0), /* discard_feerate= */ CFeeRate(0),
- /* tx_no_inputs_size= */ 0, /* avoid_partial= */ false);
+ /* tx_noinputs_size= */ 0, /* avoid_partial= */ false);
CoinSet out_set;
CAmount out_value = 0;
CCoinControl cc;
- BOOST_CHECK(testWallet.SelectCoins(vCoins, target, out_set, out_value, cc, cs_params));
+ BOOST_CHECK(SelectCoins(testWallet, vCoins, target, out_set, out_value, cc, cs_params));
BOOST_CHECK_GE(out_value, target);
}
}
+BOOST_AUTO_TEST_CASE(waste_test)
+{
+ CoinSet selection;
+ const CAmount fee{100};
+ const CAmount change_cost{125};
+ const CAmount fee_diff{40};
+ const CAmount in_amt{3 * COIN};
+ const CAmount target{2 * COIN};
+ const CAmount excess{in_amt - fee * 2 - target};
+
+ // Waste with change is the change cost and difference between fee and long term fee
+ add_coin(1 * COIN, 1, selection, fee, fee - fee_diff);
+ add_coin(2 * COIN, 2, selection, fee, fee - fee_diff);
+ const CAmount waste1 = GetSelectionWaste(selection, change_cost, target);
+ BOOST_CHECK_EQUAL(fee_diff * 2 + change_cost, waste1);
+ selection.clear();
+
+ // Waste without change is the excess and difference between fee and long term fee
+ add_coin(1 * COIN, 1, selection, fee, fee - fee_diff);
+ add_coin(2 * COIN, 2, selection, fee, fee - fee_diff);
+ const CAmount waste_nochange1 = GetSelectionWaste(selection, 0, target);
+ BOOST_CHECK_EQUAL(fee_diff * 2 + excess, waste_nochange1);
+ selection.clear();
+
+ // Waste with change and fee == long term fee is just cost of change
+ add_coin(1 * COIN, 1, selection, fee, fee);
+ add_coin(2 * COIN, 2, selection, fee, fee);
+ BOOST_CHECK_EQUAL(change_cost, GetSelectionWaste(selection, change_cost, target));
+ selection.clear();
+
+ // Waste without change and fee == long term fee is just the excess
+ add_coin(1 * COIN, 1, selection, fee, fee);
+ add_coin(2 * COIN, 2, selection, fee, fee);
+ BOOST_CHECK_EQUAL(excess, GetSelectionWaste(selection, 0, target));
+ selection.clear();
+
+ // Waste will be greater when fee is greater, but long term fee is the same
+ add_coin(1 * COIN, 1, selection, fee * 2, fee - fee_diff);
+ add_coin(2 * COIN, 2, selection, fee * 2, fee - fee_diff);
+ const CAmount waste2 = GetSelectionWaste(selection, change_cost, target);
+ BOOST_CHECK_GT(waste2, waste1);
+ selection.clear();
+
+ // Waste with change is the change cost and difference between fee and long term fee
+ // With long term fee greater than fee, waste should be less than when long term fee is less than fee
+ add_coin(1 * COIN, 1, selection, fee, fee + fee_diff);
+ add_coin(2 * COIN, 2, selection, fee, fee + fee_diff);
+ const CAmount waste3 = GetSelectionWaste(selection, change_cost, target);
+ BOOST_CHECK_EQUAL(fee_diff * -2 + change_cost, waste3);
+ BOOST_CHECK_LT(waste3, waste1);
+ selection.clear();
+
+ // Waste without change is the excess and difference between fee and long term fee
+ // With long term fee greater than fee, waste should be less than when long term fee is less than fee
+ add_coin(1 * COIN, 1, selection, fee, fee + fee_diff);
+ add_coin(2 * COIN, 2, selection, fee, fee + fee_diff);
+ const CAmount waste_nochange2 = GetSelectionWaste(selection, 0, target);
+ BOOST_CHECK_EQUAL(fee_diff * -2 + excess, waste_nochange2);
+ BOOST_CHECK_LT(waste_nochange2, waste_nochange1);
+ selection.clear();
+
+ // 0 Waste only when fee == long term fee, no change, and no excess
+ add_coin(1 * COIN, 1, selection, fee, fee);
+ add_coin(2 * COIN, 2, selection, fee, fee);
+ const CAmount exact_target = in_amt - 2 * fee;
+ BOOST_CHECK_EQUAL(0, GetSelectionWaste(selection, 0, exact_target));
+
+}
+
BOOST_AUTO_TEST_SUITE_END()
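
The waste_test added above encodes the metric its comments describe: waste is the sum of (fee - long_term_fee) over the selected inputs, plus the cost of change if a change output is created, or the excess over the target if not. Below is a small stand-alone check of the first scenario's arithmetic (fee = 100, long-term fee = 60 per input, change cost = 125, so waste = 2*40 + 125 = 205). GetSelectionWaste itself is the wallet's implementation; this helper only reproduces the arithmetic as a sketch.

#include <cassert>
#include <cstdint>
#include <vector>

using Amount = int64_t;

struct InputFees { Amount fee; Amount long_term_fee; };

Amount WasteSketch(const std::vector<InputFees>& inputs, Amount change_cost, Amount excess)
{
    Amount waste = 0;
    for (const auto& in : inputs) waste += in.fee - in.long_term_fee;
    waste += change_cost > 0 ? change_cost : excess; // pay for change, or forfeit the excess
    return waste;
}

int main()
{
    const Amount fee = 100, fee_diff = 40, change_cost = 125;
    // Two inputs, each paying `fee` now but only `fee - fee_diff` long term:
    const std::vector<InputFees> sel{{fee, fee - fee_diff}, {fee, fee - fee_diff}};
    assert(WasteSketch(sel, change_cost, /* excess= */ 0) == 2 * fee_diff + change_cost); // == 205
    return 0;
}
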
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index 17f5264b45..16cb7e0baf 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -25,7 +25,11 @@ BOOST_AUTO_TEST_CASE(getwalletenv_file)
std::string test_name = "test_name.dat";
const fs::path datadir = gArgs.GetDataDirNet();
fs::path file_path = datadir / test_name;
+#if BOOST_VERSION >= 107700
+ std::ofstream f(BOOST_FILESYSTEM_C_STR(file_path));
+#else
std::ofstream f(file_path.BOOST_FILESYSTEM_C_STR);
+#endif // BOOST_VERSION >= 107700
f.close();
std::string filename;
diff --git a/src/wallet/test/init_test_fixture.cpp b/src/wallet/test/init_test_fixture.cpp
index dd9354848d..53c972c46d 100644
--- a/src/wallet/test/init_test_fixture.cpp
+++ b/src/wallet/test/init_test_fixture.cpp
@@ -32,7 +32,11 @@ InitWalletDirTestingSetup::InitWalletDirTestingSetup(const std::string& chainNam
fs::create_directories(m_walletdir_path_cases["default"]);
fs::create_directories(m_walletdir_path_cases["custom"]);
fs::create_directories(m_walletdir_path_cases["relative"]);
+#if BOOST_VERSION >= 107700
+ std::ofstream f(BOOST_FILESYSTEM_C_STR(m_walletdir_path_cases["file"]));
+#else
std::ofstream f(m_walletdir_path_cases["file"].BOOST_FILESYSTEM_C_STR);
+#endif // BOOST_VERSION >= 107700
f.close();
}
diff --git a/src/wallet/test/psbt_wallet_tests.cpp b/src/wallet/test/psbt_wallet_tests.cpp
index 1cefa386b7..8a97f7779d 100644
--- a/src/wallet/test/psbt_wallet_tests.cpp
+++ b/src/wallet/test/psbt_wallet_tests.cpp
@@ -22,12 +22,12 @@ BOOST_AUTO_TEST_CASE(psbt_updater_test)
CDataStream s_prev_tx1(ParseHex("0200000000010158e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd7501000000171600145f275f436b09a8cc9a2eb2a2f528485c68a56323feffffff02d8231f1b0100000017a914aed962d6654f9a2b36608eb9d64d2b260db4f1118700c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88702483045022100a22edcc6e5bc511af4cc4ae0de0fcd75c7e04d8c1c3a8aa9d820ed4b967384ec02200642963597b9b1bc22c75e9f3e117284a962188bf5e8a74c895089046a20ad770121035509a48eb623e10aace8bfd0212fdb8a8e5af3c94b0b133b95e114cab89e4f7965000000"), SER_NETWORK, PROTOCOL_VERSION);
CTransactionRef prev_tx1;
s_prev_tx1 >> prev_tx1;
- m_wallet.mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(prev_tx1->GetHash()), std::forward_as_tuple(&m_wallet, prev_tx1));
+ m_wallet.mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(prev_tx1->GetHash()), std::forward_as_tuple(prev_tx1));
CDataStream s_prev_tx2(ParseHex("0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000"), SER_NETWORK, PROTOCOL_VERSION);
CTransactionRef prev_tx2;
s_prev_tx2 >> prev_tx2;
- m_wallet.mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(prev_tx2->GetHash()), std::forward_as_tuple(&m_wallet, prev_tx2));
+ m_wallet.mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(prev_tx2->GetHash()), std::forward_as_tuple(prev_tx2));
// Add scripts
CScript rs1;
diff --git a/src/wallet/test/spend_tests.cpp b/src/wallet/test/spend_tests.cpp
new file mode 100644
index 0000000000..e779b2450f
--- /dev/null
+++ b/src/wallet/test/spend_tests.cpp
@@ -0,0 +1,62 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <policy/fees.h>
+#include <validation.h>
+#include <wallet/coincontrol.h>
+#include <wallet/spend.h>
+#include <wallet/test/util.h>
+#include <wallet/test/wallet_test_fixture.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(spend_tests, WalletTestingSetup)
+
+BOOST_FIXTURE_TEST_CASE(SubtractFee, TestChain100Setup)
+{
+ CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
+ auto wallet = CreateSyncedWallet(*m_node.chain, m_node.chainman->ActiveChain(), coinbaseKey);
+
+ // Check that a subtract-from-recipient transaction slightly less than the
+ // coinbase input amount does not create a change output (because it would
+ // be uneconomical to add and spend the output), and make sure it pays the
+ // leftover input amount which would have been change to the recipient
+ // instead of the miner.
+ auto check_tx = [&wallet](CAmount leftover_input_amount) {
+ CRecipient recipient{GetScriptForRawPubKey({}), 50 * COIN - leftover_input_amount, true /* subtract fee */};
+ CTransactionRef tx;
+ CAmount fee;
+ int change_pos = -1;
+ bilingual_str error;
+ CCoinControl coin_control;
+ coin_control.m_feerate.emplace(10000);
+ coin_control.fOverrideFeeRate = true;
+ FeeCalculation fee_calc;
+ BOOST_CHECK(CreateTransaction(*wallet, {recipient}, tx, fee, change_pos, error, coin_control, fee_calc));
+ BOOST_CHECK_EQUAL(tx->vout.size(), 1);
+ BOOST_CHECK_EQUAL(tx->vout[0].nValue, recipient.nAmount + leftover_input_amount - fee);
+ BOOST_CHECK_GT(fee, 0);
+ return fee;
+ };
+
+ // Send full input amount to recipient, check that only nonzero fee is
+ // subtracted (to_reduce == fee).
+ const CAmount fee{check_tx(0)};
+
+ // Send slightly less than full input amount to recipient, check leftover
+ // input amount is paid to recipient not the miner (to_reduce == fee - 123)
+ BOOST_CHECK_EQUAL(fee, check_tx(123));
+
+ // Send full input minus fee amount to recipient, check leftover input
+ // amount is paid to recipient not the miner (to_reduce == 0)
+ BOOST_CHECK_EQUAL(fee, check_tx(fee));
+
+ // Send full input minus more than the fee amount to recipient, check
+ // leftover input amount is paid to recipient not the miner (to_reduce ==
+ // -123). This overpays the recipient instead of overpaying the miner more
+ // than double the necessary fee.
+ BOOST_CHECK_EQUAL(fee, check_tx(fee + 123));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
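
The invariant exercised by SubtractFee above can also be checked with plain arithmetic: when the leftover input amount that would have become change is paid to the recipient rather than left to the miner, the recipient always receives input - fee, however the 50-coin input is split between the requested amount and the leftover. A self-contained sketch follows; the fee value is an arbitrary placeholder assumed for illustration, whereas the real test takes the fee CreateTransaction computes.

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main()
{
    const int64_t COIN = 100'000'000;
    const int64_t input = 50 * COIN;
    const int64_t fee = 3000; // placeholder fee, assumed for illustration
    for (const int64_t leftover : {int64_t{0}, int64_t{123}, fee, fee + 123}) {
        const int64_t requested = input - leftover;                 // recipient.nAmount in the test
        const int64_t recipient_gets = requested + leftover - fee;  // what the test checks tx->vout[0] against
        const int64_t miner_gets = input - recipient_gets;          // whatever is not paid out is the fee
        assert(recipient_gets == input - fee);
        assert(miner_gets == fee); // the leftover never inflates the fee
    }
    return 0;
}
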
diff --git a/src/wallet/test/util.cpp b/src/wallet/test/util.cpp
new file mode 100644
index 0000000000..c3061b93c0
--- /dev/null
+++ b/src/wallet/test/util.cpp
@@ -0,0 +1,38 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <wallet/test/util.h>
+
+#include <chain.h>
+#include <key.h>
+#include <test/util/setup_common.h>
+#include <wallet/wallet.h>
+#include <wallet/walletdb.h>
+
+#include <boost/test/unit_test.hpp>
+
+#include <memory>
+
+std::unique_ptr<CWallet> CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, const CKey& key)
+{
+ auto wallet = std::make_unique<CWallet>(&chain, "", CreateMockWalletDatabase());
+ {
+ LOCK2(wallet->cs_wallet, ::cs_main);
+ wallet->SetLastBlockProcessed(cchain.Height(), cchain.Tip()->GetBlockHash());
+ }
+ wallet->LoadWallet();
+ {
+ auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan();
+ LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore);
+ spk_man->AddKeyPubKey(key, key.GetPubKey());
+ }
+ WalletRescanReserver reserver(*wallet);
+ reserver.reserve();
+ CWallet::ScanResult result = wallet->ScanForWalletTransactions(cchain.Genesis()->GetBlockHash(), 0 /* start_height */, {} /* max_height */, reserver, false /* update */);
+ BOOST_CHECK_EQUAL(result.status, CWallet::ScanResult::SUCCESS);
+ BOOST_CHECK_EQUAL(result.last_scanned_block, cchain.Tip()->GetBlockHash());
+ BOOST_CHECK_EQUAL(*result.last_scanned_height, cchain.Height());
+ BOOST_CHECK(result.last_failed_block.IsNull());
+ return wallet;
+}
diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h
new file mode 100644
index 0000000000..288c111571
--- /dev/null
+++ b/src/wallet/test/util.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_WALLET_TEST_UTIL_H
+#define BITCOIN_WALLET_TEST_UTIL_H
+
+#include <memory>
+
+class CChain;
+class CKey;
+class CWallet;
+namespace interfaces {
+class Chain;
+} // namespace interfaces
+
+std::unique_ptr<CWallet> CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, const CKey& key);
+
+#endif // BITCOIN_WALLET_TEST_UTIL_H
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index a0070b8dd3..5431a38bee 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -20,6 +20,10 @@
#include <util/translation.h>
#include <validation.h>
#include <wallet/coincontrol.h>
+#include <wallet/context.h>
+#include <wallet/receive.h>
+#include <wallet/spend.h>
+#include <wallet/test/util.h>
#include <wallet/test/wallet_test_fixture.h>
#include <boost/test/unit_test.hpp>
@@ -29,8 +33,6 @@ RPCHelpMan importmulti();
RPCHelpMan dumpwallet();
RPCHelpMan importwallet();
-extern RecursiveMutex cs_wallets;
-
// Ensure that fee levels defined in the wallet are at least as high
// as the default levels for node policy.
static_assert(DEFAULT_TRANSACTION_MINFEE >= DEFAULT_MIN_RELAY_TX_FEE, "wallet minimum fee is smaller than default relay fee");
@@ -38,15 +40,15 @@ static_assert(WALLET_INCREMENTAL_RELAY_FEE >= DEFAULT_INCREMENTAL_RELAY_FEE, "wa
BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup)
-static std::shared_ptr<CWallet> TestLoadWallet(interfaces::Chain* chain)
+static std::shared_ptr<CWallet> TestLoadWallet(WalletContext& context)
{
DatabaseOptions options;
DatabaseStatus status;
bilingual_str error;
std::vector<bilingual_str> warnings;
auto database = MakeWalletDatabase("", options, status, error);
- auto wallet = CWallet::Create(chain, "", std::move(database), options.create_flags, error, warnings);
- if (chain) {
+ auto wallet = CWallet::Create(context, "", std::move(database), options.create_flags, error, warnings);
+ if (context.chain) {
wallet->postInitProcess();
}
return wallet;
@@ -68,7 +70,7 @@ static CMutableTransaction TestSimpleSpend(const CTransaction& from, uint32_t in
keystore.AddKey(key);
std::map<COutPoint, Coin> coins;
coins[mtx.vin[0].prevout].out = from.vout[index];
- std::map<int, std::string> input_errors;
+ std::map<int, bilingual_str> input_errors;
BOOST_CHECK(SignTransaction(mtx, &keystore, coins, SIGHASH_ALL, input_errors));
return mtx;
}
@@ -103,7 +105,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
BOOST_CHECK(result.last_failed_block.IsNull());
BOOST_CHECK(result.last_scanned_block.IsNull());
BOOST_CHECK(!result.last_scanned_height);
- BOOST_CHECK_EQUAL(wallet.GetBalance().m_mine_immature, 0);
+ BOOST_CHECK_EQUAL(GetBalance(wallet).m_mine_immature, 0);
}
// Verify ScanForWalletTransactions picks up transactions in both the old
@@ -122,7 +124,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
BOOST_CHECK(result.last_failed_block.IsNull());
BOOST_CHECK_EQUAL(result.last_scanned_block, newTip->GetBlockHash());
BOOST_CHECK_EQUAL(*result.last_scanned_height, newTip->nHeight);
- BOOST_CHECK_EQUAL(wallet.GetBalance().m_mine_immature, 100 * COIN);
+ BOOST_CHECK_EQUAL(GetBalance(wallet).m_mine_immature, 100 * COIN);
}
// Prune the older block file.
@@ -148,7 +150,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
BOOST_CHECK_EQUAL(result.last_failed_block, oldTip->GetBlockHash());
BOOST_CHECK_EQUAL(result.last_scanned_block, newTip->GetBlockHash());
BOOST_CHECK_EQUAL(*result.last_scanned_height, newTip->nHeight);
- BOOST_CHECK_EQUAL(wallet.GetBalance().m_mine_immature, 50 * COIN);
+ BOOST_CHECK_EQUAL(GetBalance(wallet).m_mine_immature, 50 * COIN);
}
// Prune the remaining block file.
@@ -173,7 +175,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
BOOST_CHECK_EQUAL(result.last_failed_block, newTip->GetBlockHash());
BOOST_CHECK(result.last_scanned_block.IsNull());
BOOST_CHECK(!result.last_scanned_height);
- BOOST_CHECK_EQUAL(wallet.GetBalance().m_mine_immature, 0);
+ BOOST_CHECK_EQUAL(GetBalance(wallet).m_mine_immature, 0);
}
}
@@ -199,7 +201,9 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
WITH_LOCK(wallet->cs_wallet, wallet->SetLastBlockProcessed(newTip->nHeight, newTip->GetBlockHash()));
- AddWallet(wallet);
+ WalletContext context;
+ context.args = &gArgs;
+ AddWallet(context, wallet);
UniValue keys;
keys.setArray();
UniValue key;
@@ -217,6 +221,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
key.pushKV("internal", UniValue(true));
keys.push_back(key);
JSONRPCRequest request;
+ request.context = &context;
request.params.setArray();
request.params.push_back(keys);
@@ -230,7 +235,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
"downloading and rescanning the relevant blocks (see -reindex and -rescan "
"options).\"}},{\"success\":true}]",
0, oldTip->GetBlockTimeMax(), TIMESTAMP_WINDOW));
- RemoveWallet(wallet, std::nullopt);
+ RemoveWallet(context, wallet, /* load_on_start= */ std::nullopt);
}
}
@@ -257,6 +262,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// Import key into wallet and call dumpwallet to create backup file.
{
+ WalletContext context;
+ context.args = &gArgs;
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan();
@@ -264,15 +271,16 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
spk_man->mapKeyMetadata[coinbaseKey.GetPubKey().GetID()].nCreateTime = KEY_TIME;
spk_man->AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey());
- AddWallet(wallet);
+ AddWallet(context, wallet);
wallet->SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash());
}
JSONRPCRequest request;
+ request.context = &context;
request.params.setArray();
request.params.push_back(backup_file);
::dumpwallet().HandleRequest(request);
- RemoveWallet(wallet, std::nullopt);
+ RemoveWallet(context, wallet, /* load_on_start= */ std::nullopt);
}
// Call importwallet RPC and verify all blocks with timestamps >= BLOCK_TIME
@@ -282,13 +290,16 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
LOCK(wallet->cs_wallet);
wallet->SetupLegacyScriptPubKeyMan();
+ WalletContext context;
+ context.args = &gArgs;
JSONRPCRequest request;
+ request.context = &context;
request.params.setArray();
request.params.push_back(backup_file);
- AddWallet(wallet);
+ AddWallet(context, wallet);
wallet->SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash());
::importwallet().HandleRequest(request);
- RemoveWallet(wallet, std::nullopt);
+ RemoveWallet(context, wallet, /* load_on_start= */ std::nullopt);
BOOST_CHECK_EQUAL(wallet->mapWallet.size(), 3U);
BOOST_CHECK_EQUAL(m_coinbase_txns.size(), 103U);
@@ -310,7 +321,7 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
{
CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
auto spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan();
- CWalletTx wtx(&wallet, m_coinbase_txns.back());
+ CWalletTx wtx(m_coinbase_txns.back());
LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore);
wallet.SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash());
@@ -320,13 +331,13 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
// Call GetImmatureCredit() once before adding the key to the wallet to
// cache the current immature credit amount, which is 0.
- BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(), 0);
+ BOOST_CHECK_EQUAL(CachedTxGetImmatureCredit(wallet, wtx), 0);
// Invalidate the cached value, add the key, and make sure a new immature
// credit amount is calculated.
wtx.MarkDirty();
BOOST_CHECK(spk_man->AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey()));
- BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(), 50*COIN);
+ BOOST_CHECK_EQUAL(CachedTxGetImmatureCredit(wallet, wtx), 50*COIN);
}
static int64_t AddTx(ChainstateManager& chainman, CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64_t blockTime)
@@ -480,20 +491,7 @@ public:
ListCoinsTestingSetup()
{
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
- wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
- {
- LOCK2(wallet->cs_wallet, ::cs_main);
- wallet->SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash());
- }
- wallet->LoadWallet();
- AddKey(*wallet, coinbaseKey);
- WalletRescanReserver reserver(*wallet);
- reserver.reserve();
- CWallet::ScanResult result = wallet->ScanForWalletTransactions(m_node.chainman->ActiveChain().Genesis()->GetBlockHash(), 0 /* start_height */, {} /* max_height */, reserver, false /* update */);
- BOOST_CHECK_EQUAL(result.status, CWallet::ScanResult::SUCCESS);
- BOOST_CHECK_EQUAL(result.last_scanned_block, m_node.chainman->ActiveChain().Tip()->GetBlockHash());
- BOOST_CHECK_EQUAL(*result.last_scanned_height, m_node.chainman->ActiveChain().Height());
- BOOST_CHECK(result.last_failed_block.IsNull());
+ wallet = CreateSyncedWallet(*m_node.chain, m_node.chainman->ActiveChain(), coinbaseKey);
}
~ListCoinsTestingSetup()
@@ -510,7 +508,7 @@ public:
CCoinControl dummy;
FeeCalculation fee_calc_out;
{
- BOOST_CHECK(wallet->CreateTransaction({recipient}, tx, fee, changePos, error, dummy, fee_calc_out));
+ BOOST_CHECK(CreateTransaction(*wallet, {recipient}, tx, fee, changePos, error, dummy, fee_calc_out));
}
wallet->CommitTransaction(tx, {}, {});
CMutableTransaction blocktx;
@@ -532,7 +530,7 @@ public:
std::unique_ptr<CWallet> wallet;
};
-BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
+BOOST_FIXTURE_TEST_CASE(ListCoinsTest, ListCoinsTestingSetup)
{
std::string coinbaseAddress = coinbaseKey.GetPubKey().GetID().ToString();
@@ -541,14 +539,14 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
std::map<CTxDestination, std::vector<COutput>> list;
{
LOCK(wallet->cs_wallet);
- list = wallet->ListCoins();
+ list = ListCoins(*wallet);
}
BOOST_CHECK_EQUAL(list.size(), 1U);
BOOST_CHECK_EQUAL(std::get<PKHash>(list.begin()->first).ToString(), coinbaseAddress);
BOOST_CHECK_EQUAL(list.begin()->second.size(), 1U);
// Check initial balance from one mature coinbase transaction.
- BOOST_CHECK_EQUAL(50 * COIN, wallet->GetAvailableBalance());
+ BOOST_CHECK_EQUAL(50 * COIN, GetAvailableBalance(*wallet));
// Add a transaction creating a change address, and confirm ListCoins still
// returns the coin associated with the change address underneath the
@@ -557,7 +555,7 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
AddTx(CRecipient{GetScriptForRawPubKey({}), 1 * COIN, false /* subtract fee */});
{
LOCK(wallet->cs_wallet);
- list = wallet->ListCoins();
+ list = ListCoins(*wallet);
}
BOOST_CHECK_EQUAL(list.size(), 1U);
BOOST_CHECK_EQUAL(std::get<PKHash>(list.begin()->first).ToString(), coinbaseAddress);
@@ -567,7 +565,7 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
{
LOCK(wallet->cs_wallet);
std::vector<COutput> available;
- wallet->AvailableCoins(available);
+ AvailableCoins(*wallet, available);
BOOST_CHECK_EQUAL(available.size(), 2U);
}
for (const auto& group : list) {
@@ -579,14 +577,14 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
{
LOCK(wallet->cs_wallet);
std::vector<COutput> available;
- wallet->AvailableCoins(available);
+ AvailableCoins(*wallet, available);
BOOST_CHECK_EQUAL(available.size(), 0U);
}
// Confirm ListCoins still returns same result as before, despite coins
// being locked.
{
LOCK(wallet->cs_wallet);
- list = wallet->ListCoins();
+ list = ListCoins(*wallet);
}
BOOST_CHECK_EQUAL(list.size(), 1U);
BOOST_CHECK_EQUAL(std::get<PKHash>(list.begin()->first).ToString(), coinbaseAddress);
@@ -601,7 +599,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
wallet->SetWalletFlag(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
BOOST_CHECK(!wallet->TopUpKeyPool(1000));
CTxDestination dest;
- std::string error;
+ bilingual_str error;
BOOST_CHECK(!wallet->GetNewDestination(OutputType::BECH32, "", dest, error));
}
@@ -691,7 +689,10 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
{
gArgs.ForceSetArg("-unsafesqlitesync", "1");
// Create new wallet with known key and unload it.
- auto wallet = TestLoadWallet(m_node.chain.get());
+ WalletContext context;
+ context.args = &gArgs;
+ context.chain = m_node.chain.get();
+ auto wallet = TestLoadWallet(context);
CKey key;
key.MakeNewKey(true);
AddKey(*wallet, key);
@@ -731,7 +732,7 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
// Reload wallet and make sure new transactions are detected despite events
// being blocked
- wallet = TestLoadWallet(m_node.chain.get());
+ wallet = TestLoadWallet(context);
BOOST_CHECK(rescan_completed);
BOOST_CHECK_EQUAL(addtx_count, 2);
{
@@ -758,20 +759,20 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
// deadlock during the sync and simulates a new block notification happening
// as soon as possible.
addtx_count = 0;
- auto handler = HandleLoadWallet([&](std::unique_ptr<interfaces::Wallet> wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet->wallet()->cs_wallet, cs_wallets) {
+ auto handler = HandleLoadWallet(context, [&](std::unique_ptr<interfaces::Wallet> wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet->wallet()->cs_wallet, context.wallets_mutex) {
BOOST_CHECK(rescan_completed);
m_coinbase_txns.push_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
block_tx = TestSimpleSpend(*m_coinbase_txns[2], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
m_coinbase_txns.push_back(CreateAndProcessBlock({block_tx}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
mempool_tx = TestSimpleSpend(*m_coinbase_txns[3], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
BOOST_CHECK(m_node.chain->broadcastTransaction(MakeTransactionRef(mempool_tx), DEFAULT_TRANSACTION_MAXFEE, false, error));
- LEAVE_CRITICAL_SECTION(cs_wallets);
+ LEAVE_CRITICAL_SECTION(context.wallets_mutex);
LEAVE_CRITICAL_SECTION(wallet->wallet()->cs_wallet);
SyncWithValidationInterfaceQueue();
ENTER_CRITICAL_SECTION(wallet->wallet()->cs_wallet);
- ENTER_CRITICAL_SECTION(cs_wallets);
+ ENTER_CRITICAL_SECTION(context.wallets_mutex);
});
- wallet = TestLoadWallet(m_node.chain.get());
+ wallet = TestLoadWallet(context);
BOOST_CHECK_EQUAL(addtx_count, 4);
{
LOCK(wallet->cs_wallet);
@@ -785,7 +786,9 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
BOOST_FIXTURE_TEST_CASE(CreateWalletWithoutChain, BasicTestingSetup)
{
- auto wallet = TestLoadWallet(nullptr);
+ WalletContext context;
+ context.args = &gArgs;
+ auto wallet = TestLoadWallet(context);
BOOST_CHECK(wallet);
UnloadWallet(std::move(wallet));
}
@@ -793,7 +796,10 @@ BOOST_FIXTURE_TEST_CASE(CreateWalletWithoutChain, BasicTestingSetup)
BOOST_FIXTURE_TEST_CASE(ZapSelectTx, TestChain100Setup)
{
gArgs.ForceSetArg("-unsafesqlitesync", "1");
- auto wallet = TestLoadWallet(m_node.chain.get());
+ WalletContext context;
+ context.args = &gArgs;
+ context.chain = m_node.chain.get();
+ auto wallet = TestLoadWallet(context);
CKey key;
key.MakeNewKey(true);
AddKey(*wallet, key);
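The wallet test hunks above all follow the same pattern: each test builds a local WalletContext, registers wallets in it rather than in global state, and hands the same context to RPC handlers via request.context. A minimal sketch of that pattern, assuming the fixture members and helpers visible in this diff (m_node, CreateDummyWalletDatabase, gArgs); it is not a standalone program:

    WalletContext context;
    context.args = &gArgs;                  // ArgsManager the wallet code reads options from
    context.chain = m_node.chain.get();     // omitted in the chainless CreateWalletWithoutChain case

    auto wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
    AddWallet(context, wallet);             // register in the context instead of a global list

    JSONRPCRequest request;
    request.context = &context;             // RPC handlers resolve the wallet through the context
    // ... invoke the RPC handler under test ...

    RemoveWallet(context, wallet, /* load_on_start= */ std::nullopt);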
diff --git a/src/wallet/transaction.h b/src/wallet/transaction.h
index 131faefe0b..094221adf2 100644
--- a/src/wallet/transaction.h
+++ b/src/wallet/transaction.h
@@ -17,12 +17,8 @@
#include <list>
#include <vector>
-struct COutputEntry;
-
typedef std::map<std::string, std::string> mapValue_t;
-//Get the marginal bytes of spending the specified output
-int CalculateMaximumSignedInputSize(const CTxOut& txout, const CWallet* pwallet, bool use_max_sig = false);
static inline void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue)
{
@@ -34,6 +30,7 @@ static inline void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue)
nOrderPos = atoi64(mapValue["n"]);
}
+
static inline void WriteOrderPos(const int64_t& nOrderPos, mapValue_t& mapValue)
{
if (nOrderPos == -1)
@@ -68,8 +65,6 @@ public:
class CWalletTx
{
private:
- const CWallet* const pwallet;
-
/** Constant used in hashBlock to indicate tx has been abandoned, only used at
* serialization/deserialization to avoid ambiguity with conflicted.
*/
@@ -126,7 +121,6 @@ public:
// memory only
enum AmountType { DEBIT, CREDIT, IMMATURE_CREDIT, AVAILABLE_CREDIT, AMOUNTTYPE_ENUM_ELEMENTS };
- CAmount GetCachableAmount(AmountType type, const isminefilter& filter, bool recalculate = false) const;
mutable CachableAmount m_amounts[AMOUNTTYPE_ENUM_ELEMENTS];
/**
* This flag is true if all m_amounts caches are empty. This is particularly
@@ -139,9 +133,8 @@ public:
mutable bool fInMempool;
mutable CAmount nChangeCached;
- CWalletTx(const CWallet* wallet, CTransactionRef arg)
- : pwallet(wallet),
- tx(std::move(arg))
+ CWalletTx(CTransactionRef arg)
+ : tx(std::move(arg))
{
Init();
}
@@ -264,72 +257,13 @@ public:
m_is_cache_empty = true;
}
- //! filter decides which addresses will count towards the debit
- CAmount GetDebit(const isminefilter& filter) const;
- CAmount GetCredit(const isminefilter& filter) const;
- CAmount GetImmatureCredit(bool fUseCache = true) const;
- // TODO: Remove "NO_THREAD_SAFETY_ANALYSIS" and replace it with the correct
- // annotation "EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)". The
- // annotation "NO_THREAD_SAFETY_ANALYSIS" was temporarily added to avoid
- // having to resolve the issue of member access into incomplete type CWallet.
- CAmount GetAvailableCredit(bool fUseCache = true, const isminefilter& filter = ISMINE_SPENDABLE) const NO_THREAD_SAFETY_ANALYSIS;
- CAmount GetImmatureWatchOnlyCredit(const bool fUseCache = true) const;
- CAmount GetChange() const;
-
- /** Get the marginal bytes if spending the specified output from this transaction */
- int GetSpendSize(unsigned int out, bool use_max_sig = false) const
- {
- return CalculateMaximumSignedInputSize(tx->vout[out], pwallet, use_max_sig);
- }
-
- void GetAmounts(std::list<COutputEntry>& listReceived,
- std::list<COutputEntry>& listSent, CAmount& nFee, const isminefilter& filter) const;
-
- bool IsFromMe(const isminefilter& filter) const
- {
- return (GetDebit(filter) > 0);
- }
-
/** True if only scriptSigs are different */
bool IsEquivalentTo(const CWalletTx& tx) const;
bool InMempool() const;
- bool IsTrusted() const;
int64_t GetTxTime() const;
- /** Pass this transaction to node for mempool insertion and relay to peers if flag set to true */
- bool SubmitMemoryPoolAndRelay(std::string& err_string, bool relay);
-
- // TODO: Remove "NO_THREAD_SAFETY_ANALYSIS" and replace it with the correct
- // annotation "EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)". The annotation
- // "NO_THREAD_SAFETY_ANALYSIS" was temporarily added to avoid having to
- // resolve the issue of member access into incomplete type CWallet. Note
- // that we still have the runtime check "AssertLockHeld(pwallet->cs_wallet)"
- // in place.
- std::set<uint256> GetConflicts() const NO_THREAD_SAFETY_ANALYSIS;
-
- /**
- * Return depth of transaction in blockchain:
- * <0 : conflicts with a transaction this deep in the blockchain
- * 0 : in memory pool, waiting to be included in a block
- * >=1 : this many blocks deep in the main chain
- */
- // TODO: Remove "NO_THREAD_SAFETY_ANALYSIS" and replace it with the correct
- // annotation "EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)". The annotation
- // "NO_THREAD_SAFETY_ANALYSIS" was temporarily added to avoid having to
- // resolve the issue of member access into incomplete type CWallet. Note
- // that we still have the runtime check "AssertLockHeld(pwallet->cs_wallet)"
- // in place.
- int GetDepthInMainChain() const NO_THREAD_SAFETY_ANALYSIS;
- bool IsInMainChain() const { return GetDepthInMainChain() > 0; }
-
- /**
- * @return number of blocks to maturity for this transaction:
- * 0 : is not a coinbase transaction, or is a mature coinbase transaction
- * >0 : is a coinbase transaction which matures in this many blocks
- */
- int GetBlocksToMaturity() const;
bool isAbandoned() const { return m_confirm.status == CWalletTx::ABANDONED; }
void setAbandoned()
{
@@ -346,7 +280,6 @@ public:
void setConfirmed() { m_confirm.status = CWalletTx::CONFIRMED; }
const uint256& GetHash() const { return tx->GetHash(); }
bool IsCoinBase() const { return tx->IsCoinBase(); }
- bool IsImmatureCoinBase() const;
// Disable copying of CWalletTx objects to prevent bugs where instances get
// copied in and out of the mapWallet map, and fields are updated in the
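This hunk drops the pwallet back-pointer from CWalletTx together with every member that needed it (GetDebit, GetImmatureCredit, GetConflicts, GetDepthInMainChain, IsImmatureCoinBase and friends). Callers move from transaction methods to wallet-level helpers, as the wallet_tests and wallet.cpp hunks show. A sketch of the call shape, with wallet and wtx assumed in scope and wallet.cs_wallet held where the originals required it:

    // Old style: the transaction answered through its stored CWallet*.
    //   int depth   = wtx.GetDepthInMainChain();
    //   CAmount imm = wtx.GetImmatureCredit();
    // New style: the wallet (or a free function over it) answers for the transaction.
    int depth   = wallet.GetTxDepthInMainChain(wtx);       // CWallet member, defined in wallet.cpp below
    CAmount imm = CachedTxGetImmatureCredit(wallet, wtx);   // free function used by the updated tests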
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 27565aefc9..70349b2455 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -33,6 +33,7 @@
#include <util/string.h>
#include <util/translation.h>
#include <wallet/coincontrol.h>
+#include <wallet/context.h>
#include <wallet/fees.h>
#include <wallet/external_signer_scriptpubkeyman.h>
@@ -54,10 +55,6 @@ const std::map<uint64_t,std::string> WALLET_FLAG_CAVEATS{
},
};
-RecursiveMutex cs_wallets;
-static std::vector<std::shared_ptr<CWallet>> vpwallets GUARDED_BY(cs_wallets);
-static std::list<LoadWalletFn> g_load_wallet_fns GUARDED_BY(cs_wallets);
-
bool AddWalletSetting(interfaces::Chain& chain, const std::string& wallet_name)
{
util::SettingsValue setting_value = chain.getRwSetting("wallet");
@@ -94,19 +91,29 @@ static void UpdateWalletSetting(interfaces::Chain& chain,
}
}
-bool AddWallet(const std::shared_ptr<CWallet>& wallet)
+/**
+ * Refresh mempool status so the wallet is in an internally consistent state and
+ * immediately knows the transaction's status: Whether it can be considered
+ * trusted and is eligible to be abandoned ...
+ */
+static void RefreshMempoolStatus(CWalletTx& tx, interfaces::Chain& chain)
{
- LOCK(cs_wallets);
+ tx.fInMempool = chain.isInMempool(tx.GetHash());
+}
+
+bool AddWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet)
+{
+ LOCK(context.wallets_mutex);
assert(wallet);
- std::vector<std::shared_ptr<CWallet>>::const_iterator i = std::find(vpwallets.begin(), vpwallets.end(), wallet);
- if (i != vpwallets.end()) return false;
- vpwallets.push_back(wallet);
+ std::vector<std::shared_ptr<CWallet>>::const_iterator i = std::find(context.wallets.begin(), context.wallets.end(), wallet);
+ if (i != context.wallets.end()) return false;
+ context.wallets.push_back(wallet);
wallet->ConnectScriptPubKeyManNotifiers();
wallet->NotifyCanGetAddressesChanged();
return true;
}
-bool RemoveWallet(const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start, std::vector<bilingual_str>& warnings)
+bool RemoveWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start, std::vector<bilingual_str>& warnings)
{
assert(wallet);
@@ -115,10 +122,10 @@ bool RemoveWallet(const std::shared_ptr<CWallet>& wallet, std::optional<bool> lo
// Unregister with the validation interface which also drops shared pointers.
wallet->m_chain_notifications_handler.reset();
- LOCK(cs_wallets);
- std::vector<std::shared_ptr<CWallet>>::iterator i = std::find(vpwallets.begin(), vpwallets.end(), wallet);
- if (i == vpwallets.end()) return false;
- vpwallets.erase(i);
+ LOCK(context.wallets_mutex);
+ std::vector<std::shared_ptr<CWallet>>::iterator i = std::find(context.wallets.begin(), context.wallets.end(), wallet);
+ if (i == context.wallets.end()) return false;
+ context.wallets.erase(i);
// Write the wallet setting
UpdateWalletSetting(chain, name, load_on_start, warnings);
@@ -126,32 +133,32 @@ bool RemoveWallet(const std::shared_ptr<CWallet>& wallet, std::optional<bool> lo
return true;
}
-bool RemoveWallet(const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start)
+bool RemoveWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start)
{
std::vector<bilingual_str> warnings;
- return RemoveWallet(wallet, load_on_start, warnings);
+ return RemoveWallet(context, wallet, load_on_start, warnings);
}
-std::vector<std::shared_ptr<CWallet>> GetWallets()
+std::vector<std::shared_ptr<CWallet>> GetWallets(WalletContext& context)
{
- LOCK(cs_wallets);
- return vpwallets;
+ LOCK(context.wallets_mutex);
+ return context.wallets;
}
-std::shared_ptr<CWallet> GetWallet(const std::string& name)
+std::shared_ptr<CWallet> GetWallet(WalletContext& context, const std::string& name)
{
- LOCK(cs_wallets);
- for (const std::shared_ptr<CWallet>& wallet : vpwallets) {
+ LOCK(context.wallets_mutex);
+ for (const std::shared_ptr<CWallet>& wallet : context.wallets) {
if (wallet->GetName() == name) return wallet;
}
return nullptr;
}
-std::unique_ptr<interfaces::Handler> HandleLoadWallet(LoadWalletFn load_wallet)
+std::unique_ptr<interfaces::Handler> HandleLoadWallet(WalletContext& context, LoadWalletFn load_wallet)
{
- LOCK(cs_wallets);
- auto it = g_load_wallet_fns.emplace(g_load_wallet_fns.end(), std::move(load_wallet));
- return interfaces::MakeHandler([it] { LOCK(cs_wallets); g_load_wallet_fns.erase(it); });
+ LOCK(context.wallets_mutex);
+ auto it = context.wallet_load_fns.emplace(context.wallet_load_fns.end(), std::move(load_wallet));
+ return interfaces::MakeHandler([&context, it] { LOCK(context.wallets_mutex); context.wallet_load_fns.erase(it); });
}
static Mutex g_loading_wallet_mutex;
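The hunk above folds the global registry (cs_wallets, vpwallets, g_load_wallet_fns) into WalletContext. The fields touched here imply roughly the following shape; this is an illustrative reconstruction only, since src/wallet/context.h itself is not part of this excerpt and the mutex type is assumed:

    struct WalletContext {
        interfaces::Chain* chain{nullptr};   // dereferenced in LoadWalletInternal/CreateWallet below
        ArgsManager* args{nullptr};          // asserted non-null in CWallet::Create
        Mutex wallets_mutex;                                                     // replaces cs_wallets
        std::vector<std::shared_ptr<CWallet>> wallets GUARDED_BY(wallets_mutex); // replaces vpwallets
        std::list<LoadWalletFn> wallet_load_fns GUARDED_BY(wallets_mutex);       // replaces g_load_wallet_fns
    };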
@@ -203,7 +210,7 @@ void UnloadWallet(std::shared_ptr<CWallet>&& wallet)
}
namespace {
-std::shared_ptr<CWallet> LoadWalletInternal(interfaces::Chain& chain, const std::string& name, std::optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings)
+std::shared_ptr<CWallet> LoadWalletInternal(WalletContext& context, const std::string& name, std::optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings)
{
try {
std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(name, options, status, error);
@@ -212,18 +219,18 @@ std::shared_ptr<CWallet> LoadWalletInternal(interfaces::Chain& chain, const std:
return nullptr;
}
- chain.initMessage(_("Loading wallet…").translated);
- std::shared_ptr<CWallet> wallet = CWallet::Create(&chain, name, std::move(database), options.create_flags, error, warnings);
+ context.chain->initMessage(_("Loading wallet…").translated);
+ std::shared_ptr<CWallet> wallet = CWallet::Create(context, name, std::move(database), options.create_flags, error, warnings);
if (!wallet) {
error = Untranslated("Wallet loading failed.") + Untranslated(" ") + error;
status = DatabaseStatus::FAILED_LOAD;
return nullptr;
}
- AddWallet(wallet);
+ AddWallet(context, wallet);
wallet->postInitProcess();
// Write the wallet setting
- UpdateWalletSetting(chain, name, load_on_start, warnings);
+ UpdateWalletSetting(*context.chain, name, load_on_start, warnings);
return wallet;
} catch (const std::runtime_error& e) {
@@ -234,7 +241,7 @@ std::shared_ptr<CWallet> LoadWalletInternal(interfaces::Chain& chain, const std:
}
} // namespace
-std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const std::string& name, std::optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings)
+std::shared_ptr<CWallet> LoadWallet(WalletContext& context, const std::string& name, std::optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings)
{
auto result = WITH_LOCK(g_loading_wallet_mutex, return g_loading_wallet_set.insert(name));
if (!result.second) {
@@ -242,12 +249,12 @@ std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const std::string&
status = DatabaseStatus::FAILED_LOAD;
return nullptr;
}
- auto wallet = LoadWalletInternal(chain, name, load_on_start, options, status, error, warnings);
+ auto wallet = LoadWalletInternal(context, name, load_on_start, options, status, error, warnings);
WITH_LOCK(g_loading_wallet_mutex, g_loading_wallet_set.erase(result.first));
return wallet;
}
-std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::string& name, std::optional<bool> load_on_start, DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings)
+std::shared_ptr<CWallet> CreateWallet(WalletContext& context, const std::string& name, std::optional<bool> load_on_start, DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings)
{
uint64_t wallet_creation_flags = options.create_flags;
const SecureString& passphrase = options.create_passphrase;
@@ -292,8 +299,8 @@ std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::strin
}
// Make the wallet
- chain.initMessage(_("Loading wallet…").translated);
- std::shared_ptr<CWallet> wallet = CWallet::Create(&chain, name, std::move(database), wallet_creation_flags, error, warnings);
+ context.chain->initMessage(_("Loading wallet…").translated);
+ std::shared_ptr<CWallet> wallet = CWallet::Create(context, name, std::move(database), wallet_creation_flags, error, warnings);
if (!wallet) {
error = Untranslated("Wallet creation failed.") + Untranslated(" ") + error;
status = DatabaseStatus::FAILED_CREATE;
@@ -335,11 +342,11 @@ std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::strin
wallet->Lock();
}
}
- AddWallet(wallet);
+ AddWallet(context, wallet);
wallet->postInitProcess();
// Write the wallet settings
- UpdateWalletSetting(chain, name, load_on_start, warnings);
+ UpdateWalletSetting(*context.chain, name, load_on_start, warnings);
status = DatabaseStatus::SUCCESS;
return wallet;
@@ -574,7 +581,7 @@ bool CWallet::IsSpent(const uint256& hash, unsigned int n) const
const uint256& wtxid = it->second;
std::map<uint256, CWalletTx>::const_iterator mit = mapWallet.find(wtxid);
if (mit != mapWallet.end()) {
- int depth = mit->second.GetDepthInMainChain();
+ int depth = GetTxDepthInMainChain(mit->second);
if (depth > 0 || (depth == 0 && !mit->second.isAbandoned()))
return true; // Spent
}
@@ -803,10 +810,7 @@ bool CWallet::MarkReplaced(const uint256& originalHash, const uint256& newHash)
wtx.mapValue["replaced_by_txid"] = newHash.ToString();
// Refresh mempool status without waiting for transactionRemovedFromMempool
- // notification so the wallet is in an internally consistent state and
- // immediately knows the old transaction should not be considered trusted
- // and is eligible to be abandoned
- wtx.fInMempool = chain().isInMempool(originalHash);
+ RefreshMempoolStatus(wtx, chain());
WalletBatch batch(GetDatabase());
@@ -896,7 +900,7 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const CWalletTx::Confirmatio
}
// Inserts only if not already there, returns tx inserted or tx found
- auto ret = mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(hash), std::forward_as_tuple(this, tx));
+ auto ret = mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(hash), std::forward_as_tuple(tx));
CWalletTx& wtx = (*ret.first).second;
bool fInsertedNew = ret.second;
bool fUpdated = update_wtx && update_wtx(wtx, fInsertedNew);
@@ -980,7 +984,7 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const CWalletTx::Confirmatio
bool CWallet::LoadToWallet(const uint256& hash, const UpdateWalletTxFn& fill_wtx)
{
- const auto& ins = mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(hash), std::forward_as_tuple(this, nullptr));
+ const auto& ins = mapWallet.emplace(std::piecewise_construct, std::forward_as_tuple(hash), std::forward_as_tuple(nullptr));
CWalletTx& wtx = ins.first->second;
if (!fill_wtx(wtx, ins.second)) {
return false;
@@ -1070,7 +1074,7 @@ bool CWallet::TransactionCanBeAbandoned(const uint256& hashTx) const
{
LOCK(cs_wallet);
const CWalletTx* wtx = GetWalletTx(hashTx);
- return wtx && !wtx->isAbandoned() && wtx->GetDepthInMainChain() == 0 && !wtx->InMempool();
+ return wtx && !wtx->isAbandoned() && GetTxDepthInMainChain(*wtx) == 0 && !wtx->InMempool();
}
void CWallet::MarkInputsDirty(const CTransactionRef& tx)
@@ -1096,7 +1100,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
auto it = mapWallet.find(hashTx);
assert(it != mapWallet.end());
const CWalletTx& origtx = it->second;
- if (origtx.GetDepthInMainChain() != 0 || origtx.InMempool()) {
+ if (GetTxDepthInMainChain(origtx) != 0 || origtx.InMempool()) {
return false;
}
@@ -1109,7 +1113,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
auto it = mapWallet.find(now);
assert(it != mapWallet.end());
CWalletTx& wtx = it->second;
- int currentconfirm = wtx.GetDepthInMainChain();
+ int currentconfirm = GetTxDepthInMainChain(wtx);
// If the orig tx was not in block, none of its spends can be
assert(currentconfirm <= 0);
// if (currentconfirm < 0) {Tx and spends are already conflicted, no need to abandon}
@@ -1164,7 +1168,7 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c
auto it = mapWallet.find(now);
assert(it != mapWallet.end());
CWalletTx& wtx = it->second;
- int currentconfirm = wtx.GetDepthInMainChain();
+ int currentconfirm = GetTxDepthInMainChain(wtx);
if (conflictconfirms < currentconfirm) {
// Block is 'more conflicted' than current confirm; update.
// Mark transaction as conflicted with this block.
@@ -1206,7 +1210,7 @@ void CWallet::transactionAddedToMempool(const CTransactionRef& tx, uint64_t memp
auto it = mapWallet.find(tx->GetHash());
if (it != mapWallet.end()) {
- it->second.fInMempool = true;
+ RefreshMempoolStatus(it->second, chain());
}
}
@@ -1214,7 +1218,7 @@ void CWallet::transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRe
LOCK(cs_wallet);
auto it = mapWallet.find(tx->GetHash());
if (it != mapWallet.end()) {
- it->second.fInMempool = false;
+ RefreshMempoolStatus(it->second, chain());
}
// Handle transactions that were removed from the mempool because they
// conflict with transactions in a newly connected block.
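Both mempool notification handlers above now go through RefreshMempoolStatus (introduced near the top of this file's diff) instead of writing fInMempool directly, so the cached flag always reflects an actual chain.isInMempool() query rather than the handler's assumption about notification ordering:

    // Old handlers wrote the flag directly:    it->second.fInMempool = true;   // or false
    // New handlers re-query the node:          RefreshMempoolStatus(it->second, chain());
    // where RefreshMempoolStatus() above does: tx.fInMempool = chain.isInMempool(tx.GetHash());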
@@ -1360,9 +1364,10 @@ CAmount CWallet::GetDebit(const CTransaction& tx, const isminefilter& filter) co
bool CWallet::IsHDEnabled() const
{
// All Active ScriptPubKeyMans must be HD for this to be true
- bool result = true;
+ bool result = false;
for (const auto& spk_man : GetActiveScriptPubKeyMans()) {
- result &= spk_man->IsHDEnabled();
+ if (!spk_man->IsHDEnabled()) return false;
+ result = true;
}
return result;
}
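Note the semantic change in IsHDEnabled above: the old version started from true and AND-ed in every active ScriptPubKeyMan, so a wallet with no active ScriptPubKeyMans reported HD as enabled; the new version returns false in that case and bails out on the first non-HD manager:

    // Old: start at true,  AND in each manager  -> empty set reports true
    // New: start at false, fail fast on non-HD  -> empty set reports false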
@@ -1693,7 +1698,7 @@ void CWallet::ReacceptWalletTransactions()
CWalletTx& wtx = item.second;
assert(wtx.GetHash() == wtxid);
- int nDepth = wtx.GetDepthInMainChain();
+ int nDepth = GetTxDepthInMainChain(wtx);
if (!wtx.IsCoinBase() && (nDepth == 0 && !wtx.isAbandoned())) {
mapSorted.insert(std::make_pair(wtx.nOrderPos, &wtx));
@@ -1704,24 +1709,24 @@ void CWallet::ReacceptWalletTransactions()
for (const std::pair<const int64_t, CWalletTx*>& item : mapSorted) {
CWalletTx& wtx = *(item.second);
std::string unused_err_string;
- wtx.SubmitMemoryPoolAndRelay(unused_err_string, false);
+ SubmitTxMemoryPoolAndRelay(wtx, unused_err_string, false);
}
}
-bool CWalletTx::SubmitMemoryPoolAndRelay(std::string& err_string, bool relay)
+bool CWallet::SubmitTxMemoryPoolAndRelay(const CWalletTx& wtx, std::string& err_string, bool relay) const
{
// Can't relay if wallet is not broadcasting
- if (!pwallet->GetBroadcastTransactions()) return false;
+ if (!GetBroadcastTransactions()) return false;
// Don't relay abandoned transactions
- if (isAbandoned()) return false;
+ if (wtx.isAbandoned()) return false;
// Don't try to submit coinbase transactions. These would fail anyway but would
// cause log spam.
- if (IsCoinBase()) return false;
+ if (wtx.IsCoinBase()) return false;
// Don't try to submit conflicted or confirmed transactions.
- if (GetDepthInMainChain() != 0) return false;
+ if (GetTxDepthInMainChain(wtx) != 0) return false;
// Submit transaction to mempool for relay
- pwallet->WalletLogPrintf("Submitting wtx %s to mempool for relay\n", GetHash().ToString());
+ WalletLogPrintf("Submitting wtx %s to mempool for relay\n", wtx.GetHash().ToString());
// We must set fInMempool here - while it will be re-set to true by the
// entered-mempool callback, if we did not there would be a race where a
// user could call sendmoney in a loop and hit spurious out of funds errors
@@ -1731,18 +1736,17 @@ bool CWalletTx::SubmitMemoryPoolAndRelay(std::string& err_string, bool relay)
// Irrespective of the failure reason, un-marking fInMempool
// out-of-order is incorrect - it should be unmarked when
// TransactionRemovedFromMempool fires.
- bool ret = pwallet->chain().broadcastTransaction(tx, pwallet->m_default_max_tx_fee, relay, err_string);
- fInMempool |= ret;
+ bool ret = chain().broadcastTransaction(wtx.tx, m_default_max_tx_fee, relay, err_string);
+ wtx.fInMempool |= ret;
return ret;
}
-std::set<uint256> CWalletTx::GetConflicts() const
+std::set<uint256> CWallet::GetTxConflicts(const CWalletTx& wtx) const
{
std::set<uint256> result;
- if (pwallet != nullptr)
{
- uint256 myHash = GetHash();
- result = pwallet->GetConflicts(myHash);
+ uint256 myHash = wtx.GetHash();
+ result = GetConflicts(myHash);
result.erase(myHash);
}
return result;
@@ -1780,11 +1784,11 @@ void CWallet::ResendWalletTransactions()
for (std::pair<const uint256, CWalletTx>& item : mapWallet) {
CWalletTx& wtx = item.second;
// Attempt to rebroadcast all txes more than 5 minutes older than
- // the last block. SubmitMemoryPoolAndRelay() will not rebroadcast
+ // the last block. SubmitTxMemoryPoolAndRelay() will not rebroadcast
// any confirmed or conflicting txs.
if (wtx.nTimeReceived > m_best_block_time - 5 * 60) continue;
std::string unused_err_string;
- if (wtx.SubmitMemoryPoolAndRelay(unused_err_string, true)) ++submitted_tx_count;
+ if (SubmitTxMemoryPoolAndRelay(wtx, unused_err_string, true)) ++submitted_tx_count;
}
} // cs_wallet
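Transaction relay also moves from CWalletTx to the wallet: SubmitMemoryPoolAndRelay becomes CWallet::SubmitTxMemoryPoolAndRelay(wtx, err, relay), which reads the broadcast flag, fee ceiling and logger from the wallet directly instead of through a back-pointer. The call shape inside wallet member code, matching the call sites above:

    std::string err_string;
    if (SubmitTxMemoryPoolAndRelay(wtx, err_string, /*relay=*/true)) {
        // broadcast accepted; fInMempool was already set on wtx to avoid the
        // sendmoney race described in the comment above
    }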
@@ -1795,9 +1799,9 @@ void CWallet::ResendWalletTransactions()
/** @} */ // end of mapWallet
-void MaybeResendWalletTxs()
+void MaybeResendWalletTxs(WalletContext& context)
{
- for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) {
+ for (const std::shared_ptr<CWallet>& pwallet : GetWallets(context)) {
pwallet->ResendWalletTransactions();
}
}
@@ -1822,11 +1826,11 @@ bool CWallet::SignTransaction(CMutableTransaction& tx) const
const CWalletTx& wtx = mi->second;
coins[input.prevout] = Coin(wtx.tx->vout[input.prevout.n], wtx.m_confirm.block_height, wtx.IsCoinBase());
}
- std::map<int, std::string> input_errors;
+ std::map<int, bilingual_str> input_errors;
return SignTransaction(tx, coins, SIGHASH_DEFAULT, input_errors);
}
-bool CWallet::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const
+bool CWallet::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const
{
// Try to sign with all ScriptPubKeyMans
for (ScriptPubKeyMan* spk_man : GetAllScriptPubKeyMans()) {
@@ -1972,7 +1976,7 @@ void CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::ve
}
std::string err_string;
- if (!wtx.SubmitMemoryPoolAndRelay(err_string, true)) {
+ if (!SubmitTxMemoryPoolAndRelay(wtx, err_string, true)) {
WalletLogPrintf("CommitTransaction(): Transaction cannot be broadcast immediately, %s\n", err_string);
// TODO: if we expect the failure to be long term or permanent, instead delete wtx from the wallet and return failure.
}
@@ -2128,7 +2132,7 @@ bool CWallet::TopUpKeyPool(unsigned int kpSize)
return res;
}
-bool CWallet::GetNewDestination(const OutputType type, const std::string label, CTxDestination& dest, std::string& error)
+bool CWallet::GetNewDestination(const OutputType type, const std::string label, CTxDestination& dest, bilingual_str& error)
{
LOCK(cs_wallet);
error.clear();
@@ -2138,7 +2142,7 @@ bool CWallet::GetNewDestination(const OutputType type, const std::string label,
spk_man->TopUp();
result = spk_man->GetNewDestination(type, dest, error);
} else {
- error = strprintf(_("Error: No %s addresses available."), FormatOutputType(type)).translated;
+ error = strprintf(_("Error: No %s addresses available."), FormatOutputType(type));
}
if (result) {
SetAddressBook(dest, label, "receive");
@@ -2147,7 +2151,7 @@ bool CWallet::GetNewDestination(const OutputType type, const std::string label,
return result;
}
-bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& dest, std::string& error)
+bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& dest, bilingual_str& error)
{
LOCK(cs_wallet);
error.clear();
@@ -2187,7 +2191,7 @@ void CWallet::MarkDestinationsDirty(const std::set<CTxDestination>& destinations
std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) const
{
- LOCK(cs_wallet);
+ AssertLockHeld(cs_wallet);
std::set<CTxDestination> result;
for (const std::pair<const CTxDestination, CAddressBookData>& item : m_address_book)
{
@@ -2200,11 +2204,11 @@ std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) co
return result;
}
-bool ReserveDestination::GetReservedDestination(CTxDestination& dest, bool internal, std::string& error)
+bool ReserveDestination::GetReservedDestination(CTxDestination& dest, bool internal, bilingual_str& error)
{
m_spk_man = pwallet->GetScriptPubKeyMan(type, internal);
if (!m_spk_man) {
- error = strprintf(_("Error: No %s addresses available."), FormatOutputType(type)).translated;
+ error = strprintf(_("Error: No %s addresses available."), FormatOutputType(type));
return false;
}
@@ -2298,44 +2302,48 @@ void CWallet::GetKeyBirthTimes(std::map<CKeyID, int64_t>& mapKeyBirth) const {
AssertLockHeld(cs_wallet);
mapKeyBirth.clear();
- LegacyScriptPubKeyMan* spk_man = GetLegacyScriptPubKeyMan();
- assert(spk_man != nullptr);
- LOCK(spk_man->cs_KeyStore);
-
- // get birth times for keys with metadata
- for (const auto& entry : spk_man->mapKeyMetadata) {
- if (entry.second.nCreateTime) {
- mapKeyBirth[entry.first] = entry.second.nCreateTime;
- }
- }
-
// map in which we'll infer heights of other keys
std::map<CKeyID, const CWalletTx::Confirmation*> mapKeyFirstBlock;
CWalletTx::Confirmation max_confirm;
max_confirm.block_height = GetLastBlockHeight() > 144 ? GetLastBlockHeight() - 144 : 0; // the tip can be reorganized; use a 144-block safety margin
CHECK_NONFATAL(chain().findAncestorByHeight(GetLastBlockHash(), max_confirm.block_height, FoundBlock().hash(max_confirm.hashBlock)));
- for (const CKeyID &keyid : spk_man->GetKeys()) {
- if (mapKeyBirth.count(keyid) == 0)
- mapKeyFirstBlock[keyid] = &max_confirm;
- }
- // if there are no such keys, we're done
- if (mapKeyFirstBlock.empty())
- return;
+ {
+ LegacyScriptPubKeyMan* spk_man = GetLegacyScriptPubKeyMan();
+ assert(spk_man != nullptr);
+ LOCK(spk_man->cs_KeyStore);
+
+ // get birth times for keys with metadata
+ for (const auto& entry : spk_man->mapKeyMetadata) {
+ if (entry.second.nCreateTime) {
+ mapKeyBirth[entry.first] = entry.second.nCreateTime;
+ }
+ }
+
+ // Prepare to infer birth heights for keys without metadata
+ for (const CKeyID &keyid : spk_man->GetKeys()) {
+ if (mapKeyBirth.count(keyid) == 0)
+ mapKeyFirstBlock[keyid] = &max_confirm;
+ }
- // find first block that affects those keys, if there are any left
- for (const auto& entry : mapWallet) {
- // iterate over all wallet transactions...
- const CWalletTx &wtx = entry.second;
- if (wtx.m_confirm.status == CWalletTx::CONFIRMED) {
- // ... which are already in a block
- for (const CTxOut &txout : wtx.tx->vout) {
- // iterate over all their outputs
- for (const auto &keyid : GetAffectedKeys(txout.scriptPubKey, *spk_man)) {
- // ... and all their affected keys
- auto rit = mapKeyFirstBlock.find(keyid);
- if (rit != mapKeyFirstBlock.end() && wtx.m_confirm.block_height < rit->second->block_height) {
- rit->second = &wtx.m_confirm;
+ // if there are no such keys, we're done
+ if (mapKeyFirstBlock.empty())
+ return;
+
+ // find first block that affects those keys, if there are any left
+ for (const auto& entry : mapWallet) {
+ // iterate over all wallet transactions...
+ const CWalletTx &wtx = entry.second;
+ if (wtx.m_confirm.status == CWalletTx::CONFIRMED) {
+ // ... which are already in a block
+ for (const CTxOut &txout : wtx.tx->vout) {
+ // iterate over all their outputs
+ for (const auto &keyid : GetAffectedKeys(txout.scriptPubKey, *spk_man)) {
+ // ... and all their affected keys
+ auto rit = mapKeyFirstBlock.find(keyid);
+ if (rit != mapKeyFirstBlock.end() && wtx.m_confirm.block_height < rit->second->block_height) {
+ rit->second = &wtx.m_confirm;
+ }
}
}
}
@@ -2498,8 +2506,10 @@ std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, cons
return MakeDatabase(wallet_path, options, status, error_string);
}
-std::shared_ptr<CWallet> CWallet::Create(interfaces::Chain* chain, const std::string& name, std::unique_ptr<WalletDatabase> database, uint64_t wallet_creation_flags, bilingual_str& error, std::vector<bilingual_str>& warnings)
+std::shared_ptr<CWallet> CWallet::Create(WalletContext& context, const std::string& name, std::unique_ptr<WalletDatabase> database, uint64_t wallet_creation_flags, bilingual_str& error, std::vector<bilingual_str>& warnings)
{
+ interfaces::Chain* chain = context.chain;
+ ArgsManager& args = *Assert(context.args);
const std::string& walletFile = database->Filename();
int64_t nStart = GetTimeMillis();
@@ -2581,111 +2591,124 @@ std::shared_ptr<CWallet> CWallet::Create(interfaces::Chain* chain, const std::st
}
}
- if (!gArgs.GetArg("-addresstype", "").empty()) {
- if (!ParseOutputType(gArgs.GetArg("-addresstype", ""), walletInstance->m_default_address_type)) {
- error = strprintf(_("Unknown address type '%s'"), gArgs.GetArg("-addresstype", ""));
+ if (!args.GetArg("-addresstype", "").empty()) {
+ std::optional<OutputType> parsed = ParseOutputType(args.GetArg("-addresstype", ""));
+ if (!parsed) {
+ error = strprintf(_("Unknown address type '%s'"), args.GetArg("-addresstype", ""));
return nullptr;
}
+ walletInstance->m_default_address_type = parsed.value();
}
- if (!gArgs.GetArg("-changetype", "").empty()) {
- OutputType out_type;
- if (!ParseOutputType(gArgs.GetArg("-changetype", ""), out_type)) {
- error = strprintf(_("Unknown change type '%s'"), gArgs.GetArg("-changetype", ""));
+ if (!args.GetArg("-changetype", "").empty()) {
+ std::optional<OutputType> parsed = ParseOutputType(args.GetArg("-changetype", ""));
+ if (!parsed) {
+ error = strprintf(_("Unknown change type '%s'"), args.GetArg("-changetype", ""));
return nullptr;
}
- walletInstance->m_default_change_type = out_type;
+ walletInstance->m_default_change_type = parsed.value();
}
- if (gArgs.IsArgSet("-mintxfee")) {
- CAmount n = 0;
- if (!ParseMoney(gArgs.GetArg("-mintxfee", ""), n) || 0 == n) {
- error = AmountErrMsg("mintxfee", gArgs.GetArg("-mintxfee", ""));
+ if (args.IsArgSet("-mintxfee")) {
+ std::optional<CAmount> min_tx_fee = ParseMoney(args.GetArg("-mintxfee", ""));
+ if (!min_tx_fee || min_tx_fee.value() == 0) {
+ error = AmountErrMsg("mintxfee", args.GetArg("-mintxfee", ""));
return nullptr;
- }
- if (n > HIGH_TX_FEE_PER_KB) {
+ } else if (min_tx_fee.value() > HIGH_TX_FEE_PER_KB) {
warnings.push_back(AmountHighWarn("-mintxfee") + Untranslated(" ") +
_("This is the minimum transaction fee you pay on every transaction."));
}
- walletInstance->m_min_fee = CFeeRate(n);
+
+ walletInstance->m_min_fee = CFeeRate{min_tx_fee.value()};
}
- if (gArgs.IsArgSet("-maxapsfee")) {
- const std::string max_aps_fee{gArgs.GetArg("-maxapsfee", "")};
- CAmount n = 0;
+ if (args.IsArgSet("-maxapsfee")) {
+ const std::string max_aps_fee{args.GetArg("-maxapsfee", "")};
if (max_aps_fee == "-1") {
- n = -1;
- } else if (!ParseMoney(max_aps_fee, n)) {
+ walletInstance->m_max_aps_fee = -1;
+ } else if (std::optional<CAmount> max_fee = ParseMoney(max_aps_fee)) {
+ if (max_fee.value() > HIGH_APS_FEE) {
+ warnings.push_back(AmountHighWarn("-maxapsfee") + Untranslated(" ") +
+ _("This is the maximum transaction fee you pay (in addition to the normal fee) to prioritize partial spend avoidance over regular coin selection."));
+ }
+ walletInstance->m_max_aps_fee = max_fee.value();
+ } else {
error = AmountErrMsg("maxapsfee", max_aps_fee);
return nullptr;
}
- if (n > HIGH_APS_FEE) {
- warnings.push_back(AmountHighWarn("-maxapsfee") + Untranslated(" ") +
- _("This is the maximum transaction fee you pay (in addition to the normal fee) to prioritize partial spend avoidance over regular coin selection."));
- }
- walletInstance->m_max_aps_fee = n;
}
- if (gArgs.IsArgSet("-fallbackfee")) {
- CAmount nFeePerK = 0;
- if (!ParseMoney(gArgs.GetArg("-fallbackfee", ""), nFeePerK)) {
- error = strprintf(_("Invalid amount for -fallbackfee=<amount>: '%s'"), gArgs.GetArg("-fallbackfee", ""));
+ if (args.IsArgSet("-fallbackfee")) {
+ std::optional<CAmount> fallback_fee = ParseMoney(args.GetArg("-fallbackfee", ""));
+ if (!fallback_fee) {
+ error = strprintf(_("Invalid amount for -fallbackfee=<amount>: '%s'"), args.GetArg("-fallbackfee", ""));
return nullptr;
- }
- if (nFeePerK > HIGH_TX_FEE_PER_KB) {
+ } else if (fallback_fee.value() > HIGH_TX_FEE_PER_KB) {
warnings.push_back(AmountHighWarn("-fallbackfee") + Untranslated(" ") +
_("This is the transaction fee you may pay when fee estimates are not available."));
}
- walletInstance->m_fallback_fee = CFeeRate(nFeePerK);
+ walletInstance->m_fallback_fee = CFeeRate{fallback_fee.value()};
}
+
// Disable fallback fee in case value was set to 0, enable if non-zero value
walletInstance->m_allow_fallback_fee = walletInstance->m_fallback_fee.GetFeePerK() != 0;
- if (gArgs.IsArgSet("-discardfee")) {
- CAmount nFeePerK = 0;
- if (!ParseMoney(gArgs.GetArg("-discardfee", ""), nFeePerK)) {
- error = strprintf(_("Invalid amount for -discardfee=<amount>: '%s'"), gArgs.GetArg("-discardfee", ""));
+ if (args.IsArgSet("-discardfee")) {
+ std::optional<CAmount> discard_fee = ParseMoney(args.GetArg("-discardfee", ""));
+ if (!discard_fee) {
+ error = strprintf(_("Invalid amount for -discardfee=<amount>: '%s'"), args.GetArg("-discardfee", ""));
return nullptr;
- }
- if (nFeePerK > HIGH_TX_FEE_PER_KB) {
+ } else if (discard_fee.value() > HIGH_TX_FEE_PER_KB) {
warnings.push_back(AmountHighWarn("-discardfee") + Untranslated(" ") +
_("This is the transaction fee you may discard if change is smaller than dust at this level"));
}
- walletInstance->m_discard_rate = CFeeRate(nFeePerK);
+ walletInstance->m_discard_rate = CFeeRate{discard_fee.value()};
}
- if (gArgs.IsArgSet("-paytxfee")) {
- CAmount nFeePerK = 0;
- if (!ParseMoney(gArgs.GetArg("-paytxfee", ""), nFeePerK)) {
- error = AmountErrMsg("paytxfee", gArgs.GetArg("-paytxfee", ""));
+
+ if (args.IsArgSet("-paytxfee")) {
+ std::optional<CAmount> pay_tx_fee = ParseMoney(args.GetArg("-paytxfee", ""));
+ if (!pay_tx_fee) {
+ error = AmountErrMsg("paytxfee", args.GetArg("-paytxfee", ""));
return nullptr;
- }
- if (nFeePerK > HIGH_TX_FEE_PER_KB) {
+ } else if (pay_tx_fee.value() > HIGH_TX_FEE_PER_KB) {
warnings.push_back(AmountHighWarn("-paytxfee") + Untranslated(" ") +
_("This is the transaction fee you will pay if you send a transaction."));
}
- walletInstance->m_pay_tx_fee = CFeeRate(nFeePerK, 1000);
+
+ walletInstance->m_pay_tx_fee = CFeeRate{pay_tx_fee.value(), 1000};
+
if (chain && walletInstance->m_pay_tx_fee < chain->relayMinFee()) {
error = strprintf(_("Invalid amount for -paytxfee=<amount>: '%s' (must be at least %s)"),
- gArgs.GetArg("-paytxfee", ""), chain->relayMinFee().ToString());
+ args.GetArg("-paytxfee", ""), chain->relayMinFee().ToString());
return nullptr;
}
}
- if (gArgs.IsArgSet("-maxtxfee")) {
- CAmount nMaxFee = 0;
- if (!ParseMoney(gArgs.GetArg("-maxtxfee", ""), nMaxFee)) {
- error = AmountErrMsg("maxtxfee", gArgs.GetArg("-maxtxfee", ""));
+ if (args.IsArgSet("-maxtxfee")) {
+ std::optional<CAmount> max_fee = ParseMoney(args.GetArg("-maxtxfee", ""));
+ if (!max_fee) {
+ error = AmountErrMsg("maxtxfee", args.GetArg("-maxtxfee", ""));
return nullptr;
- }
- if (nMaxFee > HIGH_MAX_TX_FEE) {
+ } else if (max_fee.value() > HIGH_MAX_TX_FEE) {
warnings.push_back(_("-maxtxfee is set very high! Fees this large could be paid on a single transaction."));
}
- if (chain && CFeeRate(nMaxFee, 1000) < chain->relayMinFee()) {
+
+ if (chain && CFeeRate{max_fee.value(), 1000} < chain->relayMinFee()) {
error = strprintf(_("Invalid amount for -maxtxfee=<amount>: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)"),
- gArgs.GetArg("-maxtxfee", ""), chain->relayMinFee().ToString());
+ args.GetArg("-maxtxfee", ""), chain->relayMinFee().ToString());
+ return nullptr;
+ }
+
+ walletInstance->m_default_max_tx_fee = max_fee.value();
+ }
+
+ if (gArgs.IsArgSet("-consolidatefeerate")) {
+ if (std::optional<CAmount> consolidate_feerate = ParseMoney(gArgs.GetArg("-consolidatefeerate", ""))) {
+ walletInstance->m_consolidate_feerate = CFeeRate(*consolidate_feerate);
+ } else {
+ error = AmountErrMsg("consolidatefeerate", gArgs.GetArg("-consolidatefeerate", ""));
return nullptr;
}
- walletInstance->m_default_max_tx_fee = nMaxFee;
}
if (chain && chain->relayMinFee().GetFeePerK() > HIGH_TX_FEE_PER_KB) {
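Every fee option in the hunk above is rewritten around the new ParseMoney() signature, which returns std::optional<CAmount> instead of filling an out-parameter: parse, reject nullopt (and zero where applicable), warn on very high values, then unwrap with .value(). A standalone toy of the same control flow; ParseMoneyToy and HIGH_FEE_WARNING are simplified stand-ins for the real ParseMoney() and threshold constants, not the actual implementations:

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <string>

    using CAmount = int64_t;
    static constexpr CAmount COIN = 100000000;
    static constexpr CAmount HIGH_FEE_WARNING = COIN / 100; // placeholder threshold for the sketch

    // Simplified stand-in for ParseMoney() from util/moneystr.h: whole-coin strings only.
    static std::optional<CAmount> ParseMoneyToy(const std::string& s)
    {
        if (s.empty() || s.find_first_not_of("0123456789") != std::string::npos) return std::nullopt;
        return std::stoll(s) * COIN;
    }

    int main()
    {
        for (const std::string arg : {"1", "0", "abc"}) {
            // Same shape as the -mintxfee handling above: reject parse failures and zero,
            // warn on suspiciously high values, otherwise unwrap with .value() and use it.
            const std::optional<CAmount> fee = ParseMoneyToy(arg);
            if (!fee || fee.value() == 0) {
                std::cout << arg << ": invalid amount\n";
            } else if (fee.value() > HIGH_FEE_WARNING) {
                std::cout << arg << ": accepted with a high-fee warning (" << fee.value() << ")\n";
            } else {
                std::cout << arg << ": accepted (" << fee.value() << ")\n";
            }
        }
        return 0;
    }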
@@ -2693,9 +2716,9 @@ std::shared_ptr<CWallet> CWallet::Create(interfaces::Chain* chain, const std::st
_("The wallet will avoid paying less than the minimum relay fee."));
}
- walletInstance->m_confirm_target = gArgs.GetArg("-txconfirmtarget", DEFAULT_TX_CONFIRM_TARGET);
- walletInstance->m_spend_zero_conf_change = gArgs.GetBoolArg("-spendzeroconfchange", DEFAULT_SPEND_ZEROCONF_CHANGE);
- walletInstance->m_signal_rbf = gArgs.GetBoolArg("-walletrbf", DEFAULT_WALLET_RBF);
+ walletInstance->m_confirm_target = args.GetArg("-txconfirmtarget", DEFAULT_TX_CONFIRM_TARGET);
+ walletInstance->m_spend_zero_conf_change = args.GetBoolArg("-spendzeroconfchange", DEFAULT_SPEND_ZEROCONF_CHANGE);
+ walletInstance->m_signal_rbf = args.GetBoolArg("-walletrbf", DEFAULT_WALLET_RBF);
walletInstance->WalletLogPrintf("Wallet completed loading in %15dms\n", GetTimeMillis() - nStart);
@@ -2709,13 +2732,13 @@ std::shared_ptr<CWallet> CWallet::Create(interfaces::Chain* chain, const std::st
}
{
- LOCK(cs_wallets);
- for (auto& load_wallet : g_load_wallet_fns) {
- load_wallet(interfaces::MakeWallet(walletInstance));
+ LOCK(context.wallets_mutex);
+ for (auto& load_wallet : context.wallet_load_fns) {
+ load_wallet(interfaces::MakeWallet(context, walletInstance));
}
}
- walletInstance->SetBroadcastTransactions(gArgs.GetBoolArg("-walletbroadcast", DEFAULT_WALLETBROADCAST));
+ walletInstance->SetBroadcastTransactions(args.GetBoolArg("-walletbroadcast", DEFAULT_WALLETBROADCAST));
{
walletInstance->WalletLogPrintf("setKeyPool.size() = %u\n", walletInstance->GetKeyPoolSize());
@@ -2887,28 +2910,27 @@ CKeyPool::CKeyPool(const CPubKey& vchPubKeyIn, bool internalIn)
m_pre_split = false;
}
-int CWalletTx::GetDepthInMainChain() const
+int CWallet::GetTxDepthInMainChain(const CWalletTx& wtx) const
{
- assert(pwallet != nullptr);
- AssertLockHeld(pwallet->cs_wallet);
- if (isUnconfirmed() || isAbandoned()) return 0;
+ AssertLockHeld(cs_wallet);
+ if (wtx.isUnconfirmed() || wtx.isAbandoned()) return 0;
- return (pwallet->GetLastBlockHeight() - m_confirm.block_height + 1) * (isConflicted() ? -1 : 1);
+ return (GetLastBlockHeight() - wtx.m_confirm.block_height + 1) * (wtx.isConflicted() ? -1 : 1);
}
-int CWalletTx::GetBlocksToMaturity() const
+int CWallet::GetTxBlocksToMaturity(const CWalletTx& wtx) const
{
- if (!IsCoinBase())
+ if (!wtx.IsCoinBase())
return 0;
- int chain_depth = GetDepthInMainChain();
+ int chain_depth = GetTxDepthInMainChain(wtx);
assert(chain_depth >= 0); // coinbase tx should not be conflicted
return std::max(0, (COINBASE_MATURITY+1) - chain_depth);
}
-bool CWalletTx::IsImmatureCoinBase() const
+bool CWallet::IsTxImmatureCoinBase(const CWalletTx& wtx) const
{
// note GetBlocksToMaturity is 0 for non-coinbase tx
- return GetBlocksToMaturity() > 0;
+ return GetTxBlocksToMaturity(wtx) > 0;
}
bool CWallet::IsCrypted() const
@@ -3230,12 +3252,13 @@ DescriptorScriptPubKeyMan* CWallet::GetDescriptorScriptPubKeyMan(const WalletDes
ScriptPubKeyMan* CWallet::AddWalletDescriptor(WalletDescriptor& desc, const FlatSigningProvider& signing_provider, const std::string& label, bool internal)
{
+ AssertLockHeld(cs_wallet);
+
if (!IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) {
WalletLogPrintf("Cannot add WalletDescriptor to a non-descriptor wallet\n");
return nullptr;
}
- LOCK(cs_wallet);
auto spk_man = GetDescriptorScriptPubKeyMan(desc);
if (spk_man) {
WalletLogPrintf("Update existing descriptor: %s\n", desc.descriptor->ToString());
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 3997751f52..2dc9eff712 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -21,9 +21,7 @@
#include <validationinterface.h>
#include <wallet/coinselection.h>
#include <wallet/crypter.h>
-#include <wallet/receive.h>
#include <wallet/scriptpubkeyman.h>
-#include <wallet/spend.h>
#include <wallet/transaction.h>
#include <wallet/walletdb.h>
#include <wallet/walletutil.h>
@@ -42,6 +40,8 @@
#include <boost/signals2/signal.hpp>
+struct WalletContext;
+
using LoadWalletFn = std::function<void(std::unique_ptr<interfaces::Wallet> wallet)>;
struct bilingual_str;
@@ -53,14 +53,14 @@ struct bilingual_str;
//! by the shared pointer deleter.
void UnloadWallet(std::shared_ptr<CWallet>&& wallet);
-bool AddWallet(const std::shared_ptr<CWallet>& wallet);
-bool RemoveWallet(const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start, std::vector<bilingual_str>& warnings);
-bool RemoveWallet(const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start);
-std::vector<std::shared_ptr<CWallet>> GetWallets();
-std::shared_ptr<CWallet> GetWallet(const std::string& name);
-std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const std::string& name, std::optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings);
-std::shared_ptr<CWallet> CreateWallet(interfaces::Chain& chain, const std::string& name, std::optional<bool> load_on_start, DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings);
-std::unique_ptr<interfaces::Handler> HandleLoadWallet(LoadWalletFn load_wallet);
+bool AddWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet);
+bool RemoveWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start, std::vector<bilingual_str>& warnings);
+bool RemoveWallet(WalletContext& context, const std::shared_ptr<CWallet>& wallet, std::optional<bool> load_on_start);
+std::vector<std::shared_ptr<CWallet>> GetWallets(WalletContext& context);
+std::shared_ptr<CWallet> GetWallet(WalletContext& context, const std::string& name);
+std::shared_ptr<CWallet> LoadWallet(WalletContext& context, const std::string& name, std::optional<bool> load_on_start, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings);
+std::shared_ptr<CWallet> CreateWallet(WalletContext& context, const std::string& name, std::optional<bool> load_on_start, DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error, std::vector<bilingual_str>& warnings);
+std::unique_ptr<interfaces::Handler> HandleLoadWallet(WalletContext& context, LoadWalletFn load_wallet);
std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
//! -paytxfee default
@@ -71,6 +71,8 @@ static const CAmount DEFAULT_FALLBACK_FEE = 0;
static const CAmount DEFAULT_DISCARD_FEE = 10000;
//! -mintxfee default
static const CAmount DEFAULT_TRANSACTION_MINFEE = 1000;
+//! -consolidatefeerate default
+static const CAmount DEFAULT_CONSOLIDATE_FEERATE{10000}; // 10 sat/vbyte
/**
* maximum fee increase allowed to do partial spend avoidance, even for nodes with this feature disabled by default
*
@@ -183,7 +185,7 @@ public:
}
//! Reserve an address
- bool GetReservedDestination(CTxDestination& pubkey, bool internal, std::string& error);
+ bool GetReservedDestination(CTxDestination& pubkey, bool internal, bilingual_str& error);
//! Return reserved address
void ReturnDestination();
//! Keep the address. Do not return its key to the keypool when this object goes out of scope
@@ -327,8 +329,6 @@ private:
// ScriptPubKeyMan::GetID. In many cases it will be the hash of an internal structure
std::map<uint256, std::unique_ptr<ScriptPubKeyMan>> m_spk_managers;
- bool CreateTransactionInternal(const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, const CCoinControl& coin_control, FeeCalculation& fee_calc_out, bool sign) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
-
/**
* Catch wallet up to current chain, scanning new blocks, updating the best
* block locator and m_last_block_processed, and registering for
@@ -349,17 +349,6 @@ public:
return *m_database;
}
- /**
- * Select a set of coins such that nValueRet >= nTargetValue and at least
- * all coins from coin_control are selected; never select unconfirmed coins if they are not ours
- * param@[out] setCoinsRet Populated with inputs including pre-selected inputs from
- * coin_control and Coin Selection if successful.
- * param@[out] nValueRet Total value of selected coins including pre-selected ones
- * from coin_control and Coin Selection if successful.
- */
- bool SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet,
- const CCoinControl& coin_control, CoinSelectionParams& coin_selection_params) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
-
/** Get a name for this wallet for logging/debugging purposes.
*/
const std::string& GetName() const { return m_name; }
@@ -415,39 +404,40 @@ public:
interfaces::Chain& chain() const { assert(m_chain); return *m_chain; }
const CWalletTx* GetWalletTx(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool IsTrusted(const CWalletTx& wtx, std::set<uint256>& trusted_parents) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
-
- //! check whether we support the named feature
- bool CanSupportFeature(enum WalletFeature wf) const override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { AssertLockHeld(cs_wallet); return IsFeatureSupported(nWalletVersion, wf); }
- /**
- * populate vCoins with vector of available COutputs.
- */
- void AvailableCoins(std::vector<COutput>& vCoins, const CCoinControl* coinControl = nullptr, const CAmount& nMinimumAmount = 1, const CAmount& nMaximumAmount = MAX_MONEY, const CAmount& nMinimumSumAmount = MAX_MONEY, const uint64_t nMaximumCount = 0) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ // TODO: Remove "NO_THREAD_SAFETY_ANALYSIS" and replace it with the correct
+ // annotation "EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)". The annotation
+ // "NO_THREAD_SAFETY_ANALYSIS" was temporarily added to avoid having to
+ // resolve the issue of member access into incomplete type CWallet. Note
+ // that we still have the runtime check "AssertLockHeld(pwallet->cs_wallet)"
+ // in place.
+ std::set<uint256> GetTxConflicts(const CWalletTx& wtx) const NO_THREAD_SAFETY_ANALYSIS;
/**
- * Return list of available coins and locked coins grouped by non-change output address.
+ * Return depth of transaction in blockchain:
+ * <0 : conflicts with a transaction this deep in the blockchain
+ * 0 : in memory pool, waiting to be included in a block
+ * >=1 : this many blocks deep in the main chain
*/
- std::map<CTxDestination, std::vector<COutput>> ListCoins() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ // TODO: Remove "NO_THREAD_SAFETY_ANALYSIS" and replace it with the correct
+ // annotation "EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)". The annotation
+ // "NO_THREAD_SAFETY_ANALYSIS" was temporarily added to avoid having to
+ // resolve the issue of member access into incomplete type CWallet. Note
+ // that we still have the runtime check "AssertLockHeld(pwallet->cs_wallet)"
+ // in place.
+ int GetTxDepthInMainChain(const CWalletTx& wtx) const NO_THREAD_SAFETY_ANALYSIS;
+ bool IsTxInMainChain(const CWalletTx& wtx) const { return GetTxDepthInMainChain(wtx) > 0; }
/**
- * Find non-change parent output.
+ * @return number of blocks to maturity for this transaction:
+ * 0 : is not a coinbase transaction, or is a mature coinbase transaction
+ * >0 : is a coinbase transaction which matures in this many blocks
*/
- const CTxOut& FindNonChangeParentOutput(const CTransaction& tx, int output) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ int GetTxBlocksToMaturity(const CWalletTx& wtx) const;
+ bool IsTxImmatureCoinBase(const CWalletTx& wtx) const;
- /**
- * Shuffle and select coins until nTargetValue is reached while avoiding
- * small change; This method is stochastic for some inputs and upon
- * completion the coin set and corresponding actual target value is
- * assembled
- * param@[in] coins Set of UTXOs to consider. These will be categorized into
- * OutputGroups and filtered using eligibility_filter before
- * selecting coins.
- * param@[out] setCoinsRet Populated with the coins selected if successful.
- * param@[out] nValueRet Used to return the total value of selected coins.
- */
- bool AttemptSelection(const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<COutput> coins,
- std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CoinSelectionParams& coin_selection_params) const;
+ //! check whether we support the named feature
+ bool CanSupportFeature(enum WalletFeature wf) const override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { AssertLockHeld(cs_wallet); return IsFeatureSupported(nWalletVersion, wf); }
bool IsSpent(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
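The new GetTxDepthInMainChain helper above documents a three-way depth convention that is easy to misread. A minimal Python sketch of that convention, using hypothetical arguments rather than the wallet's actual data structures:

```
# Sketch only: the depth convention documented above, not Bitcoin Core code.
def tx_depth_in_main_chain(tip_height, tx_block_height, conflicting_depth=None):
    """<0: conflicts with a tx this deep in the chain, 0: in the mempool, >=1: confirmations."""
    if conflicting_depth is not None:
        return -conflicting_depth            # conflicted transaction
    if tx_block_height is None:
        return 0                             # unconfirmed, sitting in the memory pool
    return tip_height - tx_block_height + 1  # blocks deep in the main chain

assert tx_depth_in_main_chain(100, 100) == 1   # mined in the tip block
assert tx_depth_in_main_chain(100, None) == 0  # still unconfirmed
assert tx_depth_in_main_chain(100, 95, conflicting_depth=2) == -2
```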
@@ -455,8 +445,6 @@ public:
bool IsSpentKey(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
void SetSpentKeyState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- std::vector<OutputGroup> GroupOutputs(const std::vector<COutput>& outputs, const CoinSelectionParams& coin_sel_params, const CoinEligibilityFilter& filter, bool positive_only) const;
-
/** Display address on an external signer. Returns false if external signer support is not compiled */
bool DisplayAddress(const CTxDestination& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
@@ -542,28 +530,13 @@ public:
void transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason, uint64_t mempool_sequence) override;
void ReacceptWalletTransactions() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
void ResendWalletTransactions();
- struct Balance {
- CAmount m_mine_trusted{0}; //!< Trusted, at depth=GetBalance.min_depth or more
- CAmount m_mine_untrusted_pending{0}; //!< Untrusted, but in mempool (pending)
- CAmount m_mine_immature{0}; //!< Immature coinbases in the main chain
- CAmount m_watchonly_trusted{0};
- CAmount m_watchonly_untrusted_pending{0};
- CAmount m_watchonly_immature{0};
- };
- Balance GetBalance(int min_depth = 0, bool avoid_reuse = true) const;
- CAmount GetAvailableBalance(const CCoinControl* coinControl = nullptr) const;
OutputType TransactionChangeType(const std::optional<OutputType>& change_type, const std::vector<CRecipient>& vecSend) const;
- /**
- * Insert additional inputs into the transaction by
- * calling CreateTransaction();
- */
- bool FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl);
/** Fetch the inputs and sign with SIGHASH_ALL. */
bool SignTransaction(CMutableTransaction& tx) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
/** Sign the tx given the input coins and sighash. */
- bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const;
+ bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const;
SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const;
/**
@@ -587,12 +560,6 @@ public:
size_t* n_signed = nullptr) const;
/**
- * Create a new transaction paying the recipients with a set of coins
- * selected by SelectCoins(); Also create the change output, when needed
- * @note passing nChangePosInOut as -1 will result in setting a random position
- */
- bool CreateTransaction(const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, const CCoinControl& coin_control, FeeCalculation& fee_calc_out, bool sign = true);
- /**
* Submit the transaction to the node's mempool and then relay to peers.
* Should be called after CreateTransaction unless you want to abort
* broadcasting the transaction.
@@ -603,6 +570,9 @@ public:
*/
void CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm);
+    /** Pass this transaction to the node for mempool insertion and relay it to peers if the flag is set to true */
+ bool SubmitTxMemoryPoolAndRelay(const CWalletTx& wtx, std::string& err_string, bool relay) const;
+
bool DummySignTx(CMutableTransaction &txNew, const std::set<CTxOut> &txouts, bool use_max_sig = false) const
{
std::vector<CTxOut> v_txouts(txouts.size());
@@ -636,6 +606,12 @@ public:
* output itself, just drop it to fees. */
CFeeRate m_discard_rate{DEFAULT_DISCARD_FEE};
+ /** When the actual feerate is less than the consolidate feerate, we will tend to make transactions which
+ * consolidate inputs. When the actual feerate is greater than the consolidate feerate, we will tend to make
+ * transactions which have the lowest fees.
+ */
+ CFeeRate m_consolidate_feerate{DEFAULT_CONSOLIDATE_FEERATE};
+
/** The maximum fee amount we're willing to pay to prioritize partial spend avoidance. */
CAmount m_max_aps_fee{DEFAULT_MAX_AVOIDPARTIALSPEND_FEE}; //!< note: this is absolute fee, not fee rate
OutputType m_default_address_type{DEFAULT_ADDRESS_TYPE};
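The m_consolidate_feerate knob described in the comment block above acts as a threshold between "consolidate now while blockspace is cheap" and "minimize fees". A rough Python sketch of that decision, with feerates in sat/vB and hypothetical helper names (this is not the coin-selection code itself):

```
CONSOLIDATE_FEERATE = 10  # sat/vB, mirroring DEFAULT_CONSOLIDATE_FEERATE (10000 sat/kvB)

def prefer_consolidation(current_feerate):
    # Cheap blockspace: spending many small UTXOs now costs little, so favor
    # selections with more inputs. Expensive blockspace: favor the cheapest tx.
    return current_feerate <= CONSOLIDATE_FEERATE

print(prefer_consolidation(3))   # True  -> lean towards consolidating inputs
print(prefer_consolidation(40))  # False -> lean towards the lowest-fee selection
```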
@@ -654,10 +630,7 @@ public:
int64_t GetOldestKeyPoolTime() const;
- std::set<std::set<CTxDestination>> GetAddressGroupings() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- std::map<CTxDestination, CAmount> GetAddressBalances() const;
-
- std::set<CTxDestination> GetLabelAddresses(const std::string& label) const;
+ std::set<CTxDestination> GetLabelAddresses(const std::string& label) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
/**
* Marks all outputs in each one of the destinations dirty, so their cache is
@@ -665,30 +638,21 @@ public:
*/
void MarkDestinationsDirty(const std::set<CTxDestination>& destinations) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool GetNewDestination(const OutputType type, const std::string label, CTxDestination& dest, std::string& error);
- bool GetNewChangeDestination(const OutputType type, CTxDestination& dest, std::string& error);
+ bool GetNewDestination(const OutputType type, const std::string label, CTxDestination& dest, bilingual_str& error);
+ bool GetNewChangeDestination(const OutputType type, CTxDestination& dest, bilingual_str& error);
isminetype IsMine(const CTxDestination& dest) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
isminetype IsMine(const CScript& script) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- isminetype IsMine(const CTxIn& txin) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
/**
* Returns amount of debit if the input matches the
* filter, otherwise returns 0
*/
CAmount GetDebit(const CTxIn& txin, const isminefilter& filter) const;
isminetype IsMine(const CTxOut& txout) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- CAmount GetCredit(const CTxOut& txout, const isminefilter& filter) const;
- bool IsChange(const CTxOut& txout) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool IsChange(const CScript& script) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- CAmount GetChange(const CTxOut& txout) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
bool IsMine(const CTransaction& tx) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
/** should probably be renamed to IsRelevantToMe */
bool IsFromMe(const CTransaction& tx) const;
CAmount GetDebit(const CTransaction& tx, const isminefilter& filter) const;
- /** Returns whether all of the inputs match the filter */
- bool IsAllFromMe(const CTransaction& tx, const isminefilter& filter) const;
- CAmount GetCredit(const CTransaction& tx, const isminefilter& filter) const;
- CAmount GetChange(const CTransaction& tx) const;
void chainStateFlushed(const CBlockLocator& loc) override;
DBErrors LoadWallet();
@@ -772,7 +736,7 @@ public:
bool MarkReplaced(const uint256& originalHash, const uint256& newHash);
/* Initializes the wallet, returns a new CWallet instance or a null pointer in case of an error */
- static std::shared_ptr<CWallet> Create(interfaces::Chain* chain, const std::string& name, std::unique_ptr<WalletDatabase> database, uint64_t wallet_creation_flags, bilingual_str& error, std::vector<bilingual_str>& warnings);
+ static std::shared_ptr<CWallet> Create(WalletContext& context, const std::string& name, std::unique_ptr<WalletDatabase> database, uint64_t wallet_creation_flags, bilingual_str& error, std::vector<bilingual_str>& warnings);
/**
* Wallet post-init setup
@@ -912,14 +876,14 @@ public:
DescriptorScriptPubKeyMan* GetDescriptorScriptPubKeyMan(const WalletDescriptor& desc) const;
//! Add a descriptor to the wallet, return a ScriptPubKeyMan & associated output type
- ScriptPubKeyMan* AddWalletDescriptor(WalletDescriptor& desc, const FlatSigningProvider& signing_provider, const std::string& label, bool internal);
+ ScriptPubKeyMan* AddWalletDescriptor(WalletDescriptor& desc, const FlatSigningProvider& signing_provider, const std::string& label, bool internal) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
};
/**
* Called periodically by the schedule thread. Prompts individual wallets to resend
* their transactions. Actual rebroadcast schedule is managed by the wallets themselves.
*/
-void MaybeResendWalletTxs();
+void MaybeResendWalletTxs(WalletContext& context);
/** RAII object to check and reserve a wallet rescan */
class WalletRescanReserver
@@ -955,18 +919,6 @@ public:
}
};
-struct TxSize {
- int64_t vsize{-1};
- int64_t weight{-1};
-};
-
-/** Calculate the size of the transaction assuming all signatures are max size
-* Use DummySignatureCreator, which inserts 71 byte signatures everywhere.
-* NOTE: this requires that all inputs must be in mapWallet (eg the tx should
-* be IsAllFromMe). */
-TxSize CalculateMaximumSignedTxSize(const CTransaction& tx, const CWallet* wallet, bool use_max_sig = false) EXCLUSIVE_LOCKS_REQUIRED(wallet->cs_wallet);
-TxSize CalculateMaximumSignedTxSize(const CTransaction& tx, const CWallet* wallet, const std::vector<CTxOut>& txouts, bool use_max_sig = false);
-
//! Add wallet name to persistent configuration so it will be loaded on startup.
bool AddWalletSetting(interfaces::Chain& chain, const std::string& wallet_name);
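AddWalletSetting records the wallet name in the node's persistent settings so the wallet is reloaded on startup; in practice that bookkeeping lives in a settings.json file in the data directory with a "wallet" list. A simplified Python sketch of the equivalent update, assuming that layout (the node itself does this with proper locking and atomic writes):

```
import json
import os

def add_wallet_setting(datadir, wallet_name):
    path = os.path.join(datadir, "settings.json")
    settings = {}
    if os.path.exists(path):
        with open(path, encoding="utf-8") as f:
            settings = json.load(f)
    wallets = settings.setdefault("wallet", [])
    if wallet_name not in wallets:
        wallets.append(wallet_name)  # remember this wallet for the next startup
    with open(path, "w", encoding="utf-8") as f:
        json.dump(settings, f)
```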
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 1e5d8dfa3a..03464cd2c8 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -954,7 +954,7 @@ DBErrors WalletBatch::FindWalletTx(std::vector<uint256>& vTxHash, std::list<CWal
uint256 hash;
ssKey >> hash;
vTxHash.push_back(hash);
- vWtx.emplace_back(nullptr /* wallet */, nullptr /* tx */);
+ vWtx.emplace_back(nullptr /* tx */);
ssValue >> vWtx.back();
}
}
@@ -1004,14 +1004,14 @@ DBErrors WalletBatch::ZapSelectTx(std::vector<uint256>& vTxHashIn, std::vector<u
return DBErrors::LOAD_OK;
}
-void MaybeCompactWalletDB()
+void MaybeCompactWalletDB(WalletContext& context)
{
static std::atomic<bool> fOneThread(false);
if (fOneThread.exchange(true)) {
return;
}
- for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) {
+ for (const std::shared_ptr<CWallet>& pwallet : GetWallets(context)) {
WalletDatabase& dbh = pwallet->GetDatabase();
unsigned int nUpdateCounter = dbh.nUpdateCounter;
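The fOneThread.exchange(true) guard above ensures at most one compaction pass runs at a time even if the scheduler fires again while a pass is still in flight. The same pattern in Python, as an illustrative sketch (the per-wallet maybe_compact() hook is hypothetical):

```
import threading

_one_pass = threading.Lock()

def maybe_compact_wallet_db(wallets):
    if not _one_pass.acquire(blocking=False):
        return  # another pass is already running; skip this tick
    try:
        for wallet in wallets:
            wallet.maybe_compact()  # hypothetical stand-in for the BDB flush check
    finally:
        _one_pass.release()
```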
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 9b775eb481..25c2ec5909 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -31,6 +31,7 @@
static const bool DEFAULT_FLUSHWALLET = true;
struct CBlockLocator;
+struct WalletContext;
class CKeyPool;
class CMasterKey;
class CScript;
@@ -279,7 +280,7 @@ private:
};
//! Compacts BDB state so that wallet.dat is self-contained (if there are changes)
-void MaybeCompactWalletDB();
+void MaybeCompactWalletDB(WalletContext& context);
//! Callback for filtering key types to deserialize in ReadKeyValue
using KeyFilterFn = std::function<bool(const std::string&)>;
diff --git a/test/README.md b/test/README.md
index ab34ce42dc..412d5ae106 100644
--- a/test/README.md
+++ b/test/README.md
@@ -5,20 +5,24 @@ etc.
This directory contains the following sets of tests:
+- [fuzz](/test/fuzz) A runner to execute all fuzz targets from
+ [/src/test/fuzz](/src/test/fuzz).
- [functional](/test/functional) which test the functionality of
bitcoind and bitcoin-qt by interacting with them through the RPC and P2P
interfaces.
-- [util](/test/util) which tests the bitcoin utilities, currently only
-bitcoin-tx.
+- [util](/test/util) which tests the utilities (bitcoin-util, bitcoin-tx, ...).
- [lint](/test/lint/) which perform various static analysis checks.
-The util tests are run as part of `make check` target. The functional
+The util tests are run as part of the `make check` target. The fuzz tests, functional
tests and lint scripts can be run as explained in the sections below.
# Running tests locally
Before tests can be run locally, Bitcoin Core must be built. See the [building instructions](/doc#building) for help.
+### Fuzz tests
+
+See [/doc/fuzzing.md](/doc/fuzzing.md)
### Functional tests
@@ -84,6 +88,12 @@ Run all possible tests with
test/functional/test_runner.py --extended
```
+In order to run backwards compatibility tests, download the previous node binaries:
+
+```
+test/get_previous_releases.py -b v0.20.1 v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
+```
+
By default, up to 4 tests will be run in parallel by test_runner. To specify
how many jobs to run, append `--jobs=n`
@@ -251,7 +261,7 @@ For ways to generate more granular profiles, see the README in
### Util tests
-Util tests can be run locally by running `test/util/bitcoin-util-test.py`.
+Util tests can be run locally by running `test/util/test_runner.py`.
Use the `-v` option for verbose output.
### Lint tests
@@ -263,7 +273,6 @@ Use the `-v` option for verbose output.
| [`lint-python.sh`](lint/lint-python.sh) | [flake8](https://gitlab.com/pycqa/flake8) | [3.8.3](https://github.com/bitcoin/bitcoin/pull/19348) | `pip3 install flake8==3.8.3`
| [`lint-python.sh`](lint/lint-python.sh) | [mypy](https://github.com/python/mypy) | [0.781](https://github.com/bitcoin/bitcoin/pull/19348) | `pip3 install mypy==0.781`
| [`lint-shell.sh`](lint/lint-shell.sh) | [ShellCheck](https://github.com/koalaman/shellcheck) | [0.7.2](https://github.com/bitcoin/bitcoin/pull/21749) | [details...](https://github.com/koalaman/shellcheck#installing)
-| [`lint-shell.sh`](lint/lint-shell.sh) | [yq](https://github.com/kislyuk/yq) | default | `pip3 install yq`
| [`lint-spelling.sh`](lint/lint-spelling.sh) | [codespell](https://github.com/codespell-project/codespell) | [2.0.0](https://github.com/bitcoin/bitcoin/pull/20817) | `pip3 install codespell==2.0.0`
Please be aware that on Linux distributions all dependencies are usually available as packages, but could be outdated.
diff --git a/test/config.ini.in b/test/config.ini.in
index e3872181cd..db80bba6f1 100644
--- a/test/config.ini.in
+++ b/test/config.ini.in
@@ -3,7 +3,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# These environment variables are set by the build process and read by
-# test/functional/test_runner.py and test/util/bitcoin-util-test.py
+# test/*/test_runner.py and test/util/rpcauth-test.py
[environment]
PACKAGE_NAME=@PACKAGE_NAME@
diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py
index fab921ef19..cde0399d8b 100644
--- a/test/functional/data/invalid_txs.py
+++ b/test/functional/data/invalid_txs.py
@@ -29,27 +29,32 @@ from test_framework.messages import (
CTxOut,
MAX_MONEY,
)
-from test_framework import script as sc
from test_framework.blocktools import create_tx_with_script, MAX_BLOCK_SIGOPS
from test_framework.script import (
CScript,
+ OP_0,
+ OP_2DIV,
+ OP_2MUL,
+ OP_AND,
OP_CAT,
- OP_SUBSTR,
- OP_LEFT,
- OP_RIGHT,
+ OP_CHECKSIG,
+ OP_DIV,
OP_INVERT,
- OP_AND,
+ OP_LEFT,
+ OP_LSHIFT,
+ OP_MOD,
+ OP_MUL,
OP_OR,
+ OP_RIGHT,
+ OP_RSHIFT,
+ OP_SUBSTR,
+ OP_TRUE,
OP_XOR,
- OP_2MUL,
- OP_2DIV,
- OP_MUL,
- OP_DIV,
- OP_MOD,
- OP_LSHIFT,
- OP_RSHIFT
)
-basic_p2sh = sc.CScript([sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])
+from test_framework.script_util import (
+ script_to_p2sh_script,
+)
+basic_p2sh = script_to_p2sh_script(CScript([OP_0]))
class BadTxTemplate:
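basic_p2sh is now built with the script_to_p2sh_script helper instead of hand-assembling the opcodes. For reference, a standalone Python sketch of what that helper produces — a P2SH scriptPubKey of the form OP_HASH160 <HASH160(redeem_script)> OP_EQUAL — using plain hashlib rather than the test framework:

```
import hashlib

def hash160(data):
    # RIPEMD160(SHA256(data)); requires ripemd160 support in the local hashlib build.
    return hashlib.new("ripemd160", hashlib.sha256(data).digest()).digest()

def p2sh_script(redeem_script):
    OP_HASH160, OP_EQUAL = b"\xa9", b"\x87"
    return OP_HASH160 + bytes([20]) + hash160(redeem_script) + OP_EQUAL

redeem = b"\x00"  # CScript([OP_0]) serializes to a single zero byte
assert len(p2sh_script(redeem)) == 23  # 1 + 1 + 20 + 1 bytes
```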
@@ -116,7 +121,7 @@ class SizeTooSmall(BadTxTemplate):
def get_tx(self):
tx = CTransaction()
tx.vin.append(self.valid_txin)
- tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE])))
+ tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx.calc_sha256()
return tx
@@ -151,6 +156,19 @@ class DuplicateInput(BadTxTemplate):
return tx
+class PrevoutNullInput(BadTxTemplate):
+ reject_reason = 'bad-txns-prevout-null'
+ expect_disconnect = True
+
+ def get_tx(self):
+ tx = CTransaction()
+ tx.vin.append(self.valid_txin)
+ tx.vin.append(CTxIn(COutPoint(hash=0, n=0xffffffff)))
+ tx.vout.append(CTxOut(1, basic_p2sh))
+ tx.calc_sha256()
+ return tx
+
+
class NonexistentInput(BadTxTemplate):
reject_reason = None # Added as an orphan tx.
expect_disconnect = False
@@ -217,7 +235,7 @@ class TooManySigops(BadTxTemplate):
expect_disconnect = False
def get_tx(self):
- lotsa_checksigs = sc.CScript([sc.OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
+ lotsa_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
return create_tx_with_script(
self.spend_tx, 0,
script_pub_key=lotsa_checksigs,
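The new PrevoutNullInput template exercises the rule that a "null" prevout — a txid of all zero bytes with index 0xffffffff — is reserved as the coinbase marker, so any ordinary transaction containing one is rejected with bad-txns-prevout-null. A self-contained sketch of that check:

```
def is_null_prevout(txid_le: bytes, index: int) -> bool:
    # Matches the coinbase-style marker used by CTxIn(COutPoint(hash=0, n=0xffffffff)).
    return txid_le == bytes(32) and index == 0xFFFFFFFF

assert is_null_prevout(bytes(32), 0xFFFFFFFF)  # only valid inside a coinbase
assert not is_null_prevout(bytes(32), 0)       # an ordinary outpoint
```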
diff --git a/test/functional/feature_addrman.py b/test/functional/feature_addrman.py
new file mode 100755
index 0000000000..8ccff340f0
--- /dev/null
+++ b/test/functional/feature_addrman.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test addrman functionality"""
+
+import os
+import struct
+
+from test_framework.messages import ser_uint256, hash256
+from test_framework.p2p import MAGIC_BYTES
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+
+def serialize_addrman(*, format=1, lowest_compatible=3):
+ new = []
+ tried = []
+ INCOMPATIBILITY_BASE = 32
+ r = MAGIC_BYTES["regtest"]
+ r += struct.pack("B", format)
+ r += struct.pack("B", INCOMPATIBILITY_BASE + lowest_compatible)
+ r += ser_uint256(1)
+ r += struct.pack("i", len(new))
+ r += struct.pack("i", len(tried))
+ ADDRMAN_NEW_BUCKET_COUNT = 1 << 10
+ r += struct.pack("i", ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30))
+ for _ in range(ADDRMAN_NEW_BUCKET_COUNT):
+ r += struct.pack("i", 0)
+ checksum = hash256(r)
+ r += checksum
+ return r
+
+
+def write_addrman(peers_dat, **kwargs):
+ with open(peers_dat, "wb") as f:
+ f.write(serialize_addrman(**kwargs))
+
+
+class AddrmanTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def run_test(self):
+ peers_dat = os.path.join(self.nodes[0].datadir, self.chain, "peers.dat")
+
+ self.log.info("Check that mocked addrman is valid")
+ self.stop_node(0)
+ write_addrman(peers_dat)
+ with self.nodes[0].assert_debug_log(["Loaded 0 addresses from peers.dat"]):
+ self.start_node(0, extra_args=["-checkaddrman=1"])
+ assert_equal(self.nodes[0].getnodeaddresses(), [])
+
+ self.log.info("Check that addrman from future cannot be read")
+ self.stop_node(0)
+ write_addrman(peers_dat, lowest_compatible=111)
+ with self.nodes[0].assert_debug_log([
+ f'ERROR: DeserializeDB: Deserialize or I/O error - Unsupported format of addrman database: 1. It is compatible with formats >=111, but the maximum supported by this version of {self.config["environment"]["PACKAGE_NAME"]} is 3.',
+ "Recreating peers.dat",
+ ]):
+ self.start_node(0)
+ assert_equal(self.nodes[0].getnodeaddresses(), [])
+
+ self.log.info("Check that corrupt addrman cannot be read")
+ self.stop_node(0)
+ with open(peers_dat, "wb") as f:
+ f.write(serialize_addrman()[:-1])
+ with self.nodes[0].assert_debug_log([
+ "ERROR: DeserializeDB: Deserialize or I/O error - CAutoFile::read: end of file",
+ "Recreating peers.dat",
+ ]):
+ self.start_node(0)
+ assert_equal(self.nodes[0].getnodeaddresses(), [])
+
+ self.log.info("Check that missing addrman is recreated")
+ self.stop_node(0)
+ os.remove(peers_dat)
+ with self.nodes[0].assert_debug_log([
+ f"Missing or invalid file {peers_dat}",
+ "Recreating peers.dat",
+ ]):
+ self.start_node(0)
+ assert_equal(self.nodes[0].getnodeaddresses(), [])
+
+
+if __name__ == "__main__":
+ AddrmanTest().main()
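The trailing 32 bytes appended by serialize_addrman() are the double-SHA256 of everything before them, which is why truncating the file by one byte in the "corrupt addrman" case breaks deserialization. A standalone sketch of verifying that checksum with hashlib instead of the framework's hash256 helper:

```
import hashlib

def dsha256(data: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def peers_dat_checksum_ok(raw: bytes) -> bool:
    payload, checksum = raw[:-32], raw[-32:]
    return dsha256(payload) == checksum
```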
diff --git a/test/functional/feature_anchors.py b/test/functional/feature_anchors.py
index 24bb02bc90..7be393a4ea 100755
--- a/test/functional/feature_anchors.py
+++ b/test/functional/feature_anchors.py
@@ -23,9 +23,7 @@ def check_node_connections(*, node, num_in, num_out):
class AnchorsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
-
- def setup_network(self):
- self.setup_nodes()
+ self.disable_autoconnect = False
def run_test(self):
node_anchors_path = os.path.join(
diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py
index 5fcecb4882..704dd6126b 100755
--- a/test/functional/feature_asmap.py
+++ b/test/functional/feature_asmap.py
@@ -31,8 +31,8 @@ ASMAP = '../../src/test/data/asmap.raw' # path to unit test skeleton asmap
VERSION = 'fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853'
def expected_messages(filename):
- return ['Opened asmap file "{}" (59 bytes) from disk'.format(filename),
- 'Using asmap version {} for IP bucketing'.format(VERSION)]
+ return [f'Opened asmap file "{filename}" (59 bytes) from disk',
+ f'Using asmap version {VERSION} for IP bucketing']
class AsmapTest(BitcoinTestFramework):
def set_test_params(self):
@@ -50,7 +50,7 @@ class AsmapTest(BitcoinTestFramework):
filename = os.path.join(self.datadir, 'my-map-file.map')
shutil.copyfile(self.asmap_raw, filename)
with self.node.assert_debug_log(expected_messages(filename)):
- self.start_node(0, ['-asmap={}'.format(filename)])
+ self.start_node(0, [f'-asmap={filename}'])
os.remove(filename)
def test_asmap_with_relative_path(self):
@@ -60,13 +60,13 @@ class AsmapTest(BitcoinTestFramework):
filename = os.path.join(self.datadir, name)
shutil.copyfile(self.asmap_raw, filename)
with self.node.assert_debug_log(expected_messages(filename)):
- self.start_node(0, ['-asmap={}'.format(name)])
+ self.start_node(0, [f'-asmap={name}'])
os.remove(filename)
def test_default_asmap(self):
shutil.copyfile(self.asmap_raw, self.default_asmap)
for arg in ['-asmap', '-asmap=']:
- self.log.info('Test bitcoind {} (using default map file)'.format(arg))
+ self.log.info(f'Test bitcoind {arg} (using default map file)')
self.stop_node(0)
with self.node.assert_debug_log(expected_messages(self.default_asmap)):
self.start_node(0, [arg])
@@ -75,7 +75,7 @@ class AsmapTest(BitcoinTestFramework):
def test_default_asmap_with_missing_file(self):
self.log.info('Test bitcoind -asmap with missing default map file')
self.stop_node(0)
- msg = "Error: Could not find asmap file \"{}\"".format(self.default_asmap)
+ msg = f"Error: Could not find asmap file \"{self.default_asmap}\""
self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
def test_empty_asmap(self):
@@ -83,7 +83,7 @@ class AsmapTest(BitcoinTestFramework):
self.stop_node(0)
with open(self.default_asmap, "w", encoding="utf-8") as f:
f.write("")
- msg = "Error: Could not parse asmap file \"{}\"".format(self.default_asmap)
+ msg = f"Error: Could not parse asmap file \"{self.default_asmap}\""
self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
os.remove(self.default_asmap)
diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py
index c712f7141c..978080703e 100755
--- a/test/functional/feature_backwards_compatibility.py
+++ b/test/functional/feature_backwards_compatibility.py
@@ -4,9 +4,8 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Backwards compatibility functional test
-Test various backwards compatibility scenarios. Download the previous node binaries:
-
-test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
+Test various backwards compatibility scenarios. Requires previous releases binaries,
+see test/README.md.
v0.15.2 is not required by this test, but it is used in wallet_upgradewallet.py.
Due to a hardfork in regtest, it can't be used to sync nodes.
@@ -367,7 +366,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
assert_equal(load_res['warning'], '')
wallet = node_master.get_wallet_rpc("u1_v16")
info = wallet.getaddressinfo(v16_addr)
- descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")"
+ descriptor = f"wpkh([{info['hdmasterfingerprint']}{hdkeypath[1:]}]{v16_pubkey})"
assert_equal(info["desc"], descsum_create(descriptor))
# Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it
@@ -390,7 +389,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
node_master.loadwallet("u1_v17")
wallet = node_master.get_wallet_rpc("u1_v17")
info = wallet.getaddressinfo(address)
- descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")"
+ descriptor = f"wpkh([{info['hdmasterfingerprint']}{hdkeypath[1:]}]{pubkey})"
assert_equal(info["desc"], descsum_create(descriptor))
# Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it
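The f-string above assembles an output descriptor of the form wpkh([<fingerprint><derivation path>]<pubkey>), after which descsum_create() appends "#" plus an eight-character checksum. A small illustration with made-up values (the fingerprint, path and pubkey below are placeholders, not data from the test run):

```
fingerprint = "d34db33f"
hdkeypath = "m/0'/0'/5'"   # getaddressinfo-style path; [1:] strips the leading "m"
pubkey = "02" + "11" * 32  # hex of a 33-byte compressed public key
descriptor = f"wpkh([{fingerprint}{hdkeypath[1:]}]{pubkey})"
print(descriptor)          # wpkh([d34db33f/0'/0'/5']0211...11) before the checksum
```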
diff --git a/test/functional/feature_bind_extra.py b/test/functional/feature_bind_extra.py
new file mode 100755
index 0000000000..6802da8d48
--- /dev/null
+++ b/test/functional/feature_bind_extra.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test starting bitcoind with -bind and/or -bind=...=onion and confirm
+that bind happens on the expected ports.
+"""
+
+import sys
+
+from test_framework.netutil import (
+ addr_to_hex,
+ get_bind_addrs,
+)
+from test_framework.test_framework import (
+ BitcoinTestFramework,
+ SkipTest,
+)
+from test_framework.util import (
+ PORT_MIN,
+ PORT_RANGE,
+ assert_equal,
+ rpc_port,
+)
+
+class BindExtraTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ # Avoid any -bind= on the command line. Force the framework to avoid
+ # adding -bind=127.0.0.1.
+ self.bind_to_localhost_only = False
+ self.num_nodes = 2
+
+ def setup_network(self):
+ # Override setup_network() because we want to put the result of
+ # p2p_port() in self.extra_args[], before the nodes are started.
+ # p2p_port() is not usable in set_test_params() because PortSeed.n is
+ # not set at that time.
+
+ # Due to OS-specific network stats queries, we only run on Linux.
+ self.log.info("Checking for Linux")
+ if not sys.platform.startswith('linux'):
+ raise SkipTest("This test can only be run on Linux.")
+
+ loopback_ipv4 = addr_to_hex("127.0.0.1")
+
+ # Start custom ports after p2p and rpc ports.
+ port = PORT_MIN + 2 * PORT_RANGE
+
+ # Array of tuples [command line arguments, expected bind addresses].
+ self.expected = []
+
+        # Node0: no regular -bind=..., only -bind=...=onion, so only the Tor target is bound.
+ self.expected.append(
+ [
+ [f"-bind=127.0.0.1:{port}=onion"],
+ [(loopback_ipv4, port)]
+ ],
+ )
+ port += 1
+
+ # Node1, both -bind=... and -bind=...=onion.
+ self.expected.append(
+ [
+ [f"-bind=127.0.0.1:{port}", f"-bind=127.0.0.1:{port + 1}=onion"],
+ [(loopback_ipv4, port), (loopback_ipv4, port + 1)]
+ ],
+ )
+ port += 2
+
+ self.extra_args = list(map(lambda e: e[0], self.expected))
+ self.add_nodes(self.num_nodes, self.extra_args)
+ # Don't start the nodes, as some of them would collide trying to bind on the same port.
+
+ def run_test(self):
+ for i in range(len(self.expected)):
+ self.log.info(f"Starting node {i} with {self.expected[i][0]}")
+ self.start_node(i)
+ pid = self.nodes[i].process.pid
+ binds = set(get_bind_addrs(pid))
+ # Remove IPv6 addresses because on some CI environments "::1" is not configured
+ # on the system (so our test_ipv6_local() would return False), but it is
+ # possible to bind on "::". This makes it unpredictable whether to expect
+ # that bitcoind has bound on "::1" (for RPC) and "::" (for P2P).
+ ipv6_addr_len_bytes = 32
+ binds = set(filter(lambda e: len(e[0]) != ipv6_addr_len_bytes, binds))
+ # Remove RPC ports. They are not relevant for this test.
+ binds = set(filter(lambda e: e[1] != rpc_port(i), binds))
+ assert_equal(binds, set(self.expected[i][1]))
+ self.stop_node(i)
+ self.log.info(f"Stopped node {i}")
+
+if __name__ == '__main__':
+ BindExtraTest().main()
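The "port = PORT_MIN + 2 * PORT_RANGE" line above starts the custom -bind ports two full ranges past the framework's base so they cannot collide with the automatically assigned P2P and RPC ports. Worked arithmetic with assumed values (the framework's actual PORT_MIN/PORT_RANGE constants may differ):

```
PORT_MIN, PORT_RANGE = 11000, 5000  # assumed for illustration only
p2p_range = range(PORT_MIN, PORT_MIN + PORT_RANGE)
rpc_range = range(PORT_MIN + PORT_RANGE, PORT_MIN + 2 * PORT_RANGE)
first_custom_port = PORT_MIN + 2 * PORT_RANGE
assert first_custom_port not in p2p_range and first_custom_port not in rpc_range
print(first_custom_port)            # 21000 with these assumed values
```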
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 389db73bc9..777787ed32 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -22,7 +22,7 @@ from test_framework.messages import (
CTransaction,
CTxIn,
CTxOut,
- MAX_BLOCK_BASE_SIZE,
+ MAX_BLOCK_WEIGHT,
uint256_from_compact,
uint256_from_str,
)
@@ -37,17 +37,17 @@ from test_framework.script import (
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
- OP_EQUAL,
OP_DROP,
OP_FALSE,
- OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
LegacySignatureHash,
- hash160,
+)
+from test_framework.script_util import (
+ script_to_p2sh_script,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
@@ -307,33 +307,33 @@ class FullBlockTest(BitcoinTestFramework):
b22 = self.next_block(22, spend=out[5])
self.send_blocks([b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase', reconnect=True)
- # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
+        # Create a block on either side of MAX_BLOCK_WEIGHT and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
- self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
+ self.log.info("Accept a block of weight MAX_BLOCK_WEIGHT")
self.move_tip(15)
b23 = self.next_block(23, spend=out[6])
tx = CTransaction()
- script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
+ script_length = (MAX_BLOCK_WEIGHT - b23.get_weight() - 276) // 4
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = self.update_block(23, [tx])
- # Make sure the math above worked out to produce a max-sized block
- assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
+ # Make sure the math above worked out to produce a max-weighted block
+ assert_equal(b23.get_weight(), MAX_BLOCK_WEIGHT)
self.send_blocks([b23], True)
self.save_spendable_output()
- self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
+ self.log.info("Reject a block of weight MAX_BLOCK_WEIGHT + 4")
self.move_tip(15)
b24 = self.next_block(24, spend=out[6])
- script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
+ script_length = (MAX_BLOCK_WEIGHT - b24.get_weight() - 276) // 4
script_output = CScript([b'\x00' * (script_length + 1)])
tx.vout = [CTxOut(0, script_output)]
b24 = self.update_block(24, [tx])
- assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
+ assert_equal(b24.get_weight(), MAX_BLOCK_WEIGHT + 1 * 4)
self.send_blocks([b24], success=False, reject_reason='bad-blk-length', reconnect=True)
b25 = self.next_block(25, spend=out[7])
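The padding formula that replaces the old byte-based one is worth a worked example: with no witness data a block's weight is four times its base size, so every extra script byte adds four weight units, and the 276 WU constant is just the 69-byte overhead allowance from the old MAX_BLOCK_BASE_SIZE formula multiplied by four. Illustrative arithmetic with a hypothetical starting weight:

```
MAX_BLOCK_WEIGHT = 4_000_000
current_weight = 3_999_000                   # hypothetical b23.get_weight() value
script_length = (MAX_BLOCK_WEIGHT - current_weight - 276) // 4
padding_tx_weight = 4 * script_length + 276  # non-witness bytes count 4 WU each
assert current_weight + padding_tx_weight <= MAX_BLOCK_WEIGHT
print(script_length, padding_tx_weight)      # 181 1000 with these assumed numbers
```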
@@ -373,7 +373,9 @@ class FullBlockTest(BitcoinTestFramework):
# b30 has a max-sized coinbase scriptSig.
self.move_tip(23)
b30 = self.next_block(30)
- b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
+ b30.vtx[0].vin[0].scriptSig = bytes(b30.vtx[0].vin[0].scriptSig) # Convert CScript to raw bytes
+ b30.vtx[0].vin[0].scriptSig += b'\x00' * (100 - len(b30.vtx[0].vin[0].scriptSig)) # Fill with 0s
+ assert_equal(len(b30.vtx[0].vin[0].scriptSig), 100)
b30.vtx[0].rehash()
b30 = self.update_block(30, [])
self.send_blocks([b30], True)
@@ -469,8 +471,7 @@ class FullBlockTest(BitcoinTestFramework):
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
- redeem_script_hash = hash160(redeem_script)
- p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
+ p2sh_script = script_to_p2sh_script(redeem_script)
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
@@ -485,13 +486,13 @@ class FullBlockTest(BitcoinTestFramework):
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
- total_size = len(b39.serialize())
- while(total_size < MAX_BLOCK_BASE_SIZE):
+ total_weight = b39.get_weight()
+ while total_weight < MAX_BLOCK_WEIGHT:
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
- total_size += len(tx_new.serialize())
- if total_size >= MAX_BLOCK_BASE_SIZE:
+ total_weight += tx_new.get_weight()
+ if total_weight >= MAX_BLOCK_WEIGHT:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
@@ -502,7 +503,7 @@ class FullBlockTest(BitcoinTestFramework):
# Make sure we didn't accidentally make too big a block. Note that the
# size of the block has non-determinism due to the ECDSA signature in
# the first transaction.
- while (len(b39.serialize()) >= MAX_BLOCK_BASE_SIZE):
+ while b39.get_weight() >= MAX_BLOCK_WEIGHT:
del b39.vtx[-1]
b39 = self.update_block(39, [])
@@ -834,6 +835,7 @@ class FullBlockTest(BitcoinTestFramework):
b61.vtx[0].rehash()
b61 = self.update_block(61, [])
assert_equal(duplicate_tx.serialize(), b61.vtx[0].serialize())
+ # BIP30 is always checked on regtest, regardless of the BIP34 activation height
self.send_blocks([b61], success=False, reject_reason='bad-txns-BIP30', reconnect=True)
# Test BIP30 (allow duplicate if spent)
@@ -892,7 +894,7 @@ class FullBlockTest(BitcoinTestFramework):
self.send_blocks([b63], success=False, reject_reason='bad-txns-nonfinal', reconnect=True)
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
- # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
+ # the block is > MAX_BLOCK_WEIGHT with the bloated varint, but <= MAX_BLOCK_WEIGHT without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
@@ -917,12 +919,12 @@ class FullBlockTest(BitcoinTestFramework):
tx = CTransaction()
# use canonical serialization to calculate size
- script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
+ script_length = (MAX_BLOCK_WEIGHT - 4 * len(b64a.normal_serialize()) - 276) // 4
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = self.update_block("64a", [tx])
- assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
+ assert_equal(b64a.get_weight(), MAX_BLOCK_WEIGHT + 8 * 4)
self.send_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()')
# bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
@@ -936,7 +938,7 @@ class FullBlockTest(BitcoinTestFramework):
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
- assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
+ assert_equal(b64.get_weight(), MAX_BLOCK_WEIGHT)
self.blocks[64] = b64
b64 = self.update_block(64, [])
self.send_blocks([b64], True)
@@ -1270,12 +1272,12 @@ class FullBlockTest(BitcoinTestFramework):
for i in range(89, LARGE_REORG_SIZE + 89):
b = self.next_block(i, spend)
tx = CTransaction()
- script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
+ script_length = (MAX_BLOCK_WEIGHT - b.get_weight() - 276) // 4
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = self.update_block(i, [tx])
- assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
+ assert_equal(b.get_weight(), MAX_BLOCK_WEIGHT)
blocks.append(b)
self.save_spendable_output()
spend = self.get_spendable_output()
diff --git a/test/functional/feature_blocksdir.py b/test/functional/feature_blocksdir.py
index 7bfad52c24..d3276e64ee 100755
--- a/test/functional/feature_blocksdir.py
+++ b/test/functional/feature_blocksdir.py
@@ -24,10 +24,10 @@ class BlocksdirTest(BitcoinTestFramework):
initialize_datadir(self.options.tmpdir, 0, self.chain)
self.log.info("Starting with nonexistent blocksdir ...")
blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
- self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path))
+ self.nodes[0].assert_start_raises_init_error([f"-blocksdir={blocksdir_path}"], f'Error: Specified blocks directory "{blocksdir_path}" does not exist.')
os.mkdir(blocksdir_path)
self.log.info("Starting with existing blocksdir ...")
- self.start_node(0, ["-blocksdir=" + blocksdir_path])
+ self.start_node(0, [f"-blocksdir={blocksdir_path}"])
self.log.info("mining blocks..")
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
assert os.path.isfile(os.path.join(blocksdir_path, self.chain, "blocks", "blk00000.dat"))
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 10d2072dba..3438027db6 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -4,11 +4,11 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
-Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
-1351.
+Test that the CHECKLOCKTIMEVERIFY soft-fork activates.
"""
from test_framework.blocktools import (
+ CLTV_HEIGHT,
create_block,
create_coinbase,
)
@@ -31,8 +31,6 @@ from test_framework.wallet import (
MiniWalletMode,
)
-CLTV_HEIGHT = 1351
-
# Helper function to modify a transaction by
# 1) prepending a given script to the scriptSig of vin 0 and
@@ -63,9 +61,9 @@ def cltv_invalidate(tx, failure_reason):
# +-------------------------------------------------+------------+--------------+
[[OP_CHECKLOCKTIMEVERIFY], None, None],
[[OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP], None, None],
- [[CScriptNum(1000), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 1296688602], # timestamp of genesis block
- [[CScriptNum(1000), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 500],
- [[CScriptNum(500), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0xffffffff, 500],
+ [[CScriptNum(100), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 1296688602], # timestamp of genesis block
+ [[CScriptNum(100), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 50],
+ [[CScriptNum(50), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0xffffffff, 50],
][failure_reason]
cltv_modify_tx(tx, prepend_scriptsig=scheme[0], nsequence=scheme[1], nlocktime=scheme[2])
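Each row in the table above targets a different BIP65 failure condition: an empty stack, a negative argument, a height/timestamp type mismatch, a lock time greater than the transaction's nLockTime, and a final nSequence. A simplified Python sketch of the CLTV predicate those rows exercise (not the consensus implementation, which also enforces minimal encoding and other script rules):

```
LOCKTIME_THRESHOLD = 500_000_000  # values below are block heights, at/above are timestamps
SEQUENCE_FINAL = 0xFFFFFFFF

def cltv_ok(stack_top, tx_nlocktime, txin_nsequence):
    if stack_top is None or stack_top < 0:
        return False  # empty stack / negative argument
    if (stack_top < LOCKTIME_THRESHOLD) != (tx_nlocktime < LOCKTIME_THRESHOLD):
        return False  # comparing a height against a timestamp (or vice versa)
    if stack_top > tx_nlocktime:
        return False  # required lock time not yet reached
    if txin_nsequence == SEQUENCE_FINAL:
        return False  # a final input opts out of lock-time enforcement
    return True

assert not cltv_ok(100, 1296688602, 0)      # height vs. genesis-timestamp mismatch
assert not cltv_ok(100, 50, 0)              # 100 > nLockTime of 50
assert not cltv_ok(50, 50, SEQUENCE_FINAL)  # final nSequence
```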
@@ -106,6 +104,7 @@ class BIP65Test(BitcoinTestFramework):
self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
wallet.generate(10)
self.nodes[0].generate(CLTV_HEIGHT - 2 - 10)
+ assert_equal(self.nodes[0].getblockcount(), CLTV_HEIGHT - 2)
self.log.info("Test that invalid-according-to-CLTV transactions can still appear in a block")
@@ -136,7 +135,7 @@ class BIP65Test(BitcoinTestFramework):
block.nVersion = 3
block.solve()
- with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
+ with self.nodes[0].assert_debug_log(expected_msgs=[f'{block.hash}, bad-version(0x00000003)']):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
@@ -174,8 +173,7 @@ class BIP65Test(BitcoinTestFramework):
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
- with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with {}'.format(
- block.vtx[-1].hash, expected_cltv_reject_reason)]):
+ with self.nodes[0].assert_debug_log(expected_msgs=[f'CheckInputScripts on {block.vtx[-1].hash} failed with {expected_cltv_reject_reason}']):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py
index 5d8ec2a8da..71d522a245 100755
--- a/test/functional/feature_coinstatsindex.py
+++ b/test/functional/feature_coinstatsindex.py
@@ -32,7 +32,6 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
- try_rpc,
)
class CoinStatsIndexTest(BitcoinTestFramework):
@@ -76,13 +75,11 @@ class CoinStatsIndexTest(BitcoinTestFramework):
self.sync_blocks(timeout=120)
self.log.info("Test that gettxoutsetinfo() output is consistent with or without coinstatsindex option")
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", node.gettxoutsetinfo))
res0 = node.gettxoutsetinfo('none')
# The fields 'disk_size' and 'transactions' do not exist on the index
del res0['disk_size'], res0['transactions']
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
for hash_option in index_hash_options:
res1 = index_node.gettxoutsetinfo(hash_option)
# The fields 'block_info' and 'total_unspendable_amount' only exist on the index
@@ -97,7 +94,6 @@ class CoinStatsIndexTest(BitcoinTestFramework):
# Generate a new tip
node.generate(5)
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
for hash_option in index_hash_options:
# Fetch old stats by height
res2 = index_node.gettxoutsetinfo(hash_option, 102)
@@ -176,7 +172,6 @@ class CoinStatsIndexTest(BitcoinTestFramework):
self.nodes[0].generate(1)
self.sync_all()
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
for hash_option in index_hash_options:
# Check all amounts were registered correctly
res6 = index_node.gettxoutsetinfo(hash_option, 108)
@@ -209,7 +204,6 @@ class CoinStatsIndexTest(BitcoinTestFramework):
self.nodes[0].submitblock(block.serialize().hex())
self.sync_all()
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
for hash_option in index_hash_options:
res7 = index_node.gettxoutsetinfo(hash_option, 109)
assert_equal(res7['total_unspendable_amount'], Decimal('80.98999999'))
@@ -235,7 +229,6 @@ class CoinStatsIndexTest(BitcoinTestFramework):
assert_equal(res8, res9)
index_node.generate(1)
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
res10 = index_node.gettxoutsetinfo('muhash')
assert(res8['txouts'] < res10['txouts'])
@@ -256,14 +249,12 @@ class CoinStatsIndexTest(BitcoinTestFramework):
index_node = self.nodes[1]
reorg_blocks = index_node.generatetoaddress(2, index_node.getnewaddress())
reorg_block = reorg_blocks[1]
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
res_invalid = index_node.gettxoutsetinfo('muhash')
index_node.invalidateblock(reorg_blocks[0])
assert_equal(index_node.gettxoutsetinfo('muhash')['height'], 110)
# Add two new blocks
block = index_node.generate(2)[1]
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
res = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False)
# Test that the result of the reorged block is not returned for its old block height
@@ -285,9 +276,7 @@ class CoinStatsIndexTest(BitcoinTestFramework):
# Ensure that removing and re-adding blocks yields consistent results
block = index_node.getblockhash(99)
index_node.invalidateblock(block)
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
index_node.reconsiderblock(block)
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash'))
res3 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=112)
assert_equal(res2, res3)
@@ -297,8 +286,7 @@ class CoinStatsIndexTest(BitcoinTestFramework):
node.getblock(reorg_block)
self.restart_node(0, ["-coinstatsindex"])
- self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", node.gettxoutsetinfo, 'muhash'))
- assert_raises_rpc_error(-32603, "Unable to read UTXO set", node.gettxoutsetinfo, 'muhash', reorg_block)
+ assert_raises_rpc_error(-32603, "Unable to get data because coinstatsindex is still syncing.", node.gettxoutsetinfo, 'muhash', reorg_block)
def _test_index_rejects_hash_serialized(self):
self.log.info("Test that the rpc raises if the legacy hash is passed with the index")
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index de9d0d2e80..0daa0ba3d8 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -17,13 +17,14 @@ class ConfArgsTest(BitcoinTestFramework):
self.num_nodes = 1
self.supports_cli = False
self.wallet_names = []
+ self.disable_autoconnect = False
def test_config_file_parser(self):
self.stop_node(0)
inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
- conf.write('includeconf={}\n'.format(inc_conf_file_path))
+ conf.write(f'includeconf={inc_conf_file_path}\n')
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli=1',
@@ -42,13 +43,13 @@ class ConfArgsTest(BitcoinTestFramework):
if self.is_wallet_compiled():
with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
conf.write("wallet=foo\n")
- self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on %s network when in [%s] section.' % (self.chain, self.chain))
+ self.nodes[0].assert_start_raises_init_error(expected_msg=f'Error: Config setting for -wallet only applied on {self.chain} network when in [{self.chain}] section.')
main_conf_file_path = os.path.join(self.options.tmpdir, 'node0', 'bitcoin_main.conf')
- util.write_config(main_conf_file_path, n=0, chain='', extra_config='includeconf={}\n'.format(inc_conf_file_path))
+ util.write_config(main_conf_file_path, n=0, chain='', extra_config=f'includeconf={inc_conf_file_path}\n')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('acceptnonstdtxn=1\n')
- self.nodes[0].assert_start_raises_init_error(extra_args=["-conf={}".format(main_conf_file_path)], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
+ self.nodes[0].assert_start_raises_init_error(extra_args=[f"-conf={main_conf_file_path}"], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
@@ -68,14 +69,14 @@ class ConfArgsTest(BitcoinTestFramework):
inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
- conf.write('includeconf={}\n'.format(inc_conf_file2_path))
+ conf.write(f'includeconf={inc_conf_file2_path}\n')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('testnot.datadir=1\n')
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('[testnet]\n')
self.restart_node(0)
- self.nodes[0].stop_node(expected_stderr='Warning: ' + inc_conf_file_path + ':1 Section [testnot] is not recognized.' + os.linesep + inc_conf_file2_path + ':1 Section [testnet] is not recognized.')
+ self.nodes[0].stop_node(expected_stderr=f'Warning: {inc_conf_file_path}:1 Section [testnot] is not recognized.{os.linesep}{inc_conf_file2_path}:1 Section [testnet] is not recognized.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
@@ -104,8 +105,8 @@ class ConfArgsTest(BitcoinTestFramework):
'Command-line arg: rpcpassword=****',
'Command-line arg: rpcuser=****',
'Command-line arg: torpassword=****',
- 'Config file arg: %s="1"' % self.chain,
- 'Config file arg: [%s] server="1"' % self.chain,
+ f'Config file arg: {self.chain}="1"',
+ f'Config file arg: [{self.chain}] server="1"',
],
unexpected_msgs=[
'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
@@ -158,8 +159,9 @@ class ConfArgsTest(BitcoinTestFramework):
self.stop_node(0)
# No peers.dat exists and -dnsseed=1
- # We expect the node will use DNS Seeds, but Regtest mode has 0 DNS seeds
- # So after 60 seconds, the node should fallback to fixed seeds (this is a slow test)
+ # We expect the node will use DNS Seeds, but Regtest mode does not have
+        # any valid DNS seeds. So after 60 seconds, the node should fall back to
+ # fixed seeds
assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
start = int(time.time())
with self.nodes[0].assert_debug_log(expected_msgs=[
@@ -233,7 +235,7 @@ class ConfArgsTest(BitcoinTestFramework):
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
- self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
+ self.nodes[0].assert_start_raises_init_error([f'-datadir={new_data_dir}'], f'Error: Specified data directory "{new_data_dir}" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "bitcoin.conf")
@@ -241,21 +243,25 @@ class ConfArgsTest(BitcoinTestFramework):
# datadir needs to be set before [chain] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
- f.write("datadir=" + new_data_dir + "\n")
+ f.write(f"datadir={new_data_dir}\n")
f.write(conf_file_contents)
- self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error: Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
+ self.nodes[0].assert_start_raises_init_error([f'-conf={conf_file}'], f'Error: Error reading configuration file: specified data directory "{new_data_dir}" does not exist.')
+
+ # Check that an explicitly specified config file that cannot be opened fails
+ none_existent_conf_file = os.path.join(default_data_dir, "none_existent_bitcoin.conf")
+ self.nodes[0].assert_start_raises_init_error(['-conf=' + none_existent_conf_file], 'Error: Error reading configuration file: specified config file "' + none_existent_conf_file + '" could not be opened.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
- self.start_node(0, ['-conf='+conf_file])
+ self.start_node(0, [f'-conf={conf_file}'])
self.stop_node(0)
assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
- self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file])
+ self.start_node(0, [f'-datadir={new_data_dir_2}', f'-conf={conf_file}'])
assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'blocks'))
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index 5081867319..bc0050d47a 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -41,6 +41,7 @@ from itertools import product
import time
from test_framework.blocktools import (
+ CSV_ACTIVATION_HEIGHT,
create_block,
create_coinbase,
)
@@ -63,7 +64,6 @@ from test_framework.wallet import (
TESTING_TX_COUNT = 83 # Number of testing transactions: 1 BIP113 tx, 16 BIP68 txs, 66 BIP112 txs (see comments above)
COINBASE_BLOCK_COUNT = TESTING_TX_COUNT # Number of coinbase blocks we need to generate as inputs for our txs
BASE_RELATIVE_LOCKTIME = 10
-CSV_ACTIVATION_HEIGHT = 432
SEQ_DISABLE_FLAG = 1 << 31
SEQ_RANDOM_HIGH_BIT = 1 << 25
SEQ_TYPE_FLAG = 1 << 22
@@ -247,7 +247,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.send_blocks(test_blocks)
assert_equal(self.tipheight, CSV_ACTIVATION_HEIGHT - 2)
- self.log.info("Height = {}, CSV not yet active (will activate for block {}, not {})".format(self.tipheight, CSV_ACTIVATION_HEIGHT, CSV_ACTIVATION_HEIGHT - 1))
+ self.log.info(f"Height = {self.tipheight}, CSV not yet active (will activate for block {CSV_ACTIVATION_HEIGHT}, not {CSV_ACTIVATION_HEIGHT - 1})")
assert not softfork_active(self.nodes[0], 'csv')
# Test both version 1 and version 2 transactions for all tests
diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
index c532300ce2..f0766ca7c2 100755
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -41,7 +41,6 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
create_confirmed_utxos,
- hex_str_to_bytes,
)
@@ -103,7 +102,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
- raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
+ raise AssertionError(f"Unable to successfully restart node {node_index} in allotted time")
def submit_block_catch_error(self, node_index, block):
"""Try submitting a block to the given node.
@@ -115,10 +114,10 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
self.nodes[node_index].submitblock(block)
return True
except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
- self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+ self.log.debug(f"node {node_index} submitblock raised exception: {e}")
return False
except OSError as e:
- self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
+ self.log.debug(f"node {node_index} submitblock raised OSError exception: errno={e.errno}")
if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
# The node has likely crashed
return False
@@ -143,15 +142,15 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
- self.log.debug("Syncing blocks to node %d", i)
+ self.log.debug(f"Syncing blocks to node {i}")
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
- self.log.debug("submitting block %s", block_hash)
+ self.log.debug(f"submitting block {block_hash}")
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
- self.log.debug("Restarting node %d after block hash %s", i, block_hash)
+ self.log.debug(f"Restarting node {i} after block hash {block_hash}")
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
@@ -168,7 +167,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
- self.log.debug("Checking txoutsetinfo matches for node %d", i)
+ self.log.debug(f"Checking txoutsetinfo matches for node {i}")
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def verify_utxo_hash(self):
@@ -204,7 +203,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
continue
for _ in range(3):
- tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
+ tx.vout.append(CTxOut(output_amount, bytes.fromhex(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
@@ -218,15 +217,15 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# Start by creating a lot of utxos on node3
initial_height = self.nodes[3].getblockcount()
- utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
- self.log.info("Prepped %d utxo entries", len(utxo_list))
+ utxo_list = create_confirmed_utxos(self, self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
+ self.log.info(f"Prepped {len(utxo_list)} utxo entries")
# Sync these blocks with the other nodes
block_hashes_to_sync = []
for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
- self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
+ self.log.debug(f"Syncing {len(block_hashes_to_sync)} blocks with other nodes")
# Syncing the blocks could cause nodes to crash, so the test begins here.
self.sync_node3blocks(block_hashes_to_sync)
@@ -236,33 +235,34 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# each time through the loop, generate a bunch of transactions,
# and then either mine a single new block on the tip, or some-sized reorg.
for i in range(40):
- self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
+ self.log.info(f"Iteration {i}, generating 2500 transactions {self.restart_counts}")
# Generate a bunch of small-ish transactions
self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
# Pick a random block between current tip, and starting tip
current_height = self.nodes[3].getblockcount()
random_height = random.randint(starting_tip_height, current_height)
- self.log.debug("At height %d, considering height %d", current_height, random_height)
+ self.log.debug(f"At height {current_height}, considering height {random_height}")
if random_height > starting_tip_height:
# Randomly reorg from this point with some probability (1/4 for
# tip, 1/5 for tip-1, ...)
if random.random() < 1.0 / (current_height + 4 - random_height):
- self.log.debug("Invalidating block at height %d", random_height)
+ self.log.debug(f"Invalidating block at height {random_height}")
self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
# Now generate new blocks until we pass the old tip height
self.log.debug("Mining longer tip")
block_hashes = []
while current_height + 1 > self.nodes[3].getblockcount():
- block_hashes.extend(self.nodes[3].generatetoaddress(
+ block_hashes.extend(self.generatetoaddress(
+ self.nodes[3],
nblocks=min(10, current_height + 1 - self.nodes[3].getblockcount()),
# new address to avoid mining a block that has just been invalidated
address=self.nodes[3].getnewaddress(),
))
- self.log.debug("Syncing %d new blocks...", len(block_hashes))
+ self.log.debug(f"Syncing {len(block_hashes)} new blocks...")
self.sync_node3blocks(block_hashes)
utxo_list = self.nodes[3].listunspent()
- self.log.debug("Node3 utxo count: %d", len(utxo_list))
+ self.log.debug(f"Node3 utxo count: {len(utxo_list)}")
# Check that the utxo hashes agree with node3
# Useful side effect: each utxo cache gets flushed here, so that we
@@ -270,7 +270,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
self.verify_utxo_hash()
# Check the test coverage
- self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
+ self.log.info(f"Restarted nodes: {self.restart_counts}; crashes on restart: {self.crashed_on_restart}")
# If no nodes were restarted, we didn't test anything.
assert self.restart_counts != [0, 0, 0]
@@ -281,7 +281,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# Warn if any of the nodes escaped restart.
for i in range(3):
if self.restart_counts[i] == 0:
- self.log.warning("Node %d never crashed during utxo flush!", i)
+ self.log.warning(f"Node {i} never crashed during utxo flush!")
if __name__ == "__main__":
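
Besides the log-formatting conversion, feature_dbcrash.py drops the hex_str_to_bytes helper in favour of the bytes.fromhex builtin. A standalone sketch of that conversion, with a made-up scriptPubKey hex string:

# A made-up 25-byte P2PKH scriptPubKey in hex.
script_hex = "76a914" + "00" * 20 + "88ac"
script_bytes = bytes.fromhex(script_hex)
assert len(script_bytes) == 25
assert script_bytes.hex() == script_hex  # round-trips back to the same hex
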
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index eb027c554a..fd46385cd4 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -4,10 +4,11 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
-Test that the DERSIG soft-fork activates at (regtest) height 1251.
+Test the DERSIG soft-fork activation on regtest.
"""
from test_framework.blocktools import (
+ DERSIG_HEIGHT,
create_block,
create_coinbase,
)
@@ -23,8 +24,6 @@ from test_framework.wallet import (
MiniWalletMode,
)
-DERSIG_HEIGHT = 1251
-
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
@@ -90,8 +89,10 @@ class BIP66Test(BitcoinTestFramework):
block.rehash()
block.solve()
+ assert_equal(self.nodes[0].getblockcount(), DERSIG_HEIGHT - 2)
self.test_dersig_info(is_active=False) # Not active as of current tip and next block does not need to obey rules
peer.send_and_ping(msg_block(block))
+ assert_equal(self.nodes[0].getblockcount(), DERSIG_HEIGHT - 1)
self.test_dersig_info(is_active=True) # Not active as of current tip, but next block must obey rules
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
@@ -103,7 +104,7 @@ class BIP66Test(BitcoinTestFramework):
block.rehash()
block.solve()
- with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]):
+ with self.nodes[0].assert_debug_log(expected_msgs=[f'{block.hash}, bad-version(0x00000002)']):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
@@ -133,7 +134,7 @@ class BIP66Test(BitcoinTestFramework):
block.rehash()
block.solve()
- with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'.format(block.vtx[-1].hash)]):
+ with self.nodes[0].assert_debug_log(expected_msgs=[f'CheckInputScripts on {block.vtx[-1].hash} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)']):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 8ccdf87ff3..a8f3e12e07 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -18,10 +18,10 @@ from test_framework.script import (
OP_1,
OP_2,
OP_DROP,
- OP_EQUAL,
- OP_HASH160,
OP_TRUE,
- hash160,
+)
+from test_framework.script_util import (
+ script_to_p2sh_script,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -37,8 +37,8 @@ from test_framework.util import (
# time signing.
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
-P2SH_1 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_1), OP_EQUAL])
-P2SH_2 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_2), OP_EQUAL])
+P2SH_1 = script_to_p2sh_script(REDEEM_SCRIPT_1)
+P2SH_2 = script_to_p2sh_script(REDEEM_SCRIPT_2)
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
@@ -72,7 +72,7 @@ def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
- raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
+ raise RuntimeError(f"Insufficient funds: need {amount + fee}, have {total_in}")
tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
@@ -124,8 +124,7 @@ def check_raw_estimates(node, fees_seen):
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
- raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
- % (feerate, min(fees_seen), max(fees_seen)))
+ raise AssertionError(f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})")
def check_smart_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
@@ -138,11 +137,9 @@ def check_smart_estimates(node, fees_seen):
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
- raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
- % (feerate, min(fees_seen), max(fees_seen)))
+ raise AssertionError(f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})")
if feerate - delta > last_feerate:
- raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
- % (feerate, last_feerate))
+ raise AssertionError(f"Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms")
last_feerate = feerate
if i == 0:
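
The fee-estimation test now builds its P2SH outputs with script_to_p2sh_script from the framework's script_util instead of spelling out OP_HASH160/OP_EQUAL by hand. A standard-library-only sketch of what such a P2SH script contains (the helper itself lives in the framework, not here):

import hashlib

def hash160(data: bytes) -> bytes:
    # RIPEMD160(SHA256(data)); 'ripemd160' availability depends on the local
    # OpenSSL build, so this may raise ValueError on some systems.
    return hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest()

redeem_script = bytes([0x51, 0x75])  # OP_1 OP_DROP, like REDEEM_SCRIPT_1 above
p2sh_script = bytes([0xa9, 0x14]) + hash160(redeem_script) + bytes([0x87])
# layout: OP_HASH160, push-20, <20-byte hash>, OP_EQUAL
assert len(p2sh_script) == 23
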
diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py
index 2798d11b0a..e09107802b 100755
--- a/test/functional/feature_filelock.py
+++ b/test/functional/feature_filelock.py
@@ -22,11 +22,11 @@ class FilelockTest(BitcoinTestFramework):
def run_test(self):
datadir = os.path.join(self.nodes[0].datadir, self.chain)
- self.log.info("Using datadir {}".format(datadir))
+ self.log.info(f"Using datadir {datadir}")
self.log.info("Check that we can't start a second bitcoind instance using the same datadir")
- expected_msg = "Error: Cannot obtain a lock on data directory {0}. {1} is probably already running.".format(datadir, self.config['environment']['PACKAGE_NAME'])
- self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
+ expected_msg = f"Error: Cannot obtain a lock on data directory {datadir}. {self.config['environment']['PACKAGE_NAME']} is probably already running."
+ self.nodes[1].assert_start_raises_init_error(extra_args=[f'-datadir={self.nodes[0].datadir}', '-noserver'], expected_msg=expected_msg)
if self.is_wallet_compiled():
def check_wallet_filelock(descriptors):
@@ -38,7 +38,7 @@ class FilelockTest(BitcoinTestFramework):
expected_msg = "Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
else:
expected_msg = "Error: Error initializing wallet database environment"
- self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
+ self.nodes[1].assert_start_raises_init_error(extra_args=[f'-walletdir={wallet_dir}', f'-wallet={wallet_name}', '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
if self.is_bdb_compiled():
check_wallet_filelock(False)
diff --git a/test/functional/feature_help.py b/test/functional/feature_help.py
index babe9bfc80..837e95c128 100755
--- a/test/functional/feature_help.py
+++ b/test/functional/feature_help.py
@@ -40,14 +40,14 @@ class HelpTest(BitcoinTestFramework):
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
- self.log.info("Help text received: {} (...)".format(output[0:60]))
+ self.log.info(f"Help text received: {output[0:60]} (...)")
self.log.info("Start bitcoin with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
- self.log.info("Version text received: {} (...)".format(output[0:60]))
+ self.log.info(f"Version text received: {output[0:60]} (...)")
# Test that arguments not in the help results in an error
self.log.info("Start bitcoind with -fakearg to make sure it does not start")
@@ -55,7 +55,7 @@ class HelpTest(BitcoinTestFramework):
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
- self.log.info("Error message received: {} (...)".format(output[0:60]))
+ self.log.info(f"Error message received: {output[0:60]} (...)")
if __name__ == '__main__':
diff --git a/test/functional/feature_loadblock.py b/test/functional/feature_loadblock.py
index 14f64d63a2..e5db6de085 100755
--- a/test/functional/feature_loadblock.py
+++ b/test/functional/feature_loadblock.py
@@ -45,17 +45,17 @@ class LoadblockTest(BitcoinTestFramework):
self.log.info("Create linearization config file")
with open(cfg_file, "a", encoding="utf-8") as cfg:
- cfg.write("datadir={}\n".format(data_dir))
- cfg.write("rpcuser={}\n".format(node_url.username))
- cfg.write("rpcpassword={}\n".format(node_url.password))
- cfg.write("port={}\n".format(node_url.port))
- cfg.write("host={}\n".format(node_url.hostname))
- cfg.write("output_file={}\n".format(bootstrap_file))
- cfg.write("max_height=100\n")
- cfg.write("netmagic=fabfb5da\n")
- cfg.write("input={}\n".format(blocks_dir))
- cfg.write("genesis={}\n".format(genesis_block))
- cfg.write("hashlist={}\n".format(hash_list.name))
+ cfg.write(f"datadir={data_dir}\n")
+ cfg.write(f"rpcuser={node_url.username}\n")
+ cfg.write(f"rpcpassword={node_url.password}\n")
+ cfg.write(f"port={node_url.port}\n")
+ cfg.write(f"host={node_url.hostname}\n")
+ cfg.write(f"output_file={bootstrap_file}\n")
+ cfg.write(f"max_height=100\n")
+ cfg.write(f"netmagic=fabfb5da\n")
+ cfg.write(f"input={blocks_dir}\n")
+ cfg.write(f"genesis={genesis_block}\n")
+ cfg.write(f"hashlist={hash_list.name}\n")
base_dir = self.config["environment"]["SRCDIR"]
linearize_dir = os.path.join(base_dir, "contrib", "linearize")
@@ -72,7 +72,7 @@ class LoadblockTest(BitcoinTestFramework):
check=True)
self.log.info("Restart second, unsynced node with bootstrap file")
- self.restart_node(1, extra_args=["-loadblock=" + bootstrap_file])
+ self.restart_node(1, extra_args=[f"-loadblock={bootstrap_file}"])
assert_equal(self.nodes[1].getblockcount(), 100) # start_node is blocking on all block files being imported
assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100)
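
The linearize config writes above are a straight format-to-f-string conversion. As a side note, the same block could also be expressed as a loop over a settings mapping; the values below are placeholders, not the ones the test derives from the node:

# Placeholder settings; the test derives the real values from the node's RPC URL and datadir.
settings = {
    "datadir": "/tmp/node0",
    "rpcuser": "user",
    "rpcpassword": "pass",
    "max_height": 100,
    "netmagic": "fabfb5da",
}
with open("/tmp/linearize.cfg", "w", encoding="utf-8") as cfg:
    for key, value in settings.items():
        cfg.write(f"{key}={value}\n")
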
diff --git a/test/functional/feature_logging.py b/test/functional/feature_logging.py
index afcbcf099a..722219518a 100755
--- a/test/functional/feature_logging.py
+++ b/test/functional/feature_logging.py
@@ -29,7 +29,7 @@ class LoggingTest(BitcoinTestFramework):
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
- self.restart_node(0, ["-debuglogfile=%s" % tempname])
+ self.restart_node(0, [f"-debuglogfile={tempname}"])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
@@ -37,26 +37,26 @@ class LoggingTest(BitcoinTestFramework):
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
exp_stderr = r"Error: Could not open debug log file \S+$"
- self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % (invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX)
+ self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
- self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
+ self.start_node(0, [f"-debuglogfile={invalidname}"])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
- self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % invalidname], exp_stderr, match=ErrorMatch.FULL_REGEX)
+ self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
- self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
+ self.start_node(0, [f"-debuglogfile={invalidname}"])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that -nodebuglogfile disables logging
@@ -67,7 +67,7 @@ class LoggingTest(BitcoinTestFramework):
assert not os.path.isfile(default_log_path)
# just sanity check no crash here
- self.restart_node(0, ["-debuglogfile=%s" % os.devnull])
+ self.restart_node(0, [f"-debuglogfile={os.devnull}"])
if __name__ == '__main__':
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index d0a94658ff..bd615997cb 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -67,7 +67,7 @@ class MaxUploadTest(BitcoinTestFramework):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
- mine_large_block(self.nodes[0], self.utxo_cache)
+ mine_large_block(self, self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
@@ -78,7 +78,7 @@ class MaxUploadTest(BitcoinTestFramework):
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
- mine_large_block(self.nodes[0], self.utxo_cache)
+ mine_large_block(self, self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py
index 803feb509a..3950690cf0 100755
--- a/test/functional/feature_minchainwork.py
+++ b/test/functional/feature_minchainwork.py
@@ -45,16 +45,16 @@ class MinimumChainWorkTest(BitcoinTestFramework):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
- self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
+ self.log.info(f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})")
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
- self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
+ self.log.info(f"Generating {num_blocks_to_generate} blocks on node0")
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
- self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
+ self.log.info(f"Node0 current chain work: {self.nodes[0].getblockheader(hashes[-1])['chainwork']}")
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
@@ -63,7 +63,7 @@ class MinimumChainWorkTest(BitcoinTestFramework):
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
- self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+ self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
@@ -84,7 +84,7 @@ class MinimumChainWorkTest(BitcoinTestFramework):
# continue the test.
self.sync_all()
- self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
+ self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
if __name__ == '__main__':
MinimumChainWorkTest().main()
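
The minchainwork hunks convert logger calls from lazy %-style arguments to f-strings. Both produce the same output; the only behavioural difference is that an f-string is built before the call even when the log level is disabled, which is usually a non-issue for tests. A small self-contained comparison:

import logging

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger("example")

blockcounts = [201, 201, 1]
log.info("Blockcounts: %s", blockcounts)   # old style: logging formats lazily
log.info(f"Blockcounts: {blockcounts}")    # new style: string is built up front
# Both lines print "Blockcounts: [201, 201, 1]".
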
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index 6fc8773ee3..3f8fe9ccd5 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -20,7 +20,7 @@ FILE_CHARS_DISALLOWED = '/\\?%*:|"<>' if os.name == 'nt' else '/'
UNCONFIRMED_HASH_STRING = 'unconfirmed'
def notify_outputname(walletname, txid):
- return txid if os.name == 'nt' else '{}_{}'.format(walletname, txid)
+ return txid if os.name == 'nt' else f'{walletname}_{txid}'
class NotificationsTest(BitcoinTestFramework):
@@ -39,11 +39,11 @@ class NotificationsTest(BitcoinTestFramework):
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [[
- "-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')),
- "-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s')),
+ f"-alertnotify=echo > {os.path.join(self.alertnotify_dir, '%s')}",
+ f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}",
], [
"-rescan",
- "-walletnotify=echo %h_%b > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))),
+ f"-walletnotify=echo %h_%b > {os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))}",
]]
self.wallet_names = [self.default_wallet_name, self.wallet]
super().setup_network()
@@ -54,12 +54,12 @@ class NotificationsTest(BitcoinTestFramework):
seed = "cTdGmKFWpbvpKQ7ejrdzqYT2hhjyb3GPHnLAK7wdi5Em67YLwSm9"
xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
desc_imports = [{
- "desc": descsum_create("wpkh(" + xpriv + "/0/*)"),
+ "desc": descsum_create(f"wpkh({xpriv}/0/*)"),
"timestamp": 0,
"active": True,
"keypool": True,
},{
- "desc": descsum_create("wpkh(" + xpriv + "/1/*)"),
+ "desc": descsum_create(f"wpkh({xpriv}/1/*)"),
"timestamp": 0,
"active": True,
"keypool": True,
diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py
index f467626801..678e103e5f 100755
--- a/test/functional/feature_nulldummy.py
+++ b/test/functional/feature_nulldummy.py
@@ -24,7 +24,10 @@ from test_framework.blocktools import (
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_raises_rpc_error
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+)
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
@@ -51,6 +54,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
self.extra_args = [[
f'-segwitheight={COINBASE_MATURITY + 5}',
'-addresstype=legacy',
+ '-par=1', # Use only one script thread to get the exact reject reason for testing
]]
def skip_test_if_missing_module(self):
@@ -86,7 +90,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
- self.block_submit(self.nodes[0], test1txs, False, True)
+ self.block_submit(self.nodes[0], test1txs, accept=True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
@@ -94,28 +98,28 @@ class NULLDUMMYTest(BitcoinTestFramework):
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
- self.block_submit(self.nodes[0], [test2tx], False, True)
+ self.block_submit(self.nodes[0], [test2tx], accept=True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
test6txs = [CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
- self.block_submit(self.nodes[0], [test4tx])
+ self.block_submit(self.nodes[0], [test4tx], accept=False)
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
- self.block_submit(self.nodes[0], [test5tx], True)
+ self.block_submit(self.nodes[0], [test5tx], with_witness=True, accept=False)
self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
for i in test6txs:
self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
- self.block_submit(self.nodes[0], test6txs, True, True)
+ self.block_submit(self.nodes[0], test6txs, with_witness=True, accept=True)
- def block_submit(self, node, txs, witness=False, accept=False):
+ def block_submit(self, node, txs, *, with_witness=False, accept):
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
assert_equal(tmpl['previousblockhash'], self.lastblockhash)
assert_equal(tmpl['height'], self.lastblockheight + 1)
@@ -124,11 +128,12 @@ class NULLDUMMYTest(BitcoinTestFramework):
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
- witness and add_witness_commitment(block)
+ if with_witness:
+ add_witness_commitment(block)
block.rehash()
block.solve()
- assert_equal(None if accept else 'block-validation-failed', node.submitblock(block.serialize().hex()))
- if (accept):
+ assert_equal(None if accept else NULLDUMMY_ERROR, node.submitblock(block.serialize().hex()))
+ if accept:
assert_equal(node.getbestblockhash(), block.hash)
self.lastblockhash = block.hash
self.lastblocktime += 1
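
block_submit now takes with_witness and accept as keyword-only parameters (the bare * in the signature), and accept has no default, so every call site has to spell out the expected result. A minimal illustration of that signature style, with a stub body:

def block_submit_stub(node, txs, *, with_witness=False, accept):
    # Parameters after '*' are keyword-only, and 'accept' has no default,
    # so callers must state the expected outcome explicitly.
    return with_witness, accept

assert block_submit_stub(None, [], accept=True) == (False, True)
try:
    block_submit_stub(None, [], True, True)   # positional use is now a TypeError
except TypeError:
    pass
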
diff --git a/test/functional/feature_presegwit_node_upgrade.py b/test/functional/feature_presegwit_node_upgrade.py
new file mode 100755
index 0000000000..0428588da3
--- /dev/null
+++ b/test/functional/feature_presegwit_node_upgrade.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017-2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test a pre-segwit node upgrading to segwit consensus"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ softfork_active,
+)
+
+class SegwitUpgradeTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.extra_args = [["-segwitheight=10"]]
+
+ def run_test(self):
+ """A pre-segwit node with insufficiently validated blocks needs to redownload blocks"""
+
+ self.log.info("Testing upgrade behaviour for pre-segwit node to segwit rules")
+ node = self.nodes[0]
+
+ # Node hasn't been used or connected yet
+ assert_equal(node.getblockcount(), 0)
+
+ assert not softfork_active(node, "segwit")
+
+ # Generate 8 blocks without witness data
+ node.generate(8)
+ assert_equal(node.getblockcount(), 8)
+
+ self.stop_node(0)
+ # Restarting the node (with segwit activation height set to 5) should result in a shutdown
+ # because the blockchain consists of 3 insufficiently validated blocks per segwit consensus rules.
+ node.assert_start_raises_init_error(
+ extra_args=["-segwitheight=5"],
+ expected_msg=": Witness data for blocks after height 5 requires validation. Please restart with -reindex..\nPlease restart with -reindex or -reindex-chainstate to recover.")
+
+ # As directed, the user restarts the node with -reindex
+ self.start_node(0, extra_args=["-reindex", "-segwitheight=5"])
+
+ # With the segwit consensus rules, the node is able to validate only up to block 4
+ assert_equal(node.getblockcount(), 4)
+
+ # The upgraded node should now have segwit activated
+ assert softfork_active(node, "segwit")
+
+
+if __name__ == '__main__':
+ SegwitUpgradeTest().main()
diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py
index 162814815e..2fb5e328f5 100755
--- a/test/functional/feature_proxy.py
+++ b/test/functional/feature_proxy.py
@@ -97,14 +97,14 @@ class ProxyTest(BitcoinTestFramework):
# Note: proxies are not used to connect to local nodes. This is because the proxy to
# use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost.
args = [
- ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
- ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),
- '-i2psam=%s:%i' % (self.i2p_sam), '-i2pacceptincoming=0', '-proxyrandomize=0'],
- ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
+ ['-listen', f'-proxy={self.conf1.addr[0]}:{self.conf1.addr[1]}','-proxyrandomize=1'],
+ ['-listen', f'-proxy={self.conf1.addr[0]}:{self.conf1.addr[1]}',f'-onion={self.conf2.addr[0]}:{self.conf2.addr[1]}',
+ f'-i2psam={self.i2p_sam[0]}:{self.i2p_sam[1]}', '-i2pacceptincoming=0', '-proxyrandomize=0'],
+ ['-listen', f'-proxy={self.conf2.addr[0]}:{self.conf2.addr[1]}','-proxyrandomize=1'],
[]
]
if self.have_ipv6:
- args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
+ args[3] = ['-listen', f'-proxy=[{self.conf3.addr[0]}]:{self.conf3.addr[1]}','-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
@@ -116,7 +116,7 @@ class ProxyTest(BitcoinTestFramework):
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
addr = "15.61.23.23:1234"
- self.log.debug("Test: outgoing IPv4 connection through node for address {}".format(addr))
+ self.log.debug(f"Test: outgoing IPv4 connection through node for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[0].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -132,7 +132,7 @@ class ProxyTest(BitcoinTestFramework):
if self.have_ipv6:
addr = "[1233:3432:2434:2343:3234:2345:6546:4534]:5443"
- self.log.debug("Test: outgoing IPv6 connection through node for address {}".format(addr))
+ self.log.debug(f"Test: outgoing IPv6 connection through node for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[1].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -148,7 +148,7 @@ class ProxyTest(BitcoinTestFramework):
if test_onion:
addr = "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:8333"
- self.log.debug("Test: outgoing onion connection through node for address {}".format(addr))
+ self.log.debug(f"Test: outgoing onion connection through node for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[2].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -162,7 +162,7 @@ class ProxyTest(BitcoinTestFramework):
self.network_test(node, addr, network=NET_ONION)
addr = "node.noumenon:8333"
- self.log.debug("Test: outgoing DNS name connection through node for address {}".format(addr))
+ self.log.debug(f"Test: outgoing DNS name connection through node for address {addr}")
node.addnode(addr, "onetry")
cmd = proxies[3].queue.get()
assert isinstance(cmd, Socks5Command)
@@ -218,12 +218,12 @@ class ProxyTest(BitcoinTestFramework):
n1 = networks_dict(self.nodes[1].getnetworkinfo())
assert_equal(NETWORKS, n1.keys())
for net in ['ipv4', 'ipv6']:
- assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
+ assert_equal(n1[net]['proxy'], f'{self.conf1.addr[0]}:{self.conf1.addr[1]}')
assert_equal(n1[net]['proxy_randomize_credentials'], False)
- assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
+ assert_equal(n1['onion']['proxy'], f'{self.conf2.addr[0]}:{self.conf2.addr[1]}')
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
- assert_equal(n1['i2p']['proxy'], '%s:%i' % (self.i2p_sam))
+ assert_equal(n1['i2p']['proxy'], f'{self.i2p_sam[0]}:{self.i2p_sam[1]}')
assert_equal(n1['i2p']['proxy_randomize_credentials'], False)
assert_equal(n1['i2p']['reachable'], True)
@@ -234,7 +234,7 @@ class ProxyTest(BitcoinTestFramework):
expected_proxy = ''
expected_randomize = False
else:
- expected_proxy = '%s:%i' % (self.conf2.addr)
+ expected_proxy = f'{self.conf2.addr[0]}:{self.conf2.addr[1]}'
expected_randomize = True
assert_equal(n2[net]['proxy'], expected_proxy)
assert_equal(n2[net]['proxy_randomize_credentials'], expected_randomize)
@@ -248,7 +248,7 @@ class ProxyTest(BitcoinTestFramework):
if net == NET_I2P:
expected_proxy = ''
else:
- expected_proxy = '[%s]:%i' % (self.conf3.addr)
+ expected_proxy = f'[{self.conf3.addr[0]}]:{self.conf3.addr[1]}'
assert_equal(n3[net]['proxy'], expected_proxy)
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
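
The proxy arguments previously relied on %-formatting with a (host, port) tuple; the f-string form indexes the tuple explicitly. A standalone check that the two spellings agree, using a hypothetical SOCKS address:

addr = ("127.0.0.1", 9050)            # hypothetical (host, port), like self.conf1.addr
old = '-proxy=%s:%i' % addr           # the 2-tuple feeds both placeholders
new = f'-proxy={addr[0]}:{addr[1]}'   # the f-string indexes the tuple explicitly
assert old == new == '-proxy=127.0.0.1:9050'
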
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index cedb7b57ca..2f0868e733 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -127,10 +127,28 @@ class PruneTest(BitcoinTestFramework):
self.sync_blocks(self.nodes[0:5])
+ def test_invalid_command_line_options(self):
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg='Error: Prune cannot be configured with a negative value.',
+ extra_args=['-prune=-1'],
+ )
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg='Error: Prune configured below the minimum of 550 MiB. Please use a higher number.',
+ extra_args=['-prune=549'],
+ )
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg='Error: Prune mode is incompatible with -txindex.',
+ extra_args=['-prune=550', '-txindex'],
+ )
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg='Error: Prune mode is incompatible with -coinstatsindex.',
+ extra_args=['-prune=550', '-coinstatsindex'],
+ )
+
def test_height_min(self):
assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
self.log.info("Success")
- self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
+ self.log.info(f"Though we're already using more than 550MiB, current usage: {calc_usage(self.prunedir)}")
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
mine_large_blocks(self.nodes[0], 25)
@@ -140,7 +158,7 @@ class PruneTest(BitcoinTestFramework):
self.log.info("Success")
usage = calc_usage(self.prunedir)
- self.log.info("Usage should be below target: %d" % usage)
+ self.log.info(f"Usage should be below target: {usage}")
assert_greater_than(550, usage)
def create_chain_with_staleblocks(self):
@@ -163,18 +181,18 @@ class PruneTest(BitcoinTestFramework):
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
- self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
+ self.log.info(f"Usage can be over target because of high stale rate: {calc_usage(self.prunedir)}")
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
height = self.nodes[1].getblockcount()
- self.log.info("Current block height: %d" % height)
+ self.log.info(f"Current block height: {height}")
self.forkheight = height - 287
self.forkhash = self.nodes[1].getblockhash(self.forkheight)
- self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
+ self.log.info(f"Invalidating block {self.forkhash} at height {self.forkheight}")
self.nodes[1].invalidateblock(self.forkhash)
# We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
@@ -186,7 +204,7 @@ class PruneTest(BitcoinTestFramework):
curhash = self.nodes[1].getblockhash(self.forkheight - 1)
assert self.nodes[1].getblockcount() == self.forkheight - 1
- self.log.info("New best height: %d" % self.nodes[1].getblockcount())
+ self.log.info(f"New best height: {self.nodes[1].getblockcount()}")
# Disconnect node1 and generate the new chain
self.disconnect_nodes(0, 1)
@@ -200,8 +218,8 @@ class PruneTest(BitcoinTestFramework):
self.connect_nodes(1, 2)
self.sync_blocks(self.nodes[0:3], timeout=120)
- self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
- self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
+ self.log.info(f"Verify height on node 2: {self.nodes[2].getblockcount()}")
+ self.log.info(f"Usage possibly still high because of stale blocks in block files: {calc_usage(self.prunedir)}")
self.log.info("Mine 220 more large blocks so we have requisite history")
@@ -209,7 +227,7 @@ class PruneTest(BitcoinTestFramework):
self.sync_blocks(self.nodes[0:3], timeout=120)
usage = calc_usage(self.prunedir)
- self.log.info("Usage should be below target: %d" % usage)
+ self.log.info(f"Usage should be below target: {usage}")
assert_greater_than(550, usage)
def reorg_back(self):
@@ -217,7 +235,7 @@ class PruneTest(BitcoinTestFramework):
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
self.nodes[2].verifychain(checklevel=4, nblocks=0)
- self.log.info("Will need to redownload block %d" % self.forkheight)
+ self.log.info(f"Will need to redownload block {self.forkheight}")
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
@@ -241,7 +259,7 @@ class PruneTest(BitcoinTestFramework):
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
- self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
+ self.log.info(f"Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {blocks_to_mine}")
self.nodes[0].invalidateblock(curchainhash)
assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
@@ -278,7 +296,7 @@ class PruneTest(BitcoinTestFramework):
assert_equal(ret, node.getblockchaininfo()['pruneheight'])
def has_block(index):
- return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index)))
+ return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", f"blk{index:05}.dat"))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
@@ -453,6 +471,9 @@ class PruneTest(BitcoinTestFramework):
self.log.info("Test wallet re-scan")
self.wallet_test()
+ self.log.info("Test invalid pruning command line options")
+ self.test_invalid_command_line_options()
+
self.log.info("Done")
if __name__ == '__main__':
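
The new test_invalid_command_line_options exercises bitcoind's startup validation of -prune; the remaining pruning hunks only touch log formatting. calc_usage, which this test uses to report block-directory disk usage, is not shown in this diff; a generic sketch of that kind of measurement (not the test's actual implementation):

import os

def dir_usage_mib(path):
    # Generic sketch only: total size of the regular files directly
    # inside 'path', in MiB.
    total = sum(
        entry.stat().st_size
        for entry in os.scandir(path)
        if entry.is_file()
    )
    return total / (1024 * 1024)
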
diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py
index ed944274e3..cb7556feb4 100755
--- a/test/functional/feature_rbf.py
+++ b/test/functional/feature_rbf.py
@@ -4,6 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
+from copy import deepcopy
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
@@ -22,49 +23,6 @@ from test_framework.script_util import DUMMY_P2WPKH_SCRIPT, DUMMY_2_P2WPKH_SCRIP
from test_framework.wallet import MiniWallet
MAX_REPLACEMENT_LIMIT = 100
-
-
-def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT):
- """Create a txout with a given amount and scriptPubKey
-
- Mines coins as needed.
-
- confirmed - txouts created will be confirmed in the blockchain;
- unconfirmed otherwise.
- """
- fee = 1 * COIN
- while node.getbalance() < satoshi_round((amount + fee) / COIN):
- node.generate(COINBASE_MATURITY)
-
- new_addr = node.getnewaddress()
- txid = node.sendtoaddress(new_addr, satoshi_round((amount + fee) / COIN))
- tx1 = node.getrawtransaction(txid, 1)
- txid = int(txid, 16)
- i, _ = next(filter(lambda vout: new_addr == vout[1]['scriptPubKey']['address'], enumerate(tx1['vout'])))
-
- tx2 = CTransaction()
- tx2.vin = [CTxIn(COutPoint(txid, i))]
- tx2.vout = [CTxOut(amount, scriptPubKey)]
- tx2.rehash()
-
- signed_tx = node.signrawtransactionwithwallet(tx2.serialize().hex())
-
- txid = node.sendrawtransaction(signed_tx['hex'], 0)
-
- # If requested, ensure txouts are confirmed.
- if confirmed:
- mempool_size = len(node.getrawmempool())
- while mempool_size > 0:
- node.generate(1)
- new_size = len(node.getrawmempool())
- # Error out if we have something stuck in the mempool, as this
- # would likely be a bug.
- assert new_size < mempool_size
- mempool_size = new_size
-
- return COutPoint(int(txid, 16), 0)
-
-
class ReplaceByFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
@@ -84,10 +42,11 @@ class ReplaceByFeeTest(BitcoinTestFramework):
self.skip_if_no_wallet()
def run_test(self):
- make_utxo(self.nodes[0], 1 * COIN)
-
- # Ensure nodes are synced
- self.sync_all()
+ self.wallet = MiniWallet(self.nodes[0])
+ # the pre-mined test framework chain contains coinbase outputs to the
+ # MiniWallet's default address ADDRESS_BCRT1_P2WSH_OP_TRUE in blocks
+ # 76-100 (see method BitcoinTestFramework._initialize_chain())
+ self.wallet.scan_blocks(start=76, num=2)
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
@@ -127,26 +86,59 @@ class ReplaceByFeeTest(BitcoinTestFramework):
self.log.info("Passed")
+ def make_utxo(self, node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT):
+ """Create a txout with a given amount and scriptPubKey
+
+ Mines coins as needed.
+
+ confirmed - txouts created will be confirmed in the blockchain;
+ unconfirmed otherwise.
+ """
+ fee = 1 * COIN
+ while node.getbalance() < satoshi_round((amount + fee) / COIN):
+ self.generate(node, COINBASE_MATURITY)
+
+ new_addr = node.getnewaddress()
+ txid = node.sendtoaddress(new_addr, satoshi_round((amount + fee) / COIN))
+ tx1 = node.getrawtransaction(txid, 1)
+ txid = int(txid, 16)
+ i, _ = next(filter(lambda vout: new_addr == vout[1]['scriptPubKey']['address'], enumerate(tx1['vout'])))
+
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(COutPoint(txid, i))]
+ tx2.vout = [CTxOut(amount, scriptPubKey)]
+ tx2.rehash()
+
+ signed_tx = node.signrawtransactionwithwallet(tx2.serialize().hex())
+
+ txid = node.sendrawtransaction(signed_tx['hex'], 0)
+
+ # If requested, ensure txouts are confirmed.
+ if confirmed:
+ mempool_size = len(node.getrawmempool())
+ while mempool_size > 0:
+ self.generate(node, 1)
+ new_size = len(node.getrawmempool())
+ # Error out if we have something stuck in the mempool, as this
+ # would likely be a bug.
+ assert new_size < mempool_size
+ mempool_size = new_size
+
+ return COutPoint(int(txid, 16), 0)
+
def test_simple_doublespend(self):
"""Simple doublespend"""
- tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
+ # we use MiniWallet to create a transaction template with inputs correctly set,
+ # and modify the output (amount, scriptPubKey) according to our needs
+ tx_template = self.wallet.create_self_transfer(from_node=self.nodes[0])['tx']
- # make_utxo may have generated a bunch of blocks, so we need to sync
- # before we can spend the coins generated, or else the resulting
- # transactions might not be accepted by our peers.
- self.sync_all()
-
- tx1a = CTransaction()
- tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1a = deepcopy(tx_template)
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = tx1a.serialize().hex()
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
- self.sync_all()
-
# Should fail because we haven't changed the fee
- tx1b = CTransaction()
- tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b = deepcopy(tx_template)
tx1b.vout = [CTxOut(1 * COIN, DUMMY_2_P2WPKH_SCRIPT)]
tx1b_hex = tx1b.serialize().hex()
@@ -154,9 +146,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Extra 0.1 BTC fee
- tx1b = CTransaction()
- tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
- tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
+ tx1b.vout[0].nValue -= int(0.1 * COIN)
tx1b_hex = tx1b.serialize().hex()
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
@@ -172,7 +162,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
"""Doublespend of a long chain"""
initial_nValue = 50 * COIN
- tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+ tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
@@ -212,7 +202,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50 * COIN
- tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+ tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001 * COIN, _total_txs=None):
if _total_txs is None:
@@ -275,7 +265,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2):
fee = int(0.0001 * COIN)
- tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+ tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
@@ -292,7 +282,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
- tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
+ tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
@@ -312,8 +302,8 @@ class ReplaceByFeeTest(BitcoinTestFramework):
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
- utxo1 = make_utxo(self.nodes[0], int(1.2 * COIN))
- utxo2 = make_utxo(self.nodes[0], 3 * COIN)
+ utxo1 = self.make_utxo(self.nodes[0], int(1.2 * COIN))
+ utxo2 = self.make_utxo(self.nodes[0], 3 * COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
@@ -352,8 +342,8 @@ class ReplaceByFeeTest(BitcoinTestFramework):
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
- confirmed_utxo = make_utxo(self.nodes[0], int(1.1 * COIN))
- unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1 * COIN), False)
+ confirmed_utxo = self.make_utxo(self.nodes[0], int(1.1 * COIN))
+ unconfirmed_utxo = self.make_utxo(self.nodes[0], int(0.1 * COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
@@ -376,7 +366,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# Start by creating a single transaction with many outputs
initial_nValue = 10 * COIN
- utxo = make_utxo(self.nodes[0], initial_nValue)
+ utxo = self.make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001 * COIN)
split_value = int((initial_nValue - fee) / (MAX_REPLACEMENT_LIMIT + 1))
@@ -424,7 +414,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
- tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
+ tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
@@ -445,7 +435,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
- tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
+ tx1_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
@@ -501,7 +491,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
- tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
+ tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
@@ -527,7 +517,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
assert tx1b_txid in self.nodes[0].getrawmempool()
# 2. Check that absolute fee checks use modified fee.
- tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
+ tx1_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
@@ -574,12 +564,10 @@ class ReplaceByFeeTest(BitcoinTestFramework):
assert_equal(json1["vin"][0]["sequence"], 4294967294)
def test_no_inherited_signaling(self):
- wallet = MiniWallet(self.nodes[0])
- wallet.scan_blocks(start=76, num=1)
- confirmed_utxo = wallet.get_utxo()
+ confirmed_utxo = self.wallet.get_utxo()
# Create an explicitly opt-in parent transaction
- optin_parent_tx = wallet.send_self_transfer(
+ optin_parent_tx = self.wallet.send_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=confirmed_utxo,
sequence=BIP125_SEQUENCE_NUMBER,
@@ -587,7 +575,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
)
assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable'])
- replacement_parent_tx = wallet.create_self_transfer(
+ replacement_parent_tx = self.wallet.create_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=confirmed_utxo,
sequence=BIP125_SEQUENCE_NUMBER,
@@ -601,8 +589,8 @@ class ReplaceByFeeTest(BitcoinTestFramework):
assert_equal(res['allowed'], True)
# Create an opt-out child tx spending the opt-in parent
- parent_utxo = wallet.get_utxo(txid=optin_parent_tx['txid'])
- optout_child_tx = wallet.send_self_transfer(
+ parent_utxo = self.wallet.get_utxo(txid=optin_parent_tx['txid'])
+ optout_child_tx = self.wallet.send_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=parent_utxo,
sequence=0xffffffff,
@@ -612,7 +600,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# Reports true due to inheritance
assert_equal(True, self.nodes[0].getmempoolentry(optout_child_tx['txid'])['bip125-replaceable'])
- replacement_child_tx = wallet.create_self_transfer(
+ replacement_child_tx = self.wallet.create_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=parent_utxo,
sequence=0xffffffff,
@@ -630,10 +618,19 @@ class ReplaceByFeeTest(BitcoinTestFramework):
assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable'])
assert_raises_rpc_error(-26, 'txn-mempool-conflict', self.nodes[0].sendrawtransaction, replacement_child_tx["hex"], 0)
+ self.log.info('Check that the child tx can still be replaced (via a tx that also replaces the parent)')
+ replacement_parent_tx = self.wallet.send_self_transfer(
+ from_node=self.nodes[0],
+ utxo_to_spend=confirmed_utxo,
+ sequence=0xffffffff,
+ fee_rate=Decimal('0.03'),
+ )
+ # Check that child is removed and update wallet utxo state
+ assert_raises_rpc_error(-5, 'Transaction not in mempool', self.nodes[0].getmempoolentry, optout_child_tx['txid'])
+ self.wallet.get_utxo(txid=optout_child_tx['txid'])
+
def test_replacement_relay_fee(self):
- wallet = MiniWallet(self.nodes[0])
- wallet.scan_blocks(start=77, num=1)
- tx = wallet.send_self_transfer(from_node=self.nodes[0])['tx']
+ tx = self.wallet.send_self_transfer(from_node=self.nodes[0])['tx']
# Higher fee, higher feerate, different txid, but the replacement does not provide a relay
# fee conforming to node's `incrementalrelayfee` policy of 1000 sat per KB.
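
The RBF rewrite builds each conflicting transaction by deep-copying a single MiniWallet template and then editing only the outputs. The point of deepcopy is that nested objects are duplicated too, so the two candidates can diverge independently; a stand-in example with a plain dict instead of a CTransaction:

from copy import deepcopy

# Stand-in for the MiniWallet transaction template used above.
template = {"vin": [{"txid": "aa" * 32, "vout": 0}], "vout": [{"value": 100_000_000}]}

tx1a = deepcopy(template)
tx1b = deepcopy(template)
tx1b["vout"][0]["value"] -= 10_000_000   # pay an extra 0.1 BTC in fees

# deepcopy duplicates the nested lists/dicts, so the edits stay independent.
assert tx1a["vout"][0]["value"] == 100_000_000
assert tx1b["vout"][0]["value"] == 90_000_000
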
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 42910904d7..6f1cecce58 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -23,7 +23,6 @@ from test_framework.messages import (
CTransaction,
CTxIn,
CTxOut,
- sha256,
tx_from_hex,
)
from test_framework.script import (
@@ -34,19 +33,19 @@ from test_framework.script import (
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
- OP_DUP,
- OP_EQUAL,
- OP_EQUALVERIFY,
- OP_HASH160,
OP_TRUE,
- hash160,
+)
+from test_framework.script_util import (
+ key_to_p2pkh_script,
+ key_to_p2wpkh_script,
+ script_to_p2sh_script,
+ script_to_p2wsh_script,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_is_hex_string,
assert_raises_rpc_error,
- hex_str_to_bytes,
try_rpc,
)
@@ -66,7 +65,7 @@ def find_spendable_utxo(node, min_value):
if utxo['spendable']:
return utxo
- raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
+ raise AssertionError(f"Unspent output equal or higher than {min_value} not found")
txs_mined = {} # txindex from txid to blockhash
@@ -140,7 +139,7 @@ class SegWitTest(BitcoinTestFramework):
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
- multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
+ multiscript = CScript([OP_1, bytes.fromhex(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
@@ -260,8 +259,8 @@ class SegWitTest(BitcoinTestFramework):
assert_equal(int(self.nodes[0].getmempoolentry(txid1)["wtxid"], 16), tx1.calc_sha256(True))
# Check that weight and vsize are properly reported in mempool entry (txid1)
- assert_equal(self.nodes[0].getmempoolentry(txid1)["vsize"], (self.nodes[0].getmempoolentry(txid1)["weight"] + 3) // 4)
- assert_equal(self.nodes[0].getmempoolentry(txid1)["weight"], len(tx1.serialize_without_witness())*3 + len(tx1.serialize_with_witness()))
+ assert_equal(self.nodes[0].getmempoolentry(txid1)["vsize"], tx1.get_vsize())
+ assert_equal(self.nodes[0].getmempoolentry(txid1)["weight"], tx1.get_weight())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
@@ -276,8 +275,8 @@ class SegWitTest(BitcoinTestFramework):
assert_equal(int(self.nodes[0].getmempoolentry(txid2)["wtxid"], 16), tx.calc_sha256(True))
# Check that weight and vsize are properly reported in mempool entry (txid2)
- assert_equal(self.nodes[0].getmempoolentry(txid2)["vsize"], (self.nodes[0].getmempoolentry(txid2)["weight"] + 3) // 4)
- assert_equal(self.nodes[0].getmempoolentry(txid2)["weight"], len(tx.serialize_without_witness())*3 + len(tx.serialize_with_witness()))
+ assert_equal(self.nodes[0].getmempoolentry(txid2)["vsize"], tx.get_vsize())
+ assert_equal(self.nodes[0].getmempoolentry(txid2)["weight"], tx.get_weight())
# Now create tx3, which will spend from txid2
tx = CTransaction()
@@ -299,8 +298,8 @@ class SegWitTest(BitcoinTestFramework):
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Check that weight and vsize are properly reported in mempool entry (txid3)
- assert_equal(self.nodes[0].getmempoolentry(txid3)["vsize"], (self.nodes[0].getmempoolentry(txid3)["weight"] + 3) // 4)
- assert_equal(self.nodes[0].getmempoolentry(txid3)["weight"], len(tx.serialize_without_witness())*3 + len(tx.serialize_with_witness()))
+ assert_equal(self.nodes[0].getmempoolentry(txid3)["vsize"], tx.get_vsize())
+ assert_equal(self.nodes[0].getmempoolentry(txid3)["weight"], tx.get_weight())
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
@@ -352,8 +351,8 @@ class SegWitTest(BitcoinTestFramework):
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
- script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
- solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
+ script = CScript([OP_2, bytes.fromhex(pubkeys[3]), bytes.fromhex(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
+ solvable_after_importaddress.append(script_to_p2sh_script(script))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
@@ -426,11 +425,11 @@ class SegWitTest(BitcoinTestFramework):
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
- unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
- unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
- unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
- p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
- p2wshop1 = CScript([OP_0, sha256(op1)])
+ unsolvable_address_key = bytes.fromhex("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
+ unsolvablep2pkh = key_to_p2pkh_script(unsolvable_address_key)
+ unsolvablep2wshp2pkh = script_to_p2wsh_script(unsolvablep2pkh)
+ p2shop0 = script_to_p2sh_script(op0)
+ p2wshop1 = script_to_p2wsh_script(op1)
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
@@ -448,18 +447,18 @@ class SegWitTest(BitcoinTestFramework):
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
- bare = hex_str_to_bytes(v['hex'])
+ bare = bytes.fromhex(v['hex'])
importlist.append(bare.hex())
- importlist.append(CScript([OP_0, sha256(bare)]).hex())
+ importlist.append(script_to_p2wsh_script(bare).hex())
else:
- pubkey = hex_str_to_bytes(v['pubkey'])
+ pubkey = bytes.fromhex(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
- p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
+ p2pkh = key_to_p2pkh_script(pubkey)
importlist.append(p2pk.hex())
importlist.append(p2pkh.hex())
- importlist.append(CScript([OP_0, hash160(pubkey)]).hex())
- importlist.append(CScript([OP_0, sha256(p2pk)]).hex())
- importlist.append(CScript([OP_0, sha256(p2pkh)]).hex())
+ importlist.append(key_to_p2wpkh_script(pubkey).hex())
+ importlist.append(script_to_p2wsh_script(p2pk).hex())
+ importlist.append(script_to_p2wsh_script(p2pkh).hex())
importlist.append(unsolvablep2pkh.hex())
importlist.append(unsolvablep2wshp2pkh.hex())
@@ -612,24 +611,24 @@ class SegWitTest(BitcoinTestFramework):
return txid
def p2sh_address_to_script(self, v):
- bare = CScript(hex_str_to_bytes(v['hex']))
- p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
- p2wsh = CScript([OP_0, sha256(bare)])
- p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
+ bare = CScript(bytes.fromhex(v['hex']))
+ p2sh = CScript(bytes.fromhex(v['scriptPubKey']))
+ p2wsh = script_to_p2wsh_script(bare)
+ p2sh_p2wsh = script_to_p2sh_script(p2wsh)
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self, v):
- pubkey = hex_str_to_bytes(v['pubkey'])
- p2wpkh = CScript([OP_0, hash160(pubkey)])
- p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
+ pubkey = bytes.fromhex(v['pubkey'])
+ p2wpkh = key_to_p2wpkh_script(pubkey)
+ p2sh_p2wpkh = script_to_p2sh_script(p2wpkh)
p2pk = CScript([pubkey, OP_CHECKSIG])
- p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
- p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
- p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
- p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
- p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
- p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
- p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
+ p2pkh = CScript(bytes.fromhex(v['scriptPubKey']))
+ p2sh_p2pk = script_to_p2sh_script(p2pk)
+ p2sh_p2pkh = script_to_p2sh_script(p2pkh)
+ p2wsh_p2pk = script_to_p2wsh_script(p2pk)
+ p2wsh_p2pkh = script_to_p2wsh_script(p2pkh)
+ p2sh_p2wsh_p2pk = script_to_p2sh_script(p2wsh_p2pk)
+ p2sh_p2wsh_p2pkh = script_to_p2sh_script(p2wsh_p2pkh)
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success=True):
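The feature_segwit.py hunks above swap hand-rolled CScript constructions for the test framework's script_util helpers and switch the mempool-entry checks to get_vsize()/get_weight(). A small sketch of the equivalences implied by the removed and added lines, assuming the same test_framework modules imported in the file:

from test_framework.messages import sha256
from test_framework.script import CScript, OP_0, OP_CHECKSIG, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, hash160
from test_framework.script_util import (
    key_to_p2pkh_script,
    key_to_p2wpkh_script,
    script_to_p2sh_script,
    script_to_p2wsh_script,
)

pubkey = bytes.fromhex("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
redeem_script = CScript([pubkey, OP_CHECKSIG])

# Each helper builds exactly the script the removed lines spelled out by hand.
assert key_to_p2pkh_script(pubkey) == CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
assert key_to_p2wpkh_script(pubkey) == CScript([OP_0, hash160(pubkey)])
assert script_to_p2sh_script(redeem_script) == CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])
assert script_to_p2wsh_script(redeem_script) == CScript([OP_0, sha256(redeem_script)])

# Likewise, get_weight()/get_vsize() encode the relation the old assertions used:
# weight = 3 * len(serialize_without_witness()) + len(serialize_with_witness()),
# vsize  = (weight + 3) // 4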
diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py
index 5a0236401d..26048d37f6 100755
--- a/test/functional/feature_settings.py
+++ b/test/functional/feature_settings.py
@@ -83,7 +83,7 @@ class SettingsTest(BitcoinTestFramework):
with altsettings.open("w") as fp:
fp.write('{"key": "value"}')
with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']):
- self.start_node(0, extra_args=["-settings={}".format(altsettings)])
+ self.start_node(0, extra_args=[f"-settings={altsettings}"])
self.stop_node(0)
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py
index 99283b69b0..f27ab2057c 100755
--- a/test/functional/feature_taproot.py
+++ b/test/functional/feature_taproot.py
@@ -57,7 +57,6 @@ from test_framework.script import (
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
- OP_HASH160,
OP_IF,
OP_NOP,
OP_NOT,
@@ -76,12 +75,17 @@ from test_framework.script import (
is_op_success,
taproot_construct,
)
+from test_framework.script_util import (
+ key_to_p2wpkh_script,
+ keyhash_to_p2pkh_script,
+ script_to_p2sh_script,
+ script_to_p2wsh_script,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
- sha256,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
@@ -458,13 +462,13 @@ def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=
# P2WPKH
assert script is None
pubkeyhash = hash160(pkh)
- spk = CScript([OP_0, pubkeyhash])
- conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
+ spk = key_to_p2wpkh_script(pkh)
+ conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash)
conf["script_witv0"] = None
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# P2WSH
- spk = CScript([OP_0, sha256(script)])
+ spk = script_to_p2wsh_script(script)
conf["scriptcode"] = script
conf["script_witv0"] = script
else:
@@ -475,7 +479,7 @@ def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=
# P2PKH
assert script is None
pubkeyhash = hash160(pkh)
- spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
+ spk = keyhash_to_p2pkh_script(pubkeyhash)
conf["scriptcode"] = spk
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
@@ -496,7 +500,7 @@ def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=
if p2sh:
# P2SH wrapper can be combined with anything else
conf["script_p2sh"] = spk
- spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])
+ spk = script_to_p2sh_script(spk)
conf = {**conf, **kwargs}
@@ -518,9 +522,9 @@ def add_spender(spenders, *args, **kwargs):
def random_checksig_style(pubkey):
"""Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
- if (opcode == OP_CHECKSIGVERIFY):
+ if opcode == OP_CHECKSIGVERIFY:
ret = CScript([pubkey, opcode, OP_1])
- elif (opcode == OP_CHECKSIGADD):
+ elif opcode == OP_CHECKSIGADD:
num = random.choice([0, 0x7fffffff, -0x7fffffff])
ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
else:
@@ -1189,19 +1193,36 @@ def dump_json_test(tx, input_utxos, idx, success, failure):
# Data type to keep track of UTXOs, where they were created, and how to spend them.
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
+
class TaprootTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")
+ parser.add_argument("--previous_release", dest="previous_release", default=False, action="store_true",
+ help="Use a previous release as taproot-inactive node")
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
+ if self.options.previous_release:
+ self.skip_if_no_previous_releases()
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
# Node 0 has Taproot inactive, Node 1 active.
- self.extra_args = [["-par=1", "-vbparams=taproot:1:1"], ["-par=1"]]
+ self.extra_args = [["-par=1"], ["-par=1"]]
+ if self.options.previous_release:
+ self.wallet_names = [None, self.default_wallet_name]
+ else:
+ self.extra_args[0].append("-vbparams=taproot:1:1")
+
+ def setup_nodes(self):
+ self.add_nodes(self.num_nodes, self.extra_args, versions=[
+ 200100 if self.options.previous_release else None,
+ None,
+ ])
+ self.start_nodes()
+ self.import_deterministic_coinbase_privkeys()
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
@@ -1223,7 +1244,7 @@ class TaprootTest(BitcoinTestFramework):
block_response = node.submitblock(block.serialize().hex())
if err_msg is not None:
assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
- if (accept):
+ if accept:
assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
self.tip = block.sha256
self.lastblockhash = block.hash
diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py
index 1c9e237d78..a7ec5c48e3 100755
--- a/test/functional/feature_versionbits_warning.py
+++ b/test/functional/feature_versionbits_warning.py
@@ -21,7 +21,7 @@ VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
-WARN_UNKNOWN_RULES_ACTIVE = "Unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
+WARN_UNKNOWN_RULES_ACTIVE = f"Unknown new rules activated (versionbit {VB_UNKNOWN_BIT})"
VB_PATTERN = re.compile("Unknown new rules activated.*versionbit")
class VersionBitsWarningTest(BitcoinTestFramework):
@@ -34,7 +34,7 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# Open and close to create zero-length file
with open(self.alert_filename, 'w', encoding='utf8'):
pass
- self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
+ self.extra_args = [[f"-alertnotify=echo %s >> \"{self.alert_filename}\""]]
self.setup_nodes()
def send_blocks_with_version(self, peer, numblocks, version):
diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py
index 22eec59600..4b5363ec49 100755
--- a/test/functional/interface_bitcoin_cli.py
+++ b/test/functional/interface_bitcoin_cli.py
@@ -5,6 +5,7 @@
"""Test bitcoin-cli"""
from decimal import Decimal
+import re
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
@@ -29,6 +30,41 @@ TOO_MANY_ARGS = 'error: too many arguments (maximum 2 for nblocks and maxtries)'
WALLET_NOT_LOADED = 'Requested wallet does not exist or is not loaded'
WALLET_NOT_SPECIFIED = 'Wallet file not specified'
+
+def cli_get_info_string_to_dict(cli_get_info_string):
+ """Helper method to convert human-readable -getinfo into a dictionary"""
+ cli_get_info = {}
+ lines = cli_get_info_string.splitlines()
+ line_idx = 0
+ ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
+ while line_idx < len(lines):
+ # Remove ansi colour code
+ line = ansi_escape.sub('', lines[line_idx])
+ if "Balances" in line:
+ # When "Balances" appears in a line, all of the following lines contain "balance: wallet" until an empty line
+ cli_get_info["Balances"] = {}
+ while line_idx < len(lines) and not (lines[line_idx + 1] == ''):
+ line_idx += 1
+ balance, wallet = lines[line_idx].strip().split(" ")
+ # Remove right justification padding
+ wallet = wallet.strip()
+ if wallet == '""':
+ # Set default wallet("") to empty string
+ wallet = ''
+ cli_get_info["Balances"][wallet] = balance.strip()
+ elif ": " in line:
+ key, value = line.split(": ")
+ if key == 'Wallet' and value == '""':
+ # Set default wallet("") to empty string
+ value = ''
+ if key == "Proxy" and value == "N/A":
+ # Set N/A to empty string to represent no proxy
+ value = ''
+ cli_get_info[key.strip()] = value.strip()
+ line_idx += 1
+ return cli_get_info
+
+
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
@@ -51,12 +87,12 @@ class TestBitcoinCli(BitcoinTestFramework):
user, password = get_auth_cookie(self.nodes[0].datadir, self.chain)
self.log.info("Test -stdinrpcpass option")
- assert_equal(BLOCKS, self.nodes[0].cli('-rpcuser={}'.format(user), '-stdinrpcpass', input=password).getblockcount())
- assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli('-rpcuser={}'.format(user), '-stdinrpcpass', input='foo').echo)
+ assert_equal(BLOCKS, self.nodes[0].cli(f'-rpcuser={user}', '-stdinrpcpass', input=password).getblockcount())
+ assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(f'-rpcuser={user}', '-stdinrpcpass', input='foo').echo)
self.log.info("Test -stdin and -stdinrpcpass")
- assert_equal(['foo', 'bar'], self.nodes[0].cli('-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input=password + '\nfoo\nbar').echo())
- assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli('-rpcuser={}'.format(user), '-stdin', '-stdinrpcpass', input='foo').echo)
+ assert_equal(['foo', 'bar'], self.nodes[0].cli(f'-rpcuser={user}', '-stdin', '-stdinrpcpass', input=f'{password}\nfoo\nbar').echo())
+ assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(f'-rpcuser={user}', '-stdin', '-stdinrpcpass', input='foo').echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
@@ -67,37 +103,43 @@ class TestBitcoinCli(BitcoinTestFramework):
self.log.info("Test -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
+ self.log.info("Test -getinfo with -color=never does not return ANSI escape codes")
+ assert "\u001b[0m" not in self.nodes[0].cli('-getinfo', '-color=never').send_cli()
+
+ self.log.info("Test -getinfo with -color=always returns ANSI escape codes")
+ assert "\u001b[0m" in self.nodes[0].cli('-getinfo', '-color=always').send_cli()
+
+ self.log.info("Test -getinfo with invalid value for -color option")
+ assert_raises_process_error(1, "Invalid value for -color option. Valid values: always, auto, never.", self.nodes[0].cli('-getinfo', '-color=foo').send_cli)
+
self.log.info("Test -getinfo returns expected network and blockchain info")
if self.is_wallet_compiled():
self.nodes[0].encryptwallet(password)
- cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
+ cli_get_info_string = self.nodes[0].cli('-getinfo').send_cli()
+ cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
+
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
- assert_equal(cli_get_info['version'], network_info['version'])
- assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
- assert_equal(cli_get_info['headers'], blockchain_info['headers'])
- assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
- assert_equal(
- cli_get_info['connections'],
- {
- 'in': network_info['connections_in'],
- 'out': network_info['connections_out'],
- 'total': network_info['connections']
- }
- )
- assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
- assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
- assert_equal(cli_get_info['chain'], blockchain_info['chain'])
+ assert_equal(int(cli_get_info['Version']), network_info['version'])
+ assert_equal(cli_get_info['Verification progress'], "%.4f%%" % (blockchain_info['verificationprogress'] * 100))
+ assert_equal(int(cli_get_info['Blocks']), blockchain_info['blocks'])
+ assert_equal(int(cli_get_info['Headers']), blockchain_info['headers'])
+ assert_equal(int(cli_get_info['Time offset (s)']), network_info['timeoffset'])
+ expected_network_info = f"in {network_info['connections_in']}, out {network_info['connections_out']}, total {network_info['connections']}"
+ assert_equal(cli_get_info["Network"], expected_network_info)
+ assert_equal(cli_get_info['Proxy'], network_info['networks'][0]['proxy'])
+ assert_equal(Decimal(cli_get_info['Difficulty']), blockchain_info['difficulty'])
+ assert_equal(cli_get_info['Chain'], blockchain_info['chain'])
if self.is_wallet_compiled():
self.log.info("Test -getinfo and bitcoin-cli getwalletinfo return expected wallet info")
- assert_equal(cli_get_info['balance'], BALANCE)
- assert 'balances' not in cli_get_info.keys()
+ assert_equal(Decimal(cli_get_info['Balance']), BALANCE)
+ assert 'Balances' not in cli_get_info_string
wallet_info = self.nodes[0].getwalletinfo()
- assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
- assert_equal(cli_get_info['unlocked_until'], wallet_info['unlocked_until'])
- assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
- assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
+ assert_equal(int(cli_get_info['Keypool size']), wallet_info['keypoolsize'])
+ assert_equal(int(cli_get_info['Unlocked until']), wallet_info['unlocked_until'])
+ assert_equal(Decimal(cli_get_info['Transaction fee rate (-paytxfee) (BTC/kvB)']), wallet_info['paytxfee'])
+ assert_equal(Decimal(cli_get_info['Min tx relay fee rate (BTC/kvB)']), network_info['relayfee'])
assert_equal(self.nodes[0].cli.getwalletinfo(), wallet_info)
# Setup to test -getinfo, -generate, and -rpcwallet= with multiple wallets.
@@ -108,8 +150,8 @@ class TestBitcoinCli(BitcoinTestFramework):
w1 = self.nodes[0].get_wallet_rpc(wallets[0])
w2 = self.nodes[0].get_wallet_rpc(wallets[1])
w3 = self.nodes[0].get_wallet_rpc(wallets[2])
- rpcwallet2 = '-rpcwallet={}'.format(wallets[1])
- rpcwallet3 = '-rpcwallet={}'.format(wallets[2])
+ rpcwallet2 = f'-rpcwallet={wallets[1]}'
+ rpcwallet3 = f'-rpcwallet={wallets[2]}'
w1.walletpassphrase(password, self.rpc_timeout)
w2.encryptwallet(password)
w1.sendtoaddress(w2.getnewaddress(), amounts[1])
@@ -120,44 +162,57 @@ class TestBitcoinCli(BitcoinTestFramework):
self.log.info("Test -getinfo with multiple wallets and -rpcwallet returns specified wallet balance")
for i in range(len(wallets)):
- cli_get_info = self.nodes[0].cli('-getinfo', '-rpcwallet={}'.format(wallets[i])).send_cli()
- assert 'balances' not in cli_get_info.keys()
- assert_equal(cli_get_info['balance'], amounts[i])
+ cli_get_info_string = self.nodes[0].cli('-getinfo', f'-rpcwallet={wallets[i]}').send_cli()
+ cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
+ assert 'Balances' not in cli_get_info_string
+ assert_equal(cli_get_info["Wallet"], wallets[i])
+ assert_equal(Decimal(cli_get_info['Balance']), amounts[i])
self.log.info("Test -getinfo with multiple wallets and -rpcwallet=non-existing-wallet returns no balances")
- cli_get_info_keys = self.nodes[0].cli('-getinfo', '-rpcwallet=does-not-exist').send_cli().keys()
- assert 'balance' not in cli_get_info_keys
- assert 'balances' not in cli_get_info_keys
+ cli_get_info_string = self.nodes[0].cli('-getinfo', '-rpcwallet=does-not-exist').send_cli()
+ assert 'Balance' not in cli_get_info_string
+ assert 'Balances' not in cli_get_info_string
self.log.info("Test -getinfo with multiple wallets returns all loaded wallet names and balances")
assert_equal(set(self.nodes[0].listwallets()), set(wallets))
- cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
- assert 'balance' not in cli_get_info.keys()
- assert_equal(cli_get_info['balances'], {k: v for k, v in zip(wallets, amounts)})
+ cli_get_info_string = self.nodes[0].cli('-getinfo').send_cli()
+ cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
+ assert 'Balance' not in cli_get_info
+ for k, v in zip(wallets, amounts):
+ assert_equal(Decimal(cli_get_info['Balances'][k]), v)
# Unload the default wallet and re-verify.
self.nodes[0].unloadwallet(wallets[0])
assert wallets[0] not in self.nodes[0].listwallets()
- cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
- assert 'balance' not in cli_get_info.keys()
- assert_equal(cli_get_info['balances'], {k: v for k, v in zip(wallets[1:], amounts[1:])})
+ cli_get_info_string = self.nodes[0].cli('-getinfo').send_cli()
+ cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
+ assert 'Balance' not in cli_get_info
+ assert 'Balances' in cli_get_info_string
+ for k, v in zip(wallets[1:], amounts[1:]):
+ assert_equal(Decimal(cli_get_info['Balances'][k]), v)
+ assert wallets[0] not in cli_get_info
self.log.info("Test -getinfo after unloading all wallets except a non-default one returns its balance")
self.nodes[0].unloadwallet(wallets[2])
assert_equal(self.nodes[0].listwallets(), [wallets[1]])
- cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
- assert 'balances' not in cli_get_info.keys()
- assert_equal(cli_get_info['balance'], amounts[1])
+ cli_get_info_string = self.nodes[0].cli('-getinfo').send_cli()
+ cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
+ assert 'Balances' not in cli_get_info_string
+ assert_equal(cli_get_info['Wallet'], wallets[1])
+ assert_equal(Decimal(cli_get_info['Balance']), amounts[1])
self.log.info("Test -getinfo with -rpcwallet=remaining-non-default-wallet returns only its balance")
- cli_get_info = self.nodes[0].cli('-getinfo', rpcwallet2).send_cli()
- assert 'balances' not in cli_get_info.keys()
- assert_equal(cli_get_info['balance'], amounts[1])
+ cli_get_info_string = self.nodes[0].cli('-getinfo', rpcwallet2).send_cli()
+ cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
+ assert 'Balances' not in cli_get_info_string
+ assert_equal(cli_get_info['Wallet'], wallets[1])
+ assert_equal(Decimal(cli_get_info['Balance']), amounts[1])
self.log.info("Test -getinfo with -rpcwallet=unloaded wallet returns no balances")
- cli_get_info_keys = self.nodes[0].cli('-getinfo', rpcwallet3).send_cli().keys()
- assert 'balance' not in cli_get_info_keys
- assert 'balances' not in cli_get_info_keys
+ cli_get_info_string = self.nodes[0].cli('-getinfo', rpcwallet3).send_cli()
+ cli_get_info_keys = cli_get_info_string_to_dict(cli_get_info_string)
+ assert 'Balance' not in cli_get_info_keys
+ assert 'Balances' not in cli_get_info_string
# Test bitcoin-cli -generate.
n1 = 3
@@ -241,7 +296,7 @@ class TestBitcoinCli(BitcoinTestFramework):
self.log.info("Test -version with node stopped")
self.stop_node(0)
cli_response = self.nodes[0].cli('-version').send_cli()
- assert "{} RPC client version".format(self.config['environment']['PACKAGE_NAME']) in cli_response
+ assert f"{self.config['environment']['PACKAGE_NAME']} RPC client version" in cli_response
self.log.info("Test -rpcwait option successfully waits for RPC connection")
self.nodes[0].start() # start node without RPC connection
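The new cli_get_info_string_to_dict() helper above turns the human-readable -getinfo text back into a dictionary for the assertions that follow. A small usage sketch; the sample -getinfo rendering below is a hypothetical, trimmed-down layout used only to show the parsing shape:

from decimal import Decimal

sample = "\n".join([
    "Chain: regtest",
    "Blocks: 101",
    "Wallet: \"\"",
    "Balance: 50.00000000",
    "",
])
info = cli_get_info_string_to_dict(sample)  # helper defined in the hunk above
assert info["Chain"] == "regtest"
assert info["Wallet"] == ""                 # the default wallet '""' is normalized to ''
assert Decimal(info["Balance"]) == Decimal("50.00000000")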
diff --git a/test/functional/interface_http.py b/test/functional/interface_http.py
index d007490f80..075224c011 100755
--- a/test/functional/interface_http.py
+++ b/test/functional/interface_http.py
@@ -24,8 +24,8 @@ class HTTPBasicsTest (BitcoinTestFramework):
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
- authpair = url.username + ':' + url.password
- headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+ authpair = f'{url.username}:{url.password}'
+ headers = {"Authorization": f"Basic {str_to_b64str(authpair)}"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
@@ -42,7 +42,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
- headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
+ headers = {"Authorization": f"Basic {str_to_b64str(authpair)}", "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
@@ -59,7 +59,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
conn.close()
#now do the same with "Connection: close"
- headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
+ headers = {"Authorization": f"Basic {str_to_b64str(authpair)}", "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
@@ -70,8 +70,8 @@ class HTTPBasicsTest (BitcoinTestFramework):
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
- authpair = urlNode1.username + ':' + urlNode1.password
- headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+ authpair = f'{urlNode1.username}:{urlNode1.password}'
+ headers = {"Authorization": f"Basic {str_to_b64str(authpair)}"}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
@@ -81,8 +81,8 @@ class HTTPBasicsTest (BitcoinTestFramework):
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
- authpair = urlNode2.username + ':' + urlNode2.password
- headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+ authpair = f'{urlNode2.username}:{urlNode2.password}'
+ headers = {"Authorization": f"Basic {str_to_b64str(authpair)}"}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
@@ -94,13 +94,13 @@ class HTTPBasicsTest (BitcoinTestFramework):
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
- conn.request('GET', '/' + ('x'*1000), '', headers)
+ conn.request('GET', f'/{"x"*1000}', '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
- conn.request('GET', '/' + ('x'*10000), '', headers)
+ conn.request('GET', f'/{"x"*10000}', '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
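The Authorization headers built above are plain HTTP Basic auth. A self-contained equivalent of the framework's str_to_b64str() construction, with placeholder credentials:

import base64

user, password = "rpcuser", "rpcpassword"  # placeholder credentials
authpair = f"{user}:{password}"
headers = {"Authorization": f"Basic {base64.b64encode(authpair.encode('utf-8')).decode('ascii')}"}
assert headers["Authorization"] == "Basic cnBjdXNlcjpycGNwYXNzd29yZA=="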
diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py
index e73ec90819..47c95f5673 100755
--- a/test/functional/interface_rest.py
+++ b/test/functional/interface_rest.py
@@ -4,7 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
-import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
@@ -19,7 +18,6 @@ from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
- hex_str_to_bytes,
)
from test_framework.messages import BLOCK_HEADER_SIZE
@@ -59,7 +57,7 @@ class RESTTest (BitcoinTestFramework):
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
- self.log.debug('%s %s %s', http_method, rest_uri, body)
+ self.log.debug(f'{http_method} {rest_uri} {body}')
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
@@ -94,11 +92,11 @@ class RESTTest (BitcoinTestFramework):
self.log.info("Test the /tx URI")
- json_obj = self.test_rest_request("/tx/{}".format(txid))
+ json_obj = self.test_rest_request(f"/tx/{txid}")
assert_equal(json_obj['txid'], txid)
# Check hex format response
- hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+ hex_response = self.test_rest_request(f"/tx/{txid}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
@@ -116,7 +114,7 @@ class RESTTest (BitcoinTestFramework):
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
- json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
+ json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}")
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
@@ -125,7 +123,7 @@ class RESTTest (BitcoinTestFramework):
self.log.info("Query a spent TXO using the /getutxos URI")
- json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
+ json_obj = self.test_rest_request(f"/getutxos/{spent[0]}-{spent[1]}")
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
@@ -138,7 +136,7 @@ class RESTTest (BitcoinTestFramework):
self.log.info("Query two TXOs using the /getutxos URI")
- json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
+ json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}/{spent[0]}-{spent[1]}")
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
@@ -147,7 +145,7 @@ class RESTTest (BitcoinTestFramework):
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
- bin_request += hex_str_to_bytes(txid)
+ bin_request += bytes.fromhex(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
@@ -165,32 +163,32 @@ class RESTTest (BitcoinTestFramework):
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
- json_obj = self.test_rest_request("/tx/{}".format(txid))
+ json_obj = self.test_rest_request(f"/tx/{txid}")
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
- json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
+ json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}")
assert_equal(len(json_obj['utxos']), 0)
- json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
+ json_obj = self.test_rest_request(f"/getutxos/checkmempool/{spending[0]}-{spending[1]}")
assert_equal(len(json_obj['utxos']), 1)
- json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
+ json_obj = self.test_rest_request(f"/getutxos/{spent[0]}-{spent[1]}")
assert_equal(len(json_obj['utxos']), 1)
- json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
+ json_obj = self.test_rest_request(f"/getutxos/checkmempool/{spent[0]}-{spent[1]}")
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
- json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
+ json_obj = self.test_rest_request(f"/getutxos/{spending[0]}-{spending[1]}")
assert_equal(len(json_obj['utxos']), 1)
- json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
+ json_obj = self.test_rest_request(f"/getutxos/checkmempool/{spending[0]}-{spending[1]}")
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
@@ -199,11 +197,11 @@ class RESTTest (BitcoinTestFramework):
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
- long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
- self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
+ long_uri = '/'.join([f"{txid}-{n_}" for n_ in range(20)])
+ self.test_rest_request(f"/getutxos/checkmempool/{long_uri}", http_method='POST', status=400, ret_type=RetType.OBJ)
- long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
- self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
+ long_uri = '/'.join([f'{txid}-{n_}' for n_ in range(15)])
+ self.test_rest_request(f"/getutxos/checkmempool/{long_uri}", http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
@@ -217,42 +215,42 @@ class RESTTest (BitcoinTestFramework):
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
- assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
- self.test_rest_request('/block/{}'.format(bb_hash))
+ assert_equal(self.test_rest_request(f'/headers/1/{bb_hash}'), [])
+ self.test_rest_request(f'/block/{bb_hash}')
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
- response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
+ response = self.test_rest_request(f"/block/{bb_hash}", req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
- response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
+ response_header = self.test_rest_request(f"/headers/1/{bb_hash}", req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), BLOCK_HEADER_SIZE)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)
# Check block hex format
- response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+ response_hex = self.test_rest_request(f"/block/{bb_hash}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
- assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
+ assert_equal(response_bytes.hex().encode(), response_hex_bytes)
# Compare with hex block header
- response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+ response_header_hex = self.test_rest_request(f"/headers/1/{bb_hash}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
- assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
+ assert_equal(response_bytes[:BLOCK_HEADER_SIZE].hex().encode(), response_header_hex_bytes)
# Check json format
- block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
+ block_json_obj = self.test_rest_request(f"/block/{bb_hash}")
assert_equal(block_json_obj['hash'], bb_hash)
- assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
+ assert_equal(self.test_rest_request(f"/blockhashbyheight/{block_json_obj['height']}")['blockhash'], bb_hash)
# Check hex/bin format
- resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
+ resp_hex = self.test_rest_request(f"/blockhashbyheight/{block_json_obj['height']}", req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
- resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
+ resp_bytes = self.test_rest_request(f"/blockhashbyheight/{block_json_obj['height']}", req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
@@ -266,7 +264,7 @@ class RESTTest (BitcoinTestFramework):
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
- json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
+ json_obj = self.test_rest_request(f"/headers/1/{bb_hash}")
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
@@ -278,7 +276,7 @@ class RESTTest (BitcoinTestFramework):
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
- json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
+ json_obj = self.test_rest_request(f"/headers/5/{bb_hash}")
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
@@ -308,13 +306,13 @@ class RESTTest (BitcoinTestFramework):
self.sync_all()
# Check if the 3 tx show up in the new block
- json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
+ json_obj = self.test_rest_request(f"/block/{newblockhash[0]}")
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
- json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
+ json_obj = self.test_rest_request(f"/block/notxdetails/{newblockhash[0]}")
for tx in txs:
assert tx in json_obj['tx']
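The binary /getutxos POST body assembled above is a fixed prefix followed by serialized outpoints. A stand-alone sketch of the same construction; the two-byte prefix is copied verbatim from the test, and the txid/vout values are placeholders:

import struct

txid = "00" * 32  # placeholder txid (hex)
vout = 0

bin_request = b'\x01\x02'              # same two-byte prefix the test sends before its two outpoints
bin_request += bytes.fromhex(txid)     # 32-byte txid
bin_request += struct.pack("i", vout)  # output index packed the same way as in the test
assert len(bin_request) == 2 + 32 + struct.calcsize("i")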
diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py
index 4d5666f414..89a7d29b24 100755
--- a/test/functional/interface_rpc.py
+++ b/test/functional/interface_rpc.py
@@ -16,7 +16,7 @@ def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args):
try:
fcn(*args)
- raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
+ raise AssertionError(f"Expected RPC error {expected_rpc_code}, got none")
except JSONRPCException as exc:
assert_equal(exc.error["code"], expected_rpc_code)
assert_equal(exc.http_status, expected_http_status)
diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py
index 15f352d68c..6686f77a44 100755
--- a/test/functional/interface_zmq.py
+++ b/test/functional/interface_zmq.py
@@ -82,8 +82,8 @@ class ZMQTestSetupBlock:
raw transaction data.
"""
- def __init__(self, node):
- self.block_hash = node.generate(1)[0]
+ def __init__(self, test_framework, node):
+ self.block_hash = test_framework.generate(node, 1)[0]
coinbase = node.getblock(self.block_hash, 2)['tx'][0]
self.tx_hash = coinbase['txid']
self.raw_tx = coinbase['hex']
@@ -132,7 +132,7 @@ class ZMQTest (BitcoinTestFramework):
socket = self.ctx.socket(zmq.SUB)
subscribers.append(ZMQSubscriber(socket, topic.encode()))
- self.restart_node(0, ["-zmqpub%s=%s" % (topic, address) for topic, address in services] +
+ self.restart_node(0, [f"-zmqpub{topic}={address}" for topic, address in services] +
self.extra_args[0])
for i, sub in enumerate(subscribers):
@@ -147,7 +147,7 @@ class ZMQTest (BitcoinTestFramework):
for sub in subscribers:
sub.socket.set(zmq.RCVTIMEO, 1000)
while True:
- test_block = ZMQTestSetupBlock(self.nodes[0])
+ test_block = ZMQTestSetupBlock(self, self.nodes[0])
recv_failed = False
for sub in subscribers:
try:
@@ -184,7 +184,7 @@ class ZMQTest (BitcoinTestFramework):
rawtx = subs[3]
num_blocks = 5
- self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
+ self.log.info(f"Generate {num_blocks} blocks (and {num_blocks} coinbase txes)")
genhashes = self.nodes[0].generatetoaddress(num_blocks, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all()
@@ -504,7 +504,7 @@ class ZMQTest (BitcoinTestFramework):
if mempool_sequence is not None:
zmq_mem_seq = mempool_sequence
if zmq_mem_seq > get_raw_seq:
- raise Exception("We somehow jumped mempool sequence numbers! zmq_mem_seq: {} > get_raw_seq: {}".format(zmq_mem_seq, get_raw_seq))
+ raise Exception(f"We somehow jumped mempool sequence numbers! zmq_mem_seq: {zmq_mem_seq} > get_raw_seq: {get_raw_seq}")
# 4) Moving forward, we apply the delta to our local view
# remaining txs(5) + 1 rbf(A+R) + 1 block connect + 1 final tx
@@ -520,7 +520,7 @@ class ZMQTest (BitcoinTestFramework):
assert mempool_sequence > expected_sequence
r_gap += mempool_sequence - expected_sequence
else:
- raise Exception("WARNING: txhash has unexpected mempool sequence value: {} vs expected {}".format(mempool_sequence, expected_sequence))
+ raise Exception(f"WARNING: txhash has unexpected mempool sequence value: {mempool_sequence} vs expected {expected_sequence}")
if label == "A":
assert hash_str not in mempool_view
mempool_view.add(hash_str)
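The -zmqpub<topic>=<address> options exercised above publish multipart notifications that any ZeroMQ SUB socket can read. A minimal stand-alone subscriber sketch; the endpoint, topic and timeout are assumptions in the spirit of contrib/zmq/zmq_sub.py rather than values taken from this test:

import zmq

ctx = zmq.Context()
socket = ctx.socket(zmq.SUB)
socket.setsockopt(zmq.RCVTIMEO, 60000)                # give up after 60s instead of blocking forever
socket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")  # one of the topics a node can publish
socket.connect("tcp://127.0.0.1:28332")               # assumed -zmqpubhashblock endpoint

topic, body, seq = socket.recv_multipart()            # topic, payload, 4-byte LE sequence number
print(topic.decode(), body.hex(), int.from_bytes(seq, "little"))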
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 1705d957aa..97d29ff197 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -13,22 +13,24 @@ from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
+ CTxIn,
CTxOut,
- MAX_BLOCK_BASE_SIZE,
+ MAX_BLOCK_WEIGHT,
MAX_MONEY,
tx_from_hex,
)
from test_framework.script import (
- hash160,
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
- OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
+from test_framework.script_util import (
+ script_to_p2sh_script,
+)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
@@ -207,7 +209,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
self.log.info('A really large transaction')
tx = tx_from_hex(raw_tx_reference)
- tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize()))
+ tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_WEIGHT // 4 / len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}],
rawtxs=[tx.serialize().hex()],
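MAX_BLOCK_BASE_SIZE is gone here in favour of MAX_BLOCK_WEIGHT // 4, which leaves the "really large transaction" threshold unchanged. A minimal sanity check of that arithmetic, using the constants exported by test_framework.messages:

from test_framework.messages import MAX_BLOCK_WEIGHT, WITNESS_SCALE_FACTOR

# 4,000,000 weight units / 4 = 1,000,000 vbytes, the value the old
# MAX_BLOCK_BASE_SIZE constant carried.
assert WITNESS_SCALE_FACTOR == 4
assert MAX_BLOCK_WEIGHT // WITNESS_SCALE_FACTOR == 1_000_000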
@@ -247,6 +249,14 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
rawtxs=[tx.serialize().hex()],
)
+ self.log.info('A non-coinbase transaction with coinbase-like outpoint')
+ tx = tx_from_hex(raw_tx_reference)
+ tx.vin.append(CTxIn(COutPoint(hash=0, n=0xffffffff)))
+ self.check_mempool_result(
+ result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-prevout-null'}],
+ rawtxs=[tx.serialize().hex()],
+ )
+
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
@@ -291,7 +301,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
- output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
+ output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=script_to_p2sh_script(b'burn'))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
diff --git a/test/functional/mempool_accept_wtxid.py b/test/functional/mempool_accept_wtxid.py
new file mode 100755
index 0000000000..82752f2d70
--- /dev/null
+++ b/test/functional/mempool_accept_wtxid.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test mempool acceptance in case of an already known transaction
+with identical non-witness data but different witness.
+"""
+
+from copy import deepcopy
+from test_framework.messages import (
+ COIN,
+ COutPoint,
+ CTransaction,
+ CTxIn,
+ CTxInWitness,
+ CTxOut,
+ sha256,
+)
+from test_framework.p2p import P2PTxInvStore
+from test_framework.script import (
+ CScript,
+ OP_0,
+ OP_ELSE,
+ OP_ENDIF,
+ OP_EQUAL,
+ OP_HASH160,
+ OP_IF,
+ OP_TRUE,
+ hash160,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+
+class MempoolWtxidTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.setup_clean_chain = True
+
+ def run_test(self):
+ node = self.nodes[0]
+
+ self.log.info('Start with empty mempool and 101 blocks')
+ # The last 100 coinbase transactions are premature
+ blockhash = node.generate(101)[0]
+ txid = node.getblock(blockhash=blockhash, verbosity=2)["tx"][0]["txid"]
+ assert_equal(node.getmempoolinfo()['size'], 0)
+
+ self.log.info("Submit parent with multiple script branches to mempool")
+ hashlock = hash160(b'Preimage')
+ witness_script = CScript([OP_IF, OP_HASH160, hashlock, OP_EQUAL, OP_ELSE, OP_TRUE, OP_ENDIF])
+ witness_program = sha256(witness_script)
+ script_pubkey = CScript([OP_0, witness_program])
+
+ parent = CTransaction()
+ parent.vin.append(CTxIn(COutPoint(int(txid, 16), 0), b""))
+ parent.vout.append(CTxOut(int(9.99998 * COIN), script_pubkey))
+ parent.rehash()
+
+ privkeys = [node.get_deterministic_priv_key().key]
+ raw_parent = node.signrawtransactionwithkey(hexstring=parent.serialize().hex(), privkeys=privkeys)['hex']
+ parent_txid = node.sendrawtransaction(hexstring=raw_parent, maxfeerate=0)
+ node.generate(1)
+
+ peer_wtxid_relay = node.add_p2p_connection(P2PTxInvStore())
+
+ # Create a new transaction with witness solving first branch
+ child_witness_script = CScript([OP_TRUE])
+ child_witness_program = sha256(child_witness_script)
+ child_script_pubkey = CScript([OP_0, child_witness_program])
+
+ child_one = CTransaction()
+ child_one.vin.append(CTxIn(COutPoint(int(parent_txid, 16), 0), b""))
+ child_one.vout.append(CTxOut(int(9.99996 * COIN), child_script_pubkey))
+ child_one.wit.vtxinwit.append(CTxInWitness())
+ child_one.wit.vtxinwit[0].scriptWitness.stack = [b'Preimage', b'\x01', witness_script]
+ child_one_wtxid = child_one.getwtxid()
+ child_one_txid = child_one.rehash()
+
+ # Create another identical transaction with witness solving second branch
+ child_two = deepcopy(child_one)
+ child_two.wit.vtxinwit[0].scriptWitness.stack = [b'', witness_script]
+ child_two_wtxid = child_two.getwtxid()
+ child_two_txid = child_two.rehash()
+
+ assert_equal(child_one_txid, child_two_txid)
+ assert child_one_wtxid != child_two_wtxid
+
+ self.log.info("Submit child_one to the mempool")
+ txid_submitted = node.sendrawtransaction(child_one.serialize().hex())
+ assert_equal(node.getmempoolentry(txid_submitted)['wtxid'], child_one_wtxid)
+
+ peer_wtxid_relay.wait_for_broadcast([child_one_wtxid])
+ assert_equal(node.getmempoolinfo()["unbroadcastcount"], 0)
+
+ # testmempoolaccept reports the "already in mempool" error
+ assert_equal(node.testmempoolaccept([child_one.serialize().hex()]), [{
+ "txid": child_one_txid,
+ "wtxid": child_one_wtxid,
+ "allowed": False,
+ "reject-reason": "txn-already-in-mempool"
+ }])
+ assert_equal(node.testmempoolaccept([child_two.serialize().hex()])[0], {
+ "txid": child_two_txid,
+ "wtxid": child_two_wtxid,
+ "allowed": False,
+ "reject-reason": "txn-same-nonwitness-data-in-mempool"
+ })
+
+ # sendrawtransaction will not throw but quits early when the exact same transaction is already in mempool
+ node.sendrawtransaction(child_one.serialize().hex())
+
+ self.log.info("Connect another peer that hasn't seen child_one before")
+ peer_wtxid_relay_2 = node.add_p2p_connection(P2PTxInvStore())
+
+ self.log.info("Submit child_two to the mempool")
+ # sendrawtransaction will not throw but quits early when a transaction with the same non-witness data is already in mempool
+ node.sendrawtransaction(child_two.serialize().hex())
+
+ # The node should rebroadcast the transaction using the wtxid of the correct transaction
+ # (child_one, which is in its mempool).
+ peer_wtxid_relay_2.wait_for_broadcast([child_one_wtxid])
+ assert_equal(node.getmempoolinfo()["unbroadcastcount"], 0)
+
+if __name__ == '__main__':
+ MempoolWtxidTest().main()
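The new test above hinges on the fact that a txid commits only to the non-witness serialization while the wtxid commits to the full serialization. A short sketch of that relation in terms of the objects the test already builds (child_one/child_two as constructed in run_test):

# The two children differ only in their witness stacks, so their non-witness
# serializations are byte-identical while the witness serializations are not.
assert child_one.serialize_without_witness() == child_two.serialize_without_witness()
assert child_one.serialize_with_witness() != child_two.serialize_with_witness()
# Hence rehash() (txid) agrees and getwtxid() (wtxid) differs, as asserted in the test.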
diff --git a/test/functional/mempool_compatibility.py b/test/functional/mempool_compatibility.py
index 6de6778909..9a50647a01 100755
--- a/test/functional/mempool_compatibility.py
+++ b/test/functional/mempool_compatibility.py
@@ -7,10 +7,7 @@
NOTE: The test is designed to prevent cases when compatibility is broken accidentally.
In case we need to break mempool compatibility we can continue to use the test by just bumping the version number.
-Download node binaries:
-test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
-
-Only v0.15.2 is required by this test. The rest is used in other backwards compatibility tests.
+The previous release v0.15.2 is required by this test, see test/README.md.
"""
import os
@@ -68,8 +65,7 @@ class MempoolCompatibilityTest(BitcoinTestFramework):
self.log.info("Add unbroadcasted tx to mempool on new node and shutdown")
unbroadcasted_tx_hash = new_wallet.send_self_transfer(from_node=new_node)['txid']
assert unbroadcasted_tx_hash in new_node.getrawmempool()
- mempool = new_node.getrawmempool(True)
- assert mempool[unbroadcasted_tx_hash]['unbroadcast']
+ assert new_node.getmempoolentry(unbroadcasted_tx_hash)['unbroadcast']
self.stop_node(1)
self.log.info("Move mempool.dat from new to old node")
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
index 39035f7cb1..1b1ac23024 100755
--- a/test/functional/mempool_limit.py
+++ b/test/functional/mempool_limit.py
@@ -32,7 +32,7 @@ class MempoolLimitTest(BitcoinTestFramework):
assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
txids = []
- utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
+ utxos = create_confirmed_utxos(self, relayfee, self.nodes[0], 91)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py
new file mode 100755
index 0000000000..a54c1d0457
--- /dev/null
+++ b/test/functional/mempool_package_limits.py
@@ -0,0 +1,475 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for limiting mempool and package ancestors/descendants."""
+
+from decimal import Decimal
+
+from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.messages import (
+ COIN,
+ CTransaction,
+ CTxInWitness,
+ tx_from_hex,
+ WITNESS_SCALE_FACTOR,
+)
+from test_framework.script import (
+ CScript,
+ OP_TRUE,
+)
+from test_framework.util import (
+ assert_equal,
+)
+from test_framework.wallet import (
+ bulk_transaction,
+ create_child_with_parents,
+ make_chain,
+)
+
+class MempoolPackageLimitsTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.setup_clean_chain = True
+
+ def run_test(self):
+ self.log.info("Generate blocks to create UTXOs")
+ node = self.nodes[0]
+ self.privkeys = [node.get_deterministic_priv_key().key]
+ self.address = node.get_deterministic_priv_key().address
+ self.coins = []
+ # The last 100 coinbase transactions are premature
+ for b in node.generatetoaddress(200, self.address)[:100]:
+ coinbase = node.getblock(blockhash=b, verbosity=2)["tx"][0]
+ self.coins.append({
+ "txid": coinbase["txid"],
+ "amount": coinbase["vout"][0]["value"],
+ "scriptPubKey": coinbase["vout"][0]["scriptPubKey"],
+ })
+
+ self.test_chain_limits()
+ self.test_desc_count_limits()
+ self.test_anc_count_limits()
+ self.test_anc_count_limits_2()
+ self.test_anc_count_limits_bushy()
+
+ # The node will accept our (nonstandard) extra large OP_RETURN outputs
+ self.restart_node(0, extra_args=["-acceptnonstdtxn=1"])
+ self.test_anc_size_limits()
+ self.test_desc_size_limits()
+
+ def test_chain_limits_helper(self, mempool_count, package_count):
+ node = self.nodes[0]
+ assert_equal(0, node.getmempoolinfo()["size"])
+ first_coin = self.coins.pop()
+ spk = None
+ txid = first_coin["txid"]
+ chain_hex = []
+ chain_txns = []
+ value = first_coin["amount"]
+
+ for i in range(mempool_count + package_count):
+ (tx, txhex, value, spk) = make_chain(node, self.address, self.privkeys, txid, value, 0, spk)
+ txid = tx.rehash()
+ if i < mempool_count:
+ node.sendrawtransaction(txhex)
+ assert_equal(node.getmempoolentry(txid)["ancestorcount"], i + 1)
+ else:
+ chain_hex.append(txhex)
+ chain_txns.append(tx)
+ testres_too_long = node.testmempoolaccept(rawtxs=chain_hex)
+ for txres in testres_too_long:
+ assert_equal(txres["package-error"], "package-mempool-limits")
+
+ # Clear mempool and check that the package passes now
+ node.generate(1)
+ assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=chain_hex)])
+
+ def test_chain_limits(self):
+ """Create chains from mempool and package transactions that are longer than 25,
+ but only if both in-mempool and in-package transactions are considered together.
+ This checks that both mempool and in-package transactions are taken into account when
+ calculating ancestors/descendant limits.
+ """
+ self.log.info("Check that in-package ancestors count for mempool ancestor limits")
+
+ # 24 transactions in the mempool and 2 in the package. The parent in the package has
+ # 24 in-mempool ancestors and 1 in-package descendant. The child has 0 direct parents
+ # in the mempool, but 25 in-mempool and in-package ancestors in total.
+ self.test_chain_limits_helper(24, 2)
+ # 2 transactions in the mempool and 24 in the package.
+ self.test_chain_limits_helper(2, 24)
+ # 13 transactions in the mempool and 13 in the package.
+ self.test_chain_limits_helper(13, 13)
+
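# The arithmetic these helper calls exercise, sketched out for reference; the
# limit constant below is an assumption matching the "longer than 25" wording in
# the docstring above (the default ancestor/descendant count limit), not
# framework code.
DEFAULT_ANCESTOR_COUNT_LIMIT = 25
for mempool_count, package_count in [(24, 2), (2, 24), (13, 13)]:
    # each combination totals 26 transactions in a single chain, one over the limit
    assert mempool_count + package_count > DEFAULT_ANCESTOR_COUNT_LIMIT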
+ def test_desc_count_limits(self):
+ """Create an 'A' shaped package with 24 transactions in the mempool and 2 in the package:
+ M1
+ ^ ^
+ M2a M2b
+ . .
+ . .
+ . .
+ M12a ^
+ ^ M13b
+ ^ ^
+ Pa Pb
+ The top ancestor in the package exceeds descendant limits but only if the in-mempool and in-package
+ descendants are all considered together (24 including in-mempool descendants and 26 including both
+ package transactions).
+ """
+ node = self.nodes[0]
+ assert_equal(0, node.getmempoolinfo()["size"])
+ self.log.info("Check that in-mempool and in-package descendants are calculated properly in packages")
+ # Top parent in mempool, M1
+ first_coin = self.coins.pop()
+ parent_value = (first_coin["amount"] - Decimal("0.0002")) / 2 # Deduct reasonable fee and make 2 outputs
+ inputs = [{"txid": first_coin["txid"], "vout": 0}]
+ outputs = [{self.address : parent_value}, {ADDRESS_BCRT1_P2WSH_OP_TRUE : parent_value}]
+ rawtx = node.createrawtransaction(inputs, outputs)
+
+ parent_signed = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys)
+ assert parent_signed["complete"]
+ parent_tx = tx_from_hex(parent_signed["hex"])
+ parent_txid = parent_tx.rehash()
+ node.sendrawtransaction(parent_signed["hex"])
+
+ package_hex = []
+
+ # Chain A
+ spk = parent_tx.vout[0].scriptPubKey.hex()
+ value = parent_value
+ txid = parent_txid
+ for i in range(12):
+ (tx, txhex, value, spk) = make_chain(node, self.address, self.privkeys, txid, value, 0, spk)
+ txid = tx.rehash()
+ if i < 11: # M2a... M12a
+ node.sendrawtransaction(txhex)
+ else: # Pa
+ package_hex.append(txhex)
+
+ # Chain B
+ value = parent_value - Decimal("0.0001")
+ rawtx_b = node.createrawtransaction([{"txid": parent_txid, "vout": 1}], {self.address : value})
+ tx_child_b = tx_from_hex(rawtx_b) # M2b
+ tx_child_b.wit.vtxinwit = [CTxInWitness()]
+ tx_child_b.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
+ tx_child_b_hex = tx_child_b.serialize().hex()
+ node.sendrawtransaction(tx_child_b_hex)
+ spk = tx_child_b.vout[0].scriptPubKey.hex()
+ txid = tx_child_b.rehash()
+ for i in range(12):
+ (tx, txhex, value, spk) = make_chain(node, self.address, self.privkeys, txid, value, 0, spk)
+ txid = tx.rehash()
+ if i < 11: # M3b... M13b
+ node.sendrawtransaction(txhex)
+ else: # Pb
+ package_hex.append(txhex)
+
+ assert_equal(24, node.getmempoolinfo()["size"])
+ assert_equal(2, len(package_hex))
+ testres_too_long = node.testmempoolaccept(rawtxs=package_hex)
+ for txres in testres_too_long:
+ assert_equal(txres["package-error"], "package-mempool-limits")
+
+ # Clear mempool and check that the package passes now
+ node.generate(1)
+ assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=package_hex)])
+
+ def test_anc_count_limits(self):
+ """Create a 'V' shaped chain with 24 transactions in the mempool and 3 in the package:
+       M1a                  M1b
+        ^                    ^
+         M2a                M2b
+          .                  .
+           .                .
+            .              .
+            M12a        M12b
+               ^          ^
+                Pa      Pb
+                  ^    ^
+                    Pc
+ The lowest descendant, Pc, exceeds ancestor limits, but only if the in-mempool
+ and in-package ancestors are all considered together.
+ """
+ node = self.nodes[0]
+ assert_equal(0, node.getmempoolinfo()["size"])
+ package_hex = []
+ parents_tx = []
+ values = []
+ scripts = []
+
+ self.log.info("Check that in-mempool and in-package ancestors are calculated properly in packages")
+
+ # Two chains of 13 transactions each
+ for _ in range(2):
+ spk = None
+ top_coin = self.coins.pop()
+ txid = top_coin["txid"]
+ value = top_coin["amount"]
+ for i in range(13):
+ (tx, txhex, value, spk) = make_chain(node, self.address, self.privkeys, txid, value, 0, spk)
+ txid = tx.rehash()
+ if i < 12:
+ node.sendrawtransaction(txhex)
+ else: # Save the 13th transaction for the package
+ package_hex.append(txhex)
+ parents_tx.append(tx)
+ scripts.append(spk)
+ values.append(value)
+
+ # Child Pc
+ child_hex = create_child_with_parents(node, self.address, self.privkeys, parents_tx, values, scripts)
+ package_hex.append(child_hex)
+
+ assert_equal(24, node.getmempoolinfo()["size"])
+ assert_equal(3, len(package_hex))
+ testres_too_long = node.testmempoolaccept(rawtxs=package_hex)
+ for txres in testres_too_long:
+ assert_equal(txres["package-error"], "package-mempool-limits")
+
+ # Clear mempool and check that the package passes now
+ node.generate(1)
+ assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=package_hex)])
+
+ def test_anc_count_limits_2(self):
+ """Create a 'Y' shaped chain with 24 transactions in the mempool and 2 in the package:
+       M1a                  M1b
+        ^                    ^
+         M2a                M2b
+          .                  .
+           .                .
+            .              .
+            M12a        M12b
+                ^        ^
+                   Pc
+                   ^
+                   Pd
+ The lowest descendant, Pd, exceeds ancestor limits, but only if the in-mempool
+ and in-package ancestors are all considered together.
+ """
+ node = self.nodes[0]
+ assert_equal(0, node.getmempoolinfo()["size"])
+ parents_tx = []
+ values = []
+ scripts = []
+
+ self.log.info("Check that in-mempool and in-package ancestors are calculated properly in packages")
+ # Two chains of 12 transactions each
+ for _ in range(2):
+ spk = None
+ top_coin = self.coins.pop()
+ txid = top_coin["txid"]
+ value = top_coin["amount"]
+ for i in range(12):
+ (tx, txhex, value, spk) = make_chain(node, self.address, self.privkeys, txid, value, 0, spk)
+ txid = tx.rehash()
+ value -= Decimal("0.0001")
+ node.sendrawtransaction(txhex)
+ if i == 11:
+ # last 2 transactions will be the parents of Pc
+ parents_tx.append(tx)
+ values.append(value)
+ scripts.append(spk)
+
+ # Child Pc
+ pc_hex = create_child_with_parents(node, self.address, self.privkeys, parents_tx, values, scripts)
+ pc_tx = tx_from_hex(pc_hex)
+ pc_value = sum(values) - Decimal("0.0002")
+ pc_spk = pc_tx.vout[0].scriptPubKey.hex()
+
+ # Child Pd
+ (_, pd_hex, _, _) = make_chain(node, self.address, self.privkeys, pc_tx.rehash(), pc_value, 0, pc_spk)
+
+ assert_equal(24, node.getmempoolinfo()["size"])
+ testres_too_long = node.testmempoolaccept(rawtxs=[pc_hex, pd_hex])
+ for txres in testres_too_long:
+ assert_equal(txres["package-error"], "package-mempool-limits")
+
+ # Clear mempool and check that the package passes now
+ node.generate(1)
+ assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=[pc_hex, pd_hex])])
+
+ def test_anc_count_limits_bushy(self):
+ """Create a tree with 20 transactions in the mempool and 6 in the package:
+       M1...M4     M5...M8     M9...M12     M13...M16     M17...M20
+          ^           ^            ^             ^             ^       (each with 4 parents)
+          P0          P1           P2            P3            P4
+           ^           ^           ^            ^             ^        (5 parents)
+                                   PC
+ Where M(4i+1)...M(4i+4) are the parents of Pi, and P0, P1, P2, P3, and P4 are the parents of PC.
+ Each of P0...P4 has only 4 parents of its own, and PC has no in-mempool parents. But
+ combined, PC has 25 in-mempool and in-package ancestors.
+ """
+ node = self.nodes[0]
+ assert_equal(0, node.getmempoolinfo()["size"])
+ package_hex = []
+ parent_txns = []
+ parent_values = []
+ scripts = []
+ for _ in range(5): # Make package transactions P0 ... P4
+ gp_tx = []
+ gp_values = []
+ gp_scripts = []
+ for _ in range(4): # Make mempool transactions M(4i+1)...M(4i+4)
+ parent_coin = self.coins.pop()
+ value = parent_coin["amount"]
+ txid = parent_coin["txid"]
+ (tx, txhex, value, spk) = make_chain(node, self.address, self.privkeys, txid, value)
+ gp_tx.append(tx)
+ gp_values.append(value)
+ gp_scripts.append(spk)
+ node.sendrawtransaction(txhex)
+ # Package transaction Pi
+ pi_hex = create_child_with_parents(node, self.address, self.privkeys, gp_tx, gp_values, gp_scripts)
+ package_hex.append(pi_hex)
+ pi_tx = tx_from_hex(pi_hex)
+ parent_txns.append(pi_tx)
+ parent_values.append(Decimal(pi_tx.vout[0].nValue) / COIN)
+ scripts.append(pi_tx.vout[0].scriptPubKey.hex())
+ # Package transaction PC
+ package_hex.append(create_child_with_parents(node, self.address, self.privkeys, parent_txns, parent_values, scripts))
+
+ assert_equal(20, node.getmempoolinfo()["size"])
+ assert_equal(6, len(package_hex))
+ testres = node.testmempoolaccept(rawtxs=package_hex)
+ for txres in testres:
+ assert_equal(txres["package-error"], "package-mempool-limits")
+
+ # Clear mempool and check that the package passes now
+ node.generate(1)
+ assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=package_hex)])
+
+ def test_anc_size_limits(self):
+ """Test Case with 2 independent transactions in the mempool and a parent + child in the
+ package, where the package parent is the child of both mempool transactions (30KvB each):
+       A      B
+        ^    ^
+          C
+          ^
+          D
+ The lowest descendant, D, exceeds ancestor size limits, but only if the in-mempool
+ and in-package ancestors are all considered together.
+ """
+ node = self.nodes[0]
+ assert_equal(0, node.getmempoolinfo()["size"])
+ parents_tx = []
+ values = []
+ scripts = []
+ target_weight = WITNESS_SCALE_FACTOR * 1000 * 30 # 30KvB
+ high_fee = Decimal("0.003") # 10 sats/vB
+ self.log.info("Check that in-mempool and in-package ancestor size limits are calculated properly in packages")
+ # Mempool transactions A and B
+ for _ in range(2):
+ spk = None
+ top_coin = self.coins.pop()
+ txid = top_coin["txid"]
+ value = top_coin["amount"]
+ (tx, _, _, _) = make_chain(node, self.address, self.privkeys, txid, value, 0, spk, high_fee)
+ bulked_tx = bulk_transaction(tx, node, target_weight, self.privkeys)
+ node.sendrawtransaction(bulked_tx.serialize().hex())
+ parents_tx.append(bulked_tx)
+ values.append(Decimal(bulked_tx.vout[0].nValue) / COIN)
+ scripts.append(bulked_tx.vout[0].scriptPubKey.hex())
+
+ # Package transaction C
+ small_pc_hex = create_child_with_parents(node, self.address, self.privkeys, parents_tx, values, scripts, high_fee)
+ pc_tx = bulk_transaction(tx_from_hex(small_pc_hex), node, target_weight, self.privkeys)
+ pc_value = Decimal(pc_tx.vout[0].nValue) / COIN
+ pc_spk = pc_tx.vout[0].scriptPubKey.hex()
+ pc_hex = pc_tx.serialize().hex()
+
+ # Package transaction D
+ (small_pd, _, val, spk) = make_chain(node, self.address, self.privkeys, pc_tx.rehash(), pc_value, 0, pc_spk, high_fee)
+ prevtxs = [{
+ "txid": pc_tx.rehash(),
+ "vout": 0,
+ "scriptPubKey": spk,
+ "amount": val,
+ }]
+ pd_tx = bulk_transaction(small_pd, node, target_weight, self.privkeys, prevtxs)
+ pd_hex = pd_tx.serialize().hex()
+
+ assert_equal(2, node.getmempoolinfo()["size"])
+ testres_too_heavy = node.testmempoolaccept(rawtxs=[pc_hex, pd_hex])
+ for txres in testres_too_heavy:
+ assert_equal(txres["package-error"], "package-mempool-limits")
+
+ # Clear mempool and check that the package passes now
+ node.generate(1)
+ assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=[pc_hex, pd_hex])])
+
+ def test_desc_size_limits(self):
+ """Create 3 mempool transactions and 2 package transactions (25KvB each):
+             Ma
+            ^  ^
+          Mb    Mc
+          ^      ^
+         Pd      Pe
+ The top ancestor of the package, Ma, exceeds descendant size limits but only if its in-mempool
+ and in-package descendants are all considered together.
+ """
+ node = self.nodes[0]
+ assert_equal(0, node.getmempoolinfo()["size"])
+ target_weight = 21 * 1000 * WITNESS_SCALE_FACTOR
+ high_fee = Decimal("0.0021") # 10 sats/vB
+ self.log.info("Check that in-mempool and in-package descendant sizes are calculated properly in packages")
+ # Top parent in mempool, Ma
+ first_coin = self.coins.pop()
+ parent_value = (first_coin["amount"] - high_fee) / 2 # Deduct fee and make 2 outputs
+ inputs = [{"txid": first_coin["txid"], "vout": 0}]
+ outputs = [{self.address : parent_value}, {ADDRESS_BCRT1_P2WSH_OP_TRUE: parent_value}]
+ rawtx = node.createrawtransaction(inputs, outputs)
+ parent_tx = bulk_transaction(tx_from_hex(rawtx), node, target_weight, self.privkeys)
+ node.sendrawtransaction(parent_tx.serialize().hex())
+
+ package_hex = []
+ for j in range(2): # Two legs (left and right)
+ # Mempool transaction (Mb and Mc)
+ mempool_tx = CTransaction()
+ spk = parent_tx.vout[j].scriptPubKey.hex()
+ value = Decimal(parent_tx.vout[j].nValue) / COIN
+ txid = parent_tx.rehash()
+ prevtxs = [{
+ "txid": txid,
+ "vout": j,
+ "scriptPubKey": spk,
+ "amount": value,
+ }]
+ if j == 0: # normal key
+ (tx_small, _, _, _) = make_chain(node, self.address, self.privkeys, txid, value, j, spk, high_fee)
+ mempool_tx = bulk_transaction(tx_small, node, target_weight, self.privkeys, prevtxs)
+ else: # OP_TRUE
+ inputs = [{"txid": txid, "vout": 1}]
+ outputs = {self.address: value - high_fee}
+ small_tx = tx_from_hex(node.createrawtransaction(inputs, outputs))
+ mempool_tx = bulk_transaction(small_tx, node, target_weight, None, prevtxs)
+ node.sendrawtransaction(mempool_tx.serialize().hex())
+
+ # Package transaction (Pd and Pe)
+ spk = mempool_tx.vout[0].scriptPubKey.hex()
+ value = Decimal(mempool_tx.vout[0].nValue) / COIN
+ txid = mempool_tx.rehash()
+ (tx_small, _, _, _) = make_chain(node, self.address, self.privkeys, txid, value, 0, spk, high_fee)
+ prevtxs = [{
+ "txid": txid,
+ "vout": 0,
+ "scriptPubKey": spk,
+ "amount": value,
+ }]
+ package_tx = bulk_transaction(tx_small, node, target_weight, self.privkeys, prevtxs)
+ package_hex.append(package_tx.serialize().hex())
+
+ assert_equal(3, node.getmempoolinfo()["size"])
+ assert_equal(2, len(package_hex))
+ testres_too_heavy = node.testmempoolaccept(rawtxs=package_hex)
+ for txres in testres_too_heavy:
+ assert_equal(txres["package-error"], "package-mempool-limits")
+
+ # Clear mempool and check that the package passes now
+ node.generate(1)
+ assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=package_hex)])
+
+if __name__ == "__main__":
+ MempoolPackageLimitsTest().main()
diff --git a/test/functional/mempool_package_onemore.py b/test/functional/mempool_package_onemore.py
index fcd8b061fa..3ee50f5a8a 100755
--- a/test/functional/mempool_package_onemore.py
+++ b/test/functional/mempool_package_onemore.py
@@ -51,7 +51,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
(second_chain, second_chain_value) = chain_transaction(self.nodes[0], [utxo[1]['txid']], [utxo[1]['vout']], utxo[1]['amount'], fee, 1)
# Check mempool has MAX_ANCESTORS + 1 transactions in it
- assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 1)
+ assert_equal(len(self.nodes[0].getrawmempool()), MAX_ANCESTORS + 1)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many unconfirmed ancestors [limit: 25]", chain_transaction, self.nodes[0], [txid], [0], value, fee, 1)
@@ -74,7 +74,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
self.nodes[0].sendrawtransaction(signed_second_tx['hex'])
# Finally, check that we added two transactions
- assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 3)
+ assert_equal(len(self.nodes[0].getrawmempool()), MAX_ANCESTORS + 3)
if __name__ == '__main__':
MempoolPackagesTest().main()
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 5fc3ec23ae..173d1d7493 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -89,28 +89,28 @@ class MempoolPackagesTest(BitcoinTestFramework):
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
- assert_equal(mempool[x]['descendantcount'], descendant_count)
- descendant_fees += mempool[x]['fee']
- assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
- assert_equal(mempool[x]['fees']['base'], mempool[x]['fee'])
- assert_equal(mempool[x]['fees']['modified'], mempool[x]['modifiedfee'])
- assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
- assert_equal(mempool[x]['fees']['descendant'], descendant_fees)
- descendant_vsize += mempool[x]['vsize']
- assert_equal(mempool[x]['descendantsize'], descendant_vsize)
+ assert_equal(entry['descendantcount'], descendant_count)
+ descendant_fees += entry['fee']
+ assert_equal(entry['modifiedfee'], entry['fee'])
+ assert_equal(entry['fees']['base'], entry['fee'])
+ assert_equal(entry['fees']['modified'], entry['modifiedfee'])
+ assert_equal(entry['descendantfees'], descendant_fees * COIN)
+ assert_equal(entry['fees']['descendant'], descendant_fees)
+ descendant_vsize += entry['vsize']
+ assert_equal(entry['descendantsize'], descendant_vsize)
descendant_count += 1
# Check that ancestor calculations are correct
- assert_equal(mempool[x]['ancestorcount'], ancestor_count)
- assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN)
- assert_equal(mempool[x]['ancestorsize'], ancestor_vsize)
- ancestor_vsize -= mempool[x]['vsize']
- ancestor_fees -= mempool[x]['fee']
+ assert_equal(entry['ancestorcount'], ancestor_count)
+ assert_equal(entry['ancestorfees'], ancestor_fees * COIN)
+ assert_equal(entry['ancestorsize'], ancestor_vsize)
+ ancestor_vsize -= entry['vsize']
+ ancestor_fees -= entry['fee']
ancestor_count -= 1
# Check that parent/child list is correct
- assert_equal(mempool[x]['spentby'], descendants[-1:])
- assert_equal(mempool[x]['depends'], ancestors[-2:-1])
+ assert_equal(entry['spentby'], descendants[-1:])
+ assert_equal(entry['depends'], ancestors[-2:-1])
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
@@ -153,12 +153,12 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
- mempool = self.nodes[0].getrawmempool(True)
ancestor_fees = 0
for x in chain:
- ancestor_fees += mempool[x]['fee']
- assert_equal(mempool[x]['fees']['ancestor'], ancestor_fees + Decimal('0.00001'))
- assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)
+ entry = self.nodes[0].getmempoolentry(x)
+ ancestor_fees += entry['fee']
+ assert_equal(entry['fees']['ancestor'], ancestor_fees + Decimal('0.00001'))
+ assert_equal(entry['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
@@ -166,13 +166,13 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
- mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
- descendant_fees += mempool[x]['fee']
- assert_equal(mempool[x]['fees']['descendant'], descendant_fees + Decimal('0.00001'))
- assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
+ entry = self.nodes[0].getmempoolentry(x)
+ descendant_fees += entry['fee']
+ assert_equal(entry['fees']['descendant'], descendant_fees + Decimal('0.00001'))
+ assert_equal(entry['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain", chain_transaction, self.nodes[0], [txid], [vout], value, fee, 1)
@@ -190,16 +190,15 @@ class MempoolPackagesTest(BitcoinTestFramework):
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
- mempool = self.nodes[0].getrawmempool(True)
-
descendant_fees = 0
for x in reversed(chain):
- descendant_fees += mempool[x]['fee']
+ entry = self.nodes[0].getmempoolentry(x)
+ descendant_fees += entry['fee']
if (x == chain[-1]):
- assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
- assert_equal(mempool[x]['fees']['modified'], mempool[x]['fee']+satoshi_round(0.00002))
- assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
- assert_equal(mempool[x]['fees']['descendant'], descendant_fees+satoshi_round(0.00002))
+ assert_equal(entry['modifiedfee'], entry['fee']+satoshi_round(0.00002))
+ assert_equal(entry['fees']['modified'], entry['fee']+satoshi_round(0.00002))
+ assert_equal(entry['descendantfees'], descendant_fees * COIN + 2000)
+ assert_equal(entry['fees']['descendant'], descendant_fees+satoshi_round(0.00002))
# Check that node1's mempool is as expected (-> custom ancestor limit)
mempool0 = self.nodes[0].getrawmempool(False)
@@ -255,7 +254,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# - txs from previous ancestor test (-> custom ancestor limit)
# - parent tx for descendant test
# - txs chained off parent tx (-> custom descendant limit)
- self.wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
+ self.wait_until(lambda: len(self.nodes[1].getrawmempool()) ==
MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
index bcc6aa7bcc..b5086e1df1 100755
--- a/test/functional/mempool_reorg.py
+++ b/test/functional/mempool_reorg.py
@@ -80,7 +80,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
self.log.info("Generate a block")
last_block = self.nodes[0].generate(1)
# Sync blocks, so that peer 1 gets the block before timelock_tx
- # Otherwise, peer 1 would put the timelock_tx in recentRejects
+ # Otherwise, peer 1 would put the timelock_tx in m_recent_rejects
self.sync_all()
self.log.info("The time-locked transaction can now be spent")
diff --git a/test/functional/mempool_unbroadcast.py b/test/functional/mempool_unbroadcast.py
index b475b65e68..d10b510c6a 100755
--- a/test/functional/mempool_unbroadcast.py
+++ b/test/functional/mempool_unbroadcast.py
@@ -32,7 +32,7 @@ class MempoolUnbroadcastTest(BitcoinTestFramework):
node = self.nodes[0]
min_relay_fee = node.getnetworkinfo()["relayfee"]
- utxos = create_confirmed_utxos(min_relay_fee, node, 10)
+ utxos = create_confirmed_utxos(self, min_relay_fee, node, 10)
self.disconnect_nodes(0, 1)
@@ -92,6 +92,10 @@ class MempoolUnbroadcastTest(BitcoinTestFramework):
self.disconnect_nodes(0, 1)
node.disconnect_p2ps()
+ self.log.info("Rebroadcast transaction and ensure it is not added to unbroadcast set when already in mempool")
+ rpc_tx_hsh = node.sendrawtransaction(txFS["hex"])
+ assert not node.getmempoolentry(rpc_tx_hsh)['unbroadcast']
+
def test_txn_removal(self):
self.log.info("Test that transactions removed from mempool are removed from unbroadcast set")
node = self.nodes[0]
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
index 8baf974a0a..4cd11e9d11 100755
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -86,7 +86,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
unsigned_raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed_raw_tx = self.nodes[0].signrawtransactionwithwallet(unsigned_raw_tx)
tx_id.append(self.nodes[0].sendrawtransaction(signed_raw_tx['hex']))
- tx_size.append(self.nodes[0].getrawmempool(True)[tx_id[-1]]['vsize'])
+ tx_size.append(self.nodes[0].getmempoolentry(tx_id[-1])['vsize'])
if tx_count in n_tx_to_mine:
# The created transactions are mined into blocks by batches.
@@ -109,10 +109,11 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
self.log.info('Checking descendants/ancestors properties of all of the in-mempool transactions...')
for k, tx in enumerate(tx_id):
self.log.debug('Check transaction #{}.'.format(k))
- assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantcount'], size - k)
- assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantsize'], sum(tx_size[k:size]))
- assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorcount'], k + 1)
- assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorsize'], sum(tx_size[0:(k + 1)]))
+ entry = self.nodes[0].getmempoolentry(tx)
+ assert_equal(entry['descendantcount'], size - k)
+ assert_equal(entry['descendantsize'], sum(tx_size[k:size]))
+ assert_equal(entry['ancestorcount'], k + 1)
+ assert_equal(entry['ancestorsize'], sum(tx_size[0:(k + 1)]))
def run_test(self):
# Use batch size limited by DEFAULT_ANCESTOR_LIMIT = 25 to not fire "too many unconfirmed parents" error.
diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py
index ba467c1517..23711646a2 100755
--- a/test/functional/mining_basic.py
+++ b/test/functional/mining_basic.py
@@ -13,6 +13,7 @@ from decimal import Decimal
from test_framework.blocktools import (
create_coinbase,
+ get_witness_script,
NORMAL_GBT_REQUEST_PARAMS,
TIME_GENESIS_BLOCK,
)
@@ -20,6 +21,7 @@ from test_framework.messages import (
CBlock,
CBlockHeader,
BLOCK_HEADER_SIZE,
+ ser_uint256,
)
from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
@@ -49,6 +51,9 @@ class MiningTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.supports_cli = False
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
def mine_chain(self):
self.log.info('Create some old blocks')
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
@@ -60,10 +65,10 @@ class MiningTest(BitcoinTestFramework):
assert_equal(mining_info['currentblockweight'], 4000)
self.log.info('test blockversion')
- self.restart_node(0, extra_args=['-mocktime={}'.format(t), '-blockversion=1337'])
+ self.restart_node(0, extra_args=[f'-mocktime={t}', '-blockversion=1337'])
self.connect_nodes(0, 1)
assert_equal(1337, self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)['version'])
- self.restart_node(0, extra_args=['-mocktime={}'.format(t)])
+ self.restart_node(0, extra_args=[f'-mocktime={t}'])
self.connect_nodes(0, 1)
assert_equal(VERSIONBITS_TOP_BITS + (1 << VERSIONBITS_DEPLOYMENT_TESTDUMMY_BIT), self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)['version'])
self.restart_node(0)
@@ -89,7 +94,21 @@ class MiningTest(BitcoinTestFramework):
assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
assert_equal(mining_info['pooledtx'], 0)
- # Mine a block to leave initial block download
+ self.log.info("getblocktemplate: Test default witness commitment")
+ txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)
+ tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
+
+ # Check that default_witness_commitment is present.
+ assert 'default_witness_commitment' in tmpl
+ witness_commitment = tmpl['default_witness_commitment']
+
+ # Check that default_witness_commitment is correct.
+ witness_root = CBlock.get_merkle_root([ser_uint256(0),
+ ser_uint256(txid)])
+ script = get_witness_script(witness_root, 0)
+ assert_equal(witness_commitment, script.hex())
+
+ # Mine a block to leave initial block download and clear the mempool
node.generatetoaddress(1, node.get_deterministic_priv_key().address)
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
self.log.info("getblocktemplate: Test capability advertised")
diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py
index 715b68e04c..3003627778 100755
--- a/test/functional/mining_getblocktemplate_longpoll.py
+++ b/test/functional/mining_getblocktemplate_longpoll.py
@@ -48,9 +48,9 @@ class GetBlockTemplateLPTest(BitcoinTestFramework):
thr.join(5) # wait 5 seconds or until thread exits
assert thr.is_alive()
- miniwallets = [ MiniWallet(node) for node in self.nodes ]
+ miniwallets = [MiniWallet(node) for node in self.nodes]
self.log.info("Test that longpoll will terminate if another node generates a block")
- miniwallets[1].generate(1) # generate a block on another node
+ self.generate(miniwallets[1], 1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
@@ -58,7 +58,7 @@ class GetBlockTemplateLPTest(BitcoinTestFramework):
self.log.info("Test that longpoll will terminate if we generate a block ourselves")
thr = LongpollThread(self.nodes[0])
thr.start()
- miniwallets[0].generate(1) # generate a block on own node
+ self.generate(miniwallets[0], 1) # generate a block on own node
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py
index 1426fdaacb..d357e2ed47 100755
--- a/test/functional/mining_prioritisetransaction.py
+++ b/test/functional/mining_prioritisetransaction.py
@@ -6,7 +6,7 @@
import time
-from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
+from test_framework.messages import COIN, MAX_BLOCK_WEIGHT
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
@@ -48,7 +48,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
- utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
+ utxos = create_confirmed_utxos(self, self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
@@ -61,15 +61,15 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
- # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
- # more transactions.
+ # MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to
+ # create more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert j in mempool
sizes[i] += mempool[j]['vsize']
- assert sizes[i] > MAX_BLOCK_BASE_SIZE # Fail => raise utxo_count
+ assert sizes[i] > MAX_BLOCK_WEIGHT // 4 # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py
index 1a414959b9..e93698b08e 100755
--- a/test/functional/p2p_addr_relay.py
+++ b/test/functional/p2p_addr_relay.py
@@ -11,23 +11,29 @@ from test_framework.messages import (
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
- msg_getaddr
+ msg_getaddr,
+ msg_verack
)
-from test_framework.p2p import P2PInterface
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
- assert_equal,
+from test_framework.p2p import (
+ P2PInterface,
+ p2p_lock,
)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal, assert_greater_than
+import random
import time
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
test_addr_contents = False
+ _tokens = 1
+ send_getaddr = True
- def __init__(self, test_addr_contents=False):
+ def __init__(self, test_addr_contents=False, send_getaddr=True):
super().__init__()
self.test_addr_contents = test_addr_contents
+ self.send_getaddr = send_getaddr
def on_addr(self, message):
for addr in message.addrs:
@@ -40,9 +46,28 @@ class AddrReceiver(P2PInterface):
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
+ def on_getaddr(self, message):
+ # When the node sends us a getaddr, it increments the addr relay tokens for the connection by 1000
+ self._tokens += 1000
+
+ @property
+ def tokens(self):
+ with p2p_lock:
+ return self._tokens
+
+ def increment_tokens(self, n):
+ # When we move mocktime forward, the node increments the addr relay tokens for its peers
+ with p2p_lock:
+ self._tokens += n
+
def addr_received(self):
return self.num_ipv4_received != 0
+ def on_version(self, message):
+ self.send_message(msg_verack())
+ if (self.send_getaddr):
+ self.send_message(msg_getaddr())
+
def getaddr_received(self):
return self.message_count['getaddr'] > 0
@@ -53,12 +78,18 @@ class AddrTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ self.extra_args = [["-whitelist=addr@127.0.0.1"]]
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
+ self.inbound_blackhole_tests()
+
+ # This test populates the addrman, which can impact the node's behavior
+ # in subsequent tests
self.getaddr_tests()
self.blocksonly_mode_tests()
+ self.rate_limit_tests()
def setup_addr_msg(self, num):
addrs = []
@@ -75,6 +106,19 @@ class AddrTest(BitcoinTestFramework):
msg.addrs = addrs
return msg
+ def setup_rand_addr_msg(self, num):
+ addrs = []
+ for i in range(num):
+ addr = CAddress()
+ addr.time = self.mocktime + i
+ addr.nServices = NODE_NETWORK | NODE_WITNESS
+ addr.ip = f"{random.randrange(128,169)}.{random.randrange(1,255)}.{random.randrange(1,255)}.{random.randrange(1,255)}"
+ addr.port = 8333
+ addrs.append(addr)
+ msg = msg_addr()
+ msg.addrs = addrs
+ return msg
+
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# pop m_next_addr_send timer
@@ -124,7 +168,7 @@ class AddrTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
- inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))
+ inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True, send_getaddr=False))
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
@@ -135,6 +179,9 @@ class AddrTest(BitcoinTestFramework):
# of the outbound peer which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
+ # Send an empty ADDR message to initialize address relay on this connection.
+ inbound_peer.send_and_ping(msg_addr())
+
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
@@ -152,7 +199,64 @@ class AddrTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
+ def sum_addr_messages(self, msgs_dict):
+ return sum(bytes_received for (msg, bytes_received) in msgs_dict.items() if msg in ['addr', 'addrv2', 'getaddr'])
+
+ def inbound_blackhole_tests(self):
+ self.log.info('Check that we only relay addresses to inbound peers who have previously sent us addr related messages')
+
+ addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
+ receiver_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
+ blackhole_peer = self.nodes[0].add_p2p_connection(AddrReceiver(send_getaddr=False))
+ initial_addrs_received = receiver_peer.num_ipv4_received
+
+ peerinfo = self.nodes[0].getpeerinfo()
+ assert_equal(peerinfo[0]['addr_relay_enabled'], True) # addr_source
+ assert_equal(peerinfo[1]['addr_relay_enabled'], True) # receiver_peer
+ assert_equal(peerinfo[2]['addr_relay_enabled'], False) # blackhole_peer
+
+ # addr_source sends 2 addresses to node0
+ msg = self.setup_addr_msg(2)
+ addr_source.send_and_ping(msg)
+ self.mocktime += 30 * 60
+ self.nodes[0].setmocktime(self.mocktime)
+ receiver_peer.sync_with_ping()
+ blackhole_peer.sync_with_ping()
+
+ peerinfo = self.nodes[0].getpeerinfo()
+
+ # Confirm node received addr-related messages from receiver peer
+ assert_greater_than(self.sum_addr_messages(peerinfo[1]['bytesrecv_per_msg']), 0)
+ # And that peer received addresses
+ assert_equal(receiver_peer.num_ipv4_received - initial_addrs_received, 2)
+
+ # Confirm node has not received addr-related messages from blackhole peer
+ assert_equal(self.sum_addr_messages(peerinfo[2]['bytesrecv_per_msg']), 0)
+ # And that peer did not receive addresses
+ assert_equal(blackhole_peer.num_ipv4_received, 0)
+
+ self.log.info("After blackhole peer sends addr message, it becomes eligible for addr gossip")
+ blackhole_peer.send_and_ping(msg_addr())
+
+ # Confirm node has now received addr-related messages from blackhole peer
+ assert_greater_than(self.sum_addr_messages(peerinfo[1]['bytesrecv_per_msg']), 0)
+ assert_equal(self.nodes[0].getpeerinfo()[2]['addr_relay_enabled'], True)
+
+ msg = self.setup_addr_msg(2)
+ self.send_addr_msg(addr_source, msg, [receiver_peer, blackhole_peer])
+
+ # And that peer received addresses
+ assert_equal(blackhole_peer.num_ipv4_received, 2)
+
+ self.nodes[0].disconnect_p2ps()
+
def getaddr_tests(self):
+ # In the previous tests, the node answered GETADDR requests with an
+ # empty addrman. Due to GETADDR response caching (see
+ # CConnman::GetAddresses), the node would continue to provide 0 addrs
+ # in response until enough time has passed or the node is restarted.
+ self.restart_node(0)
+
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
@@ -165,7 +269,7 @@ class AddrTest(BitcoinTestFramework):
assert_equal(block_relay_peer.getaddr_received(), False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
- inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
+ inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(send_getaddr=False))
inbound_peer.sync_with_ping()
# Add some addresses to addrman
@@ -191,7 +295,7 @@ class AddrTest(BitcoinTestFramework):
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
- self.restart_node(0, ["-blocksonly"])
+ self.restart_node(0, ["-blocksonly", "-whitelist=addr@127.0.0.1"])
self.mocktime = int(time.time())
self.log.info('Check that we send getaddr messages')
@@ -207,6 +311,63 @@ class AddrTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
+ def send_addrs_and_test_rate_limiting(self, peer, no_relay, *, new_addrs, total_addrs):
+ """Send an addr message and check that the number of addresses processed and rate-limited is as expected"""
+
+ peer.send_and_ping(self.setup_rand_addr_msg(new_addrs))
+
+ peerinfo = self.nodes[0].getpeerinfo()[0]
+ addrs_processed = peerinfo['addr_processed']
+ addrs_rate_limited = peerinfo['addr_rate_limited']
+ self.log.debug(f"addrs_processed = {addrs_processed}, addrs_rate_limited = {addrs_rate_limited}")
+
+ if no_relay:
+ assert_equal(addrs_processed, 0)
+ assert_equal(addrs_rate_limited, 0)
+ else:
+ assert_equal(addrs_processed, min(total_addrs, peer.tokens))
+ assert_equal(addrs_rate_limited, max(0, total_addrs - peer.tokens))
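+
+ # Illustrative token accounting, assuming the parameters this test exercises: each connection
+ # starts with 1 token, a GETADDR we send grants 1000 more, and tokens trickle in at 0.1 per
+ # second of mocktime. For example, an outbound-full-relay peer holding 1001 tokens that has
+ # been sent 1200 addresses in total should have 1001 processed and 199 rate-limited:
+ assert min(1200, 1001) == 1001 and max(0, 1200 - 1001) == 199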
+
+ def rate_limit_tests(self):
+ self.mocktime = int(time.time())
+ self.restart_node(0, [])
+ self.nodes[0].setmocktime(self.mocktime)
+
+ for conn_type, no_relay in [("outbound-full-relay", False), ("block-relay-only", True), ("inbound", False)]:
+ self.log.info(f'Test rate limiting of addr processing for {conn_type} peers')
+ if conn_type == "inbound":
+ peer = self.nodes[0].add_p2p_connection(AddrReceiver())
+ else:
+ peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type=conn_type)
+
+ # Send 600 addresses. For all but the block-relay-only peer this should result in addresses being processed.
+ self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=600, total_addrs=600)
+
+ # Send 600 more addresses. For the outbound-full-relay peer (to which we send a GETADDR, and from
+ # which we will therefore process up to 1001 incoming addresses), this means more addresses will be processed.
+ self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=600, total_addrs=1200)
+
+ # Send 10 more. As we have reached the processing limit for all peers, no more addresses should be processed.
+ self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=10, total_addrs=1210)
+
+ # Advance the time by 100 seconds, permitting the processing of 10 more addresses.
+ # Send 200 and verify that 10 are processed.
+ self.mocktime += 100
+ self.nodes[0].setmocktime(self.mocktime)
+ peer.increment_tokens(10)
+
+ self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=200, total_addrs=1410)
+
+ # Advance the time by 1000 seconds, permitting the processing of 100 more addresses.
+ # Send 200 and verify that 100 are processed.
+ self.mocktime += 1000
+ self.nodes[0].setmocktime(self.mocktime)
+ peer.increment_tokens(100)
+
+ self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=200, total_addrs=1610)
+
+ self.nodes[0].disconnect_p2ps()
+
if __name__ == '__main__':
AddrTest().main()
diff --git a/test/functional/p2p_addrfetch.py b/test/functional/p2p_addrfetch.py
new file mode 100755
index 0000000000..2a0f6432a9
--- /dev/null
+++ b/test/functional/p2p_addrfetch.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test p2p addr-fetch connections
+"""
+
+import time
+
+from test_framework.messages import msg_addr, CAddress, NODE_NETWORK, NODE_WITNESS
+from test_framework.p2p import P2PInterface, p2p_lock
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+ADDR = CAddress()
+ADDR.time = int(time.time())
+ADDR.nServices = NODE_NETWORK | NODE_WITNESS
+ADDR.ip = "192.0.0.8"
+ADDR.port = 18444
+
+
+class P2PAddrFetch(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def assert_getpeerinfo(self, *, peer_ids):
+ num_peers = len(peer_ids)
+ info = self.nodes[0].getpeerinfo()
+ assert_equal(len(info), num_peers)
+ for n in range(0, num_peers):
+ assert_equal(info[n]['id'], peer_ids[n])
+ assert_equal(info[n]['connection_type'], 'addr-fetch')
+
+ def run_test(self):
+ node = self.nodes[0]
+ self.log.info("Connect to an addr-fetch peer")
+ peer_id = 0
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=peer_id, connection_type="addr-fetch")
+ self.assert_getpeerinfo(peer_ids=[peer_id])
+
+ self.log.info("Check that we send getaddr but don't try to sync headers with the addr-fetch peer")
+ peer.sync_send_with_ping()
+ with p2p_lock:
+ assert peer.message_count['getaddr'] == 1
+ assert peer.message_count['getheaders'] == 0
+
+ self.log.info("Check that answering the getaddr with a single address does not lead to disconnect")
+ # This prevents disconnecting on self-announcements
+ msg = msg_addr()
+ msg.addrs = [ADDR]
+ peer.send_and_ping(msg)
+ self.assert_getpeerinfo(peer_ids=[peer_id])
+
+ self.log.info("Check that answering with larger addr messages leads to disconnect")
+ msg.addrs = [ADDR] * 2
+ peer.send_message(msg)
+ peer.wait_for_disconnect(timeout=5)
+
+ self.log.info("Check timeout for addr-fetch peer that does not send addrs")
+ peer_id = 1
+ peer = node.add_outbound_p2p_connection(P2PInterface(), p2p_idx=peer_id, connection_type="addr-fetch")
+
+ time_now = int(time.time())
+ self.assert_getpeerinfo(peer_ids=[peer_id])
+
+ # Expect addr-fetch peer connection to be maintained up to 5 minutes.
+ node.setmocktime(time_now + 295)
+ self.assert_getpeerinfo(peer_ids=[peer_id])
+
+ # Expect addr-fetch peer connection to be disconnected after 5 minutes.
+ node.setmocktime(time_now + 301)
+ peer.wait_for_disconnect(timeout=5)
+ self.assert_getpeerinfo(peer_ids=[])
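+
+ # Timing sketch for the checks above, assuming an addr-fetch timeout of five minutes:
+ # at 295 seconds the connection is still maintained, at 301 seconds it is past the timeout.
+ ADDR_FETCH_TIMEOUT = 5 * 60
+ assert 295 < ADDR_FETCH_TIMEOUT <= 301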
+
+
+if __name__ == '__main__':
+ P2PAddrFetch().main()
diff --git a/test/functional/p2p_addrv2_relay.py b/test/functional/p2p_addrv2_relay.py
index 23ce3e5d04..32c1d42b1c 100755
--- a/test/functional/p2p_addrv2_relay.py
+++ b/test/functional/p2p_addrv2_relay.py
@@ -18,12 +18,19 @@ from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
+I2P_ADDR = "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p"
+
ADDRS = []
for i in range(10):
addr = CAddress()
addr.time = int(time.time()) + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
- addr.ip = "123.123.123.{}".format(i % 256)
+ # Add one I2P address at an arbitrary position.
+ if i == 5:
+ addr.net = addr.NET_I2P
+ addr.ip = I2P_ADDR
+ else:
+ addr.ip = f"123.123.123.{i % 256}"
addr.port = 8333 + i
ADDRS.append(addr)
@@ -35,11 +42,10 @@ class AddrReceiver(P2PInterface):
super().__init__(support_addrv2 = True)
def on_addrv2(self, message):
- for addr in message.addrs:
- assert_equal(addr.nServices, 9)
- assert addr.ip.startswith('123.123.123.')
- assert (8333 <= addr.port < 8343)
- self.addrv2_received_and_checked = True
+ expected_set = set((addr.ip, addr.port) for addr in ADDRS)
+ received_set = set((addr.ip, addr.port) for addr in message.addrs)
+ if expected_set == received_set:
+ self.addrv2_received_and_checked = True
def wait_for_addrv2(self):
self.wait_until(lambda: "addrv2" in self.last_message)
@@ -49,6 +55,7 @@ class AddrTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
+ self.extra_args = [["-whitelist=addr@127.0.0.1"]]
def run_test(self):
self.log.info('Create connection that sends addrv2 messages')
@@ -64,15 +71,18 @@ class AddrTest(BitcoinTestFramework):
addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
msg.addrs = ADDRS
with self.nodes[0].assert_debug_log([
- 'Added 10 addresses from 127.0.0.1: 0 tried',
- 'received: addrv2 (131 bytes) peer=0',
- 'sending addrv2 (131 bytes) peer=1',
+ # The I2P address is not added to the node's own addrman because the node
+ # has no I2P reachability (thus 10 - 1 = 9).
+ 'Added 9 addresses from 127.0.0.1: 0 tried',
+ 'received: addrv2 (159 bytes) peer=0',
+ 'sending addrv2 (159 bytes) peer=1',
]):
addr_source.send_and_ping(msg)
self.nodes[0].setmocktime(int(time.time()) + 30 * 60)
addr_receiver.wait_for_addrv2()
assert addr_receiver.addrv2_received_and_checked
+ assert_equal(len(self.nodes[0].getnodeaddresses(count=0, network="i2p")), 0)
if __name__ == '__main__':
diff --git a/test/functional/p2p_dns_seeds.py b/test/functional/p2p_dns_seeds.py
new file mode 100755
index 0000000000..e58ad8e0fc
--- /dev/null
+++ b/test/functional/p2p_dns_seeds.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test ThreadDNSAddressSeed logic for querying DNS seeds."""
+
+import itertools
+
+from test_framework.p2p import P2PInterface
+from test_framework.test_framework import BitcoinTestFramework
+
+
+class P2PDNSSeeds(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.extra_args = [["-dnsseed=1"]]
+
+ def run_test(self):
+ self.init_arg_tests()
+ self.existing_outbound_connections_test()
+ self.existing_block_relay_connections_test()
+ self.force_dns_test()
+ self.wait_time_tests()
+
+ def init_arg_tests(self):
+ fakeaddr = "fakenodeaddr.fakedomain.invalid."
+
+ self.log.info("Check that setting -connect disables -dnsseed by default")
+ self.nodes[0].stop_node()
+ with self.nodes[0].assert_debug_log(expected_msgs=["DNS seeding disabled"]):
+ self.start_node(0, [f"-connect={fakeaddr}"])
+
+ self.log.info("Check that running -connect and -dnsseed means DNS logic runs.")
+ with self.nodes[0].assert_debug_log(expected_msgs=["Loading addresses from DNS seed"], timeout=12):
+ self.restart_node(0, [f"-connect={fakeaddr}", "-dnsseed=1"])
+
+ self.log.info("Check that running -forcednsseed and -dnsseed=0 throws an error.")
+ self.nodes[0].stop_node()
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg="Error: Cannot set -forcednsseed to true when setting -dnsseed to false.",
+ extra_args=["-forcednsseed=1", "-dnsseed=0"],
+ )
+
+ self.log.info("Check that running -forcednsseed and -connect throws an error.")
+ # -connect soft sets -dnsseed to false, so throws the same error
+ self.nodes[0].stop_node()
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg="Error: Cannot set -forcednsseed to true when setting -dnsseed to false.",
+ extra_args=["-forcednsseed=1", f"-connect={fakeaddr}"],
+ )
+
+ # Restore default bitcoind settings
+ self.restart_node(0)
+
+ def existing_outbound_connections_test(self):
+ # Make sure addrman is populated to enter the conditional where we
+ # delay and potentially skip DNS seeding.
+ self.nodes[0].addpeeraddress("192.0.0.8", 8333)
+
+ self.log.info("Check that we *do not* query DNS seeds if we have 2 outbound connections")
+
+ self.restart_node(0)
+ with self.nodes[0].assert_debug_log(expected_msgs=["P2P peers available. Skipped DNS seeding."], timeout=12):
+ for i in range(2):
+ self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay")
+
+ def existing_block_relay_connections_test(self):
+ # Make sure addrman is populated to enter the conditional where we
+ # delay and potentially skip DNS seeding. No-op when run after
+ # existing_outbound_connections_test.
+ self.nodes[0].addpeeraddress("192.0.0.8", 8333)
+
+ self.log.info("Check that we *do* query DNS seeds if we only have 2 block-relay-only connections")
+
+ self.restart_node(0)
+ with self.nodes[0].assert_debug_log(expected_msgs=["Loading addresses from DNS seed"], timeout=12):
+ # This mimics the "anchors" logic where nodes are likely to
+ # reconnect to block-relay-only connections on startup.
+ # Since we do not participate in addr relay with these connections,
+ # we still want to query the DNS seeds.
+ for i in range(2):
+ self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="block-relay-only")
+
+ def force_dns_test(self):
+ self.log.info("Check that we query DNS seeds if -forcednsseed param is set")
+
+ with self.nodes[0].assert_debug_log(expected_msgs=["Loading addresses from DNS seed"], timeout=12):
+ # -dnsseed defaults to 1 in bitcoind, but 0 in the test framework,
+ # so pass it explicitly here
+ self.restart_node(0, ["-forcednsseed", "-dnsseed=1"])
+
+ # Restore default for subsequent tests
+ self.restart_node(0)
+
+ def wait_time_tests(self):
+ self.log.info("Check the delay before querying DNS seeds")
+
+ # Populate addrman with < 1000 addresses
+ for i in range(5):
+ a = f"192.0.0.{i}"
+ self.nodes[0].addpeeraddress(a, 8333)
+
+ # The delay should be 11 seconds
+ with self.nodes[0].assert_debug_log(expected_msgs=["Waiting 11 seconds before querying DNS seeds.\n"]):
+ self.restart_node(0)
+
+ # Populate addrman with > 1000 addresses
+ for i in itertools.count():
+ first_octet = i % 2 + 1
+ second_octet = i % 256
+ third_octet = i % 100
+ a = f"{first_octet}.{second_octet}.{third_octet}.1"
+ self.nodes[0].addpeeraddress(a, 8333)
+ if (i > 1000 and i % 100 == 0):
+ # The addrman size is non-deterministic because new addresses
+ # are sorted into buckets, potentially displacing existing
+ # addresses. Periodically check if we have met the desired
+ # threshold.
+ if len(self.nodes[0].getnodeaddresses(0)) > 1000:
+ break
+
+ # The delay should be 5 mins
+ with self.nodes[0].assert_debug_log(expected_msgs=["Waiting 300 seconds before querying DNS seeds.\n"]):
+ self.restart_node(0)
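+
+ # Minimal sketch of the delay this test expects, assuming the two thresholds exercised above
+ # (11 seconds while addrman is small, 300 seconds once it holds on the order of 1000+
+ # addresses); the exact cutoff is an implementation detail of ThreadDNSAddressSeed:
+ def expected_seed_delay(addrman_size): return 11 if addrman_size < 1000 else 300
+ assert expected_seed_delay(5) == 11 and expected_seed_delay(2000) == 300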
+
+
+if __name__ == '__main__':
+ P2PDNSSeeds().main()
diff --git a/test/functional/p2p_i2p_ports.py b/test/functional/p2p_i2p_ports.py
new file mode 100755
index 0000000000..13188b9305
--- /dev/null
+++ b/test/functional/p2p_i2p_ports.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021-2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test port handling for I2P hosts
+"""
+
+import re
+
+from test_framework.test_framework import BitcoinTestFramework
+
+
+class I2PPorts(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ # The test assumes that an I2P SAM proxy is not listening here.
+ self.extra_args = [["-i2psam=127.0.0.1:60000"]]
+
+ def run_test(self):
+ node = self.nodes[0]
+
+ self.log.info("Ensure we don't try to connect if port!=0")
+ addr = "zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:8333"
+ raised = False
+ try:
+ with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}"]):
+ node.addnode(node=addr, command="onetry")
+ except AssertionError as e:
+ raised = True
+ if not re.search(r"Expected messages .* does not partially match log", str(e)):
+ raise AssertionError(f"Assertion raised as expected, but with an unexpected message: {str(e)}")
+ if not raised:
+ raise AssertionError("Assertion should have been raised")
+
+ self.log.info("Ensure we try to connect if port=0 and get an error due to missing I2P proxy")
+ addr = "h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:0"
+ with node.assert_debug_log(expected_msgs=[f"Error connecting to {addr}"]):
+ node.addnode(node=addr, command="onetry")
+
+
+if __name__ == '__main__':
+ I2PPorts().main()
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 788a81d4af..f3b80abb59 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -28,7 +28,6 @@ from test_framework.p2p import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- hex_str_to_bytes,
)
VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5 # Account for the 5-byte length prefix
@@ -58,6 +57,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
+ self.extra_args = [["-whitelist=addr@127.0.0.1"]]
def run_test(self):
self.test_buffer()
@@ -186,7 +186,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
[
'received: addrv2 (1 bytes)',
],
- hex_str_to_bytes('00'))
+ bytes.fromhex('00'))
def test_addrv2_too_long_address(self):
self.test_addrv2('too long address',
@@ -195,7 +195,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
'ProcessMessages(addrv2, 525 bytes): Exception',
'Address too long: 513 > 512',
],
- hex_str_to_bytes(
+ bytes.fromhex(
'01' + # number of entries
'61bc6649' + # time, Fri Jan 9 02:54:25 UTC 2009
'00' + # service flags, COMPACTSIZE(NODE_NONE)
@@ -212,7 +212,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
'IP 9.9.9.9 mapped',
'Added 1 addresses',
],
- hex_str_to_bytes(
+ bytes.fromhex(
'02' + # number of entries
# this should be ignored without impeding acceptance of subsequent ones
now_hex + # time
diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py
index 8783c244c3..8fef2d173f 100755
--- a/test/functional/p2p_invalid_tx.py
+++ b/test/functional/p2p_invalid_tx.py
@@ -141,9 +141,9 @@ class InvalidTxRequestTest(BitcoinTestFramework):
tx_orphan_2_valid, # The valid transaction (with sufficient fee)
]
}
- # Transactions that do not end up in the mempool
- # tx_orphan_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
- # tx_orphan_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
+ # Transactions that do not end up in the mempool:
+ # tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
+ # tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
self.wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
index 594a28d662..8b285907c5 100755
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -130,7 +130,7 @@ class P2PPermissionsTests(BitcoinTestFramework):
tx.vout[0].nValue += 1
txid = tx.rehash()
# Send the transaction twice. The first time, it'll be rejected by ATMP because it conflicts
- # with a mempool transaction. The second time, it'll be in the recentRejects filter.
+ # with a mempool transaction. The second time, it'll be in the m_recent_rejects filter.
p2p_rebroadcast_wallet.send_txs_and_test(
[tx],
self.nodes[1],
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 95c7aec318..e5093855ff 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -4,16 +4,14 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from decimal import Decimal
-import math
import random
import struct
import time
-from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
+from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
- CBlock,
CBlockHeader,
CInv,
COutPoint,
@@ -22,7 +20,7 @@ from test_framework.messages import (
CTxInWitness,
CTxOut,
CTxWitness,
- MAX_BLOCK_BASE_SIZE,
+ MAX_BLOCK_WEIGHT,
MSG_BLOCK,
MSG_TX,
MSG_WITNESS_FLAG,
@@ -41,7 +39,6 @@ from test_framework.messages import (
ser_vector,
sha256,
tx_from_hex,
- uint256_from_str,
)
from test_framework.p2p import (
P2PInterface,
@@ -60,12 +57,8 @@ from test_framework.script import (
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
- OP_DUP,
OP_ELSE,
OP_ENDIF,
- OP_EQUAL,
- OP_EQUALVERIFY,
- OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
@@ -77,11 +70,16 @@ from test_framework.script import (
LegacySignatureHash,
hash160,
)
+from test_framework.script_util import (
+ key_to_p2wpkh_script,
+ keyhash_to_p2pkh_script,
+ script_to_p2sh_script,
+ script_to_p2wsh_script,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
softfork_active,
- hex_str_to_bytes,
assert_raises_rpc_error,
)
@@ -100,27 +98,13 @@ class UTXO():
self.n = n
self.nValue = value
-def get_p2pkh_script(pubkeyhash):
- """Get the script associated with a P2PKH."""
- return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
-
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
- """Add signature for a P2PK witness program."""
+ """Add signature for a P2PK witness script."""
tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
-def get_virtual_size(witness_block):
- """Calculate the virtual size of a witness block.
-
- Virtual size is base + witness/4."""
- base_size = len(witness_block.serialize(with_witness=False))
- total_size = len(witness_block.serialize())
- # the "+3" is so we round up
- vsize = int((3 * base_size + total_size + 3) / 4)
- return vsize
-
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
@@ -209,24 +193,17 @@ class TestP2PConn(P2PInterface):
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 3
+ self.num_nodes = 2
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "-whitelist=noban@127.0.0.1"],
["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
- ["-acceptnonstdtxn=1", "-segwitheight=-1"],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
- def setup_network(self):
- self.setup_nodes()
- self.connect_nodes(0, 1)
- self.connect_nodes(0, 2)
- self.sync_all()
-
# Helper functions
def build_next_block(self, version=4):
@@ -267,7 +244,6 @@ class SegWitTest(BitcoinTestFramework):
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
- self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
@@ -284,7 +260,7 @@ class SegWitTest(BitcoinTestFramework):
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
- self.test_max_witness_program_length()
+ self.test_max_witness_script_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
@@ -295,7 +271,6 @@ class SegWitTest(BitcoinTestFramework):
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
- self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
self.test_wtxid_relay()
@@ -428,7 +403,7 @@ class SegWitTest(BitcoinTestFramework):
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
- assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
+ assert_equal(block.serialize(), bytes.fromhex(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
@@ -443,7 +418,7 @@ class SegWitTest(BitcoinTestFramework):
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
- assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
+ assert_equal(wit_block.serialize(), bytes.fromhex(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
@@ -451,8 +426,7 @@ class SegWitTest(BitcoinTestFramework):
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
- weight = 3 * len(block.serialize(False)) + len(block.serialize())
- assert_equal(rpc_details["weight"], weight)
+ assert_equal(rpc_details["weight"], block.get_weight())
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
@@ -485,18 +459,10 @@ class SegWitTest(BitcoinTestFramework):
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
- # node2 doesn't need to be connected for this test.
- # (If it's connected, node0 may propagate an invalid block to it over
- # compact blocks and the nodes would have inconsistent tips.)
- self.disconnect_nodes(0, 2)
-
# Create two outputs, a p2wsh and p2sh-p2wsh
- witness_program = CScript([OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
-
- p2sh_pubkey = hash160(script_pubkey)
- p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
+ witness_script = CScript([OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
+ p2sh_script_pubkey = script_to_p2sh_script(script_pubkey)
value = self.utxo[0].nValue // 3
@@ -550,38 +516,10 @@ class SegWitTest(BitcoinTestFramework):
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
- self.connect_nodes(0, 2)
-
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest # type: ignore
- def test_getblocktemplate_before_lockin(self):
- txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
-
- for node in [self.nodes[0], self.nodes[2]]:
- gbt_results = node.getblocktemplate({"rules": ["segwit"]})
- if node == self.nodes[2]:
- # If this is a non-segwit node, we should not get a witness
- # commitment.
- assert 'default_witness_commitment' not in gbt_results
- else:
- # For segwit-aware nodes, check the witness
- # commitment is correct.
- assert 'default_witness_commitment' in gbt_results
- witness_commitment = gbt_results['default_witness_commitment']
-
- # Check that default_witness_commitment is present.
- witness_root = CBlock.get_merkle_root([ser_uint256(0),
- ser_uint256(txid)])
- script = get_witness_script(witness_root, 0)
- assert_equal(witness_commitment, script.hex())
-
- # Clear out the mempool
- self.nodes[0].generate(1)
- self.sync_blocks()
-
- @subtest # type: ignore
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
@@ -630,12 +568,9 @@ class SegWitTest(BitcoinTestFramework):
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
- witness_program = CScript([OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
-
- p2sh_pubkey = hash160(witness_program)
- p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
+ witness_script = CScript([OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
+ p2sh_script_pubkey = script_to_p2sh_script(witness_script)
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
@@ -651,7 +586,7 @@ class SegWitTest(BitcoinTestFramework):
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
- tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
+ tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_script]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
@@ -662,13 +597,14 @@ class SegWitTest(BitcoinTestFramework):
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
+ witness_hash = sha256(witness_script)
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
@@ -681,7 +617,7 @@ class SegWitTest(BitcoinTestFramework):
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
- tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
@@ -739,11 +675,9 @@ class SegWitTest(BitcoinTestFramework):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
- witness_program = CScript([OP_DROP, OP_TRUE])
- witness_hash = sha256(witness_program)
- p2wsh_pubkey = CScript([OP_0, witness_hash])
- p2sh_witness_hash = hash160(p2wsh_pubkey)
- script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
+ witness_script = CScript([OP_DROP, OP_TRUE])
+ p2wsh_pubkey = script_to_p2wsh_script(witness_script)
+ script_pubkey = script_to_p2sh_script(p2wsh_pubkey)
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
@@ -786,7 +720,7 @@ class SegWitTest(BitcoinTestFramework):
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
- spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
+ spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_script]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
@@ -835,19 +769,18 @@ class SegWitTest(BitcoinTestFramework):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
- # Let's construct a witness program
- witness_program = CScript([OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ # Let's construct a witness script
+ witness_script = CScript([OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
- tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script))
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
tx2.rehash()
block_3 = self.build_next_block()
@@ -882,7 +815,7 @@ class SegWitTest(BitcoinTestFramework):
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
- tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
+ tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
@@ -904,7 +837,7 @@ class SegWitTest(BitcoinTestFramework):
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
- assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
+ assert block.get_weight() > MAX_BLOCK_WEIGHT
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
@@ -913,7 +846,7 @@ class SegWitTest(BitcoinTestFramework):
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
- assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
+ assert block.get_weight() < MAX_BLOCK_WEIGHT
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
assert self.nodes[0].getbestblockhash() == block.hash
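The two hunks above replace the test's hand-rolled get_virtual_size() comparison against MAX_BLOCK_BASE_SIZE with block.get_weight() against MAX_BLOCK_WEIGHT. For integer sizes the two checks agree, because weight = 3 * stripped_size + total_size and the old helper rounded the virtual size up. A small standalone check, with the constants restated as assumptions of the sketch:

    # Constants as used by the test framework (assumed here, not imported).
    MAX_BLOCK_BASE_SIZE = 1_000_000
    MAX_BLOCK_WEIGHT = 4 * MAX_BLOCK_BASE_SIZE

    def weight(stripped_size, total_size):
        return 3 * stripped_size + total_size

    def vsize(stripped_size, total_size):
        return (weight(stripped_size, total_size) + 3) // 4  # round up, as the old helper did

    # Straddle the limit: weights 3,999,999 / 4,000,000 / 4,000,001.
    for stripped, total in [(999_000, 1_002_999), (999_000, 1_003_000), (999_000, 1_003_001)]:
        old_check = vsize(stripped, total) > MAX_BLOCK_BASE_SIZE
        new_check = weight(stripped, total) > MAX_BLOCK_WEIGHT
        assert old_check == new_check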
@@ -944,15 +877,14 @@ class SegWitTest(BitcoinTestFramework):
assert len(self.utxo) > 0
# Create a P2WSH transaction.
- # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
+ # The witness script will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
- witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
- witness_hash = uint256_from_str(sha256(witness_program))
- script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
+ witness_script = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
@@ -972,15 +904,14 @@ class SegWitTest(BitcoinTestFramework):
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for _ in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
- child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
+ child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_script]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
- vsize = get_virtual_size(block)
- additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
+ additional_bytes = MAX_BLOCK_WEIGHT - block.get_weight()
i = 0
while additional_bytes > 0:
- # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
+ # Add some more bytes to each input until we hit MAX_BLOCK_WEIGHT+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
@@ -989,8 +920,7 @@ class SegWitTest(BitcoinTestFramework):
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
- vsize = get_virtual_size(block)
- assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
+ assert_equal(block.get_weight(), MAX_BLOCK_WEIGHT + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > 2 * 1024 * 1024
@@ -1003,7 +933,7 @@ class SegWitTest(BitcoinTestFramework):
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
- assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
+ assert block.get_weight() == MAX_BLOCK_WEIGHT
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@@ -1053,9 +983,8 @@ class SegWitTest(BitcoinTestFramework):
block = self.build_next_block()
- witness_program = CScript([OP_DROP, OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([OP_DROP, OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
@@ -1086,7 +1015,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
- tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_script]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
@@ -1126,9 +1055,8 @@ class SegWitTest(BitcoinTestFramework):
block = self.build_next_block()
- witness_program = CScript([OP_DROP, OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([OP_DROP, OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
@@ -1140,7 +1068,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
- tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_script]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
@@ -1158,16 +1086,15 @@ class SegWitTest(BitcoinTestFramework):
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
- def test_max_witness_program_length(self):
+ def test_max_witness_script_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
- MAX_PROGRAM_LENGTH = 10000
+ MAX_WITNESS_SCRIPT_LENGTH = 10000
- # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
- long_witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE])
- assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1
- long_witness_hash = sha256(long_witness_program)
- long_script_pubkey = CScript([OP_0, long_witness_hash])
+ # This script is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
+ long_witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE])
+ assert len(long_witness_script) == MAX_WITNESS_SCRIPT_LENGTH + 1
+ long_script_pubkey = script_to_p2wsh_script(long_witness_script)
block = self.build_next_block()
@@ -1180,23 +1107,22 @@ class SegWitTest(BitcoinTestFramework):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_script]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
- # Try again with one less byte in the witness program
- witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE])
- assert len(witness_program) == MAX_PROGRAM_LENGTH
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ # Try again with one less byte in the witness script
+ witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE])
+ assert len(witness_script) == MAX_WITNESS_SCRIPT_LENGTH
+ script_pubkey = script_to_p2wsh_script(witness_script)
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
- tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_script]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
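The renamed test above leans on the byte accounting of the 10,000-byte witness-script limit: each maximal push costs an OP_PUSHDATA2 opcode plus a 2-byte length prefix plus 520 data bytes. A quick check of the two script lengths asserted in the hunks (limits restated as assumptions of this sketch):

    MAX_SCRIPT_ELEMENT_SIZE = 520                # largest single data push
    MAX_WITNESS_SCRIPT_LENGTH = 10000
    push_size = 1 + 2 + MAX_SCRIPT_ELEMENT_SIZE  # OP_PUSHDATA2 + 2-byte length + data

    too_long = 19 * push_size + 63 + 1   # 19 pushes (9937 bytes), 63 OP_DROPs, OP_TRUE
    just_fits = 19 * push_size + 62 + 1  # one OP_DROP fewer
    assert too_long == MAX_WITNESS_SCRIPT_LENGTH + 1   # rejected above
    assert just_fits == MAX_WITNESS_SCRIPT_LENGTH      # exactly at the limit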
@@ -1209,9 +1135,8 @@ class SegWitTest(BitcoinTestFramework):
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
- witness_program = CScript([OP_DROP, OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([OP_DROP, OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
@@ -1255,7 +1180,7 @@ class SegWitTest(BitcoinTestFramework):
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
+ tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_script]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
@@ -1271,15 +1196,15 @@ class SegWitTest(BitcoinTestFramework):
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
- tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_script]
+ tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_script]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
- tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
+ tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_script]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@@ -1317,9 +1242,8 @@ class SegWitTest(BitcoinTestFramework):
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
- witness_program = CScript([OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
@@ -1330,11 +1254,10 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
- p2sh_program = CScript([OP_TRUE])
- p2sh_pubkey = hash160(p2sh_program)
- witness_program2 = CScript([b'a' * 400000])
- tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
- tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
+ p2sh_script = CScript([OP_TRUE])
+ witness_script2 = CScript([b'a' * 400000])
+ tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_to_p2sh_script(p2sh_script)))
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script2]
tx3.rehash()
# Node will not be blinded to the transaction, requesting it any number of times
@@ -1348,14 +1271,14 @@ class SegWitTest(BitcoinTestFramework):
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
- tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_script]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
- tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)]) # wait until tx2 was inv'ed
@@ -1367,12 +1290,11 @@ class SegWitTest(BitcoinTestFramework):
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
- weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
- vsize = math.ceil(weight / 4)
+ vsize = tx3.get_vsize()
assert_equal(raw_tx["vsize"], vsize)
- assert_equal(raw_tx["weight"], weight)
+ assert_equal(raw_tx["weight"], tx3.get_weight())
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
- assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
+ assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_script.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
@@ -1408,8 +1330,8 @@ class SegWitTest(BitcoinTestFramework):
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
- witness_program = CScript([OP_TRUE])
- witness_hash = sha256(witness_program)
+ witness_script = CScript([OP_TRUE])
+ witness_hash = sha256(witness_script)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
@@ -1437,7 +1359,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
@@ -1452,7 +1374,7 @@ class SegWitTest(BitcoinTestFramework):
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
- tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
+ tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_script]
tx3.vout.append(CTxOut(total_value - 1000, script_pubkey))
tx3.rehash()
@@ -1481,9 +1403,8 @@ class SegWitTest(BitcoinTestFramework):
block = self.build_next_block()
# Change the output of the block to be a witness output.
- witness_program = CScript([OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
@@ -1492,9 +1413,9 @@ class SegWitTest(BitcoinTestFramework):
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
- spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
+ spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_script)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
- spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
spend_tx.rehash()
# Now test a premature spend.
@@ -1530,7 +1451,7 @@ class SegWitTest(BitcoinTestFramework):
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
- script_pkh = CScript([OP_0, pubkeyhash])
+ script_pkh = key_to_p2wpkh_script(pubkey)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
@@ -1543,14 +1464,13 @@ class SegWitTest(BitcoinTestFramework):
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
- witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
- witness_hash = sha256(witness_program)
- script_wsh = CScript([OP_0, witness_hash])
+ witness_script = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
+ script_wsh = script_to_p2wsh_script(witness_script)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
- script = get_p2pkh_script(pubkeyhash)
+ script = keyhash_to_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
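In the hunk above, keyhash_to_p2pkh_script(pubkeyhash) supplies the script that gets hashed when signing the P2WPKH input: under BIP143, the scriptCode committed to by a version-0 P2WPKH spend is the classic P2PKH script of the 20-byte key hash. A hedged sketch of that digest, using the same framework helpers as the test (assumes it is run from test/functional so the test_framework imports resolve; the prevout and amounts are illustrative):

    from test_framework.key import ECKey
    from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut
    from test_framework.script import CScript, OP_TRUE, SIGHASH_ALL, SegwitV0SignatureHash, hash160
    from test_framework.script_util import keyhash_to_p2pkh_script

    key = ECKey()
    key.generate()
    pubkey = key.get_pubkey().get_bytes()
    pubkeyhash = hash160(pubkey)

    spend = CTransaction()
    spend.vin = [CTxIn(COutPoint(1, 0), b"")]          # dummy prevout, for illustration only
    spend.vout = [CTxOut(9000, CScript([OP_TRUE]))]

    script_code = keyhash_to_p2pkh_script(pubkeyhash)  # BIP143 scriptCode for a P2WPKH input
    digest = SegwitV0SignatureHash(script_code, spend, 0, SIGHASH_ALL, 10000)
    sig = key.sign_ecdsa(digest) + bytes([SIGHASH_ALL])
    witness_stack = [sig, pubkey]                      # what goes into scriptWitness.stack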
@@ -1567,15 +1487,14 @@ class SegWitTest(BitcoinTestFramework):
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
- p2sh_witness_hash = hash160(script_wsh)
- script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
+ script_p2sh = script_to_p2sh_script(script_wsh)
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
- sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
+ sign_p2pk_witness_input(witness_script, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
@@ -1587,12 +1506,12 @@ class SegWitTest(BitcoinTestFramework):
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
- script_pubkey = get_p2pkh_script(pubkeyhash)
+ script_pubkey = keyhash_to_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
- sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
+ sign_p2pk_witness_input(witness_script, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
@@ -1623,9 +1542,8 @@ class SegWitTest(BitcoinTestFramework):
key.generate()
pubkey = key.get_pubkey().get_bytes()
- witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
+ script_pubkey = script_to_p2wsh_script(witness_script)
# First create a witness output for use in the tests.
tx = CTransaction()
@@ -1652,18 +1570,18 @@ class SegWitTest(BitcoinTestFramework):
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
- sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
+ sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
- sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
+ sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
- sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
+ sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
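The wrong-value cases above fail because the BIP143 digest commits to the value of the output being spent, so a signature made over nValue + 1 or nValue - 1 cannot validate against the real amount. Continuing the P2WPKH sketch from earlier (same imports and objects), the effect is visible directly in the digest:

    d_right = SegwitV0SignatureHash(script_code, spend, 0, SIGHASH_ALL, 10000)
    d_wrong = SegwitV0SignatureHash(script_code, spend, 0, SIGHASH_ALL, 10001)
    assert d_right != d_wrong   # different committed amount, different message to sign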
@@ -1684,7 +1602,7 @@ class SegWitTest(BitcoinTestFramework):
for _ in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
- sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
+ sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
@@ -1719,7 +1637,7 @@ class SegWitTest(BitcoinTestFramework):
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
- sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
+ sign_p2pk_witness_input(witness_script, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
@@ -1730,7 +1648,7 @@ class SegWitTest(BitcoinTestFramework):
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
- if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
+ if block.get_weight() > MAX_BLOCK_WEIGHT - 4000:
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
@@ -1744,17 +1662,17 @@ class SegWitTest(BitcoinTestFramework):
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
- script_pkh = CScript([OP_0, pubkeyhash])
+ script_pkh = key_to_p2wpkh_script(pubkey)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
- sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
+ sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
- script = get_p2pkh_script(pubkeyhash)
+ script = keyhash_to_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
@@ -1789,7 +1707,7 @@ class SegWitTest(BitcoinTestFramework):
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
- sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
+ sign_p2pk_witness_input(witness_script, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
@@ -1806,8 +1724,7 @@ class SegWitTest(BitcoinTestFramework):
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
- p2sh_pubkey = hash160(p2sh_program)
- script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
+ script_pubkey = script_to_p2sh_script(p2sh_program)
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
@@ -1872,11 +1789,10 @@ class SegWitTest(BitcoinTestFramework):
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
- p2wsh = CScript([OP_0, sha256(i)])
- p2sh = hash160(p2wsh)
+ p2wsh = script_to_p2wsh_script(i)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
- tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
+ tx.vout.append(CTxOut(outputvalue, script_to_p2sh_script(p2wsh)))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
@@ -1890,13 +1806,13 @@ class SegWitTest(BitcoinTestFramework):
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
- p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
+ p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
- p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
+ p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
@@ -1953,46 +1869,12 @@ class SegWitTest(BitcoinTestFramework):
self.utxo.pop(0)
@subtest # type: ignore
- def test_upgrade_after_activation(self):
- """Test the behavior of starting up a segwit-aware node after the softfork has activated."""
-
- # All nodes are caught up and node 2 is a pre-segwit node that will soon upgrade.
- for n in range(2):
- assert_equal(self.nodes[n].getblockcount(), self.nodes[2].getblockcount())
- assert softfork_active(self.nodes[n], "segwit")
- assert SEGWIT_HEIGHT < self.nodes[2].getblockcount()
- assert 'segwit' not in self.nodes[2].getblockchaininfo()['softforks']
-
- # Restarting node 2 should result in a shutdown because the blockchain consists of
- # insufficiently validated blocks per segwit consensus rules.
- self.stop_node(2)
- self.nodes[2].assert_start_raises_init_error(
- extra_args=[f"-segwitheight={SEGWIT_HEIGHT}"],
- expected_msg=f": Witness data for blocks after height {SEGWIT_HEIGHT} requires validation. Please restart with -reindex..\nPlease restart with -reindex or -reindex-chainstate to recover.",
- )
-
- # As directed, the user restarts the node with -reindex
- self.start_node(2, extra_args=["-reindex", f"-segwitheight={SEGWIT_HEIGHT}"])
-
- # With the segwit consensus rules, the node is able to validate only up to SEGWIT_HEIGHT - 1
- assert_equal(self.nodes[2].getblockcount(), SEGWIT_HEIGHT - 1)
- self.connect_nodes(0, 2)
-
- # We reconnect more than 100 blocks, give it plenty of time
- # sync_blocks() also verifies the best block hash is the same for all nodes
- self.sync_blocks(timeout=240)
-
- # The upgraded node should now have segwit activated
- assert softfork_active(self.nodes[2], "segwit")
-
- @subtest # type: ignore
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
- witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
+ script_pubkey = script_to_p2wsh_script(witness_script)
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
@@ -2007,15 +1889,13 @@ class SegWitTest(BitcoinTestFramework):
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
- witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
- witness_hash_toomany = sha256(witness_program_toomany)
- script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
+ witness_script_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
+ script_pubkey_toomany = script_to_p2wsh_script(witness_script_toomany)
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
- witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
- witness_hash_justright = sha256(witness_program_justright)
- script_pubkey_justright = CScript([OP_0, witness_hash_justright])
+ witness_script_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
+ script_pubkey_justright = script_to_p2wsh_script(witness_script_justright)
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
@@ -2038,9 +1918,9 @@ class SegWitTest(BitcoinTestFramework):
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script]
total_value += tx.vout[i].nValue
- tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
@@ -2079,7 +1959,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
@@ -2147,9 +2027,8 @@ class SegWitTest(BitcoinTestFramework):
# Create a Segwit output from the latest UTXO
# and announce it to the network
- witness_program = CScript([OP_TRUE])
- witness_hash = sha256(witness_program)
- script_pubkey = CScript([OP_0, witness_hash])
+ witness_script = CScript([OP_TRUE])
+ script_pubkey = script_to_p2wsh_script(witness_script)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
@@ -2161,7 +2040,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
- tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
tx2.rehash()
# Announce Segwit transaction with wtxid
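Most of the p2p_segwit.py churn above replaces hand-built output scripts and the file-local get_p2pkh_script() with the shared helpers from test_framework/script_util.py. Judging from the constructions they replace one-for-one, the helpers reduce to the following shapes (a sketch built on the framework's CScript, sha256 and hash160 primitives; script_util.py is the canonical definition and may add extra checks):

    from test_framework.messages import sha256
    from test_framework.script import (
        CScript,
        OP_0,
        OP_CHECKSIG,
        OP_DUP,
        OP_EQUAL,
        OP_EQUALVERIFY,
        OP_HASH160,
        hash160,
    )

    def script_to_p2wsh_script(script):       # native P2WSH: OP_0 <sha256(witness script)>
        return CScript([OP_0, sha256(script)])

    def script_to_p2sh_script(script):        # P2SH wrapper: OP_HASH160 <hash160(script)> OP_EQUAL
        return CScript([OP_HASH160, hash160(script), OP_EQUAL])

    def key_to_p2wpkh_script(pubkey):         # native P2WPKH: OP_0 <hash160(pubkey)>
        return CScript([OP_0, hash160(pubkey)])

    def keyhash_to_p2pkh_script(pubkeyhash):  # legacy P2PKH
        return CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])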
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
index e7a05d8547..a9d5ed970a 100755
--- a/test/functional/p2p_unrequested_blocks.py
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -77,7 +77,7 @@ class AcceptBlockTest(BitcoinTestFramework):
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
# 1. Have nodes mine a block (leave IBD)
- [n.generatetoaddress(1, n.get_deterministic_priv_key().address) for n in self.nodes]
+ [self.generatetoaddress(n, 1, n.get_deterministic_priv_key().address) for n in self.nodes]
tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
# 2. Send one block that builds on each tip.
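The one-line change above routes block generation through the BitcoinTestFramework method instead of calling the node RPC directly, so the framework can take over housekeeping around generation. A hedged sketch of the forwarding shape only (the real method lives in test_framework/test_framework.py and does more than this):

    # Illustrative forwarding wrapper; names match the call site above, behaviour is assumed.
    def generatetoaddress(self, generator_node, num_blocks, address, **kwargs):
        return generator_node.generatetoaddress(num_blocks, address, **kwargs)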
diff --git a/test/functional/rpc_addresses_deprecation.py b/test/functional/rpc_addresses_deprecation.py
index ac430f5b39..251cc85ae9 100755
--- a/test/functional/rpc_addresses_deprecation.py
+++ b/test/functional/rpc_addresses_deprecation.py
@@ -10,7 +10,6 @@ from test_framework.messages import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- hex_str_to_bytes
)
@@ -36,7 +35,7 @@ class AddressesDeprecationTest(BitcoinTestFramework):
# This transaction is derived from test/util/data/txcreatemultisig1.json
tx = tx_from_hex(signed)
- tx.vout[0].scriptPubKey = hex_str_to_bytes("522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae")
+ tx.vout[0].scriptPubKey = bytes.fromhex("522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae")
tx_signed = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid = node.sendrawtransaction(hexstring=tx_signed, maxfeerate=0)
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index 90715cae26..2ee5c7ee0a 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -6,13 +6,15 @@
Test the following RPCs:
- getblockchaininfo
+ - getchaintxstats
- gettxoutsetinfo
- - getdifficulty
- - getbestblockhash
- - getblockhash
- getblockheader
- - getchaintxstats
+ - getdifficulty
- getnetworkhashps
+ - waitforblockheight
+ - getblock
+ - getblockhash
+ - getbestblockhash
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
@@ -25,6 +27,8 @@ import subprocess
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.blocktools import (
+ CLTV_HEIGHT,
+ DERSIG_HEIGHT,
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
@@ -49,6 +53,12 @@ from test_framework.util import (
from test_framework.wallet import MiniWallet
+HEIGHT = 200 # blocks mined
+TIME_RANGE_STEP = 600 # ten-minute steps
+TIME_RANGE_MTP = TIME_GENESIS_BLOCK + (HEIGHT - 6) * TIME_RANGE_STEP
+TIME_RANGE_END = TIME_GENESIS_BLOCK + HEIGHT * TIME_RANGE_STEP
+
+
class BlockchainTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
@@ -71,12 +81,11 @@ class BlockchainTest(BitcoinTestFramework):
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
- self.log.info('Create some old blocks')
- for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
- # ten-minute steps from genesis block time
+ self.log.info(f"Generate {HEIGHT} blocks after the genesis block in ten-minute steps")
+ for t in range(TIME_GENESIS_BLOCK, TIME_RANGE_END, TIME_RANGE_STEP):
self.nodes[0].setmocktime(t)
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_P2WSH_OP_TRUE)
- assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
+ assert_equal(self.nodes[0].getblockchaininfo()['blocks'], HEIGHT)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
@@ -93,11 +102,15 @@ class BlockchainTest(BitcoinTestFramework):
'pruned',
'size_on_disk',
'softforks',
+ 'time',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
+ assert_equal(res['time'], TIME_RANGE_END - TIME_RANGE_STEP)
+ assert_equal(res['mediantime'], TIME_RANGE_MTP)
+
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
@@ -129,9 +142,9 @@ class BlockchainTest(BitcoinTestFramework):
assert_greater_than(res['size_on_disk'], 0)
assert_equal(res['softforks'], {
- 'bip34': {'type': 'buried', 'active': False, 'height': 500},
- 'bip66': {'type': 'buried', 'active': False, 'height': 1251},
- 'bip65': {'type': 'buried', 'active': False, 'height': 1351},
+ 'bip34': {'type': 'buried', 'active': True, 'height': 2},
+ 'bip66': {'type': 'buried', 'active': True, 'height': DERSIG_HEIGHT},
+ 'bip65': {'type': 'buried', 'active': True, 'height': CLTV_HEIGHT},
'csv': {'type': 'buried', 'active': False, 'height': 432},
'segwit': {'type': 'buried', 'active': True, 'height': 0},
'testdummy': {
@@ -145,8 +158,8 @@ class BlockchainTest(BitcoinTestFramework):
'statistics': {
'period': 144,
'threshold': 108,
- 'elapsed': 57,
- 'count': 57,
+ 'elapsed': HEIGHT - 143,
+ 'count': HEIGHT - 143,
'possible': True,
},
'min_activation_height': 0,
@@ -183,33 +196,33 @@ class BlockchainTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
- blockhash = self.nodes[0].getblockhash(200)
+ blockhash = self.nodes[0].getblockhash(HEIGHT)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
- assert_equal(chaintxstats['txcount'], 201)
+ assert_equal(chaintxstats['txcount'], HEIGHT + 1)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
- assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
+ assert_equal(round(chaintxstats['txrate'] * TIME_RANGE_STEP, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
- b200_hash = self.nodes[0].getblockhash(200)
+ b200_hash = self.nodes[0].getblockhash(HEIGHT)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
- assert_equal(chaintxstats['txcount'], 201)
+ assert_equal(chaintxstats['txcount'], HEIGHT + 1)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
- assert_equal(chaintxstats['window_final_block_height'], 200)
- assert_equal(chaintxstats['window_block_count'], 199)
- assert_equal(chaintxstats['window_tx_count'], 199)
+ assert_equal(chaintxstats['window_final_block_height'], HEIGHT )
+ assert_equal(chaintxstats['window_block_count'], HEIGHT - 1)
+ assert_equal(chaintxstats['window_tx_count'], HEIGHT - 1)
assert_equal(chaintxstats['window_interval'], time_diff)
- assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
+ assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(HEIGHT - 1))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
@@ -226,18 +239,18 @@ class BlockchainTest(BitcoinTestFramework):
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
- assert_equal(res['transactions'], 200)
- assert_equal(res['height'], 200)
- assert_equal(res['txouts'], 200)
+ assert_equal(res['transactions'], HEIGHT)
+ assert_equal(res['height'], HEIGHT)
+ assert_equal(res['txouts'], HEIGHT)
assert_equal(res['bogosize'], 16800),
- assert_equal(res['bestblock'], node.getblockhash(200))
+ assert_equal(res['bestblock'], node.getblockhash(HEIGHT))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
- self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
+ self.log.info("Test gettxoutsetinfo works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
@@ -250,7 +263,7 @@ class BlockchainTest(BitcoinTestFramework):
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
- self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
+ self.log.info("Test gettxoutsetinfo returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
@@ -259,7 +272,7 @@ class BlockchainTest(BitcoinTestFramework):
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
- self.log.info("Test hash_type option for gettxoutsetinfo()")
+ self.log.info("Test gettxoutsetinfo hash_type option")
# Adding hash_type 'hash_serialized_2', which is the default, should
# not change the result.
res4 = node.gettxoutsetinfo(hash_type='hash_serialized_2')
@@ -283,6 +296,7 @@ class BlockchainTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, "foohash is not a valid hash_type", node.gettxoutsetinfo, "foohash")
def _test_getblockheader(self):
+ self.log.info("Test getblockheader")
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
@@ -290,11 +304,11 @@ class BlockchainTest(BitcoinTestFramework):
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
- secondbesthash = node.getblockhash(199)
+ secondbesthash = node.getblockhash(HEIGHT - 1)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
- assert_equal(header['height'], 200)
+ assert_equal(header['height'], HEIGHT)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
@@ -304,7 +318,7 @@ class BlockchainTest(BitcoinTestFramework):
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
- assert isinstance(header['mediantime'], int)
+ assert_equal(header['mediantime'], TIME_RANGE_MTP)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
@@ -322,20 +336,23 @@ class BlockchainTest(BitcoinTestFramework):
assert 'nextblockhash' not in node.getblockheader(node.getbestblockhash())
def _test_getdifficulty(self):
+ self.log.info("Test getdifficulty")
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
+ self.log.info("Test getnetworkhashps")
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
- assert_equal(self.nodes[0].getblockcount(), 200)
+ self.log.info("Test stopping at height")
+ assert_equal(self.nodes[0].getblockcount(), HEIGHT)
self.nodes[0].generatetoaddress(6, ADDRESS_BCRT1_P2WSH_OP_TRUE)
- assert_equal(self.nodes[0].getblockcount(), 206)
+ assert_equal(self.nodes[0].getblockcount(), HEIGHT + 6)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
@@ -345,7 +362,7 @@ class BlockchainTest(BitcoinTestFramework):
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
- assert_equal(self.nodes[0].getblockcount(), 207)
+ assert_equal(self.nodes[0].getblockcount(), HEIGHT + 7)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
@@ -397,20 +414,20 @@ class BlockchainTest(BitcoinTestFramework):
miniwallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)
blockhash = node.generate(1)[0]
- self.log.info("Test that getblock with verbosity 1 doesn't include fee")
+ self.log.info("Test getblock with verbosity 1 doesn't include fee")
block = node.getblock(blockhash, 1)
assert 'fee' not in block['tx'][1]
- self.log.info('Test that getblock with verbosity 2 includes expected fee')
+ self.log.info('Test getblock with verbosity 2 includes expected fee')
block = node.getblock(blockhash, 2)
tx = block['tx'][1]
assert 'fee' in tx
assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)
- self.log.info("Test that getblock with verbosity 2 still works with pruned Undo data")
+ self.log.info("Test getblock with verbosity 2 still works with pruned Undo data")
datadir = get_datadir_path(self.options.tmpdir, 0)
- self.log.info("Test that getblock with invalid verbosity type returns proper error message")
+ self.log.info("Test getblock with invalid verbosity type returns proper error message")
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", node.getblock, blockhash, "2")
def move_block_file(old, new):
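The new HEIGHT/TIME_RANGE_* constants above encode a small median-time-past calculation: the loop hands block h a mock time of roughly genesis + (h - 1) * 600, and a block's mediantime is the median of the last 11 block times, i.e. the time of block HEIGHT - 5 at the tip. A standalone check of the two asserted values, with the genesis timestamp left symbolic (block 1 may be nudged to MTP + 1 by the miner, which does not affect the last 11 blocks at height 200):

    HEIGHT = 200
    STEP = 600
    GENESIS = 1_000_000   # placeholder; the test uses TIME_GENESIS_BLOCK from blocktools

    def block_time(h):
        return GENESIS + (h - 1) * STEP   # mock time handed to block h (exact for h >= 2)

    last_11 = sorted(block_time(h) for h in range(HEIGHT - 10, HEIGHT + 1))
    mtp = last_11[5]   # median of 11 ascending timestamps

    assert block_time(HEIGHT) == GENESIS + HEIGHT * STEP - STEP   # res['time'] analogue
    assert mtp == GENESIS + (HEIGHT - 6) * STEP                   # res['mediantime'] analogue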
diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py
index 816ec67492..bb2e51c2f7 100755
--- a/test/functional/rpc_createmultisig.py
+++ b/test/functional/rpc_createmultisig.py
@@ -3,7 +3,6 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
-import binascii
import decimal
import itertools
import json
@@ -66,9 +65,9 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
# decompress pk2
pk_obj = ECPubKey()
- pk_obj.set(binascii.unhexlify(pk2))
+ pk_obj.set(bytes.fromhex(pk2))
pk_obj.compressed = False
- pk2 = binascii.hexlify(pk_obj.get_bytes()).decode()
+ pk2 = pk_obj.get_bytes().hex()
node0.createwallet(wallet_name='wmulti0', disable_private_keys=True)
wmulti0 = node0.get_wallet_rpc('wmulti0')
diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py
index f6643c7167..5b1514af6f 100755
--- a/test/functional/rpc_decodescript.py
+++ b/test/functional/rpc_decodescript.py
@@ -11,7 +11,6 @@ from test_framework.messages import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- hex_str_to_bytes,
)
@@ -86,7 +85,7 @@ class DecodeScriptTest(BitcoinTestFramework):
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
- multisig_script_hash = sha256(hex_str_to_bytes(multisig_script)).hex()
+ multisig_script_hash = sha256(bytes.fromhex(multisig_script)).hex()
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
@@ -124,7 +123,7 @@ class DecodeScriptTest(BitcoinTestFramework):
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
- cltv_script_hash = sha256(hex_str_to_bytes(cltv_script)).hex()
+ cltv_script_hash = sha256(bytes.fromhex(cltv_script)).hex()
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
@@ -209,23 +208,23 @@ class DecodeScriptTest(BitcoinTestFramework):
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
- txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
+ txSave.vin[0].scriptSig = bytes.fromhex(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
- txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
+ txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
- txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
+ txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
- txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
+ txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index fa98c44152..0ce58efbcf 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -99,6 +99,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.test_subtract_fee_with_presets()
self.test_transaction_too_large()
self.test_include_unsafe()
+ self.test_22670()
def test_change_position(self):
"""Ensure setting changePosition in fundraw with an exact match is handled properly."""
@@ -379,7 +380,7 @@ class RawTransactionsTest(BitcoinTestFramework):
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
- signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+ signedFee = self.nodes[0].getmempoolentry(txId)['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
@@ -402,7 +403,7 @@ class RawTransactionsTest(BitcoinTestFramework):
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendmany("", outputs)
- signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+ signedFee = self.nodes[0].getmempoolentry(txId)['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
@@ -426,7 +427,7 @@ class RawTransactionsTest(BitcoinTestFramework):
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
- signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+ signedFee = self.nodes[0].getmempoolentry(txId)['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
@@ -467,7 +468,7 @@ class RawTransactionsTest(BitcoinTestFramework):
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
- signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+ signedFee = self.nodes[0].getmempoolentry(txId)['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
@@ -542,7 +543,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
- outputs = {self.nodes[0].getnewaddress():1.09999500}
+ outputs = {self.nodes[0].getnewaddress():1.19999500}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that does not require a new key for the change output
self.nodes[1].fundrawtransaction(rawtx)
@@ -599,7 +600,7 @@ class RawTransactionsTest(BitcoinTestFramework):
# Create same transaction over sendtoaddress.
txId = self.nodes[1].sendmany("", outputs)
- signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
+ signedFee = self.nodes[1].getmempoolentry(txId)['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
@@ -969,6 +970,62 @@ class RawTransactionsTest(BitcoinTestFramework):
signedtx = wallet.signrawtransactionwithwallet(fundedtx['hex'])
wallet.sendrawtransaction(signedtx['hex'])
+ def test_22670(self):
+ # In issue #22670, it was observed that ApproximateBestSubset may
+ # choose enough value to cover the target amount but not enough to cover the transaction fees.
+ # This leads to a transaction whose actual transaction feerate is lower than expected.
+ # However, at normal feerates this bug is not detected because the difference between the
+ # effective value and the real value is smaller than the minimum change value (0.01 BTC),
+ # so the targeted minimum change is enough to cover the transaction fees that were not being
+ # accounted for. The minimum relay fee is therefore set to 0.1 BTC/kvB in this test.
+ self.log.info("Test issue 22670 ApproximateBestSubset bug")
+ # Make sure the default wallet will not be loaded when restarted with a high minrelaytxfee
+ self.nodes[0].unloadwallet(self.default_wallet_name, False)
+ feerate = Decimal("0.1")
+ self.restart_node(0, [f"-minrelaytxfee={feerate}", "-discardfee=0"]) # Set high minrelayfee, set discardfee to 0 for easier calculation
+
+ self.nodes[0].loadwallet(self.default_wallet_name, True)
+ funds = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+ self.nodes[0].createwallet(wallet_name="tester")
+ tester = self.nodes[0].get_wallet_rpc("tester")
+
+ # Because this test is specifically for ApproximateBestSubset, the target value must be greater
+ # than any single available input and must require more than one input. So we make 3 outputs.
+ for _ in range(3):
+ funds.sendtoaddress(tester.getnewaddress(address_type="bech32"), 1)
+ self.nodes[0].generate(1)
+
+ # Create transactions in order to calculate fees for the target bounds that can trigger this bug
+ change_tx = tester.fundrawtransaction(tester.createrawtransaction([], [{funds.getnewaddress(): 1.5}]))
+ tx = tester.createrawtransaction([], [{funds.getnewaddress(): 2}])
+ no_change_tx = tester.fundrawtransaction(tx, {"subtractFeeFromOutputs": [0]})
+
+ overhead_fees = feerate * len(tx) / 2 / 1000
+ cost_of_change = change_tx["fee"] - no_change_tx["fee"]
+ fees = no_change_tx["fee"]
+ assert_greater_than(fees, 0.01)
+
+ def do_fund_send(target):
+ create_tx = tester.createrawtransaction([], [{funds.getnewaddress(): target}])
+ funded_tx = tester.fundrawtransaction(create_tx)
+ signed_tx = tester.signrawtransactionwithwallet(funded_tx["hex"])
+ assert signed_tx["complete"]
+ decoded_tx = tester.decoderawtransaction(signed_tx["hex"])
+ assert_equal(len(decoded_tx["vin"]), 3)
+ assert tester.testmempoolaccept([signed_tx["hex"]])[0]["allowed"]
+
+ # We want to choose more value than is available in 2 inputs when considering the fee,
+ # but not enough to need 3 inputs when not considering the fee.
+ # So the target value must be at least 2.00000001 - fee.
+ lower_bound = Decimal("2.00000001") - fees
+ # The target value must be at most 2 - cost_of_change - overhead_fees - min_change (these are all
+ # included in the target before ApproximateBestSubset).
+ upper_bound = Decimal("2.0") - cost_of_change - overhead_fees - Decimal("0.01")
+ assert_greater_than_or_equal(upper_bound, lower_bound)
+ do_fund_send(lower_bound)
+ do_fund_send(upper_bound)
+
+ self.restart_node(0)
if __name__ == '__main__':
RawTransactionsTest().main()
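To make the bound derivation in test_22670 above concrete, here is a small worked sketch with assumed fee values; the real values come from fundrawtransaction at runtime and will differ, only the relationships mirror the test.

```python
from decimal import Decimal

feerate = Decimal("0.1")                          # BTC/kvB, as set via -minrelaytxfee in the test
tx_hex_len = 200                                  # assumed length of the unfunded tx in hex characters
overhead_fees = feerate * tx_hex_len / 2 / 1000   # hex chars -> bytes -> kvB, as in the test
fees = Decimal("0.03")                            # assumed fee of the no-change funding (> 0.01, as asserted)
cost_of_change = Decimal("0.005")                 # assumed fee delta of adding a change output

# Lower bound: high enough that a 3rd input is needed once fees are counted.
lower_bound = Decimal("2.00000001") - fees
# Upper bound: low enough that 2 inputs would suffice if fees were ignored.
upper_bound = Decimal("2.0") - cost_of_change - overhead_fees - Decimal("0.01")
assert upper_bound >= lower_bound                 # 1.975 >= 1.97000001 for these assumed values
```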
diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py
index 52c8fa883d..13f33c321f 100755
--- a/test/functional/rpc_misc.py
+++ b/test/functional/rpc_misc.py
@@ -54,13 +54,27 @@ class RpcMiscTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar")
- self.log.info("test logging")
+ self.log.info("test logging rpc and help")
+
+ # Test logging RPC returns the expected number of logging categories.
+ assert_equal(len(node.logging()), 25)
+
+ # Test toggling a logging category on/off/on with the logging RPC.
assert_equal(node.logging()['qt'], True)
node.logging(exclude=['qt'])
assert_equal(node.logging()['qt'], False)
node.logging(include=['qt'])
assert_equal(node.logging()['qt'], True)
+ # Test logging RPC returns the logging categories in alphabetical order.
+ sorted_logging_categories = sorted(node.logging())
+ assert_equal(list(node.logging()), sorted_logging_categories)
+
+ # Test logging help returns the logging categories string in alphabetical order.
+ categories = ', '.join(sorted_logging_categories)
+ logging_help = self.nodes[0].help('logging')
+ assert f"valid logging categories are: {categories}" in logging_help
+
self.log.info("test echoipc (testing spawned process in multiprocess build)")
assert_equal(node.echoipc("hello"), "hello")
diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py
index 4b2ed20958..3cb4154601 100755
--- a/test/functional/rpc_packages.py
+++ b/test/functional/rpc_packages.py
@@ -22,6 +22,11 @@ from test_framework.script import (
from test_framework.util import (
assert_equal,
)
+from test_framework.wallet import (
+ create_child_with_parents,
+ create_raw_chain,
+ make_chain,
+)
class RPCPackagesTest(BitcoinTestFramework):
def set_test_params(self):
@@ -78,26 +83,6 @@ class RPCPackagesTest(BitcoinTestFramework):
self.test_conflicting()
self.test_rbf()
- def chain_transaction(self, parent_txid, parent_value, n=0, parent_locking_script=None):
- """Build a transaction that spends parent_txid.vout[n] and produces one output with
- amount = parent_value with a fee deducted.
- Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
- """
- node = self.nodes[0]
- inputs = [{"txid": parent_txid, "vout": n}]
- my_value = parent_value - Decimal("0.0001")
- outputs = {self.address : my_value}
- rawtx = node.createrawtransaction(inputs, outputs)
- prevtxs = [{
- "txid": parent_txid,
- "vout": n,
- "scriptPubKey": parent_locking_script,
- "amount": parent_value,
- }] if parent_locking_script else None
- signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys, prevtxs=prevtxs)
- assert signedtx["complete"]
- tx = tx_from_hex(signedtx["hex"])
- return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
def test_independent(self):
self.log.info("Test multiple independent transactions in a package")
@@ -148,20 +133,7 @@ class RPCPackagesTest(BitcoinTestFramework):
def test_chain(self):
node = self.nodes[0]
first_coin = self.coins.pop()
-
- # Chain of 25 transactions
- parent_locking_script = None
- txid = first_coin["txid"]
- chain_hex = []
- chain_txns = []
- value = first_coin["amount"]
-
- for _ in range(25):
- (tx, txhex, value, parent_locking_script) = self.chain_transaction(txid, value, 0, parent_locking_script)
- txid = tx.rehash()
- chain_hex.append(txhex)
- chain_txns.append(tx)
-
+ (chain_hex, chain_txns) = create_raw_chain(node, first_coin, self.address, self.privkeys)
self.log.info("Check that testmempoolaccept requires packages to be sorted by dependency")
assert_equal(node.testmempoolaccept(rawtxs=chain_hex[::-1]),
[{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "package-error": "package-not-sorted"} for tx in chain_txns[::-1]])
@@ -201,7 +173,7 @@ class RPCPackagesTest(BitcoinTestFramework):
child_value = value - Decimal("0.0001")
# Child A
- (_, tx_child_a_hex, _, _) = self.chain_transaction(parent_txid, child_value, 0, parent_locking_script_a)
+ (_, tx_child_a_hex, _, _) = make_chain(node, self.address, self.privkeys, parent_txid, child_value, 0, parent_locking_script_a)
assert not node.testmempoolaccept([tx_child_a_hex])[0]["allowed"]
# Child B
@@ -226,19 +198,6 @@ class RPCPackagesTest(BitcoinTestFramework):
node.sendrawtransaction(rawtx)
assert_equal(testres_single, testres_multiple_ab)
- def create_child_with_parents(self, parents_tx, values, locking_scripts):
- """Creates a transaction that spends the first output of each parent in parents_tx."""
- num_parents = len(parents_tx)
- total_value = sum(values)
- inputs = [{"txid": tx.rehash(), "vout": 0} for tx in parents_tx]
- outputs = {self.address : total_value - num_parents * Decimal("0.0001")}
- rawtx_child = self.nodes[0].createrawtransaction(inputs, outputs)
- prevtxs = []
- for i in range(num_parents):
- prevtxs.append({"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]})
- signedtx_child = self.nodes[0].signrawtransactionwithkey(hexstring=rawtx_child, privkeys=self.privkeys, prevtxs=prevtxs)
- assert signedtx_child["complete"]
- return signedtx_child["hex"]
def test_multiple_parents(self):
node = self.nodes[0]
@@ -253,12 +212,12 @@ class RPCPackagesTest(BitcoinTestFramework):
for _ in range(num_parents):
parent_coin = self.coins.pop()
value = parent_coin["amount"]
- (tx, txhex, value, parent_locking_script) = self.chain_transaction(parent_coin["txid"], value)
+ (tx, txhex, value, parent_locking_script) = make_chain(node, self.address, self.privkeys, parent_coin["txid"], value)
package_hex.append(txhex)
parents_tx.append(tx)
values.append(value)
parent_locking_scripts.append(parent_locking_script)
- child_hex = self.create_child_with_parents(parents_tx, values, parent_locking_scripts)
+ child_hex = create_child_with_parents(node, self.address, self.privkeys, parents_tx, values, parent_locking_scripts)
# Package accept should work with the parents in any order (as long as parents come before child)
for _ in range(10):
random.shuffle(package_hex)
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index db57368eae..fbf8c6ef15 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -5,11 +5,11 @@
"""Test the rawtransaction RPCs.
Test the following RPCs:
+ - getrawtransaction
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- - getrawtransaction
"""
from collections import OrderedDict
@@ -28,6 +28,9 @@ from test_framework.util import (
)
+TXID = "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000"
+
+
class multidict(dict):
"""Dictionary that allows duplicate keys.
@@ -46,16 +49,20 @@ class multidict(dict):
return self.x
-# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 3
+ self.num_nodes = 4
self.extra_args = [
["-txindex"],
["-txindex"],
["-txindex"],
+ [],
]
+ # whitelist all peers to speed up tx relay / mempool sync
+ for args in self.extra_args:
+ args.append("-whitelist=noban@127.0.0.1")
+
self.supports_cli = False
def skip_test_if_missing_module(self):
@@ -66,23 +73,112 @@ class RawTransactionsTest(BitcoinTestFramework):
self.connect_nodes(0, 2)
def run_test(self):
- self.log.info('prepare some coins for multiple *rawtransaction commands')
+ self.log.info("Prepare some coins for multiple *rawtransaction commands")
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(COINBASE_MATURITY + 1)
self.sync_all()
- self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
- self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
- self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
+ for amount in [1.5, 1.0, 5.0]:
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), amount)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
- self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
+ self.getrawtransaction_tests()
+ self.createrawtransaction_tests()
+ self.signrawtransactionwithwallet_tests()
+ self.sendrawtransaction_tests()
+ self.sendrawtransaction_testmempoolaccept_tests()
+ self.decoderawtransaction_tests()
+ self.transaction_version_number_tests()
+ if not self.options.descriptors:
+ self.raw_multisig_transaction_legacy_tests()
+
+ def getrawtransaction_tests(self):
+ addr = self.nodes[1].getnewaddress()
+ txid = self.nodes[0].sendtoaddress(addr, 10)
+ self.nodes[0].generate(1)
+ self.sync_all()
+ vout = find_vout_for_address(self.nodes[1], txid, addr)
+ rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
+ rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
+ txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ for n in [0, 3]:
+ self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex")
+ # 1. valid parameters - only supply txid
+ assert_equal(self.nodes[n].getrawtransaction(txId), rawTxSigned['hex'])
+
+ # 2. valid parameters - supply txid and 0 for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, 0), rawTxSigned['hex'])
+
+ # 3. valid parameters - supply txid and False for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, False), rawTxSigned['hex'])
+
+ # 4. valid parameters - supply txid and 1 for verbose.
+ # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
+ assert_equal(self.nodes[n].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
+
+ # 5. valid parameters - supply txid and True for verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
+
+ # 6. invalid parameters - supply txid and invalid boolean values (strings) for verbose
+ for value in ["True", "False"]:
+ assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txid=txId, verbose=value)
+
+ # 7. invalid parameters - supply txid and empty array
+ assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txId, [])
+
+ # 8. invalid parameters - supply txid and empty dict
+ assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txId, {})
+
+ # Make a tx by sending, then generate 2 blocks; block1 has the tx in it
+ tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
+ block1, block2 = self.nodes[2].generate(2)
+ self.sync_all()
+ for n in [0, 3]:
+ self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex, with blockhash")
+ # We should be able to get the raw transaction by providing the correct block
+ gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
+ assert_equal(gottx['txid'], tx)
+ assert_equal(gottx['in_active_chain'], True)
+ if n == 0:
+ self.log.info("Test getrawtransaction with -txindex, without blockhash: 'in_active_chain' should be absent")
+ gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True)
+ assert_equal(gottx['txid'], tx)
+ assert 'in_active_chain' not in gottx
+ else:
+ self.log.info("Test getrawtransaction without -txindex, without blockhash: expect the call to raise")
+ err_msg = (
+ "No such mempool transaction. Use -txindex or provide a block hash to enable"
+ " blockchain transaction queries. Use gettransaction for wallet transactions."
+ )
+ assert_raises_rpc_error(-5, err_msg, self.nodes[n].getrawtransaction, txid=tx, verbose=True)
+ # We should not get the tx if we provide an unrelated block
+ assert_raises_rpc_error(-5, "No such transaction found", self.nodes[n].getrawtransaction, txid=tx, blockhash=block2)
+ # An invalid block hash should raise the correct errors
+ assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[n].getrawtransaction, txid=tx, blockhash=True)
+ assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[n].getrawtransaction, txid=tx, blockhash="foobar")
+ assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[n].getrawtransaction, txid=tx, blockhash="abcd1234")
+ foo = "ZZZ0000000000000000000000000000000000000000000000000000000000000"
+ assert_raises_rpc_error(-8, f"parameter 3 must be hexadecimal string (not '{foo}')", self.nodes[n].getrawtransaction, txid=tx, blockhash=foo)
+ bar = "0000000000000000000000000000000000000000000000000000000000000000"
+ assert_raises_rpc_error(-5, "Block hash not found", self.nodes[n].getrawtransaction, txid=tx, blockhash=bar)
+ # Undo the blocks and verify that "in_active_chain" is false.
+ self.nodes[n].invalidateblock(block1)
+ gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
+ assert_equal(gottx['in_active_chain'], False)
+ self.nodes[n].reconsiderblock(block1)
+ assert_equal(self.nodes[n].getbestblockhash(), block2)
+
+ self.log.info("Test getrawtransaction on genesis block coinbase returns an error")
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
- self.log.info('Check parameter types and required parameters of createrawtransaction')
+ def createrawtransaction_tests(self):
+ self.log.info("Test createrawtransaction")
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
@@ -91,16 +187,28 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
- txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
- assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
- assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
- assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
- assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
- assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
+ txid = "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844"
+ assert_raises_rpc_error(-8, f"txid must be hexadecimal string (not '{txid}')", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
+ assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': TXID}], {})
+ assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': TXID, 'vout': 'foo'}], {})
+ assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].createrawtransaction, [{'txid': TXID, 'vout': -1}], {})
+ # sequence number out of range
+ for invalid_seq in [-1, 4294967296]:
+ inputs = [{'txid': TXID, 'vout': 1, 'sequence': invalid_seq}]
+ outputs = {self.nodes[0].getnewaddress(): 1}
+ assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range',
+ self.nodes[0].createrawtransaction, inputs, outputs)
+ # with valid sequence number
+ for valid_seq in [1000, 4294967294]:
+ inputs = [{'txid': TXID, 'vout': 1, 'sequence': valid_seq}]
+ outputs = {self.nodes[0].getnewaddress(): 1}
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ decrawtx = self.nodes[0].decoderawtransaction(rawtx)
+ assert_equal(decrawtx['vin'][0]['sequence'], valid_seq)
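A hedged aside on the sequence bounds exercised above (pure arithmetic, no RPC): nSequence is serialized as an unsigned 32-bit integer, which is why -1 and 4294967296 are rejected while 1000 and 4294967294 are accepted.

```python
# The valid nSequence range is 0..2**32 - 1; the test's boundary values sit just inside
# and just outside of it.
assert 4294967296 == 2**32             # one past the 32-bit range -> "sequence number is out of range"
assert 0 <= 1000 <= 2**32 - 1          # accepted by createrawtransaction
assert 0 <= 4294967294 <= 2**32 - 1    # accepted and echoed back by decoderawtransaction
```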
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
@@ -127,53 +235,51 @@ class RawTransactionsTest(BitcoinTestFramework):
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
- self.log.info('Check that createrawtransaction accepts an array and object as outputs')
+ # Test that createrawtransaction accepts an array and object as outputs
# One output
- tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))
+ tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs={address: 99}))
assert_equal(len(tx.vout), 1)
assert_equal(
tx.serialize().hex(),
- self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
+ self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
- tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))
+ tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))
assert_equal(len(tx.vout), 2)
assert_equal(
tx.serialize().hex(),
- self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
+ self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
- tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))
+ tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))
assert_equal(len(tx.vout), 3)
assert_equal(
tx.serialize().hex(),
- self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
+ self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
+ def signrawtransactionwithwallet_tests(self):
for type in ["bech32", "p2sh-segwit", "legacy"]:
+ self.log.info(f"Test signrawtransactionwithwallet with missing prevtx info ({type})")
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
+ inputs = [{'txid': TXID, 'vout': 3, 'sequence': 1000}]
+ outputs = {self.nodes[0].getnewaddress(): 1}
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
- self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
-
- # Test `signrawtransactionwithwallet` invalid `prevtxs`
- inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
- outputs = { self.nodes[0].getnewaddress() : 1 }
- rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
-
- prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
+ prevtx = dict(txid=TXID, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
+
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
-
- if type != "legacy":
+ else:
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
- "txid": txid,
+ "txid": TXID,
"scriptPubKey": pubkey,
"vout": 3,
}
@@ -181,7 +287,7 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
- "txid": txid,
+ "txid": TXID,
"scriptPubKey": pubkey,
"amount": 1,
}
@@ -195,273 +301,23 @@ class RawTransactionsTest(BitcoinTestFramework):
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
- "txid": txid,
+ "txid": TXID,
"vout": 3,
"amount": 1
}
])
- #########################################
- # sendrawtransaction with missing input #
- #########################################
-
- self.log.info('sendrawtransaction with missing input')
- inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
- outputs = { self.nodes[0].getnewaddress() : 4.998 }
- rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
- rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
-
- # This will raise an exception since there are missing inputs
+ def sendrawtransaction_tests(self):
+ self.log.info("Test sendrawtransaction with missing input")
+ inputs = [{'txid': TXID, 'vout': 1}] # won't exist
+ outputs = {self.nodes[0].getnewaddress(): 4.998}
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
assert_raises_rpc_error(-25, "bad-txns-inputs-missingorspent", self.nodes[2].sendrawtransaction, rawtx['hex'])
- #####################################
- # getrawtransaction with block hash #
- #####################################
-
- # make a tx by sending then generate 2 blocks; block1 has the tx in it
- tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
- block1, block2 = self.nodes[2].generate(2)
- self.sync_all()
- # We should be able to get the raw transaction by providing the correct block
- gottx = self.nodes[0].getrawtransaction(tx, True, block1)
- assert_equal(gottx['txid'], tx)
- assert_equal(gottx['in_active_chain'], True)
- # We should not have the 'in_active_chain' flag when we don't provide a block
- gottx = self.nodes[0].getrawtransaction(tx, True)
- assert_equal(gottx['txid'], tx)
- assert 'in_active_chain' not in gottx
- # We should not get the tx if we provide an unrelated block
- assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
- # An invalid block hash should raise the correct errors
- assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
- assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
- assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
- assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
- assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
- # Undo the blocks and check in_active_chain
- self.nodes[0].invalidateblock(block1)
- gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
- assert_equal(gottx['in_active_chain'], False)
- self.nodes[0].reconsiderblock(block1)
- assert_equal(self.nodes[0].getbestblockhash(), block2)
-
- if not self.options.descriptors:
- # The traditional multisig workflow does not work with descriptor wallets so these are legacy only.
- # The multisig workflow with descriptor wallets uses PSBTs and is tested elsewhere, no need to do them here.
- #########################
- # RAW TX MULTISIG TESTS #
- #########################
- # 2of2 test
- addr1 = self.nodes[2].getnewaddress()
- addr2 = self.nodes[2].getnewaddress()
-
- addr1Obj = self.nodes[2].getaddressinfo(addr1)
- addr2Obj = self.nodes[2].getaddressinfo(addr2)
-
- # Tests for createmultisig and addmultisigaddress
- assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
- self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
- assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
-
- mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
-
- #use balance deltas instead of absolute values
- bal = self.nodes[2].getbalance()
-
- # send 1.2 BTC to msig adr
- txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
- self.sync_all()
- self.nodes[0].generate(1)
- self.sync_all()
- assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
-
-
- # 2of3 test from different nodes
- bal = self.nodes[2].getbalance()
- addr1 = self.nodes[1].getnewaddress()
- addr2 = self.nodes[2].getnewaddress()
- addr3 = self.nodes[2].getnewaddress()
-
- addr1Obj = self.nodes[1].getaddressinfo(addr1)
- addr2Obj = self.nodes[2].getaddressinfo(addr2)
- addr3Obj = self.nodes[2].getaddressinfo(addr3)
-
- mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
-
- txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
- decTx = self.nodes[0].gettransaction(txId)
- rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
- self.sync_all()
- self.nodes[0].generate(1)
- self.sync_all()
-
- #THIS IS AN INCOMPLETE FEATURE
- #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
- assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
-
- txDetails = self.nodes[0].gettransaction(txId, True)
- rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
- vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2.20000000'))
-
- bal = self.nodes[0].getbalance()
- inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
- outputs = { self.nodes[0].getnewaddress() : 2.19 }
- rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
- rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
- assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
-
- rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
- assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
- self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
- rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
- self.sync_all()
- self.nodes[0].generate(1)
- self.sync_all()
- assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
-
- # 2of2 test for combining transactions
- bal = self.nodes[2].getbalance()
- addr1 = self.nodes[1].getnewaddress()
- addr2 = self.nodes[2].getnewaddress()
-
- addr1Obj = self.nodes[1].getaddressinfo(addr1)
- addr2Obj = self.nodes[2].getaddressinfo(addr2)
-
- self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
- mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
- mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
-
- txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
- decTx = self.nodes[0].gettransaction(txId)
- rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
- self.sync_all()
- self.nodes[0].generate(1)
- self.sync_all()
-
- assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
-
- txDetails = self.nodes[0].gettransaction(txId, True)
- rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
- vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2.20000000'))
-
- bal = self.nodes[0].getbalance()
- inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
- outputs = { self.nodes[0].getnewaddress() : 2.19 }
- rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
- rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
- self.log.debug(rawTxPartialSigned1)
- assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
-
- rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
- self.log.debug(rawTxPartialSigned2)
- assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
- rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
- self.log.debug(rawTxComb)
- self.nodes[2].sendrawtransaction(rawTxComb)
- rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
- self.sync_all()
- self.nodes[0].generate(1)
- self.sync_all()
- assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
-
- # decoderawtransaction tests
- # witness transaction
- encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
- decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
- assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
- assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
- # non-witness transaction
- encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
- decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
- assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
- # known ambiguous transaction in the chain (see https://github.com/bitcoin/bitcoin/issues/20579)
- encrawtx = "020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff4b03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000ffffffff03f4c1fb4b0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000266a24aa21a9ed957d1036a80343e0d1b659497e1b48a38ebe876a056d45965fac4a85cda84e1900000000000000002952534b424c4f434b3a8e092581ab01986cbadc84f4b43f4fa4bb9e7a2e2a0caf9b7cf64d939028e22c0120000000000000000000000000000000000000000000000000000000000000000000000000"
- decrawtx = self.nodes[0].decoderawtransaction(encrawtx)
- decrawtx_wit = self.nodes[0].decoderawtransaction(encrawtx, True)
- assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # fails to decode as non-witness transaction
- assert_equal(decrawtx, decrawtx_wit) # the witness interpretation should be chosen
- assert_equal(decrawtx['vin'][0]['coinbase'], "03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000")
-
- # Basic signrawtransaction test
- addr = self.nodes[1].getnewaddress()
- txid = self.nodes[0].sendtoaddress(addr, 10)
- self.nodes[0].generate(1)
- self.sync_all()
- vout = find_vout_for_address(self.nodes[1], txid, addr)
- rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
- rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
- txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
- self.nodes[0].generate(1)
- self.sync_all()
-
- # getrawtransaction tests
- # 1. valid parameters - only supply txid
- assert_equal(self.nodes[0].getrawtransaction(txId), rawTxSigned['hex'])
-
- # 2. valid parameters - supply txid and 0 for non-verbose
- assert_equal(self.nodes[0].getrawtransaction(txId, 0), rawTxSigned['hex'])
-
- # 3. valid parameters - supply txid and False for non-verbose
- assert_equal(self.nodes[0].getrawtransaction(txId, False), rawTxSigned['hex'])
-
- # 4. valid parameters - supply txid and 1 for verbose.
- # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
- assert_equal(self.nodes[0].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
-
- # 5. valid parameters - supply txid and True for non-verbose
- assert_equal(self.nodes[0].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
-
- # 6. invalid parameters - supply txid and string "Flase"
- assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, "Flase")
-
- # 7. invalid parameters - supply txid and empty array
- assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, [])
-
- # 8. invalid parameters - supply txid and empty dict
- assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, {})
-
- inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
- outputs = { self.nodes[0].getnewaddress() : 1 }
- rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
- decrawtx= self.nodes[0].decoderawtransaction(rawtx)
- assert_equal(decrawtx['vin'][0]['sequence'], 1000)
-
- # 9. invalid parameters - sequence number out of range
- inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
- outputs = { self.nodes[0].getnewaddress() : 1 }
- assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
-
- # 10. invalid parameters - sequence number out of range
- inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
- outputs = { self.nodes[0].getnewaddress() : 1 }
- assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
-
- inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
- outputs = { self.nodes[0].getnewaddress() : 1 }
- rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
- decrawtx= self.nodes[0].decoderawtransaction(rawtx)
- assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
-
- ####################################
- # TRANSACTION VERSION NUMBER TESTS #
- ####################################
-
- # Test the minimum transaction version number that fits in a signed 32-bit integer.
- # As transaction version is unsigned, this should convert to its unsigned equivalent.
- tx = CTransaction()
- tx.nVersion = -0x80000000
- rawtx = tx.serialize().hex()
- decrawtx = self.nodes[0].decoderawtransaction(rawtx)
- assert_equal(decrawtx['version'], 0x80000000)
-
- # Test the maximum transaction version number that fits in a signed 32-bit integer.
- tx = CTransaction()
- tx.nVersion = 0x7fffffff
- rawtx = tx.serialize().hex()
- decrawtx = self.nodes[0].decoderawtransaction(rawtx)
- assert_equal(decrawtx['version'], 0x7fffffff)
-
- self.log.info('sendrawtransaction/testmempoolaccept with maxfeerate')
+ def sendrawtransaction_testmempoolaccept_tests(self):
+ self.log.info("Test sendrawtransaction/testmempoolaccept with maxfeerate")
+ fee_exceeds_max = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
# Test a transaction with a small fee.
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
@@ -469,9 +325,9 @@ class RawTransactionsTest(BitcoinTestFramework):
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
- inputs = [{ "txid" : txId, "vout" : vout['n'] }]
+ inputs = [{"txid": txId, "vout": vout['n']}]
# Fee 10,000 satoshis, (1 - (10000 sat * 0.00000001 BTC/sat)) = 0.9999
- outputs = { self.nodes[0].getnewaddress() : Decimal("0.99990000") }
+ outputs = {self.nodes[0].getnewaddress(): Decimal("0.99990000")}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
@@ -481,7 +337,7 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], 'max-fee-exceeded')
# and sendrawtransaction should throw
- assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
+ assert_raises_rpc_error(-25, fee_exceeds_max, self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
# and the following calls should both succeed
testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']])[0]
assert_equal(testres['allowed'], True)
@@ -493,9 +349,9 @@ class RawTransactionsTest(BitcoinTestFramework):
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
- inputs = [{ "txid" : txId, "vout" : vout['n'] }]
+ inputs = [{"txid": txId, "vout": vout['n']}]
# Fee 2,000,000 satoshis, (1 - (2000000 sat * 0.00000001 BTC/sat)) = 0.98
- outputs = { self.nodes[0].getnewaddress() : Decimal("0.98000000") }
+ outputs = {self.nodes[0].getnewaddress(): Decimal("0.98000000")}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
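The fee arithmetic in the comments of the two maxfeerate cases above can be checked directly with pure Decimal math, independent of the RPC calls:

```python
from decimal import Decimal

# 10,000 sat at 0.00000001 BTC/sat leaves 0.9999 BTC of a 1 BTC input for the output.
assert Decimal(1) - 10000 * Decimal("0.00000001") == Decimal("0.9999")
# 2,000,000 sat (0.02 BTC) leaves 0.98 BTC.
assert Decimal(1) - 2000000 * Decimal("0.00000001") == Decimal("0.98")
```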
@@ -505,12 +361,181 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], 'max-fee-exceeded')
# and sendrawtransaction should throw
- assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex'])
+ assert_raises_rpc_error(-25, fee_exceeds_max, self.nodes[2].sendrawtransaction, rawTxSigned['hex'])
# and the following calls should both succeed
testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.20000000')[0]
assert_equal(testres['allowed'], True)
self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.20000000')
+ self.log.info("Test sendrawtransaction/testmempoolaccept with tx already in the chain")
+ self.nodes[2].generate(1)
+ self.sync_blocks()
+ for node in self.nodes:
+ testres = node.testmempoolaccept([rawTxSigned['hex']])[0]
+ assert_equal(testres['allowed'], False)
+ assert_equal(testres['reject-reason'], 'txn-already-known')
+ assert_raises_rpc_error(-27, 'Transaction already in block chain', node.sendrawtransaction, rawTxSigned['hex'])
+
+ def decoderawtransaction_tests(self):
+ self.log.info("Test decoderawtransaction")
+ # witness transaction
+ encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
+ decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
+ assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
+ assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
+ # non-witness transaction
+ encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
+ decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
+ assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
+ # known ambiguous transaction in the chain (see https://github.com/bitcoin/bitcoin/issues/20579)
+ coinbase = "03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000"
+ encrawtx = f"020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff4b{coinbase}" \
+ "ffffffff03f4c1fb4b0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000266a24aa21a9ed957d1036a80343e0d1b659497e1b48a38ebe876a056d45965fac4a85cda84e1900000000000000002952534b424c4f434b3a8e092581ab01986cbadc84f4b43f4fa4bb9e7a2e2a0caf9b7cf64d939028e22c0120000000000000000000000000000000000000000000000000000000000000000000000000"
+ decrawtx = self.nodes[0].decoderawtransaction(encrawtx)
+ decrawtx_wit = self.nodes[0].decoderawtransaction(encrawtx, True)
+ assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # fails to decode as non-witness transaction
+ assert_equal(decrawtx, decrawtx_wit) # the witness interpretation should be chosen
+ assert_equal(decrawtx['vin'][0]['coinbase'], coinbase)
+
+ def transaction_version_number_tests(self):
+ self.log.info("Test transaction version numbers")
+
+ # Test the minimum transaction version number that fits in a signed 32-bit integer.
+ # As transaction version is unsigned, this should convert to its unsigned equivalent.
+ tx = CTransaction()
+ tx.nVersion = -0x80000000
+ rawtx = tx.serialize().hex()
+ decrawtx = self.nodes[0].decoderawtransaction(rawtx)
+ assert_equal(decrawtx['version'], 0x80000000)
+
+ # Test the maximum transaction version number that fits in a signed 32-bit integer.
+ tx = CTransaction()
+ tx.nVersion = 0x7fffffff
+ rawtx = tx.serialize().hex()
+ decrawtx = self.nodes[0].decoderawtransaction(rawtx)
+ assert_equal(decrawtx['version'], 0x7fffffff)
+
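A hedged arithmetic check of the signed-to-unsigned conversion the comments above describe: the version field is serialized as an unsigned 32-bit integer, so a negative nVersion wraps modulo 2**32.

```python
# Python's % operator gives the same wrap-around the serializer produces.
assert -0x80000000 % 2**32 == 0x80000000   # minimum signed 32-bit value -> 2147483648
assert 0x7fffffff % 2**32 == 0x7fffffff    # maximum signed 32-bit value is unchanged
```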
+ def raw_multisig_transaction_legacy_tests(self):
+ self.log.info("Test raw multisig transactions (legacy)")
+ # The traditional multisig workflow does not work with descriptor wallets so these are legacy only.
+ # The multisig workflow with descriptor wallets uses PSBTs and is tested elsewhere, no need to do them here.
+
+ # 2of2 test
+ addr1 = self.nodes[2].getnewaddress()
+ addr2 = self.nodes[2].getnewaddress()
+
+ addr1Obj = self.nodes[2].getaddressinfo(addr1)
+ addr2Obj = self.nodes[2].getaddressinfo(addr2)
+
+ # Tests for createmultisig and addmultisigaddress
+ assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
+ # createmultisig can only take public keys
+ self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
+ # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here
+ assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
+
+ mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
+
+ # use balance deltas instead of absolute values
+ bal = self.nodes[2].getbalance()
+
+ # send 1.2 BTC to the multisig address
+ txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+ # node2 has both keys of the 2of2 ms addr, tx should affect the balance
+ assert_equal(self.nodes[2].getbalance(), bal + Decimal('1.20000000'))
+
+
+ # 2of3 test from different nodes
+ bal = self.nodes[2].getbalance()
+ addr1 = self.nodes[1].getnewaddress()
+ addr2 = self.nodes[2].getnewaddress()
+ addr3 = self.nodes[2].getnewaddress()
+
+ addr1Obj = self.nodes[1].getaddressinfo(addr1)
+ addr2Obj = self.nodes[2].getaddressinfo(addr2)
+ addr3Obj = self.nodes[2].getaddressinfo(addr3)
+
+ mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
+
+ txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
+ decTx = self.nodes[0].gettransaction(txId)
+ rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ # THIS IS AN INCOMPLETE FEATURE
+ # NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT IN THE BALANCE CALCULATION
+ assert_equal(self.nodes[2].getbalance(), bal) # for now, assume the funds of a 2of3 multisig tx are not marked as spendable
+
+ txDetails = self.nodes[0].gettransaction(txId, True)
+ rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
+ vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2.20000000'))
+
+ bal = self.nodes[0].getbalance()
+ inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "amount": vout['value']}]
+ outputs = {self.nodes[0].getnewaddress(): 2.19}
+ rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
+ rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
+ assert_equal(rawTxPartialSigned['complete'], False) # node1 only has one key, so it cannot completely sign the tx
+
+ rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
+ assert_equal(rawTxSigned['complete'], True) # node2 owns two of the three keys, so it can completely sign the tx
+ self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
+ rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+ assert_equal(self.nodes[0].getbalance(), bal + Decimal('50.00000000') + Decimal('2.19000000')) # block reward + tx
+
+ # 2of2 test for combining transactions
+ bal = self.nodes[2].getbalance()
+ addr1 = self.nodes[1].getnewaddress()
+ addr2 = self.nodes[2].getnewaddress()
+
+ addr1Obj = self.nodes[1].getaddressinfo(addr1)
+ addr2Obj = self.nodes[2].getaddressinfo(addr2)
+
+ self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
+ mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
+ mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
+
+ txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
+ decTx = self.nodes[0].gettransaction(txId)
+ rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
+
+ txDetails = self.nodes[0].gettransaction(txId, True)
+ rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
+ vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2.20000000'))
+
+ bal = self.nodes[0].getbalance()
+ inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "redeemScript": mSigObjValid['hex'], "amount": vout['value']}]
+ outputs = {self.nodes[0].getnewaddress(): 2.19}
+ rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
+ rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
+ self.log.debug(rawTxPartialSigned1)
+ assert_equal(rawTxPartialSigned1['complete'], False) # node1 only has one key, so it cannot completely sign the tx
+
+ rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
+ self.log.debug(rawTxPartialSigned2)
+ assert_equal(rawTxPartialSigned2['complete'], False) # node2 only has one key, so it cannot completely sign the tx
+ rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
+ self.log.debug(rawTxComb)
+ self.nodes[2].sendrawtransaction(rawTxComb)
+ rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+ assert_equal(self.nodes[0].getbalance(), bal + Decimal('50.00000000') + Decimal('2.19000000')) # block reward + tx
+
if __name__ == '__main__':
RawTransactionsTest().main()
diff --git a/test/functional/rpc_signmessage.py b/test/functional/rpc_signmessagewithprivkey.py
index 1c71732a61..95beba8730 100755
--- a/test/functional/rpc_signmessage.py
+++ b/test/functional/rpc_signmessagewithprivkey.py
@@ -2,7 +2,7 @@
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test RPC commands for signing and verifying messages."""
+"""Test RPC commands for signing messages with private key."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -10,14 +10,10 @@ from test_framework.util import (
assert_raises_rpc_error,
)
-class SignMessagesTest(BitcoinTestFramework):
+class SignMessagesWithPrivTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
- self.extra_args = [["-addresstype=legacy"]]
-
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
def run_test(self):
message = 'This is just a test message'
@@ -30,33 +26,20 @@ class SignMessagesTest(BitcoinTestFramework):
assert_equal(expected_signature, signature)
assert self.nodes[0].verifymessage(address, signature, message)
- self.log.info('test signing with an address with wallet')
- address = self.nodes[0].getnewaddress()
- signature = self.nodes[0].signmessage(address, message)
- assert self.nodes[0].verifymessage(address, signature, message)
-
- self.log.info('test verifying with another address should not work')
- other_address = self.nodes[0].getnewaddress()
- other_signature = self.nodes[0].signmessage(other_address, message)
- assert not self.nodes[0].verifymessage(other_address, signature, message)
- assert not self.nodes[0].verifymessage(address, other_signature, message)
-
self.log.info('test parameter validity and error codes')
- # signmessage(withprivkey) have two required parameters
+ # signmessagewithprivkey has two required parameters
for num_params in [0, 1, 3, 4, 5]:
param_list = ["dummy"]*num_params
assert_raises_rpc_error(-1, "signmessagewithprivkey", self.nodes[0].signmessagewithprivkey, *param_list)
- assert_raises_rpc_error(-1, "signmessage", self.nodes[0].signmessage, *param_list)
# verifymessage has three required parameters
for num_params in [0, 1, 2, 4, 5]:
param_list = ["dummy"]*num_params
assert_raises_rpc_error(-1, "verifymessage", self.nodes[0].verifymessage, *param_list)
# invalid key or address provided
assert_raises_rpc_error(-5, "Invalid private key", self.nodes[0].signmessagewithprivkey, "invalid_key", message)
- assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].signmessage, "invalid_addr", message)
assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].verifymessage, "invalid_addr", signature, message)
# malformed signature provided
- assert_raises_rpc_error(-3, "Malformed base64 encoding", self.nodes[0].verifymessage, self.nodes[0].getnewaddress(), "invalid_sig", message)
+ assert_raises_rpc_error(-3, "Malformed base64 encoding", self.nodes[0].verifymessage, 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB', "invalid_sig", message)
if __name__ == '__main__':
- SignMessagesTest().main()
+ SignMessagesWithPrivTest().main()
diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py
index ef5d08e7b9..e6fef7e838 100755
--- a/test/functional/rpc_signrawtransaction.py
+++ b/test/functional/rpc_signrawtransaction.py
@@ -4,9 +4,11 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
-from test_framework.blocktools import COINBASE_MATURITY
+from test_framework.blocktools import (
+ COINBASE_MATURITY,
+ CSV_ACTIVATION_HEIGHT,
+)
from test_framework.address import (
- check_script,
script_to_p2sh,
script_to_p2wsh,
)
@@ -16,16 +18,14 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
- hex_str_to_bytes,
+ generate_to_height,
)
from test_framework.messages import (
CTxInWitness,
- sha256,
tx_from_hex,
)
from test_framework.script import (
CScript,
- OP_0,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKSIG,
OP_CHECKSEQUENCEVERIFY,
@@ -231,9 +231,9 @@ class SignRawTransactionsTest(BitcoinTestFramework):
embedded_pubkey = eckey.get_pubkey().get_bytes().hex()
witness_script = {
'P2PKH': key_to_p2pkh_script(embedded_pubkey).hex(),
- 'P2PK': CScript([hex_str_to_bytes(embedded_pubkey), OP_CHECKSIG]).hex()
+ 'P2PK': CScript([bytes.fromhex(embedded_pubkey), OP_CHECKSIG]).hex()
}.get(tx_type, "Invalid tx_type")
- redeem_script = CScript([OP_0, sha256(check_script(witness_script))]).hex()
+ redeem_script = script_to_p2wsh_script(witness_script).hex()
addr = script_to_p2sh(redeem_script)
script_pub_key = self.nodes[1].validateaddress(addr)['scriptPubKey']
# Fund that address
@@ -273,7 +273,8 @@ class SignRawTransactionsTest(BitcoinTestFramework):
getcontext().prec = 8
# Make sure CSV is active
- self.nodes[0].generate(500)
+ generate_to_height(self, self.nodes[0], CSV_ACTIVATION_HEIGHT)
+ assert self.nodes[0].getblockchaininfo()['softforks']['csv']['active']
# Create a P2WSH script with CSV
script = CScript([1, OP_CHECKSEQUENCEVERIFY, OP_DROP])
@@ -307,11 +308,11 @@ class SignRawTransactionsTest(BitcoinTestFramework):
self.nodes[0].walletpassphrase("password", 9999)
getcontext().prec = 8
- # Make sure CSV is active
- self.nodes[0].generate(1500)
+ # Make sure CLTV is active
+ assert self.nodes[0].getblockchaininfo()['softforks']['bip65']['active']
# Create a P2WSH script with CLTV
- script = CScript([1000, OP_CHECKLOCKTIMEVERIFY, OP_DROP])
+ script = CScript([100, OP_CHECKLOCKTIMEVERIFY, OP_DROP])
address = script_to_p2wsh(script)
# Fund that address and make the spend
diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py
index 360962b8da..fe733e9368 100644
--- a/test/functional/test_framework/address.py
+++ b/test/functional/test_framework/address.py
@@ -12,7 +12,7 @@ import unittest
from .script import hash256, hash160, sha256, CScript, OP_0
from .segwit_addr import encode_segwit_address
-from .util import assert_equal, hex_str_to_bytes
+from .util import assert_equal
ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97'
@@ -33,7 +33,7 @@ def byte_to_base58(b, version):
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
- checksum = hash256(hex_str_to_bytes(str)).hex()
+ checksum = hash256(bytes.fromhex(str)).hex()
str += checksum[:8]
value = int('0x' + str, 0)
while value > 0:
@@ -100,7 +100,7 @@ def key_to_p2sh_p2wpkh(key, main=False):
def program_to_witness(version, program, main=False):
if (type(program) is str):
- program = hex_str_to_bytes(program)
+ program = bytes.fromhex(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
@@ -121,14 +121,14 @@ def script_to_p2sh_p2wsh(script, main=False):
def check_key(key):
if (type(key) is str):
- key = hex_str_to_bytes(key) # Assuming this is hex string
+ key = bytes.fromhex(key) # Assuming this is a hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
- script = hex_str_to_bytes(script) # Assuming this is hex string
+ script = bytes.fromhex(script) # Assuming this is a hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
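The change above is part of a tree-wide cleanup: the framework's hex_str_to_bytes helper and the binascii/codecs functions are replaced by the built-in bytes.fromhex()/bytes.hex(). A small self-contained check of the equivalence this relies on (illustrative only, not part of the diff):

from binascii import hexlify, unhexlify

assert bytes.fromhex("00ff") == unhexlify("00ff") == b"\x00\xff"
assert b"\x00\xff".hex() == hexlify(b"\x00\xff").decode("ascii") == "00ff"
# where the old bytes return value of hexlify() is still wanted (as in the
# bdb.py change below), .encode() restores it:
assert b"\x00\xff".hex().encode() == hexlify(b"\x00\xff") == b"00ff"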
diff --git a/test/functional/test_framework/bdb.py b/test/functional/test_framework/bdb.py
index 97b9c1d6d0..d623bcdf6e 100644
--- a/test/functional/test_framework/bdb.py
+++ b/test/functional/test_framework/bdb.py
@@ -24,7 +24,6 @@ transactions.
`db_dump -da wallet.dat` is useful to see the data in a wallet.dat BDB file
"""
-import binascii
import struct
# Important constants
@@ -96,7 +95,7 @@ def dump_meta_page(page):
metadata['key_count'] = key_count
metadata['record_count'] = record_count
metadata['flags'] = flags
- metadata['uid'] = binascii.hexlify(uid)
+ metadata['uid'] = uid.hex().encode()
assert magic == BTREE_MAGIC, 'bdb magic does not match bdb btree magic'
assert pg_type == BTREE_META, 'Metadata page is not a btree metadata page'
@@ -110,8 +109,9 @@ def dump_meta_page(page):
metadata['re_pad'] = re_pad
metadata['root'] = root
metadata['crypto_magic'] = crypto_magic
- metadata['iv'] = binascii.hexlify(iv)
- metadata['chksum'] = binascii.hexlify(chksum)
+ metadata['iv'] = iv.hex().encode()
+ metadata['chksum'] = chksum.hex().encode()
+
return metadata
# Given the dict from dump_leaf_page, get the key-value pairs and put them into a dict
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index bc43438810..25b36b6a91 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -4,7 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
-from binascii import a2b_hex
import struct
import time
import unittest
@@ -24,9 +23,7 @@ from .messages import (
CTxInWitness,
CTxOut,
hash256,
- hex_str_to_bytes,
ser_uint256,
- sha256,
tx_from_hex,
uint256_from_str,
)
@@ -34,13 +31,15 @@ from .script import (
CScript,
CScriptNum,
CScriptOp,
- OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
- hash160,
+)
+from .script_util import (
+ key_to_p2wpkh_script,
+ script_to_p2wsh_script,
)
from .util import assert_equal
@@ -54,10 +53,16 @@ TIME_GENESIS_BLOCK = 1296688602
# Coinbase transaction outputs can only be spent after this number of new blocks (network rule)
COINBASE_MATURITY = 100
+# Soft-fork activation heights
+DERSIG_HEIGHT = 102 # BIP 66
+CLTV_HEIGHT = 111 # BIP 65
+CSV_ACTIVATION_HEIGHT = 432
+
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
+VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4
def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
@@ -65,11 +70,11 @@ def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl
block = CBlock()
if tmpl is None:
tmpl = {}
- block.nVersion = version or tmpl.get('version') or 1
+ block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION
block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
if tmpl and not tmpl.get('bits') is None:
- block.nBits = struct.unpack('>I', a2b_hex(tmpl['bits']))[0]
+ block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
else:
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
if coinbase is None:
@@ -206,13 +211,11 @@ def witness_script(use_p2wsh, pubkey):
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
- pubkeyhash = hash160(hex_str_to_bytes(pubkey))
- pkscript = CScript([OP_0, pubkeyhash])
+ pkscript = key_to_p2wpkh_script(pubkey)
else:
# 1-of-1 multisig
- witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
- scripthash = sha256(witness_program)
- pkscript = CScript([OP_0, scripthash])
+ witness_script = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
+ pkscript = script_to_p2wsh_script(witness_script)
return pkscript.hex()
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
@@ -220,7 +223,7 @@ def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
- program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
+ program = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
@@ -243,7 +246,7 @@ def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=Tru
else:
if (insert_redeem_script):
tx = tx_from_hex(tx_to_witness)
- tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
+ tx.vin[0].scriptSig += CScript([bytes.fromhex(insert_redeem_script)])
tx_to_witness = tx.serialize().hex()
return node.sendrawtransaction(tx_to_witness)
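The witness_script() rewrite above leans on the script_util helpers instead of building the OP_0 witness programs inline. As a hedged sketch, the assertions below restate the equivalences the diff relies on; the pubkey and witness script are made-up placeholders, and the helper behaviour is assumed to match the inline code it replaces:

from test_framework.script import CScript, OP_0, OP_TRUE, hash160, sha256
from test_framework.script_util import key_to_p2wpkh_script, script_to_p2wsh_script

pubkey = "02" + "11" * 32             # placeholder 33-byte compressed pubkey (hex)
witness_script = CScript([OP_TRUE])   # placeholder witness script

# P2WPKH: OP_0 <20-byte key hash>
assert key_to_p2wpkh_script(pubkey) == CScript([OP_0, hash160(bytes.fromhex(pubkey))])
# P2WSH: OP_0 <32-byte script hash>
assert script_to_p2wsh_script(witness_script) == CScript([OP_0, sha256(witness_script)])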
diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py
index 7705dd3e4d..ad8cfe5c9a 100644
--- a/test/functional/test_framework/coverage.py
+++ b/test/functional/test_framework/coverage.py
@@ -10,6 +10,7 @@ testing.
import os
+from .authproxy import AuthServiceProxy
REFERENCE_FILENAME = 'rpc_interface.txt'
@@ -19,16 +20,17 @@ class AuthServiceProxyWrapper():
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
- def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
+ def __init__(self, auth_service_proxy_instance: AuthServiceProxy, rpc_url: str, coverage_logfile: str=None):
"""
Kwargs:
- auth_service_proxy_instance (AuthServiceProxy): the instance
- being wrapped.
- coverage_logfile (str): if specified, write each service_name
+ auth_service_proxy_instance: the instance being wrapped.
+ rpc_url: url of the RPC instance being wrapped
+ coverage_logfile: if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
+ self.rpc_url = rpc_url
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
@@ -36,7 +38,7 @@ class AuthServiceProxyWrapper():
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
- return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
+ return AuthServiceProxyWrapper(return_val, self.rpc_url, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
@@ -57,6 +59,7 @@ class AuthServiceProxyWrapper():
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
+ self.rpc_url,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
@@ -74,18 +77,18 @@ def get_filename(dirname, n_node):
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
-def write_all_rpc_commands(dirname, node):
+def write_all_rpc_commands(dirname: str, node: AuthServiceProxy) -> bool:
"""
Write out a list of all RPC functions available in `bitcoin-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
- dirname (str): temporary test dir
- node (AuthServiceProxy): client
+ dirname: temporary test dir
+ node: client
Returns:
- bool. if the RPC interface file was written.
+ whether the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 504c8c70d4..65d90f8448 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -18,7 +18,7 @@ ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
-from codecs import encode
+from base64 import b32decode, b32encode
import copy
import hashlib
from io import BytesIO
@@ -29,10 +29,10 @@ import struct
import time
from test_framework.siphash import siphash256
-from test_framework.util import hex_str_to_bytes, assert_equal
+from test_framework.util import assert_equal
MAX_LOCATOR_SZ = 101
-MAX_BLOCK_BASE_SIZE = 1000000
+MAX_BLOCK_WEIGHT = 4000000
MAX_BLOOM_FILTER_SIZE = 36000
MAX_BLOOM_HASH_FUNCS = 50
@@ -196,7 +196,7 @@ def from_hex(obj, hex_string):
Note that there is no complementary helper like e.g. `to_hex` for the
inverse operation. To serialize a message object to a hex string, simply
use obj.serialize().hex()"""
- obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
+ obj.deserialize(BytesIO(bytes.fromhex(hex_string)))
return obj
@@ -213,15 +213,20 @@ class CAddress:
# see https://github.com/bitcoin/bips/blob/master/bip-0155.mediawiki
NET_IPV4 = 1
+ NET_I2P = 5
ADDRV2_NET_NAME = {
- NET_IPV4: "IPv4"
+ NET_IPV4: "IPv4",
+ NET_I2P: "I2P"
}
ADDRV2_ADDRESS_LENGTH = {
- NET_IPV4: 4
+ NET_IPV4: 4,
+ NET_I2P: 32
}
+ I2P_PAD = "===="
+
def __init__(self):
self.time = 0
self.nServices = 1
@@ -229,6 +234,9 @@ class CAddress:
self.ip = "0.0.0.0"
self.port = 0
+ def __eq__(self, other):
+ return self.net == other.net and self.ip == other.ip and self.nServices == other.nServices and self.port == other.port and self.time == other.time
+
def deserialize(self, f, *, with_time=True):
"""Deserialize from addrv1 format (pre-BIP155)"""
if with_time:
@@ -261,24 +269,33 @@ class CAddress:
self.nServices = deser_compact_size(f)
self.net = struct.unpack("B", f.read(1))[0]
- assert self.net == self.NET_IPV4
+ assert self.net in (self.NET_IPV4, self.NET_I2P)
address_length = deser_compact_size(f)
assert address_length == self.ADDRV2_ADDRESS_LENGTH[self.net]
- self.ip = socket.inet_ntoa(f.read(4))
+ addr_bytes = f.read(address_length)
+ if self.net == self.NET_IPV4:
+ self.ip = socket.inet_ntoa(addr_bytes)
+ else:
+ self.ip = b32encode(addr_bytes)[0:-len(self.I2P_PAD)].decode("ascii").lower() + ".b32.i2p"
self.port = struct.unpack(">H", f.read(2))[0]
def serialize_v2(self):
"""Serialize in addrv2 format (BIP155)"""
- assert self.net == self.NET_IPV4
+ assert self.net in (self.NET_IPV4, self.NET_I2P)
r = b""
r += struct.pack("<I", self.time)
r += ser_compact_size(self.nServices)
r += struct.pack("B", self.net)
r += ser_compact_size(self.ADDRV2_ADDRESS_LENGTH[self.net])
- r += socket.inet_aton(self.ip)
+ if self.net == self.NET_IPV4:
+ r += socket.inet_aton(self.ip)
+ else:
+ sfx = ".b32.i2p"
+ assert self.ip.endswith(sfx)
+ r += b32decode(self.ip[0:-len(sfx)] + self.I2P_PAD, True)
r += struct.pack(">H", self.port)
return r
@@ -590,12 +607,15 @@ class CTransaction:
return False
return True
- # Calculate the virtual transaction size using witness and non-witness
+ # Calculate the transaction weight using witness and non-witness
# serialization size (does NOT use sigops).
- def get_vsize(self):
+ def get_weight(self):
with_witness_size = len(self.serialize_with_witness())
without_witness_size = len(self.serialize_without_witness())
- return math.ceil(((WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size) / WITNESS_SCALE_FACTOR)
+ return (WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size
+
+ def get_vsize(self):
+ return math.ceil(self.get_weight() / WITNESS_SCALE_FACTOR)
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
@@ -621,7 +641,7 @@ class CBlockHeader:
self.calc_sha256()
def set_null(self):
- self.nVersion = 1
+ self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
@@ -660,7 +680,7 @@ class CBlockHeader:
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
- self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
+ self.hash = hash256(r)[::-1].hex()
def rehash(self):
self.sha256 = None
@@ -743,6 +763,13 @@ class CBlock(CBlockHeader):
self.nNonce += 1
self.rehash()
+ # Calculate the block weight using witness and non-witness
+ # serialization size (does NOT use sigops).
+ def get_weight(self):
+ with_witness_size = len(self.serialize(with_witness=True))
+ without_witness_size = len(self.serialize(with_witness=False))
+ return (WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size
+
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
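The new get_weight() helpers make the BIP141 relationship between weight and virtual size explicit: weight counts non-witness bytes four times and witness bytes once, and vsize is the weight divided by four, rounded up. A standalone worked example (assuming WITNESS_SCALE_FACTOR = 4, as in the framework):

import math

WITNESS_SCALE_FACTOR = 4

def tx_weight(size_without_witness, size_with_witness):
    # same formula as CTransaction.get_weight() / CBlock.get_weight() above
    return (WITNESS_SCALE_FACTOR - 1) * size_without_witness + size_with_witness

def tx_vsize(size_without_witness, size_with_witness):
    return math.ceil(tx_weight(size_without_witness, size_with_witness) / WITNESS_SCALE_FACTOR)

# e.g. a tx serializing to 200 bytes without witness data and 300 bytes with it:
assert tx_weight(200, 300) == 900   # 3 * 200 + 300
assert tx_vsize(200, 300) == 225    # ceil(900 / 4)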
diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py
index 5dc723c1d5..b5f78e0cf3 100644
--- a/test/functional/test_framework/netutil.py
+++ b/test/functional/test_framework/netutil.py
@@ -12,7 +12,6 @@ import socket
import struct
import array
import os
-from binascii import unhexlify
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
@@ -44,7 +43,7 @@ def _remove_empty(array):
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
- host = unhexlify(host)
+ host = bytes.fromhex(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index cc80b543cd..b7d5bd8fab 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -438,6 +438,7 @@ class P2PInterface(P2PConnection):
self.send_message(msg_sendaddrv2())
self.send_message(msg_verack())
self.nServices = message.nServices
+ self.send_message(msg_getaddr())
# Connection helper methods
diff --git a/test/functional/test_framework/script_util.py b/test/functional/test_framework/script_util.py
index 457be6b0e6..5d1d7ea45c 100755
--- a/test/functional/test_framework/script_util.py
+++ b/test/functional/test_framework/script_util.py
@@ -4,7 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import CScript, hash160, sha256, OP_0, OP_DUP, OP_HASH160, OP_CHECKSIG, OP_EQUAL, OP_EQUALVERIFY
-from test_framework.util import hex_str_to_bytes
# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
@@ -49,7 +48,7 @@ def key_to_p2sh_p2wpkh_script(key, main = False):
def program_to_witness_script(version, program, main = False):
if isinstance(program, str):
- program = hex_str_to_bytes(program)
+ program = bytes.fromhex(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
@@ -70,14 +69,14 @@ def script_to_p2sh_p2wsh_script(script, main = False):
def check_key(key):
if isinstance(key, str):
- key = hex_str_to_bytes(key) # Assuming this is hex string
+ key = bytes.fromhex(key) # Assuming this is a hex string
if isinstance(key, bytes) and (len(key) == 33 or len(key) == 65):
return key
assert False
def check_script(script):
if isinstance(script, str):
- script = hex_str_to_bytes(script) # Assuming this is hex string
+ script = bytes.fromhex(script) # Assuming this is a hex string
if isinstance(script, bytes) or isinstance(script, CScript):
return script
assert False
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 40360c54a0..f382e0fdb3 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -112,6 +112,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
# By default the wallet is not required. Set to true by skip_if_no_wallet().
# When False, we ignore wallet_names regardless of what it is.
self.requires_wallet = False
+ # Disable ThreadOpenConnections by default, so that adding entries to
+ # addrman will not result in automatic connections to them.
+ self.disable_autoconnect = True
self.set_test_params()
assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
if self.options.timeout_factor == 0 :
@@ -407,7 +410,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
- block_hash = self.nodes[0].generate(1)[0]
+ block_hash = self.generate(self.nodes[0], 1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
@@ -616,6 +619,22 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.connect_nodes(1, 2)
self.sync_all()
+ def generate(self, generator, *args, **kwargs):
+ blocks = generator.generate(*args, **kwargs)
+ return blocks
+
+ def generateblock(self, generator, *args, **kwargs):
+ blocks = generator.generateblock(*args, **kwargs)
+ return blocks
+
+ def generatetoaddress(self, generator, *args, **kwargs):
+ blocks = generator.generatetoaddress(*args, **kwargs)
+ return blocks
+
+ def generatetodescriptor(self, generator, *args, **kwargs):
+ blocks = generator.generatetodescriptor(*args, **kwargs)
+ return blocks
+
def sync_blocks(self, nodes=None, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
@@ -711,7 +730,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
if not os.path.isdir(cache_node_dir):
self.log.debug("Creating cache directory {}".format(cache_node_dir))
- initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
+ initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect)
self.nodes.append(
TestNode(
CACHE_NODE_ID,
@@ -746,7 +765,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_BCRT1_P2WSH_OP_TRUE]
assert_equal(len(gen_addresses), 4)
for i in range(8):
- cache_node.generatetoaddress(
+ self.generatetoaddress(
+ cache_node,
nblocks=25 if i != 7 else 24,
address=gen_addresses[i % len(gen_addresses)],
)
@@ -769,7 +789,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(cache_node_dir, to_dir)
- initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in bitcoin.conf
+ initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
@@ -777,7 +797,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
- initialize_datadir(self.options.tmpdir, i, self.chain)
+ initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index ba52abc7dd..f9e2cfa2f5 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -258,7 +258,7 @@ class TestNode():
return
self.rpc = rpc
self.rpc_connected = True
- self.url = self.rpc.url
+ self.url = self.rpc.rpc_url
return
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
@@ -557,9 +557,8 @@ class TestNode():
return p2p_conn
def add_outbound_p2p_connection(self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs):
- """Add an outbound p2p connection from node. Either
- full-relay("outbound-full-relay") or
- block-relay-only("block-relay-only") connection.
+ """Add an outbound p2p connection from node. Must be an
+ "outbound-full-relay", "block-relay-only" or "addr-fetch" connection.
This method adds the p2p connection to the self.p2ps list and returns
the connection to the caller.
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 35dbfbba8d..ec27fd7f85 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -5,7 +5,6 @@
"""Helpful routines for regression testing."""
from base64 import b64encode
-from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import hashlib
@@ -214,10 +213,6 @@ def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
-def hex_str_to_bytes(hex_str):
- return unhexlify(hex_str.encode('ascii'))
-
-
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
@@ -286,15 +281,15 @@ class PortSeed:
n = None
-def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
+def get_rpc_proxy(url: str, node_number: int, *, timeout: int=None, coveragedir: str=None) -> coverage.AuthServiceProxyWrapper:
"""
Args:
- url (str): URL of the RPC server to call
- node_number (int): the node number (or id) that this calls to
+ url: URL of the RPC server to call
+ node_number: the node number (or id) that this calls to
Kwargs:
- timeout (int): HTTP timeout in seconds
- coveragedir (str): Directory
+ timeout: HTTP timeout in seconds
+ coveragedir: Directory
Returns:
AuthServiceProxy. convenience object for making RPC calls.
@@ -305,11 +300,10 @@ def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
proxy_kwargs['timeout'] = int(timeout)
proxy = AuthServiceProxy(url, **proxy_kwargs)
- proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None
- return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
+ return coverage.AuthServiceProxyWrapper(proxy, url, coverage_logfile)
def p2p_port(n):
@@ -338,17 +332,17 @@ def rpc_url(datadir, i, chain, rpchost):
################
-def initialize_datadir(dirname, n, chain):
+def initialize_datadir(dirname, n, chain, disable_autoconnect=True):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
- write_config(os.path.join(datadir, "bitcoin.conf"), n=n, chain=chain)
+ write_config(os.path.join(datadir, "bitcoin.conf"), n=n, chain=chain, disable_autoconnect=disable_autoconnect)
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
-def write_config(config_path, *, n, chain, extra_config=""):
+def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect=True):
# Translate chain subdirectory name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
@@ -376,6 +370,8 @@ def write_config(config_path, *, n, chain, extra_config=""):
f.write("shrinkdebugfile=0\n")
# To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync
f.write("unsafesqlitesync=1\n")
+ if disable_autoconnect:
+ f.write("connect=0\n")
f.write(extra_config)
@@ -449,10 +445,10 @@ def find_output(node, txid, amount, *, blockhash=None):
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
-def create_confirmed_utxos(fee, node, count):
+def create_confirmed_utxos(test_framework, fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
- node.generate(min(25, to_generate))
+ test_framework.generate(node, min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
@@ -473,7 +469,7 @@ def create_confirmed_utxos(fee, node, count):
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
- node.generate(1)
+ test_framework.generate(node, 1)
utxos = node.listunspent()
assert len(utxos) >= count
@@ -516,7 +512,7 @@ def gen_return_txouts():
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
- txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
+ txout.scriptPubKey = bytes.fromhex(script_pubkey)
for _ in range(128):
txouts.append(txout)
return txouts
@@ -545,7 +541,7 @@ def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
return txids
-def mine_large_block(node, utxos=None):
+def mine_large_block(test_framework, node, utxos=None):
# generate a 66k transaction,
# and 14 of them are close to the 1MB block limit
num = 14
@@ -556,7 +552,18 @@ def mine_large_block(node, utxos=None):
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
- node.generate(1)
+ test_framework.generate(node, 1)
+
+
+def generate_to_height(test_framework, node, target_height):
+ """Generates blocks until a given target block height has been reached.
+ To prevent timeouts, only up to 200 blocks are generated per RPC call.
+ Can be used to activate certain soft-forks (e.g. CSV, CLTV)."""
+ current_height = node.getblockcount()
+ while current_height < target_height:
+ nblocks = min(200, target_height - current_height)
+ current_height += len(test_framework.generate(node, nblocks))
+ assert_equal(node.getblockcount(), target_height)
def find_vout_for_address(node, txid, addr):
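To show how the new generate_to_height() helper chunks its work, here is a standalone sketch with stand-in objects (FakeNode and FakeFramework are inventions for this illustration only): blocks are requested in batches of at most 200, and the loop lands exactly on the target height.

class FakeNode:
    def __init__(self):
        self.height = 0

    def getblockcount(self):
        return self.height


class FakeFramework:
    def generate(self, node, nblocks):
        node.height += nblocks
        return ["dummy_hash"] * nblocks


node, framework = FakeNode(), FakeFramework()
target_height = 432  # CSV_ACTIVATION_HEIGHT from blocktools.py in this diff
current_height = node.getblockcount()
while current_height < target_height:
    nblocks = min(200, target_height - current_height)
    current_height += len(framework.generate(node, nblocks))
assert node.getblockcount() == target_height  # batches of 200 + 200 + 32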
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index 47ec6b0be2..ba5b95f930 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -4,8 +4,10 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""A limited-functionality wallet, which may replace a real wallet in tests"""
+from copy import deepcopy
from decimal import Decimal
from enum import Enum
+from random import choice
from typing import Optional
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.key import ECKey
@@ -16,6 +18,7 @@ from test_framework.messages import (
CTxIn,
CTxInWitness,
CTxOut,
+ tx_from_hex,
)
from test_framework.script import (
CScript,
@@ -27,10 +30,11 @@ from test_framework.script import (
)
from test_framework.util import (
assert_equal,
- hex_str_to_bytes,
+ assert_greater_than_or_equal,
satoshi_round,
)
+DEFAULT_FEE = Decimal("0.0001")
class MiniWalletMode(Enum):
"""Determines the transaction type the MiniWallet is creating and spending.
@@ -73,7 +77,7 @@ class MiniWallet:
self._scriptPubKey = bytes(CScript([pub_key.get_bytes(), OP_CHECKSIG]))
elif mode == MiniWalletMode.ADDRESS_OP_TRUE:
self._address = ADDRESS_BCRT1_P2WSH_OP_TRUE
- self._scriptPubKey = hex_str_to_bytes(self._test_node.validateaddress(self._address)['scriptPubKey'])
+ self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey'])
def scan_blocks(self, *, start=1, num):
"""Scan the blocks for self._address outputs and add them to self._utxos"""
@@ -177,3 +181,75 @@ class MiniWallet:
def sendrawtransaction(self, *, from_node, tx_hex):
from_node.sendrawtransaction(tx_hex)
self.scan_tx(from_node.decoderawtransaction(tx_hex))
+
+def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, parent_locking_script=None, fee=DEFAULT_FEE):
+ """Build a transaction that spends parent_txid.vout[n] and produces one output with
+ amount = parent_value with a fee deducted.
+ Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
+ """
+ inputs = [{"txid": parent_txid, "vout": n}]
+ my_value = parent_value - fee
+ outputs = {address : my_value}
+ rawtx = node.createrawtransaction(inputs, outputs)
+ prevtxs = [{
+ "txid": parent_txid,
+ "vout": n,
+ "scriptPubKey": parent_locking_script,
+ "amount": parent_value,
+ }] if parent_locking_script else None
+ signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
+ assert signedtx["complete"]
+ tx = tx_from_hex(signedtx["hex"])
+ return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
+
+def create_child_with_parents(node, address, privkeys, parents_tx, values, locking_scripts, fee=DEFAULT_FEE):
+ """Creates a transaction that spends the first output of each parent in parents_tx."""
+ num_parents = len(parents_tx)
+ total_value = sum(values)
+ inputs = [{"txid": tx.rehash(), "vout": 0} for tx in parents_tx]
+ outputs = {address : total_value - fee}
+ rawtx_child = node.createrawtransaction(inputs, outputs)
+ prevtxs = []
+ for i in range(num_parents):
+ prevtxs.append({"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]})
+ signedtx_child = node.signrawtransactionwithkey(hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs)
+ assert signedtx_child["complete"]
+ return signedtx_child["hex"]
+
+def create_raw_chain(node, first_coin, address, privkeys, chain_length=25):
+ """Helper function: create a "chain" of chain_length transactions. The nth transaction in the
+ chain is a child of the n-1th transaction and parent of the n+1th transaction.
+ """
+ parent_locking_script = None
+ txid = first_coin["txid"]
+ chain_hex = []
+ chain_txns = []
+ value = first_coin["amount"]
+
+ for _ in range(chain_length):
+ (tx, txhex, value, parent_locking_script) = make_chain(node, address, privkeys, txid, value, 0, parent_locking_script)
+ txid = tx.rehash()
+ chain_hex.append(txhex)
+ chain_txns.append(tx)
+
+ return (chain_hex, chain_txns)
+
+def bulk_transaction(tx, node, target_weight, privkeys, prevtxs=None):
+ """Pad a transaction with extra outputs until it reaches a target weight (or higher).
+ Returns a CTransaction object.
+ """
+ tx_heavy = deepcopy(tx)
+ assert_greater_than_or_equal(target_weight, tx_heavy.get_weight())
+ while tx_heavy.get_weight() < target_weight:
+ random_spk = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
+ for _ in range(512*2):
+ random_spk += choice("0123456789ABCDEF")
+ tx_heavy.vout.append(CTxOut(0, bytes.fromhex(random_spk)))
+ # Re-sign the transaction
+ if privkeys:
+ signed = node.signrawtransactionwithkey(tx_heavy.serialize().hex(), privkeys, prevtxs)
+ return tx_from_hex(signed["hex"])
+ # OP_TRUE
+ tx_heavy.wit.vtxinwit = [CTxInWitness()]
+ tx_heavy.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
+ return tx_heavy
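A short arithmetic check (standalone, assuming nothing beyond the code above) of the padding scriptPubKey that bulk_transaction() appends: "6a4d0200" is OP_RETURN followed by OP_PUSHDATA2 with a little-endian length of 512, and the loop then appends 512 random bytes as 1024 hex characters.

spk_prefix = bytes.fromhex("6a4d0200")
assert spk_prefix[0] == 0x6a                              # OP_RETURN
assert spk_prefix[1] == 0x4d                              # OP_PUSHDATA2
assert int.from_bytes(spk_prefix[2:4], "little") == 512   # pushed data length
# Each such output is therefore 4 + 512 = 516 script bytes (plus the 8-byte
# value and the script length prefix), i.e. roughly 2100 weight units of
# non-witness padding per appended CTxOut.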
diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py
index b9c0fb6691..1ee55aa3b7 100755
--- a/test/functional/test_framework/wallet_util.py
+++ b/test/functional/test_framework/wallet_util.py
@@ -17,19 +17,16 @@ from test_framework.address import (
from test_framework.key import ECKey
from test_framework.script import (
CScript,
- OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
- OP_CHECKSIG,
- OP_DUP,
- OP_EQUAL,
- OP_EQUALVERIFY,
- OP_HASH160,
- hash160,
- sha256,
)
-from test_framework.util import hex_str_to_bytes
+from test_framework.script_util import (
+ key_to_p2pkh_script,
+ key_to_p2wpkh_script,
+ script_to_p2sh_script,
+ script_to_p2wsh_script,
+)
Key = namedtuple('Key', ['privkey',
'pubkey',
@@ -57,15 +54,14 @@ def get_key(node):
Returns a named tuple of privkey, pubkey and all address and scripts."""
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
- pkh = hash160(hex_str_to_bytes(pubkey))
return Key(privkey=node.dumpprivkey(addr),
pubkey=pubkey,
- p2pkh_script=CScript([OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
+ p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
- p2wpkh_script=CScript([OP_0, pkh]).hex(),
+ p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
- p2sh_p2wpkh_script=CScript([OP_HASH160, hash160(CScript([OP_0, pkh])), OP_EQUAL]).hex(),
- p2sh_p2wpkh_redeem_script=CScript([OP_0, pkh]).hex(),
+ p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
+ p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_generate_key():
@@ -76,15 +72,14 @@ def get_generate_key():
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
pubkey = eckey.get_pubkey().get_bytes().hex()
- pkh = hash160(hex_str_to_bytes(pubkey))
return Key(privkey=privkey,
pubkey=pubkey,
- p2pkh_script=CScript([OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
+ p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
- p2wpkh_script=CScript([OP_0, pkh]).hex(),
+ p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
- p2sh_p2wpkh_script=CScript([OP_HASH160, hash160(CScript([OP_0, pkh])), OP_EQUAL]).hex(),
- p2sh_p2wpkh_redeem_script=CScript([OP_0, pkh]).hex(),
+ p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
+ p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_multisig(node):
@@ -97,16 +92,16 @@ def get_multisig(node):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
- script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
- witness_script = CScript([OP_0, sha256(script_code)])
+ script_code = CScript([OP_2] + [bytes.fromhex(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
+ witness_script = script_to_p2wsh_script(script_code)
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
pubkeys=pubkeys,
- p2sh_script=CScript([OP_HASH160, hash160(script_code), OP_EQUAL]).hex(),
+ p2sh_script=script_to_p2sh_script(script_code).hex(),
p2sh_addr=script_to_p2sh(script_code),
redeem_script=script_code.hex(),
p2wsh_script=witness_script.hex(),
p2wsh_addr=script_to_p2wsh(script_code),
- p2sh_p2wsh_script=CScript([OP_HASH160, witness_script, OP_EQUAL]).hex(),
+ p2sh_p2wsh_script=script_to_p2sh_script(witness_script).hex(),
p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
def test_address(node, address, **kwargs):
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index ad1acd2e2f..3792d751de 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -109,8 +109,7 @@ BASE_SCRIPTS = [
'p2p_tx_download.py',
'mempool_updatefromblock.py',
'wallet_dump.py --legacy-wallet',
- 'wallet_listtransactions.py --legacy-wallet',
- 'wallet_listtransactions.py --descriptors',
+ 'feature_taproot.py --previous_release',
'feature_taproot.py',
'rpc_signer.py',
'wallet_signer.py --descriptors',
@@ -122,10 +121,9 @@ BASE_SCRIPTS = [
'wallet_listreceivedby.py --legacy-wallet',
'wallet_listreceivedby.py --descriptors',
'wallet_abandonconflict.py --legacy-wallet',
+ 'p2p_dns_seeds.py',
'wallet_abandonconflict.py --descriptors',
'feature_csv_activation.py',
- 'rpc_rawtransaction.py --legacy-wallet',
- 'rpc_rawtransaction.py --descriptors',
'wallet_address_types.py --legacy-wallet',
'wallet_address_types.py --descriptors',
'feature_bip68_sequence.py',
@@ -139,6 +137,7 @@ BASE_SCRIPTS = [
'interface_zmq.py',
'rpc_invalid_address_message.py',
'interface_bitcoin_cli.py',
+ 'feature_bind_extra.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'tool_wallet.py --legacy-wallet',
@@ -159,6 +158,8 @@ BASE_SCRIPTS = [
'wallet_createwallet.py --legacy-wallet',
'wallet_createwallet.py --usecli',
'wallet_createwallet.py --descriptors',
+ 'wallet_listtransactions.py --legacy-wallet',
+ 'wallet_listtransactions.py --descriptors',
'wallet_watchonly.py --legacy-wallet',
'wallet_watchonly.py --usecli --legacy-wallet',
'wallet_reorgsrestore.py',
@@ -171,6 +172,8 @@ BASE_SCRIPTS = [
'feature_proxy.py',
'rpc_signrawtransaction.py --legacy-wallet',
'rpc_signrawtransaction.py --descriptors',
+ 'rpc_rawtransaction.py --legacy-wallet',
+ 'rpc_rawtransaction.py --descriptors',
'wallet_groups.py --legacy-wallet',
'p2p_addrv2_relay.py',
'wallet_groups.py --descriptors',
@@ -184,6 +187,7 @@ BASE_SCRIPTS = [
'p2p_addr_relay.py',
'p2p_getaddr_caching.py',
'p2p_getdata.py',
+ 'p2p_addrfetch.py',
'rpc_net.py',
'wallet_keypool.py --legacy-wallet',
'wallet_keypool.py --descriptors',
@@ -214,13 +218,15 @@ BASE_SCRIPTS = [
'rpc_createmultisig.py --legacy-wallet',
'rpc_createmultisig.py --descriptors',
'rpc_packages.py',
+ 'mempool_package_limits.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py --legacy-wallet',
'wallet_importprunedfunds.py --descriptors',
'p2p_leak_tx.py',
'p2p_eviction.py',
- 'rpc_signmessage.py',
+ 'wallet_signmessagewithaddress.py',
+ 'rpc_signmessagewithprivkey.py',
'rpc_generateblock.py',
'rpc_generate.py',
'wallet_balance.py --legacy-wallet',
@@ -276,9 +282,11 @@ BASE_SCRIPTS = [
'p2p_blockfilters.py',
'p2p_message_capture.py',
'feature_includeconf.py',
+ 'feature_addrman.py',
'feature_asmap.py',
'mempool_unbroadcast.py',
'mempool_compatibility.py',
+ 'mempool_accept_wtxid.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'p2p_ping.py',
@@ -291,7 +299,9 @@ BASE_SCRIPTS = [
'p2p_permissions.py',
'feature_blocksdir.py',
'wallet_startup.py',
+ 'p2p_i2p_ports.py',
'feature_config_args.py',
+ 'feature_presegwit_node_upgrade.py',
'feature_settings.py',
'rpc_getdescriptorinfo.py',
'rpc_addresses_deprecation.py',
diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py
index 05a0ef0ea1..c7a983556d 100755
--- a/test/functional/wallet_backup.py
+++ b/test/functional/wallet_backup.py
@@ -111,6 +111,18 @@ class WalletBackupTest(BitcoinTestFramework):
os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
+ def restore_nonexistent_wallet(self):
+ node = self.nodes[3]
+ nonexistent_wallet_file = os.path.join(self.nodes[0].datadir, 'nonexistent_wallet.bak')
+ wallet_name = "res0"
+ assert_raises_rpc_error(-8, "Backup file does not exist", node.restorewallet, wallet_name, nonexistent_wallet_file)
+
+ def restore_wallet_existent_name(self):
+ node = self.nodes[3]
+ wallet_file = os.path.join(self.nodes[0].datadir, 'wallet.bak')
+ wallet_name = "res0"
+ assert_raises_rpc_error(-8, "Wallet name already exists.", node.restorewallet, wallet_name, wallet_file)
+
def init_three(self):
self.init_wallet(0)
self.init_wallet(1)
@@ -169,26 +181,27 @@ class WalletBackupTest(BitcoinTestFramework):
##
# Test restoring spender wallets from backups
##
- self.log.info("Restoring using wallet.dat")
- self.stop_three()
- self.erase_three()
+ self.log.info("Restoring wallets on node 3 using backup files")
- # Start node2 with no chain
- shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
- shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
+ self.restore_nonexistent_wallet()
- # Restore wallets from backup
- shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
- shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
- shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
+ backup_file_0 = os.path.join(self.nodes[0].datadir, 'wallet.bak')
+ backup_file_1 = os.path.join(self.nodes[1].datadir, 'wallet.bak')
+ backup_file_2 = os.path.join(self.nodes[2].datadir, 'wallet.bak')
- self.log.info("Re-starting nodes")
- self.start_three()
- self.sync_blocks()
+ self.nodes[3].restorewallet("res0", backup_file_0)
+ self.nodes[3].restorewallet("res1", backup_file_1)
+ self.nodes[3].restorewallet("res2", backup_file_2)
+
+ res0_rpc = self.nodes[3].get_wallet_rpc("res0")
+ res1_rpc = self.nodes[3].get_wallet_rpc("res1")
+ res2_rpc = self.nodes[3].get_wallet_rpc("res2")
+
+ assert_equal(res0_rpc.getbalance(), balance0)
+ assert_equal(res1_rpc.getbalance(), balance1)
+ assert_equal(res2_rpc.getbalance(), balance2)
- assert_equal(self.nodes[0].getbalance(), balance0)
- assert_equal(self.nodes[1].getbalance(), balance1)
- assert_equal(self.nodes[2].getbalance(), balance2)
+ self.restore_wallet_existent_name()
if not self.options.descriptors:
self.log.info("Restoring using dumped wallet")
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index b5afc3785e..ab38ddb996 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -600,7 +600,7 @@ class WalletTest(BitcoinTestFramework):
total_txs = len(self.nodes[0].listtransactions("*", 99999))
# Try with walletrejectlongchains
- # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
+ # Double chain limit but require combining inputs, so we pass AttemptSelection
self.stop_node(0)
extra_args = ["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)]
self.start_node(0, extra_args=extra_args)
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index c6f5d334f8..17a4c79da3 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -84,7 +84,7 @@ class WalletDescriptorTest(BitcoinTestFramework):
send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
# Generate some coins
- send_wrpc.generatetoaddress(COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
+ self.generatetoaddress(send_wrpc, COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
# Make transactions
self.log.info("Test sending and receiving")
diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py
index eb54da99f5..91d6121679 100755
--- a/test/functional/wallet_dump.py
+++ b/test/functional/wallet_dump.py
@@ -209,6 +209,15 @@ class WalletDumpTest(BitcoinTestFramework):
with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20):
self.nodes[0].getnewaddress()
+ # Make sure that dumpwallet doesn't have a lock order issue when there is an unconfirmed tx and it is reloaded
+ # See https://github.com/bitcoin/bitcoin/issues/22489
+ self.nodes[0].createwallet("w3")
+ w3 = self.nodes[0].get_wallet_rpc("w3")
+ w3.importprivkey(privkey=self.nodes[0].get_deterministic_priv_key().key, label="coinbase_import")
+ w3.sendtoaddress(w3.getnewaddress(), 10)
+ w3.unloadwallet()
+ self.nodes[0].loadwallet("w3")
+ w3.dumpwallet(os.path.join(self.nodes[0].datadir, "w3.dump"))
if __name__ == '__main__':
WalletDumpTest().main()
diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py
index 262175c789..4cb311983c 100755
--- a/test/functional/wallet_importdescriptors.py
+++ b/test/functional/wallet_importdescriptors.py
@@ -74,7 +74,7 @@ class ImportDescriptorsTest(BitcoinTestFramework):
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
self.log.info('Mining coins')
- w0.generatetoaddress(COINBASE_MATURITY + 1, w0.getnewaddress())
+ self.generatetoaddress(w0, COINBASE_MATURITY + 1, w0.getnewaddress())
# RPC importdescriptors -----------------------------------------------
@@ -405,7 +405,7 @@ class ImportDescriptorsTest(BitcoinTestFramework):
solvable=True,
ismine=True)
txid = w0.sendtoaddress(address, 49.99995540)
- w0.generatetoaddress(6, w0.getnewaddress())
+ self.generatetoaddress(w0, 6, w0.getnewaddress())
self.sync_blocks()
tx = wpriv.createrawtransaction([{"txid": txid, "vout": 0}], {w0.getnewaddress(): 49.999})
signed_tx = wpriv.signrawtransactionwithwallet(tx)
diff --git a/test/functional/wallet_listdescriptors.py b/test/functional/wallet_listdescriptors.py
index bf53c99855..221f5262d9 100755
--- a/test/functional/wallet_listdescriptors.py
+++ b/test/functional/wallet_listdescriptors.py
@@ -30,9 +30,10 @@ class ListDescriptorsTest(BitcoinTestFramework):
node = self.nodes[0]
assert_raises_rpc_error(-18, 'No wallet is loaded.', node.listdescriptors)
- self.log.info('Test that the command is not available for legacy wallets.')
- node.createwallet(wallet_name='w1', descriptors=False)
- assert_raises_rpc_error(-4, 'listdescriptors is not available for non-descriptor wallets', node.listdescriptors)
+ if self.is_bdb_compiled():
+ self.log.info('Test that the command is not available for legacy wallets.')
+ node.createwallet(wallet_name='w1', descriptors=False)
+ assert_raises_rpc_error(-4, 'listdescriptors is not available for non-descriptor wallets', node.listdescriptors)
self.log.info('Test the command for empty descriptors wallet.')
node.createwallet(wallet_name='w2', blank=True, descriptors=True)
@@ -71,11 +72,39 @@ class ListDescriptorsTest(BitcoinTestFramework):
],
}
assert_equal(expected, wallet.listdescriptors())
+ assert_equal(expected, wallet.listdescriptors(False))
+
+ self.log.info('Test list private descriptors')
+ expected_private = {
+ 'wallet_name': 'w2',
+ 'descriptors': [
+ {'desc': descsum_create('wpkh(' + xprv + hardened_path + '/0/*)'),
+ 'timestamp': 1296688602,
+ 'active': False,
+ 'range': [0, 0],
+ 'next': 0},
+ ],
+ }
+ assert_equal(expected_private, wallet.listdescriptors(True))
self.log.info("Test listdescriptors with encrypted wallet")
wallet.encryptwallet("pass")
assert_equal(expected, wallet.listdescriptors())
+ self.log.info('Test list private descriptors with encrypted wallet')
+ assert_raises_rpc_error(-13, 'Please enter the wallet passphrase with walletpassphrase first.', wallet.listdescriptors, True)
+ wallet.walletpassphrase(passphrase="pass", timeout=1000000)
+ assert_equal(expected_private, wallet.listdescriptors(True))
+
+ self.log.info('Test list private descriptors with watch-only wallet')
+ node.createwallet(wallet_name='watch-only', descriptors=True, disable_private_keys=True)
+ watch_only_wallet = node.get_wallet_rpc('watch-only')
+ watch_only_wallet.importdescriptors([{
+ 'desc': descsum_create('wpkh(' + xpub_acc + ')'),
+ 'timestamp': 1296688602,
+ }])
+ assert_raises_rpc_error(-4, 'Can\'t get descriptor string', watch_only_wallet.listdescriptors, True)
+
self.log.info('Test non-active non-range combo descriptor')
node.createwallet(wallet_name='w4', blank=True, descriptors=True)
wallet = node.get_wallet_rpc('w4')
diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py
index 8b503f5971..c0386f5d70 100755
--- a/test/functional/wallet_listtransactions.py
+++ b/test/functional/wallet_listtransactions.py
@@ -18,14 +18,15 @@ from test_framework.util import (
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
+ # This test isn't testing txn relay/timing, so set whitelist on the
+ # peers for instant txn relay. This speeds up the test run time 2-3x.
+ self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
- self.nodes[0].generate(1) # Get out of IBD
- self.sync_all()
- # Simple send, 0 to 1:
+ self.log.info("Test simple send from node0 to node1")
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
@@ -34,7 +35,7 @@ class ListTransactionsTest(BitcoinTestFramework):
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 0})
- # mine a block, confirmations should change:
+ self.log.info("Test confirmations change after mining a block")
blockhash = self.nodes[0].generate(1)[0]
blockheight = self.nodes[0].getblockheader(blockhash)['height']
self.sync_all()
@@ -45,7 +46,7 @@ class ListTransactionsTest(BitcoinTestFramework):
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
- # send-to-self:
+ self.log.info("Test send-to-self on node0")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "send"},
@@ -54,7 +55,7 @@ class ListTransactionsTest(BitcoinTestFramework):
{"txid": txid, "category": "receive"},
{"amount": Decimal("0.2")})
- # sendmany from node1: twice to self, twice to node2:
+ self.log.info("Test sendmany from node1: twice to self, twice to node0")
send_to = {self.nodes[0].getnewaddress(): 0.11,
self.nodes[1].getnewaddress(): 0.22,
self.nodes[0].getnewaddress(): 0.33,
@@ -88,6 +89,7 @@ class ListTransactionsTest(BitcoinTestFramework):
if not self.options.descriptors:
# include_watchonly is a legacy wallet feature, so don't test it for descriptor wallets
+ self.log.info("Test 'include_watchonly' feature (legacy wallet)")
pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
multisig = self.nodes[1].createmultisig(1, [pubkey])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
@@ -103,37 +105,38 @@ class ListTransactionsTest(BitcoinTestFramework):
self.run_rbf_opt_in_test()
- # Check that the opt-in-rbf flag works properly, for sent and received
- # transactions.
+
def run_rbf_opt_in_test(self):
- # Check whether a transaction signals opt-in RBF itself
+ """Test the opt-in-rbf flag for sent and received transactions."""
+
def is_opt_in(node, txid):
+ """Check whether a transaction signals opt-in RBF itself."""
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
- # Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
+ """Find an unconfirmed output matching a certain txid."""
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
- # 1. Chain a few transactions that don't opt-in.
+ self.log.info("Test txs w/o opt-in RBF (bip125-replaceable=no)")
+ # Chain a few transactions that don't opt in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert not is_opt_in(self.nodes[0], txid_1)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
- # Tx2 will build off txid_1, still not opting in to RBF.
+ # Tx2 will build off tx1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
- utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
@@ -149,6 +152,7 @@ class ListTransactionsTest(BitcoinTestFramework):
self.sync_mempools()
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
+ self.log.info("Test txs with opt-in RBF (bip125-replaceable=yes)")
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
@@ -179,6 +183,7 @@ class ListTransactionsTest(BitcoinTestFramework):
self.sync_mempools()
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
+ self.log.info("Test tx with unknown RBF state (bip125-replaceable=unknown)")
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
@@ -191,7 +196,7 @@ class ListTransactionsTest(BitcoinTestFramework):
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
- # Check gettransaction as well:
+ self.log.info("Test bip125-replaceable status with gettransaction RPC")
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
@@ -199,7 +204,7 @@ class ListTransactionsTest(BitcoinTestFramework):
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
- # After mining a transaction, it's no longer BIP125-replaceable
+ self.log.info("Test mined transactions are no longer bip125-replaceable")
self.nodes[0].generate(1)
assert txid_3b not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
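The is_opt_in() helper above encodes the BIP125 signalling rule this test relies on: a transaction opts in to replacement if any of its inputs carries an nSequence below 0xfffffffe. A minimal standalone sketch of the same check; the signals_rbf name and the tx value are illustrative, and the dict shape assumes bitcoind's verbose getrawtransaction output:

def signals_rbf(rawtx):
    """Return True if any input's nSequence is below 0xfffffffe (BIP125 opt-in)."""
    return any(vin["sequence"] < 0xfffffffe for vin in rawtx["vin"])

tx = {"vin": [{"txid": "00" * 32, "vout": 0, "sequence": 0xfffffffd}]}  # hypothetical decoded tx
assert signals_rbf(tx)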
diff --git a/test/functional/wallet_signmessagewithaddress.py b/test/functional/wallet_signmessagewithaddress.py
new file mode 100755
index 0000000000..bf6f95e3f1
--- /dev/null
+++ b/test/functional/wallet_signmessagewithaddress.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test Wallet commands for signing and verifying messages."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_raises_rpc_error,
+)
+
+class SignMessagesWithAddressTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.extra_args = [["-addresstype=legacy"]]
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def run_test(self):
+ message = 'This is just a test message'
+
+ self.log.info('test signing with an address with wallet')
+ address = self.nodes[0].getnewaddress()
+ signature = self.nodes[0].signmessage(address, message)
+ assert self.nodes[0].verifymessage(address, signature, message)
+
+ self.log.info('test verifying with another address should not work')
+ other_address = self.nodes[0].getnewaddress()
+ other_signature = self.nodes[0].signmessage(other_address, message)
+ assert not self.nodes[0].verifymessage(other_address, signature, message)
+ assert not self.nodes[0].verifymessage(address, other_signature, message)
+
+ self.log.info('test parameter validity and error codes')
+ # signmessage has two required parameters
+ for num_params in [0, 1, 3, 4, 5]:
+ param_list = ["dummy"]*num_params
+ assert_raises_rpc_error(-1, "signmessage", self.nodes[0].signmessage, *param_list)
+ # invalid key or address provided
+ assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].signmessage, "invalid_addr", message)
+
+
+if __name__ == '__main__':
+ SignMessagesWithAddressTest().main()
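The new test drives the wallet's signmessage/verifymessage RPC pair through the functional test framework. Outside the framework, the same round trip can be sketched over JSON-RPC; this assumes a running regtest node and the third-party python-bitcoinrpc package, with placeholder credentials and the default regtest RPC port:

from bitcoinrpc.authproxy import AuthServiceProxy  # third-party package: python-bitcoinrpc

rpc = AuthServiceProxy("http://user:pass@127.0.0.1:18443")  # placeholder credentials, regtest port
addr = rpc.getnewaddress("", "legacy")            # signmessage requires a P2PKH (legacy) address
sig = rpc.signmessage(addr, "hello world")        # base64-encoded signature
assert rpc.verifymessage(addr, sig, "hello world")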
diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py
index e7dd7592b6..4d34670ea9 100755
--- a/test/functional/wallet_upgradewallet.py
+++ b/test/functional/wallet_upgradewallet.py
@@ -6,9 +6,8 @@
Test upgradewallet RPC. Download node binaries:
-test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
-
-Only v0.15.2 and v0.16.3 are required by this test. The others are used in feature_backwards_compatibility.py
+Requires previous releases binaries, see test/README.md.
+Only v0.15.2 and v0.16.3 are required by this test.
"""
import os
@@ -95,10 +94,11 @@ class UpgradeWalletTest(BitcoinTestFramework):
def test_upgradewallet(self, wallet, previous_version, requested_version=None, expected_version=None):
unchanged = expected_version == previous_version
new_version = previous_version if unchanged else expected_version if expected_version else requested_version
- assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)
+ old_wallet_info = wallet.getwalletinfo()
+ assert_equal(old_wallet_info["walletversion"], previous_version)
assert_equal(wallet.upgradewallet(requested_version),
{
- "wallet_name": "",
+ "wallet_name": old_wallet_info["walletname"],
"previous_version": previous_version,
"current_version": new_version,
"result": "Already at latest version. Wallet version unchanged." if unchanged else "Wallet upgraded successfully from version {} to version {}.".format(previous_version, new_version),
@@ -353,6 +353,11 @@ class UpgradeWalletTest(BitcoinTestFramework):
v16_3_kvs = dump_bdb_kv(v16_3_wallet)
assert b'\x0adefaultkey' not in v16_3_kvs
+ if self.is_sqlite_compiled():
+ self.log.info("Checking that descriptor wallets do nothing, successfully")
+ self.nodes[0].createwallet(wallet_name="desc_upgrade", descriptors=True)
+ desc_wallet = self.nodes[0].get_wallet_rpc("desc_upgrade")
+ self.test_upgradewallet(desc_wallet, previous_version=169900, expected_version=169900)
if __name__ == '__main__':
UpgradeWalletTest().main()
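The version bookkeeping in test_upgradewallet() reduces to a single rule: if the upgrade is a no-op the version stays at previous_version, otherwise it becomes expected_version when one is given, else requested_version. A small sketch of that resolution; resolve_new_version and the version numbers are purely illustrative:

def resolve_new_version(previous_version, requested_version=None, expected_version=None):
    """Mirror the test helper's expectation for the post-upgrade wallet version."""
    unchanged = expected_version == previous_version
    if unchanged:
        return previous_version
    return expected_version if expected_version else requested_version

assert resolve_new_version(169900, expected_version=169900) == 169900   # no-op (descriptor wallet case)
assert resolve_new_version(159900, requested_version=169900) == 169900  # ordinary upgrade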
diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py
index b177fbb4b2..e92bb402b5 100755
--- a/test/get_previous_releases.py
+++ b/test/get_previous_releases.py
@@ -21,39 +21,47 @@ import hashlib
SHA256_SUMS = {
-"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "bitcoin-0.15.2-aarch64-linux-gnu.tar.gz",
-"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "bitcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
-"2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "bitcoin-0.15.2-i686-pc-linux-gnu.tar.gz",
-"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "bitcoin-0.15.2-osx64.tar.gz",
-"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "bitcoin-0.15.2-x86_64-linux-gnu.tar.gz",
-
-"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "bitcoin-0.16.3-aarch64-linux-gnu.tar.gz",
-"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "bitcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
-"75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "bitcoin-0.16.3-i686-pc-linux-gnu.tar.gz",
-"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "bitcoin-0.16.3-osx64.tar.gz",
-"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "bitcoin-0.16.3-x86_64-linux-gnu.tar.gz",
-
-"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "bitcoin-0.17.2-aarch64-linux-gnu.tar.gz",
-"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "bitcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
-"d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "bitcoin-0.17.2-i686-pc-linux-gnu.tar.gz",
-"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "bitcoin-0.17.2-osx64.tar.gz",
-"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "bitcoin-0.17.2-x86_64-linux-gnu.tar.gz",
-
-"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "bitcoin-0.18.1-aarch64-linux-gnu.tar.gz",
-"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "bitcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
-"989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "bitcoin-0.18.1-i686-pc-linux-gnu.tar.gz",
-"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "bitcoin-0.18.1-osx64.tar.gz",
-"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "bitcoin-0.18.1-riscv64-linux-gnu.tar.gz",
-"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "bitcoin-0.18.1-x86_64-linux-gnu.tar.gz",
-
-"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "bitcoin-0.19.1-aarch64-linux-gnu.tar.gz",
-"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "bitcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
-"10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "bitcoin-0.19.1-i686-pc-linux-gnu.tar.gz",
-"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "bitcoin-0.19.1-osx64.tar.gz",
-"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "bitcoin-0.19.1-riscv64-linux-gnu.tar.gz",
-"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "bitcoin-0.19.1-x86_64-linux-gnu.tar.gz"
+ "d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "bitcoin-0.15.2-aarch64-linux-gnu.tar.gz",
+ "54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "bitcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
+ "2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "bitcoin-0.15.2-i686-pc-linux-gnu.tar.gz",
+ "87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "bitcoin-0.15.2-osx64.tar.gz",
+ "566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "bitcoin-0.15.2-x86_64-linux-gnu.tar.gz",
+ #
+ "0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "bitcoin-0.16.3-aarch64-linux-gnu.tar.gz",
+ "fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "bitcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
+ "75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "bitcoin-0.16.3-i686-pc-linux-gnu.tar.gz",
+ "78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "bitcoin-0.16.3-osx64.tar.gz",
+ "5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "bitcoin-0.16.3-x86_64-linux-gnu.tar.gz",
+ #
+ "5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "bitcoin-0.17.2-aarch64-linux-gnu.tar.gz",
+ "d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "bitcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
+ "d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "bitcoin-0.17.2-i686-pc-linux-gnu.tar.gz",
+ "a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "bitcoin-0.17.2-osx64.tar.gz",
+ "943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "bitcoin-0.17.2-x86_64-linux-gnu.tar.gz",
+ #
+ "88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "bitcoin-0.18.1-aarch64-linux-gnu.tar.gz",
+ "cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "bitcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
+ "989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "bitcoin-0.18.1-i686-pc-linux-gnu.tar.gz",
+ "b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "bitcoin-0.18.1-osx64.tar.gz",
+ "425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "bitcoin-0.18.1-riscv64-linux-gnu.tar.gz",
+ "600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "bitcoin-0.18.1-x86_64-linux-gnu.tar.gz",
+ #
+ "3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "bitcoin-0.19.1-aarch64-linux-gnu.tar.gz",
+ "657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "bitcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
+ "10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "bitcoin-0.19.1-i686-pc-linux-gnu.tar.gz",
+ "1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "bitcoin-0.19.1-osx64.tar.gz",
+ "aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "bitcoin-0.19.1-riscv64-linux-gnu.tar.gz",
+ "5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "bitcoin-0.19.1-x86_64-linux-gnu.tar.gz",
+ #
+ "60c93e3462c303eb080be7cf623f1a7684b37fd47a018ad3848bc23e13c84e1c": "bitcoin-0.20.1-aarch64-linux-gnu.tar.gz",
+ "55b577e0fb306fb429d4be6c9316607753e8543e5946b542d75d876a2f08654c": "bitcoin-0.20.1-arm-linux-gnueabihf.tar.gz",
+ "b9024dde373ea7dad707363e07ec7e265383204127539ae0c234bff3a61da0d1": "bitcoin-0.20.1-osx64.tar.gz",
+ "c378d4e21109f09e8829f3591e015c66632dff2925a60b64d259be05a334c30b": "bitcoin-0.20.1-osx.dmg",
+ "fa71cb52ee5e0459cbf5248cdec72df27995840c796f58b304607a1ed4c165af": "bitcoin-0.20.1-riscv64-linux-gnu.tar.gz",
+ "376194f06596ecfa40331167c39bc70c355f960280bd2a645fdbf18f66527397": "bitcoin-0.20.1-x86_64-linux-gnu.tar.gz",
}
+
@contextlib.contextmanager
def pushd(new_dir) -> None:
previous_dir = os.getcwd()
@@ -104,7 +112,11 @@ def download_binary(tag, args) -> int:
tarballHash = hasher.hexdigest()
if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
- print("Checksum did not match")
+ if tarball in SHA256_SUMS.values():
+ print("Checksum did not match")
+ return 1
+
+ print("Checksum for given version doesn't exist")
return 1
print("Checksum matched")
diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh
index f8f24bb1ff..8e74f41bb6 100755
--- a/test/lint/lint-circular-dependencies.sh
+++ b/test/lint/lint-circular-dependencies.sh
@@ -10,12 +10,12 @@ export LC_ALL=C
EXPECTED_CIRCULAR_DEPENDENCIES=(
"chainparamsbase -> util/system -> chainparamsbase"
- "index/txindex -> validation -> index/txindex"
"node/blockstorage -> validation -> node/blockstorage"
"index/blockfilterindex -> node/blockstorage -> validation -> index/blockfilterindex"
"index/base -> validation -> index/blockfilterindex -> index/base"
"index/coinstatsindex -> node/coinstats -> index/coinstatsindex"
"policy/fees -> txmempool -> policy/fees"
+ "policy/rbf -> txmempool -> validation -> policy/rbf"
"qt/addresstablemodel -> qt/walletmodel -> qt/addresstablemodel"
"qt/recentrequeststablemodel -> qt/walletmodel -> qt/recentrequeststablemodel"
"qt/sendcoinsdialog -> qt/walletmodel -> qt/sendcoinsdialog"
@@ -24,10 +24,6 @@ EXPECTED_CIRCULAR_DEPENDENCIES=(
"wallet/fees -> wallet/wallet -> wallet/fees"
"wallet/wallet -> wallet/walletdb -> wallet/wallet"
"node/coinstats -> validation -> node/coinstats"
- # Temporary circular dependencies that allow wallet.h/wallet.cpp to be
- # split up in a MOVEONLY commit. These are removed in #21206.
- "wallet/receive -> wallet/wallet -> wallet/receive"
- "wallet/spend -> wallet/wallet -> wallet/spend"
)
EXIT_CODE=0
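The lint compares the cycles reported by the dependency scan against this allow-list: a cycle outside the list fails the check, and the full script also flags allow-listed cycles that no longer occur so they can be dropped, as with the wallet/receive and wallet/spend entries removed above. A toy sketch of that set comparison, with made-up module names:

EXPECTED = {
    "policy/fees -> txmempool -> policy/fees",
    "policy/rbf -> txmempool -> validation -> policy/rbf",
}
found = {
    "policy/fees -> txmempool -> policy/fees",
    "foo -> bar -> foo",  # hypothetical new cycle
}
assert found - EXPECTED == {"foo -> bar -> foo"}                                    # new cycles: fail the lint
assert EXPECTED - found == {"policy/rbf -> txmempool -> validation -> policy/rbf"}  # stale allow-list entries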
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
index 737d35a397..d6312270e7 100755
--- a/test/lint/lint-locale-dependence.sh
+++ b/test/lint/lint-locale-dependence.sh
@@ -39,10 +39,8 @@ export LC_ALL=C
KNOWN_VIOLATIONS=(
"src/bitcoin-tx.cpp.*stoul"
- "src/bitcoin-tx.cpp.*trim_right"
"src/dbwrapper.cpp.*stoul"
"src/dbwrapper.cpp:.*vsnprintf"
- "src/httprpc.cpp.*trim"
"src/node/blockstorage.cpp:.*atoi"
"src/qt/rpcconsole.cpp:.*atoi"
"src/rest.cpp:.*strtol"
diff --git a/test/lint/lint-python.sh b/test/lint/lint-python.sh
index 51815963f6..c448fa6f9a 100755
--- a/test/lint/lint-python.sh
+++ b/test/lint/lint-python.sh
@@ -102,7 +102,7 @@ if ! PYTHONWARNINGS="ignore" flake8 --ignore=B,C,E,F,I,N,W --select=$(IFS=","; e
EXIT_CODE=1
fi
-if ! mypy --ignore-missing-imports $(git ls-files "test/functional/*.py" "contrib/devtools/*.py"); then
+if ! mypy --ignore-missing-imports --show-error-codes $(git ls-files "test/functional/*.py" "contrib/devtools/*.py"); then
EXIT_CODE=1
fi
diff --git a/test/lint/lint-shell.sh b/test/lint/lint-shell.sh
index 4dbf5ed28e..73ac583d84 100755
--- a/test/lint/lint-shell.sh
+++ b/test/lint/lint-shell.sh
@@ -14,10 +14,6 @@ disabled=(
SC2086 # Double quote to prevent globbing and word splitting.
SC2162 # read without -r will mangle backslashes.
)
-disabled_gitian=(
- SC2094 # Make sure not to read and write the same file in the same pipeline.
- SC2129 # Consider using { cmd1; cmd2; } >> file instead of individual redirects.
-)
EXIT_CODE=0
@@ -33,22 +29,4 @@ if ! "${SHELLCHECK_CMD[@]}" "$EXCLUDE" $SOURCED_FILES $(git ls-files -- '*.sh' |
EXIT_CODE=1
fi
-if ! command -v yq > /dev/null; then
- echo "Skipping Gitian descriptor scripts checking since yq is not installed."
- exit $EXIT_CODE
-fi
-
-EXCLUDE_GITIAN=${EXCLUDE}",$(IFS=','; echo "${disabled_gitian[*]}")"
-for descriptor in $(git ls-files -- 'contrib/gitian-descriptors/*.yml')
-do
- script=$(basename "$descriptor")
- # Use #!/bin/bash as gitian-builder/bin/gbuild does to complete a script.
- echo "#!/bin/bash" > $script
- yq -r .script "$descriptor" >> $script
- if ! "${SHELLCHECK_CMD[@]}" "$EXCLUDE_GITIAN" $script; then
- EXIT_CODE=1
- fi
- rm $script
-done
-
exit $EXIT_CODE
diff --git a/test/lint/lint-spelling.sh b/test/lint/lint-spelling.sh
index fbdf3c59c1..111091b7f8 100755
--- a/test/lint/lint-spelling.sh
+++ b/test/lint/lint-spelling.sh
@@ -15,6 +15,6 @@ if ! command -v codespell > /dev/null; then
fi
IGNORE_WORDS_FILE=test/lint/lint-spelling.ignore-words.txt
-if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/qt/locale/" ":(exclude)src/qt/*.qrc" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)contrib/gitian-keys/keys.txt"); then
+if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/qt/locale/" ":(exclude)src/qt/*.qrc" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)contrib/builder-keys/keys.txt" ":(exclude)contrib/guix/patches"); then
echo "^ Warning: codespell identified likely spelling errors. Any false positives? Add them to the list of ignored words in ${IGNORE_WORDS_FILE}"
fi
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index 2850cfcea5..b52e105a33 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -89,6 +89,7 @@ implicit-signed-integer-truncation:leveldb/
implicit-signed-integer-truncation:miner.cpp
implicit-signed-integer-truncation:net.cpp
implicit-signed-integer-truncation:net_processing.cpp
+implicit-signed-integer-truncation:netaddress.cpp
implicit-signed-integer-truncation:streams.h
implicit-signed-integer-truncation:test/arith_uint256_tests.cpp
implicit-signed-integer-truncation:test/skiplist_tests.cpp
diff --git a/test/util/bitcoin-util-test.py b/test/util/test_runner.py
index 7b1cc2b031..aa8fd6eee5 100755
--- a/test/util/bitcoin-util-test.py
+++ b/test/util/test_runner.py
@@ -10,7 +10,6 @@ Runs automatically during `make check`.
Can also be run manually."""
import argparse
-import binascii
import configparser
import difflib
import json
@@ -167,7 +166,7 @@ def parse_output(a, fmt):
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
- return binascii.a2b_hex(a.strip())
+ return bytes.fromhex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
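The switch from binascii.a2b_hex to the built-in bytes.fromhex drops the extra import while accepting the same hex strings; bytes.fromhex additionally ignores ASCII whitespace between byte pairs. For example:

assert bytes.fromhex("deadbeef") == b"\xde\xad\xbe\xef"
assert bytes.fromhex("de ad be ef") == b"\xde\xad\xbe\xef"  # whitespace between pairs is allowed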