-rw-r--r--  .appveyor.yml | 2
-rw-r--r--  .cirrus.yml | 10
-rw-r--r--  .github/ISSUE_TEMPLATE/good_first_issue.md | 4
-rw-r--r--  .gitignore | 4
-rw-r--r--  .travis.yml | 21
-rw-r--r--  CONTRIBUTING.md | 2
-rw-r--r--  Makefile.am | 10
-rw-r--r--  build-aux/m4/ax_boost_process.m4 | 121
-rw-r--r--  build-aux/m4/ax_boost_thread.m4 | 54
-rw-r--r--  build-aux/m4/bitcoin_qt.m4 | 90
-rw-r--r--  build_msvc/bitcoin_config.h | 3
-rw-r--r--  build_msvc/common.init.vcxproj | 2
-rw-r--r--  build_msvc/vcpkg-packages.txt | 2
-rwxr-xr-x  ci/test/00_setup_env.sh | 4
-rw-r--r--  ci/test/00_setup_env_arm.sh | 2
-rw-r--r--  ci/test/00_setup_env_i686_centos.sh | 2
-rw-r--r--  ci/test/00_setup_env_mac.sh | 2
-rw-r--r--  ci/test/00_setup_env_mac_host.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_asan.sh | 3
-rw-r--r--  ci/test/00_setup_env_native_fuzz.sh | 3
-rw-r--r--  ci/test/00_setup_env_native_fuzz_with_valgrind.sh | 1
-rw-r--r--  ci/test/00_setup_env_native_msan.sh | 1
-rw-r--r--  ci/test/00_setup_env_native_multiprocess.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_nowallet.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_qt5.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_tsan.sh | 4
-rw-r--r--  ci/test/00_setup_env_native_valgrind.sh | 2
-rw-r--r--  ci/test/00_setup_env_s390x.sh | 2
-rw-r--r--  ci/test/00_setup_env_win64.sh | 2
-rwxr-xr-x  ci/test/06_script_b.sh | 2
-rw-r--r--  configure.ac | 112
-rwxr-xr-x  contrib/devtools/copyright_header.py | 1
-rwxr-xr-x  contrib/devtools/security-check.py | 104
-rwxr-xr-x  contrib/devtools/test-security-check.py | 12
-rw-r--r--  contrib/gitian-descriptors/gitian-linux.yml | 72
-rw-r--r--  contrib/valgrind.supp | 1
-rw-r--r--  depends/README.md | 6
-rw-r--r--  depends/packages/bdb.mk | 4
-rw-r--r--  depends/packages/boost.mk | 2
-rw-r--r--  depends/packages/fontconfig.mk | 12
-rw-r--r--  depends/packages/miniupnpc.mk | 3
-rw-r--r--  depends/packages/native_cctools.mk | 11
-rw-r--r--  depends/packages/qt.mk | 12
-rw-r--r--  depends/packages/zeromq.mk | 8
-rw-r--r--  depends/patches/bdb/clang_cxx_11.patch | 147
-rw-r--r--  depends/patches/boost/unused_var_in_process.patch | 22
-rw-r--r--  depends/patches/fontconfig/gperf_header_regen.patch | 24
-rw-r--r--  depends/patches/fontconfig/remove_char_width_usage.patch | 62
-rw-r--r--  depends/patches/miniupnpc/dont_use_wingen.patch | 26
-rw-r--r--  depends/patches/native_cctools/ld64_disable_threading.patch | 26
-rw-r--r--  depends/patches/qt/dont_hardcode_pwd.patch | 27
-rw-r--r--  depends/patches/qt/drop_lrelease_dependency.patch | 20
-rw-r--r--  depends/patches/qt/freetype_back_compat.patch | 28
-rw-r--r--  depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch | 30
-rw-r--r--  depends/patches/zeromq/0002-disable-pthread_set_name_np.patch | 35
-rw-r--r--  depends/patches/zeromq/remove_libstd_link.patch | 25
-rw-r--r--  doc/Doxyfile.in | 2
-rw-r--r--  doc/JSON-RPC-interface.md | 2
-rw-r--r--  doc/benchmarking.md | 6
-rw-r--r--  doc/bips.md | 3
-rw-r--r--  doc/build-openbsd.md | 14
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/files.md | 2
-rw-r--r--  doc/release-notes-14582.md | 14
-rw-r--r--  doc/release-notes-15937.md | 12
-rw-r--r--  doc/release-notes-19731.md | 6
-rw-r--r--  doc/release-notes.md | 8
-rw-r--r--  doc/release-notes/release-notes-0.20.1.md | 158
-rw-r--r--  doc/tor.md | 18
-rw-r--r--  src/Makefile.am | 7
-rw-r--r--  src/Makefile.bench.include | 2
-rw-r--r--  src/Makefile.test.include | 8
-rw-r--r--  src/addrman.cpp | 12
-rw-r--r--  src/addrman.h | 12
-rw-r--r--  src/base58.cpp | 27
-rw-r--r--  src/base58.h | 15
-rw-r--r--  src/bench/addrman.cpp | 38
-rw-r--r--  src/bench/base58.cpp | 30
-rw-r--r--  src/bench/bech32.cpp | 17
-rw-r--r--  src/bench/bench.cpp | 140
-rw-r--r--  src/bench/bench.h | 116
-rw-r--r--  src/bench/bench_bitcoin.cpp | 64
-rw-r--r--  src/bench/block_assemble.cpp | 8
-rw-r--r--  src/bench/ccoins_caching.cpp | 8
-rw-r--r--  src/bench/chacha20.cpp | 24
-rw-r--r--  src/bench/chacha_poly_aead.cpp | 67
-rw-r--r--  src/bench/checkblock.cpp | 16
-rw-r--r--  src/bench/checkqueue.cpp | 25
-rw-r--r--  src/bench/coin_selection.cpp | 16
-rw-r--r--  src/bench/crypto_hash.cpp | 68
-rw-r--r--  src/bench/duplicate_inputs.cpp | 8
-rw-r--r--  src/bench/examples.cpp | 20
-rw-r--r--  src/bench/gcs_filter.cpp | 16
-rw-r--r--  src/bench/hashpadding.cpp | 16
-rw-r--r--  src/bench/lockedpool.cpp | 31
-rw-r--r--  src/bench/mempool_eviction.cpp | 8
-rw-r--r--  src/bench/mempool_stress.cpp | 15
-rw-r--r--  src/bench/merkle_root.cpp | 8
-rw-r--r--  src/bench/nanobench.cpp | 6
-rw-r--r--  src/bench/nanobench.h | 3225
-rw-r--r--  src/bench/poly1305.cpp | 23
-rw-r--r--  src/bench/prevector.cpp | 91
-rw-r--r--  src/bench/rollingbloom.cpp | 16
-rw-r--r--  src/bench/rpc_blockchain.cpp | 9
-rw-r--r--  src/bench/rpc_mempool.cpp | 8
-rw-r--r--  src/bench/util_time.cpp | 32
-rw-r--r--  src/bench/verify_script.cpp | 21
-rw-r--r--  src/bench/wallet_balance.cpp | 22
-rw-r--r--  src/bitcoin-cli.cpp | 42
-rw-r--r--  src/bitcoin-tx.cpp | 56
-rw-r--r--  src/bitcoin-wallet.cpp | 22
-rw-r--r--  src/bitcoind.cpp | 40
-rw-r--r--  src/blockfilter.cpp | 8
-rw-r--r--  src/chainparams.cpp | 6
-rw-r--r--  src/chainparamsbase.cpp | 12
-rw-r--r--  src/chainparamsbase.h | 4
-rw-r--r--  src/coins.cpp | 8
-rw-r--r--  src/coins.h | 7
-rw-r--r--  src/compat.h | 3
-rw-r--r--  src/consensus/validation.h | 3
-rw-r--r--  src/core_write.cpp | 7
-rw-r--r--  src/dummywallet.cpp | 7
-rw-r--r--  src/hash.cpp | 9
-rw-r--r--  src/hash.h | 84
-rw-r--r--  src/index/base.cpp | 9
-rw-r--r--  src/index/base.h | 16
-rw-r--r--  src/index/disktxpos.h | 35
-rw-r--r--  src/index/txindex.cpp | 32
-rw-r--r--  src/init.cpp | 633
-rw-r--r--  src/init.h | 15
-rw-r--r--  src/interfaces/chain.cpp | 21
-rw-r--r--  src/interfaces/chain.h | 7
-rw-r--r--  src/interfaces/node.cpp | 95
-rw-r--r--  src/interfaces/node.h | 56
-rw-r--r--  src/interfaces/wallet.cpp | 7
-rw-r--r--  src/key.cpp | 2
-rw-r--r--  src/merkleblock.cpp | 4
-rw-r--r--  src/net.cpp | 193
-rw-r--r--  src/net.h | 127
-rw-r--r--  src/net_permissions.cpp | 3
-rw-r--r--  src/net_permissions.h | 4
-rw-r--r--  src/net_processing.cpp | 679
-rw-r--r--  src/net_processing.h | 13
-rw-r--r--  src/netaddress.cpp | 491
-rw-r--r--  src/netaddress.h | 191
-rw-r--r--  src/netbase.cpp | 5
-rw-r--r--  src/policy/policy.cpp | 8
-rw-r--r--  src/primitives/transaction.h | 15
-rw-r--r--  src/protocol.cpp | 7
-rw-r--r--  src/protocol.h | 42
-rw-r--r--  src/pubkey.h | 4
-rw-r--r--  src/qt/bitcoin.cpp | 109
-rw-r--r--  src/qt/bitcoin.h | 20
-rw-r--r--  src/qt/bitcoingui.cpp | 12
-rw-r--r--  src/qt/bitcoingui.h | 3
-rw-r--r--  src/qt/forms/optionsdialog.ui | 4
-rw-r--r--  src/qt/guiutil.cpp | 7
-rw-r--r--  src/qt/intro.cpp | 9
-rw-r--r--  src/qt/intro.h | 2
-rw-r--r--  src/qt/optionsmodel.cpp | 26
-rw-r--r--  src/qt/optionsmodel.h | 9
-rw-r--r--  src/qt/paymentserver.cpp | 6
-rw-r--r--  src/qt/paymentserver.h | 2
-rw-r--r--  src/qt/rpcconsole.cpp | 6
-rw-r--r--  src/qt/rpcconsole.h | 2
-rw-r--r--  src/qt/splashscreen.cpp | 31
-rw-r--r--  src/qt/splashscreen.h | 8
-rw-r--r--  src/qt/test/addressbooktests.cpp | 4
-rw-r--r--  src/qt/test/apptests.cpp | 3
-rw-r--r--  src/qt/test/test_main.cpp | 15
-rw-r--r--  src/qt/test/wallettests.cpp | 5
-rw-r--r--  src/qt/utilitydialog.cpp | 2
-rw-r--r--  src/qt/utilitydialog.h | 6
-rw-r--r--  src/random.cpp | 6
-rw-r--r--  src/rest.cpp | 36
-rw-r--r--  src/rpc/blockchain.cpp | 24
-rw-r--r--  src/rpc/client.cpp | 5
-rw-r--r--  src/rpc/mining.cpp | 22
-rw-r--r--  src/rpc/misc.cpp | 202
-rw-r--r--  src/rpc/net.cpp | 73
-rw-r--r--  src/rpc/rawtransaction.cpp | 59
-rw-r--r--  src/rpc/request.cpp | 2
-rw-r--r--  src/rpc/server.cpp | 9
-rw-r--r--  src/rpc/util.cpp | 6
-rw-r--r--  src/script/descriptor.cpp | 9
-rw-r--r--  src/script/interpreter.cpp | 31
-rw-r--r--  src/script/sign.cpp | 2
-rw-r--r--  src/script/standard.cpp | 16
-rw-r--r--  src/script/standard.h | 13
-rw-r--r--  src/span.h | 13
-rw-r--r--  src/streams.h | 12
-rw-r--r--  src/support/lockedpool.cpp | 1
-rw-r--r--  src/sync.cpp | 22
-rw-r--r--  src/sync.h | 14
-rw-r--r--  src/test/addrman_tests.cpp | 12
-rw-r--r--  src/test/base58_tests.cpp | 2
-rw-r--r--  src/test/crypto_tests.cpp | 4
-rw-r--r--  src/test/denialofservice_tests.cpp | 18
-rw-r--r--  src/test/fuzz/asmap.cpp | 14
-rw-r--r--  src/test/fuzz/buffered_file.cpp | 12
-rw-r--r--  src/test/fuzz/crypto.cpp | 11
-rw-r--r--  src/test/fuzz/key.cpp | 2
-rw-r--r--  src/test/fuzz/locale.cpp | 3
-rw-r--r--  src/test/fuzz/net_permissions.cpp | 1
-rw-r--r--  src/test/fuzz/netaddress.cpp | 5
-rw-r--r--  src/test/fuzz/process_message.cpp | 21
-rw-r--r--  src/test/fuzz/process_messages.cpp | 5
-rw-r--r--  src/test/fuzz/script.cpp | 2
-rw-r--r--  src/test/fuzz/util.h | 2
-rw-r--r--  src/test/key_tests.cpp | 10
-rw-r--r--  src/test/merkle_tests.cpp | 13
-rw-r--r--  src/test/net_tests.cpp | 89
-rw-r--r--  src/test/netbase_tests.cpp | 33
-rw-r--r--  src/test/script_standard_tests.cpp | 19
-rw-r--r--  src/test/script_tests.cpp | 2
-rw-r--r--  src/test/serialize_tests.cpp | 4
-rw-r--r--  src/test/settings_tests.cpp | 4
-rw-r--r--  src/test/sigopcount_tests.cpp | 10
-rw-r--r--  src/test/sync_tests.cpp | 6
-rw-r--r--  src/test/system_tests.cpp | 95
-rw-r--r--  src/test/transaction_tests.cpp | 46
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 2
-rw-r--r--  src/test/util/setup_common.cpp | 49
-rw-r--r--  src/test/util/setup_common.h | 13
-rw-r--r--  src/test/util_tests.cpp | 170
-rw-r--r--  src/test/validation_chainstate_tests.cpp | 74
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp | 62
-rw-r--r--  src/test/validation_flush_tests.cpp | 24
-rw-r--r--  src/torcontrol.cpp | 4
-rw-r--r--  src/txdb.cpp | 43
-rw-r--r--  src/txdb.h | 10
-rw-r--r--  src/txmempool.cpp | 6
-rw-r--r--  src/txmempool.h | 12
-rw-r--r--  src/uint256.cpp | 14
-rw-r--r--  src/uint256.h | 29
-rw-r--r--  src/util/message.cpp | 2
-rw-r--r--  src/util/strencodings.cpp | 50
-rw-r--r--  src/util/strencodings.h | 39
-rw-r--r--  src/util/system.cpp | 49
-rw-r--r--  src/util/system.h | 12
-rw-r--r--  src/validation.cpp | 134
-rw-r--r--  src/validation.h | 47
-rw-r--r--  src/wallet/bdb.cpp | 172
-rw-r--r--  src/wallet/bdb.h | 22
-rw-r--r--  src/wallet/coinselection.cpp | 43
-rw-r--r--  src/wallet/coinselection.h | 8
-rw-r--r--  src/wallet/db.h | 41
-rw-r--r--  src/wallet/init.cpp | 62
-rw-r--r--  src/wallet/load.cpp | 25
-rw-r--r--  src/wallet/load.h | 6
-rw-r--r--  src/wallet/rpcdump.cpp | 2
-rw-r--r--  src/wallet/rpcwallet.cpp | 170
-rw-r--r--  src/wallet/salvage.cpp | 27
-rw-r--r--  src/wallet/salvage.h | 4
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 4
-rw-r--r--  src/wallet/wallet.cpp | 155
-rw-r--r--  src/wallet/wallet.h | 33
-rw-r--r--  src/wallet/walletdb.cpp | 14
-rw-r--r--  src/wallet/walletdb.h | 4
-rw-r--r--  src/wallet/wallettool.cpp | 34
-rw-r--r--  src/walletinitinterface.h | 4
-rw-r--r--  test/functional/README.md | 4
-rwxr-xr-x  test/functional/example_test.py | 13
-rwxr-xr-x  test/functional/feature_abortnode.py | 4
-rwxr-xr-x  test/functional/feature_assumevalid.py | 6
-rwxr-xr-x  test/functional/feature_bip68_sequence.py | 4
-rwxr-xr-x  test/functional/feature_block.py | 6
-rwxr-xr-x  test/functional/feature_cltv.py | 2
-rwxr-xr-x  test/functional/feature_csv_activation.py | 14
-rwxr-xr-x  test/functional/feature_dbcrash.py | 4
-rwxr-xr-x  test/functional/feature_dersig.py | 2
-rwxr-xr-x  test/functional/feature_fee_estimation.py | 6
-rwxr-xr-x  test/functional/feature_maxuploadtarget.py | 4
-rwxr-xr-x  test/functional/feature_notifications.py | 9
-rwxr-xr-x  test/functional/feature_pruning.py | 7
-rwxr-xr-x  test/functional/feature_rbf.py | 2
-rwxr-xr-x  test/functional/feature_segwit.py | 4
-rwxr-xr-x  test/functional/feature_shutdown.py | 4
-rwxr-xr-x  test/functional/feature_versionbits_warning.py | 7
-rwxr-xr-x  test/functional/mempool_package_onemore.py | 2
-rwxr-xr-x  test/functional/mempool_packages.py | 17
-rwxr-xr-x  test/functional/mempool_persist.py | 7
-rwxr-xr-x  test/functional/mempool_unbroadcast.py | 2
-rwxr-xr-x  test/functional/mempool_updatefromblock.py | 2
-rwxr-xr-x  test/functional/mining_basic.py | 2
-rwxr-xr-x  test/functional/p2p_addr_relay.py | 4
-rwxr-xr-x  test/functional/p2p_blockfilters.py | 20
-rwxr-xr-x  test/functional/p2p_blocksonly.py | 2
-rwxr-xr-x  test/functional/p2p_compactblocks.py | 81
-rwxr-xr-x  test/functional/p2p_disconnect_ban.py | 7
-rwxr-xr-x  test/functional/p2p_dos_header_tree.py | 2
-rwxr-xr-x  test/functional/p2p_eviction.py | 8
-rwxr-xr-x  test/functional/p2p_feefilter.py | 56
-rwxr-xr-x  test/functional/p2p_filter.py | 11
-rwxr-xr-x  test/functional/p2p_fingerprint.py | 12
-rwxr-xr-x  test/functional/p2p_getaddr_caching.py | 109
-rwxr-xr-x  test/functional/p2p_getdata.py | 2
-rwxr-xr-x  test/functional/p2p_invalid_block.py | 2
-rwxr-xr-x  test/functional/p2p_invalid_locator.py | 2
-rwxr-xr-x  test/functional/p2p_invalid_messages.py | 5
-rwxr-xr-x  test/functional/p2p_invalid_tx.py | 5
-rwxr-xr-x  test/functional/p2p_leak.py | 80
-rwxr-xr-x  test/functional/p2p_leak_tx.py | 2
-rwxr-xr-x  test/functional/p2p_nobloomfilter_messages.py | 2
-rwxr-xr-x  test/functional/p2p_node_network_limited.py | 5
-rwxr-xr-x  test/functional/p2p_permissions.py | 9
-rwxr-xr-x  test/functional/p2p_ping.py | 21
-rwxr-xr-x  test/functional/p2p_segwit.py | 77
-rwxr-xr-x  test/functional/p2p_sendheaders.py | 43
-rwxr-xr-x  test/functional/p2p_timeouts.py | 2
-rwxr-xr-x  test/functional/p2p_tx_download.py | 23
-rwxr-xr-x  test/functional/p2p_unrequested_blocks.py | 6
-rwxr-xr-x  test/functional/rpc_blockchain.py | 30
-rwxr-xr-x  test/functional/rpc_deprecated.py | 37
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 4
-rwxr-xr-x  test/functional/rpc_generate.py | 35
-rwxr-xr-x  test/functional/rpc_generateblock.py | 2
-rwxr-xr-x  test/functional/rpc_invalidateblock.py | 7
-rwxr-xr-x  test/functional/rpc_misc.py | 32
-rwxr-xr-x  test/functional/rpc_net.py | 121
-rwxr-xr-x  test/functional/rpc_psbt.py | 8
-rwxr-xr-x  test/functional/rpc_signrawtransaction.py | 20
-rwxr-xr-x  test/functional/test_framework/messages.py | 19
-rwxr-xr-x  test/functional/test_framework/p2p.py (renamed from test/functional/test_framework/mininode.py) | 61
-rw-r--r--  test/functional/test_framework/script.py | 2
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 2
-rwxr-xr-x  test/functional/test_framework/test_node.py | 11
-rw-r--r--  test/functional/test_framework/util.py | 14
-rwxr-xr-x  test/functional/test_runner.py | 9
-rwxr-xr-x  test/functional/wallet_backup.py | 4
-rwxr-xr-x  test/functional/wallet_basic.py | 7
-rwxr-xr-x  test/functional/wallet_bumpfee.py | 88
-rwxr-xr-x  test/functional/wallet_create_tx.py | 2
-rwxr-xr-x  test/functional/wallet_descriptor.py | 4
-rwxr-xr-x  test/functional/wallet_dump.py | 2
-rwxr-xr-x  test/functional/wallet_groups.py | 96
-rwxr-xr-x  test/functional/wallet_labels.py | 2
-rwxr-xr-x  test/functional/wallet_listsinceblock.py | 22
-rwxr-xr-x  test/functional/wallet_multiwallet.py | 8
-rwxr-xr-x  test/functional/wallet_resendwallettransactions.py | 12
-rwxr-xr-x  test/functional/wallet_startup.py | 48
-rwxr-xr-x  test/functional/wallet_zapwallettxes.py | 4
-rwxr-xr-x  test/fuzz/test_runner.py | 72
-rwxr-xr-x  test/lint/lint-git-commit-check.sh | 47
-rwxr-xr-x  test/lint/lint-include-guards.sh | 2
-rwxr-xr-x  test/lint/lint-includes.sh | 1
346 files changed, 9482 insertions, 3842 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index 2f908dc718..7dcf9388b9 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -55,7 +55,7 @@ after_build:
#- 7z a bitcoin-%APPVEYOR_BUILD_VERSION%.zip %APPVEYOR_BUILD_FOLDER%\build_msvc\%platform%\%configuration%\*.exe
test_script:
- cmd: src\test_bitcoin.exe -l test_suite
-- cmd: src\bench_bitcoin.exe -evals=1 -scaling=0 > NUL
+- cmd: src\bench_bitcoin.exe > NUL
- ps: python test\util\bitcoin-util-test.py
- cmd: python test\util\rpcauth-test.py
# Fee estimation test failing on appveyor with: WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted.
diff --git a/.cirrus.yml b/.cirrus.yml
index 446d3e35a9..33bf43d4b1 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -5,7 +5,7 @@ container:
# https://cirrus-ci.org/faq/#are-there-any-limits
# Each project has 16 CPU in total, assign 2 to each container, so that 8 tasks run in parallel
cpu: 2
- memory: 6G # https://cirrus-ci.org/guide/linux/#linux-containers
+ memory: 8G # Set to 8GB to avoid OOM. https://cirrus-ci.org/guide/linux/#linux-containers
env:
PACKAGE_MANAGER_INSTALL : "apt-get update && apt-get install -y"
MAKEJOBS: "-j4"
@@ -66,3 +66,11 @@ task:
image: ubuntu:focal
env:
FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"
+
+task:
+ name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, fuzzers under valgrind]'
+ << : *GLOBAL_TASK_TEMPLATE
+ container:
+ image: ubuntu:focal
+ env:
+ FILE_ENV: "./ci/test/00_setup_env_native_fuzz_with_valgrind.sh"
diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.md b/.github/ISSUE_TEMPLATE/good_first_issue.md
index 8be78a1f6e..d32e22d360 100644
--- a/.github/ISSUE_TEMPLATE/good_first_issue.md
+++ b/.github/ISSUE_TEMPLATE/good_first_issue.md
@@ -2,11 +2,13 @@
name: Good first issue
about: '(Regular devs only): Suggest a new good first issue'
title: ''
-labels: good first issue
+labels: ''
assignees: ''
---
+<!-- Needs the label "good first issue" assigned manually before or after opening -->
+
<!-- A good first issue is an uncontroversial issue, that has a relatively unique and obvious solution -->
<!-- Motivate the issue and explain the solution briefly -->
diff --git a/.gitignore b/.gitignore
index 23b6090265..1173edfaa7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,7 +9,7 @@ src/bitcoin-gui
src/bitcoin-node
src/bitcoin-tx
src/bitcoin-wallet
-src/test/fuzz
+src/test/fuzz/*
!src/test/fuzz/*.*
src/test/test_bitcoin
src/qt/test/test_bitcoin-qt
@@ -119,7 +119,9 @@ releases
/*.info
test_bitcoin.coverage/
total.coverage/
+fuzz.coverage/
coverage_percent.txt
+/cov_tool_wrapper.sh
#build tests
linux-coverage-build
diff --git a/.travis.yml b/.travis.yml
index 2d69ad7d23..8f8fba3108 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -72,17 +72,15 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_arm.sh"
QEMU_USER_CMD=""
-# s390 build was disabled temporarily because of disk space issues on the Travis VM
-#
-# - stage: test
-# name: 'S390x [GOAL: install] [buster] [unit tests, functional tests]'
-# arch: s390x # Can disable QEMU_USER_CMD and run the tests natively without qemu
-# env: >-
-# FILE_ENV="./ci/test/00_setup_env_s390x.sh"
-# QEMU_USER_CMD=""
+ - stage: test
+ name: 'S390x [GOAL: install] [buster] [unit tests, functional tests]'
+ arch: s390x # Can disable QEMU_USER_CMD and run the tests natively without qemu
+ env: >-
+ FILE_ENV="./ci/test/00_setup_env_s390x.sh"
+ QEMU_USER_CMD=""
- stage: test
- name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]'
+ name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]'
env: >-
FILE_ENV="./ci/test/00_setup_env_win64.sh"
@@ -112,11 +110,6 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_native_multiprocess.sh"
- stage: test
- name: 'x86_64 Linux [GOAL: install] [focal] [no depends, only system libs, fuzzers under valgrind]'
- env: >-
- FILE_ENV="./ci/test/00_setup_env_native_fuzz_with_valgrind.sh"
-
- - stage: test
name: 'x86_64 Linux [GOAL: install] [xenial] [no wallet]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 11a0f4bac7..2e11474382 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -158,7 +158,7 @@ the pull request affects. Valid areas as:
Examples:
consensus: Add new opcode for BIP-XXXX OP_CHECKAWESOMESIG
- net: Automatically create hidden service, listen on Tor
+ net: Automatically create onion service, listen on Tor
qt: Add feed bump button
log: Fix typo in log message
diff --git a/Makefile.am b/Makefile.am
index 75a164f49e..1d6358b1d5 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -65,10 +65,10 @@ OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \
$(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \
$(top_srcdir)/contrib/macdeploy/detached-sig-create.sh
-COVERAGE_INFO = baseline.info \
+COVERAGE_INFO = $(COV_TOOL_WRAPPER) baseline.info \
test_bitcoin_filtered.info total_coverage.info \
baseline_filtered.info functional_test.info functional_test_filtered.info \
- test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_coverage.info
+ test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_filtered.info fuzz_coverage.info
dist-hook:
-$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf -
@@ -192,7 +192,11 @@ LCOV_FILTER_PATTERN = \
-p "src/secp256k1" \
-p "depends"
-baseline.info:
+$(COV_TOOL_WRAPPER):
+ @echo 'exec $(COV_TOOL) "$$@"' > $(COV_TOOL_WRAPPER)
+ @chmod +x $(COV_TOOL_WRAPPER)
+
+baseline.info: $(COV_TOOL_WRAPPER)
$(LCOV) -c -i -d $(abs_builddir)/src -o $@
baseline_filtered.info: baseline.info
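The $(COV_TOOL_WRAPPER) rule added above writes a one-line script that execs whatever coverage tool configure selected (plain gcov, or "llvm-cov gcov" for Clang builds) and forwards all of lcov's arguments to it; the wrapper lets a multi-word command like "llvm-cov gcov" be handed to lcov's --gcov-tool, which is pointed at a single program. A minimal Python sketch of the same forwarding behaviour, purely for illustration (the real wrapper is the generated shell one-liner, and the "llvm-cov gcov" value below is only an example):

#!/usr/bin/env python3
# Illustrative stand-in for the generated cov_tool_wrapper.sh.
import os
import sys

COV_TOOL = ["llvm-cov", "gcov"]  # whatever configure substituted for $(COV_TOOL)

# Replace this process with the coverage tool, passing through every argument
# lcov supplied, exactly like `exec $(COV_TOOL) "$@"` in the generated script.
os.execvp(COV_TOOL[0], COV_TOOL + sys.argv[1:])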
diff --git a/build-aux/m4/ax_boost_process.m4 b/build-aux/m4/ax_boost_process.m4
new file mode 100644
index 0000000000..5d20e67464
--- /dev/null
+++ b/build-aux/m4/ax_boost_process.m4
@@ -0,0 +1,121 @@
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_boost_process.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_BOOST_PROCESS
+#
+# DESCRIPTION
+#
+# Test for Process library from the Boost C++ libraries. The macro
+# requires a preceding call to AX_BOOST_BASE. Further documentation is
+# available at <http://randspringer.de/boost/index.html>.
+#
+# This macro calls:
+#
+# AC_SUBST(BOOST_PROCESS_LIB)
+#
+# And sets:
+#
+# HAVE_BOOST_PROCESS
+#
+# LICENSE
+#
+# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de>
+# Copyright (c) 2008 Michael Tindal
+# Copyright (c) 2008 Daniel Casimiro <dan.casimiro@gmail.com>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 2
+
+AC_DEFUN([AX_BOOST_PROCESS],
+[
+ AC_ARG_WITH([boost-process],
+ AS_HELP_STRING([--with-boost-process@<:@=special-lib@:>@],
+ [use the Process library from boost - it is possible to specify a certain library for the linker
+ e.g. --with-boost-process=boost_process-gcc-mt ]),
+ [
+ if test "$withval" = "no"; then
+ want_boost_process="no"
+ elif test "$withval" = "yes"; then
+ want_boost_process="yes"
+ ax_boost_user_process_lib=""
+ else
+ want_boost_process="yes"
+ ax_boost_user_process_lib="$withval"
+ fi
+ ],
+ [want_boost_process="yes"]
+ )
+
+ if test "x$want_boost_process" = "xyes"; then
+ AC_REQUIRE([AC_PROG_CC])
+ AC_REQUIRE([AC_CANONICAL_BUILD])
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ AC_CACHE_CHECK(whether the Boost::Process library is available,
+ ax_cv_boost_process,
+ [AC_LANG_PUSH([C++])
+ CXXFLAGS_SAVE=$CXXFLAGS
+ CXXFLAGS=
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/process.hpp>]],
+ [[boost::process::child* child = new boost::process::child; delete child;]])],
+ ax_cv_boost_process=yes, ax_cv_boost_process=no)
+ CXXFLAGS=$CXXFLAGS_SAVE
+ AC_LANG_POP([C++])
+ ])
+ if test "x$ax_cv_boost_process" = "xyes"; then
+ AC_SUBST(BOOST_CPPFLAGS)
+
+ AC_DEFINE(HAVE_BOOST_PROCESS,,[define if the Boost::Process library is available])
+ BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'`
+
+ LDFLAGS_SAVE=$LDFLAGS
+ if test "x$ax_boost_user_process_lib" = "x"; then
+ for libextension in `ls -r $BOOSTLIBDIR/libboost_process* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do
+ ax_lib=${libextension}
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+ if test "x$link_process" != "xyes"; then
+ for libextension in `ls -r $BOOSTLIBDIR/boost_process* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do
+ ax_lib=${libextension}
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+ fi
+
+ else
+ for ax_lib in $ax_boost_user_process_lib boost_process-$ax_boost_user_process_lib; do
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+
+ fi
+ if test "x$ax_lib" = "x"; then
+ AC_MSG_ERROR(Could not find a version of the Boost::Process library!)
+ fi
+ if test "x$link_process" = "xno"; then
+ AC_MSG_ERROR(Could not link against $ax_lib !)
+ fi
+ fi
+
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+ fi
+])
diff --git a/build-aux/m4/ax_boost_thread.m4 b/build-aux/m4/ax_boost_thread.m4
index e9dea43535..75e80e6e75 100644
--- a/build-aux/m4/ax_boost_thread.m4
+++ b/build-aux/m4/ax_boost_thread.m4
@@ -30,7 +30,7 @@
# and this notice are preserved. This file is offered as-is, without any
# warranty.
-#serial 32
+#serial 33
AC_DEFUN([AX_BOOST_THREAD],
[
@@ -67,13 +67,24 @@ AC_DEFUN([AX_BOOST_THREAD],
[AC_LANG_PUSH([C++])
CXXFLAGS_SAVE=$CXXFLAGS
- if test "x$host_os" = "xsolaris" ; then
- CXXFLAGS="-pthreads $CXXFLAGS"
- elif test "x$host_os" = "xmingw32" ; then
- CXXFLAGS="-mthreads $CXXFLAGS"
- else
- CXXFLAGS="-pthread $CXXFLAGS"
- fi
+ case "x$host_os" in
+ xsolaris )
+ CXXFLAGS="-pthreads $CXXFLAGS"
+ break;
+ ;;
+ xmingw32 )
+ CXXFLAGS="-mthreads $CXXFLAGS"
+ break;
+ ;;
+ *android* )
+ break;
+ ;;
+ * )
+ CXXFLAGS="-pthread $CXXFLAGS"
+ break;
+ ;;
+ esac
+
AC_COMPILE_IFELSE([
AC_LANG_PROGRAM(
[[@%:@include <boost/thread/thread.hpp>]],
@@ -84,13 +95,23 @@ AC_DEFUN([AX_BOOST_THREAD],
AC_LANG_POP([C++])
])
if test "x$ax_cv_boost_thread" = "xyes"; then
- if test "x$host_os" = "xsolaris" ; then
- BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS"
- elif test "x$host_os" = "xmingw32" ; then
- BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS"
- else
- BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS"
- fi
+ case "x$host_os" in
+ xsolaris )
+ BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS"
+ break;
+ ;;
+ xmingw32 )
+ BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS"
+ break;
+ ;;
+ *android* )
+ break;
+ ;;
+ * )
+ BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS"
+ break;
+ ;;
+ esac
AC_SUBST(BOOST_CPPFLAGS)
@@ -148,6 +169,9 @@ AC_DEFUN([AX_BOOST_THREAD],
xmingw32 )
break;
;;
+ *android* )
+ break;
+ ;;
* )
BOOST_THREAD_LIB="$BOOST_THREAD_LIB -lpthread"
break;
diff --git a/build-aux/m4/bitcoin_qt.m4 b/build-aux/m4/bitcoin_qt.m4
index e171238cbc..6c7665830b 100644
--- a/build-aux/m4/bitcoin_qt.m4
+++ b/build-aux/m4/bitcoin_qt.m4
@@ -72,18 +72,32 @@ AC_DEFUN([BITCOIN_QT_INIT],[
AC_ARG_WITH([qtdbus],
[AS_HELP_STRING([--with-qtdbus],
- [enable DBus support (default is yes if qt is enabled and QtDBus is found)])],
+ [enable DBus support (default is yes if qt is enabled and QtDBus is found, except on Android)])],
[use_dbus=$withval],
[use_dbus=auto])
+ dnl Android doesn't support D-Bus and certainly doesn't use it for notifications
+ case $host in
+ *android*)
+ if test "x$use_dbus" != xyes; then
+ use_dbus=no
+ fi
+ ;;
+ esac
+
AC_SUBST(QT_TRANSLATION_DIR,$qt_translation_path)
])
dnl Find Qt libraries and includes.
+dnl
+dnl BITCOIN_QT_CONFIGURE([MINIMUM-VERSION])
+dnl
dnl Outputs: See _BITCOIN_QT_FIND_LIBS
dnl Outputs: Sets variables for all qt-related tools.
dnl Outputs: bitcoin_enable_qt, bitcoin_enable_qt_dbus, bitcoin_enable_qt_test
AC_DEFUN([BITCOIN_QT_CONFIGURE],[
+ qt_version=">= $1"
+ qt_lib_prefix="Qt5"
BITCOIN_QT_CHECK([_BITCOIN_QT_FIND_LIBS])
dnl This is ugly and complicated. Yuck. Works as follows:
@@ -221,7 +235,7 @@ AC_DEFUN([BITCOIN_QT_CONFIGURE],[
bitcoin_enable_qt=no
])
if test x$bitcoin_enable_qt = xyes; then
- AC_MSG_RESULT([$bitcoin_enable_qt ($QT_LIB_PREFIX)])
+ AC_MSG_RESULT([$bitcoin_enable_qt ($qt_lib_prefix)])
else
AC_MSG_RESULT([$bitcoin_enable_qt])
fi
@@ -295,25 +309,19 @@ AC_DEFUN([_BITCOIN_QT_FIND_STATIC_PLUGINS],[
if test -d "$qt_plugin_path/platforms/android"; then
QT_LIBS="$QT_LIBS -L$qt_plugin_path/platforms/android -lqtfreetype -lEGL"
fi
- m4_ifdef([PKG_CHECK_MODULES],[
- if test x$bitcoin_cv_qt58 = xno; then
- PKG_CHECK_MODULES([QTPLATFORM], [Qt5PlatformSupport], [QT_LIBS="$QTPLATFORM_LIBS $QT_LIBS"])
- else
- PKG_CHECK_MODULES([QTFONTDATABASE], [Qt5FontDatabaseSupport], [QT_LIBS="-lQt5FontDatabaseSupport $QT_LIBS"])
- PKG_CHECK_MODULES([QTEVENTDISPATCHER], [Qt5EventDispatcherSupport], [QT_LIBS="-lQt5EventDispatcherSupport $QT_LIBS"])
- PKG_CHECK_MODULES([QTTHEME], [Qt5ThemeSupport], [QT_LIBS="-lQt5ThemeSupport $QT_LIBS"])
- PKG_CHECK_MODULES([QTDEVICEDISCOVERY], [Qt5DeviceDiscoverySupport], [QT_LIBS="-lQt5DeviceDiscoverySupport $QT_LIBS"])
- PKG_CHECK_MODULES([QTACCESSIBILITY], [Qt5AccessibilitySupport], [QT_LIBS="-lQt5AccessibilitySupport $QT_LIBS"])
- PKG_CHECK_MODULES([QTFB], [Qt5FbSupport], [QT_LIBS="-lQt5FbSupport $QT_LIBS"])
- fi
- if test "x$TARGET_OS" = xlinux; then
- PKG_CHECK_MODULES([QTXCBQPA], [Qt5XcbQpa], [QT_LIBS="$QTXCBQPA_LIBS $QT_LIBS"])
- elif test "x$TARGET_OS" = xdarwin; then
- PKG_CHECK_MODULES([QTCLIPBOARD], [Qt5ClipboardSupport], [QT_LIBS="-lQt5ClipboardSupport $QT_LIBS"])
- PKG_CHECK_MODULES([QTGRAPHICS], [Qt5GraphicsSupport], [QT_LIBS="-lQt5GraphicsSupport $QT_LIBS"])
- PKG_CHECK_MODULES([QTCGL], [Qt5CglSupport], [QT_LIBS="-lQt5CglSupport $QT_LIBS"])
- fi
- ])
+ PKG_CHECK_MODULES([QTFONTDATABASE], [Qt5FontDatabaseSupport], [QT_LIBS="-lQt5FontDatabaseSupport $QT_LIBS"])
+ PKG_CHECK_MODULES([QTEVENTDISPATCHER], [Qt5EventDispatcherSupport], [QT_LIBS="-lQt5EventDispatcherSupport $QT_LIBS"])
+ PKG_CHECK_MODULES([QTTHEME], [Qt5ThemeSupport], [QT_LIBS="-lQt5ThemeSupport $QT_LIBS"])
+ PKG_CHECK_MODULES([QTDEVICEDISCOVERY], [Qt5DeviceDiscoverySupport], [QT_LIBS="-lQt5DeviceDiscoverySupport $QT_LIBS"])
+ PKG_CHECK_MODULES([QTACCESSIBILITY], [Qt5AccessibilitySupport], [QT_LIBS="-lQt5AccessibilitySupport $QT_LIBS"])
+ PKG_CHECK_MODULES([QTFB], [Qt5FbSupport], [QT_LIBS="-lQt5FbSupport $QT_LIBS"])
+ if test "x$TARGET_OS" = xlinux; then
+ PKG_CHECK_MODULES([QTXCBQPA], [Qt5XcbQpa], [QT_LIBS="$QTXCBQPA_LIBS $QT_LIBS"])
+ elif test "x$TARGET_OS" = xdarwin; then
+ PKG_CHECK_MODULES([QTCLIPBOARD], [Qt5ClipboardSupport], [QT_LIBS="-lQt5ClipboardSupport $QT_LIBS"])
+ PKG_CHECK_MODULES([QTGRAPHICS], [Qt5GraphicsSupport], [QT_LIBS="-lQt5GraphicsSupport $QT_LIBS"])
+ PKG_CHECK_MODULES([QTCGL], [Qt5CglSupport], [QT_LIBS="-lQt5CglSupport $QT_LIBS"])
+ fi
fi
])
@@ -321,23 +329,29 @@ dnl Internal. Find Qt libraries using pkg-config.
dnl Outputs: All necessary QT_* variables are set.
dnl Outputs: have_qt_test and have_qt_dbus are set (if applicable) to yes|no.
AC_DEFUN([_BITCOIN_QT_FIND_LIBS],[
- m4_ifdef([PKG_CHECK_MODULES],[
- QT_LIB_PREFIX=Qt5
- qt5_modules="Qt5Core Qt5Gui Qt5Network Qt5Widgets"
- BITCOIN_QT_CHECK([
- PKG_CHECK_MODULES([QT5], [$qt5_modules], [QT_INCLUDES="$QT5_CFLAGS"; QT_LIBS="$QT5_LIBS" have_qt=yes],[have_qt=no])
+ BITCOIN_QT_CHECK([
+ PKG_CHECK_MODULES([QT_CORE], [${qt_lib_prefix}Core $qt_version], [],
+ [BITCOIN_QT_FAIL([${qt_lib_prefix}Core $qt_version not found])])
+ ])
+ BITCOIN_QT_CHECK([
+ PKG_CHECK_MODULES([QT_GUI], [${qt_lib_prefix}Gui $qt_version], [],
+ [BITCOIN_QT_FAIL([${qt_lib_prefix}Gui $qt_version not found])])
+ ])
+ BITCOIN_QT_CHECK([
+ PKG_CHECK_MODULES([QT_WIDGETS], [${qt_lib_prefix}Widgets $qt_version], [],
+ [BITCOIN_QT_FAIL([${qt_lib_prefix}Widgets $qt_version not found])])
+ ])
+ BITCOIN_QT_CHECK([
+ PKG_CHECK_MODULES([QT_NETWORK], [${qt_lib_prefix}Network $qt_version], [],
+ [BITCOIN_QT_FAIL([${qt_lib_prefix}Network $qt_version not found])])
+ ])
+ QT_INCLUDES="$QT_CORE_CFLAGS $QT_GUI_CFLAGS $QT_WIDGETS_CFLAGS $QT_NETWORK_CFLAGS"
+ QT_LIBS="$QT_CORE_LIBS $QT_GUI_LIBS $QT_WIDGETS_LIBS $QT_NETWORK_LIBS"
- if test "x$have_qt" != xyes; then
- have_qt=no
- BITCOIN_QT_FAIL([Qt dependencies not found])
- fi
- ])
- BITCOIN_QT_CHECK([
- PKG_CHECK_MODULES([QT_TEST], [${QT_LIB_PREFIX}Test], [QT_TEST_INCLUDES="$QT_TEST_CFLAGS"; have_qt_test=yes], [have_qt_test=no])
- if test "x$use_dbus" != xno; then
- PKG_CHECK_MODULES([QT_DBUS], [${QT_LIB_PREFIX}DBus], [QT_DBUS_INCLUDES="$QT_DBUS_CFLAGS"; have_qt_dbus=yes], [have_qt_dbus=no])
- fi
- ])
+ BITCOIN_QT_CHECK([
+ PKG_CHECK_MODULES([QT_TEST], [${qt_lib_prefix}Test $qt_version], [QT_TEST_INCLUDES="$QT_TEST_CFLAGS"; have_qt_test=yes], [have_qt_test=no])
+ if test "x$use_dbus" != xno; then
+ PKG_CHECK_MODULES([QT_DBUS], [${qt_lib_prefix}DBus $qt_version], [QT_DBUS_INCLUDES="$QT_DBUS_CFLAGS"; have_qt_dbus=yes], [have_qt_dbus=no])
+ fi
])
- true; dnl
])
diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h
index fbbe1a2156..9d0b50a0b4 100644
--- a/build_msvc/bitcoin_config.h
+++ b/build_msvc/bitcoin_config.h
@@ -47,6 +47,9 @@
/* define if the Boost::Filesystem library is available */
#define HAVE_BOOST_FILESYSTEM /**/
+/* define if the Boost::Process library is available */
+#define HAVE_BOOST_PROCESS /**/
+
/* define if the Boost::System library is available */
#define HAVE_BOOST_SYSTEM /**/
diff --git a/build_msvc/common.init.vcxproj b/build_msvc/common.init.vcxproj
index 4fd516fff5..a080fd2aa4 100644
--- a/build_msvc/common.init.vcxproj
+++ b/build_msvc/common.init.vcxproj
@@ -110,7 +110,7 @@
<AdditionalOptions>/utf-8 /std:c++17 %(AdditionalOptions)</AdditionalOptions>
<DisableSpecificWarnings>4018;4221;4244;4267;4334;4715;4805;4834</DisableSpecificWarnings>
<TreatWarningAsError>true</TreatWarningAsError>
- <PreprocessorDefinitions>_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;_SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING;ZMQ_STATIC;NOMINMAX;WIN32;HAVE_CONFIG_H;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_CONSOLE;_WIN32_WINNT=0x0601;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;_SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING;ZMQ_STATIC;NOMINMAX;WIN32;HAVE_CONFIG_H;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_CONSOLE;_WIN32_WINNT=0x0601;_WIN32_IE=0x0501;WIN32_LEAN_AND_MEAN;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\src;..\..\src\univalue\include;..\..\src\secp256k1\include;..\..\src\leveldb\include;..\..\src\leveldb\helpers\memenv;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
diff --git a/build_msvc/vcpkg-packages.txt b/build_msvc/vcpkg-packages.txt
index 307f295f08..edce8576c3 100644
--- a/build_msvc/vcpkg-packages.txt
+++ b/build_msvc/vcpkg-packages.txt
@@ -1 +1 @@
-berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion \ No newline at end of file
+berkeleydb boost-filesystem boost-multi-index boost-process boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 2413cfca9f..702e881862 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -35,6 +35,10 @@ export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true}
export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true}
export RUN_SECURITY_TESTS=${RUN_SECURITY_TESTS:-false}
+# By how much to scale the test_runner timeouts (option --timeout-factor).
+# This is needed because some ci machines have slow CPU or disk, so sanitizers
+# might be slow or a reindex might be waiting on disk IO.
+export TEST_RUNNER_TIMEOUT_FACTOR=${TEST_RUNNER_TIMEOUT_FACTOR:-4}
export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-}
export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
export EXPECTED_TESTS_DURATION_IN_SECONDS=${EXPECTED_TESTS_DURATION_IN_SECONDS:-1000}
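The comment above introduces TEST_RUNNER_TIMEOUT_FACTOR, which ci/test/06_script_b.sh (changed further down) forwards to test/functional/test_runner.py as --timeout-factor. Conceptually the factor just stretches every timeout the functional tests wait on, so slow CI hosts do not need per-test tuning. A minimal sketch of that idea, using a hypothetical helper rather than the framework's actual code:

import time

def wait_until(predicate, *, timeout=60, timeout_factor=4, poll_interval=0.5):
    """Poll predicate until it returns True or the scaled timeout expires.

    timeout_factor mirrors the test runner's --timeout-factor option: one knob
    that scales every base timeout on slow hosts (sanitizers, qemu, slow disk)
    instead of tuning each test individually.
    """
    deadline = time.time() + timeout * timeout_factor
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(poll_interval)
    raise AssertionError("condition not met within {}s".format(timeout * timeout_factor))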
diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh
index b70a581532..2e445c126d 100644
--- a/ci/test/00_setup_env_arm.sh
+++ b/ci/test/00_setup_env_arm.sh
@@ -25,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
# -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1"
# This could be removed once the ABI change warning does not show up by default
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror --with-boost-process"
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index 5688799f9e..e58003ab19 100644
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -11,5 +11,5 @@ export CONTAINER_NAME=ci_i686_centos_7
export DOCKER_NAME_TAG=centos:7
export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports"
+export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports --with-boost-process"
export CONFIG_SHELL="/bin/dash"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index 7ec66eeb4f..b62f1603f4 100644
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -14,4 +14,4 @@ export XCODE_BUILD_ID=11C505
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
-export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror"
+export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process"
diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh
index b8a9ccaae5..5fb127b762 100644
--- a/ci/test/00_setup_env_mac_host.sh
+++ b/ci/test/00_setup_env_mac_host.sh
@@ -10,7 +10,7 @@ export HOST=x86_64-apple-darwin16
export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well)
export PIP_PACKAGES="zmq"
export GOAL="install"
-export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror"
+export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process"
export NO_DEPENDS=1
export OSX_SDK=""
export CCACHE_SIZE=300M
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index d57c673069..251ece7984 100644
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -10,6 +10,5 @@ export CONTAINER_NAME=ci_native_asan
export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
export DOCKER_NAME_TAG=ubuntu:20.04
export NO_DEPENDS=1
-export TEST_RUNNER_EXTRA="--timeout-factor=4" # Increase timeout because sanitizers slow down
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++"
+export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++ --with-boost-process"
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index 43ee219ef9..a32de4a6b5 100644
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -14,4 +14,5 @@ export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export RUN_FUZZ_TESTS=true
export GOAL="install"
-export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++ --with-boost-process"
+export CCACHE_SIZE=200M
diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
index c27d525003..e06a40eb23 100644
--- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
+++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
@@ -16,3 +16,4 @@ export RUN_FUZZ_TESTS=true
export FUZZ_TESTS_CONFIG="--valgrind"
export GOAL="install"
export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang CXX=clang++"
+export CCACHE_SIZE=200M
diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh
index cc583edf17..6a4979990b 100644
--- a/ci/test/00_setup_env_native_msan.sh
+++ b/ci/test/00_setup_env_native_msan.sh
@@ -20,3 +20,4 @@ export GOAL="install"
export BITCOIN_CONFIG="--enable-wallet --with-sanitizers=memory --with-asm=no --prefix=${BASE_ROOT_DIR}/depends/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' BDB_LIBS='-L${BDB_PREFIX}/lib -ldb_cxx-4.8' BDB_CFLAGS='-I${BDB_PREFIX}/include'"
export USE_MEMORY_SANITIZER="true"
export RUN_FUNCTIONAL_TESTS="false"
+export CCACHE_SIZE=250M
diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh
index 786f0f927f..522a5d9fc2 100644
--- a/ci/test/00_setup_env_native_multiprocess.sh
+++ b/ci/test/00_setup_env_native_multiprocess.sh
@@ -11,5 +11,5 @@ export DOCKER_NAME_TAG=ubuntu:20.04
export PACKAGES="cmake python3"
export DEP_OPTS="MULTIPROCESS=1"
export GOAL="install"
-export BITCOIN_CONFIG=""
+export BITCOIN_CONFIG="--with-boost-process"
export TEST_RUNNER_ENV="BITCOIND=bitcoin-node"
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index 1a0b14b62b..0a09bfe230 100644
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
@@ -11,4 +11,4 @@ export DOCKER_NAME_TAG=ubuntu:16.04 # Use xenial to have one config run the tes
export PACKAGES="python3-zmq clang-3.8 llvm-3.8" # Use clang-3.8 to test C++11 compatibility, see doc/dependencies.md
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8 --with-boost-process"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 6e2ff729a2..f9d869b4fd 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -16,4 +16,4 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.1 v0.18.1 v0.19.1"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
+export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process"
diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh
index 5695c43ec3..b14a46562c 100644
--- a/ci/test/00_setup_env_native_tsan.sh
+++ b/ci/test/00_setup_env_native_tsan.sh
@@ -10,6 +10,6 @@ export CONTAINER_NAME=ci_native_tsan
export DOCKER_NAME_TAG=ubuntu:20.04
export PACKAGES="clang llvm libc++abi-dev libc++-dev python3-zmq"
export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'"
-export TEST_RUNNER_EXTRA="--exclude feature_block --timeout-factor=4" # Increase timeout because sanitizers slow down. Low memory on Travis machines, exclude feature_block.
+export TEST_RUNNER_EXTRA="--exclude feature_block" # Low memory on Travis machines, exclude feature_block.
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++'"
+export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++' --with-boost-process"
diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh
index 0041122f1e..710d9e1011 100644
--- a/ci/test/00_setup_env_native_valgrind.sh
+++ b/ci/test/00_setup_env_native_valgrind.sh
@@ -10,6 +10,6 @@ export CONTAINER_NAME=ci_native_valgrind
export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev"
export USE_VALGRIND=1
export NO_DEPENDS=1
-export TEST_RUNNER_EXTRA="--exclude rpc_bind --timeout-factor=4" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
+export TEST_RUNNER_EXTRA="--exclude rpc_bind" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
export GOAL="install"
export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang CXX=clang++" # TODO enable GUI
diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh
index c180d023de..fe330920d0 100644
--- a/ci/test/00_setup_env_s390x.sh
+++ b/ci/test/00_setup_env_s390x.sh
@@ -22,4 +22,4 @@ export DOCKER_NAME_TAG="debian:buster"
export RUN_UNIT_TESTS=true
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
-export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb"
+export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb --with-boost-process"
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index eb8b870dd6..2b351dff6d 100644
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -13,4 +13,4 @@ export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
export RUN_FUNCTIONAL_TESTS=false
export RUN_SECURITY_TESTS="true"
export GOAL="deploy"
-export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests"
+export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --without-boost-process"
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index 96d44328b8..6c14a3dfbe 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -35,7 +35,7 @@ fi
if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
BEGIN_FOLD functional-tests
- DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib ${TEST_RUNNER_ENV} test/functional/test_runner.py --ci $MAKEJOBS --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --quiet --failfast
+ DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib ${TEST_RUNNER_ENV} test/functional/test_runner.py --ci $MAKEJOBS --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 --timeout-factor=${TEST_RUNNER_TIMEOUT_FACTOR} ${TEST_RUNNER_EXTRA} --quiet --failfast
END_FOLD
fi
diff --git a/configure.ac b/configure.ac
index fb63b9fc68..acd4e0cf6c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -105,6 +105,7 @@ AC_PATH_TOOL(AR, ar)
AC_PATH_TOOL(RANLIB, ranlib)
AC_PATH_TOOL(STRIP, strip)
AC_PATH_TOOL(GCOV, gcov)
+AC_PATH_TOOL(LLVM_COV, llvm-cov)
AC_PATH_PROG(LCOV, lcov)
dnl Python 3.5 is specified in .python-version and should be used if available, see doc/dependencies.md
AC_PATH_PROGS([PYTHON], [python3.5 python3.6 python3.7 python3.8 python3 python])
@@ -376,6 +377,7 @@ if test "x$enable_werror" = "xyes"; then
AX_CHECK_COMPILE_FLAG([-Werror=shadow-field],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=shadow-field"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=switch],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=switch"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=thread-safety],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=thread-safety"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Werror=range-loop-analysis],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=range-loop-analysis"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=unused-variable],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unused-variable"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=date-time],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=date-time"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=return-type],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"],,[[$CXXFLAG_WERROR]])
@@ -404,6 +406,10 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wdate-time],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdate-time"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wconditional-uninitialized],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wconditional-uninitialized"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wsign-compare],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsign-compare"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Wduplicated-branches],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-branches"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Wduplicated-cond],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-cond"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Wlogical-op],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wlogical-op"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Woverloaded-virtual],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Woverloaded-virtual"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wsuggest-override],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsuggest-override"],,[[$CXXFLAG_WERROR]],
[AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
AX_CHECK_COMPILE_FLAG([-Wunreachable-code-loop-increment],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
@@ -598,7 +604,7 @@ case $host in
AC_MSG_ERROR("windres not found")
fi
- CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0601"
+ CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0601 -D_WIN32_IE=0x0501 -DWIN32_LEAN_AND_MEAN"
dnl libtool insists upon adding -nostdlib and a list of objects/libs to link against.
dnl That breaks our ability to build dll's with static libgcc/libstdc++/libssp. Override
@@ -642,6 +648,7 @@ case $host in
BUILD_OS=darwin
;;
*)
+ AC_PATH_TOOL([DSYMUTIL], [dsymutil], dsymutil)
AC_PATH_TOOL([INSTALLNAMETOOL], [install_name_tool], install_name_tool)
AC_PATH_TOOL([OTOOL], [otool], otool)
AC_PATH_PROGS([GENISOIMAGE], [genisoimage mkisofs],genisoimage)
@@ -679,16 +686,37 @@ if test x$use_lcov = xyes; then
if test x$LCOV = x; then
AC_MSG_ERROR("lcov testing requested but lcov not found")
fi
- if test x$GCOV = x; then
- AC_MSG_ERROR("lcov testing requested but gcov not found")
- fi
if test x$PYTHON = x; then
AC_MSG_ERROR("lcov testing requested but python not found")
fi
if test x$GENHTML = x; then
AC_MSG_ERROR("lcov testing requested but genhtml not found")
fi
- LCOV="$LCOV --gcov-tool=$GCOV"
+
+ AC_MSG_CHECKING([whether compiler is Clang])
+ AC_PREPROC_IFELSE([AC_LANG_SOURCE([[
+ #if defined(__clang__) && defined(__llvm__)
+ // Compiler is Clang
+ #else
+ # error Compiler is not Clang
+ #endif
+ ]])],[
+ AC_MSG_RESULT([yes])
+ if test x$LLVM_COV = x; then
+ AC_MSG_ERROR([lcov testing requested but llvm-cov not found])
+ fi
+ COV_TOOL="$LLVM_COV gcov"
+ ],[
+ AC_MSG_RESULT([no])
+ if test x$GCOV = x; then
+ AC_MSG_ERROR([lcov testing requested but gcov not found])
+ fi
+ COV_TOOL="$GCOV"
+ ])
+ AC_SUBST(COV_TOOL)
+ AC_SUBST(COV_TOOL_WRAPPER, "cov_tool_wrapper.sh")
+ LCOV="$LCOV --gcov-tool $(pwd)/$COV_TOOL_WRAPPER"
+
AX_CHECK_LINK_FLAG([[--coverage]], [LDFLAGS="$LDFLAGS --coverage"],
[AC_MSG_ERROR("lcov testing requested but --coverage linker flag does not work")])
AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"],
@@ -784,6 +812,7 @@ if test x$use_hardening != xno; then
AX_CHECK_LINK_FLAG([[-Wl,--high-entropy-va]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,--high-entropy-va"],, [[$LDFLAG_WERROR]])
AX_CHECK_LINK_FLAG([[-Wl,-z,relro]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,relro"],, [[$LDFLAG_WERROR]])
AX_CHECK_LINK_FLAG([[-Wl,-z,now]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,now"],, [[$LDFLAG_WERROR]])
+ AX_CHECK_LINK_FLAG([[-Wl,-z,separate-code]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,separate-code"],, [[$LDFLAG_WERROR]])
AX_CHECK_LINK_FLAG([[-fPIE -pie]], [PIE_FLAGS="-fPIE"; HARDENED_LDFLAGS="$HARDENED_LDFLAGS -pie"],, [[$CXXFLAG_WERROR]])
case $host in
@@ -1132,7 +1161,7 @@ else
BITCOIN_QT_INIT
dnl sets $bitcoin_enable_qt, $bitcoin_enable_qt_test, $bitcoin_enable_qt_dbus
- BITCOIN_QT_CONFIGURE
+ BITCOIN_QT_CONFIGURE([5.5.1])
fi
if test x$enable_wallet != xno; then
@@ -1178,9 +1207,9 @@ fi
if test x$use_boost = xyes; then
dnl Minimum required Boost version
-define(MINIMUM_REQUIRED_BOOST, 1.47.0)
+define(MINIMUM_REQUIRED_BOOST, 1.58.0)
-dnl Check for boost libs
+dnl Check for Boost libs
AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST])
if test x$want_boost = xno; then
AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]])
@@ -1189,30 +1218,15 @@ AX_BOOST_SYSTEM
AX_BOOST_FILESYSTEM
AX_BOOST_THREAD
+dnl Opt-in to boost-process
+AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] )
+
dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic
dnl counter implementations. In 1.63 and later the std::atomic approach is default.
m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro
BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS"
-if test x$use_reduce_exports = xyes; then
- AC_MSG_CHECKING([for working boost reduced exports])
- TEMP_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="$BOOST_CPPFLAGS $CPPFLAGS"
- AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[
- @%:@include <boost/version.hpp>
- ]], [[
- #if BOOST_VERSION >= 104900
- // Everything is okay
- #else
- # error Boost version is too old
- #endif
- ]])],[
- AC_MSG_RESULT(yes)
- ],[
- AC_MSG_ERROR([boost versions < 1.49 are known to be broken with reduced exports. Use --disable-reduce-exports.])
- ])
- CPPFLAGS="$TEMP_CPPFLAGS"
-fi
+BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
fi
if test x$use_reduce_exports = xyes; then
@@ -1226,7 +1240,6 @@ if test x$use_tests = xyes; then
AC_MSG_ERROR(hexdump is required for tests)
fi
-
if test x$use_boost = xyes; then
AX_BOOST_UNIT_TEST_FRAMEWORK
@@ -1252,48 +1265,6 @@ if test x$use_tests = xyes; then
fi
fi
-if test x$use_boost = xyes; then
-
-BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
-
-
-dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums
-dnl using c++98 constructs. Unfortunately, this implementation detail leaked into
-dnl the abi. This was fixed in 1.57.
-
-dnl When building against that installed version using c++11, the headers pick up
-dnl on the native c++11 scoped enum support and enable it, however it will fail to
-dnl link. This can be worked around by disabling c++11 scoped enums if linking will
-dnl fail.
-dnl BOOST_NO_SCOPED_ENUMS was changed to BOOST_NO_CXX11_SCOPED_ENUMS in 1.51.
-
-TEMP_LIBS="$LIBS"
-LIBS="$BOOST_LIBS $LIBS"
-TEMP_CPPFLAGS="$CPPFLAGS"
-CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-AC_MSG_CHECKING([for mismatched boost c++11 scoped enums])
-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
- #include <boost/config.hpp>
- #include <boost/version.hpp>
- #if !defined(BOOST_NO_SCOPED_ENUMS) && !defined(BOOST_NO_CXX11_SCOPED_ENUMS) && BOOST_VERSION < 105700
- #define BOOST_NO_SCOPED_ENUMS
- #define BOOST_NO_CXX11_SCOPED_ENUMS
- #define CHECK
- #endif
- #include <boost/filesystem.hpp>
- ]],[[
- #if defined(CHECK)
- boost::filesystem::copy_file("foo", "bar");
- #else
- choke;
- #endif
- ]])],
- [AC_MSG_RESULT(mismatched); BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_SCOPED_ENUMS -DBOOST_NO_CXX11_SCOPED_ENUMS"], [AC_MSG_RESULT(ok)])
-LIBS="$TEMP_LIBS"
-CPPFLAGS="$TEMP_CPPFLAGS"
-
-fi
-
dnl libevent check
if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then
@@ -1681,6 +1652,7 @@ esac
echo
echo "Options used to compile and link:"
+echo " boost process = $ax_cv_boost_process"
echo " multiprocess = $build_multiprocess"
echo " with wallet = $enable_wallet"
echo " with gui / qt = $bitcoin_enable_qt"
diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py
index 084914f11a..9a555c70bb 100755
--- a/contrib/devtools/copyright_header.py
+++ b/contrib/devtools/copyright_header.py
@@ -22,6 +22,7 @@ EXCLUDE = [
'src/reverse_iterator.h',
'src/test/fuzz/FuzzedDataProvider.h',
'src/tinyformat.h',
+ 'src/bench/nanobench.h',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index ca587ca9e5..dc74de9198 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -40,25 +40,48 @@ def get_ELF_program_headers(executable):
stdout = run_command([READELF_CMD, '-l', '-W', executable])
in_headers = False
- count = 0
headers = []
for line in stdout.splitlines():
if line.startswith('Program Headers:'):
in_headers = True
+ count = 0
if line == '':
in_headers = False
if in_headers:
if count == 1: # header line
- ofs_typ = line.find('Type')
- ofs_offset = line.find('Offset')
- ofs_flags = line.find('Flg')
- ofs_align = line.find('Align')
- if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
+ header = [x.strip() for x in line.split()]
+ ofs_typ = header.index('Type')
+ ofs_flags = header.index('Flg')
+ # assert readelf output is what we expect
+ if ofs_typ == -1 or ofs_flags == -1:
raise ValueError('Cannot parse elfread -lW output')
elif count > 1:
- typ = line[ofs_typ:ofs_offset].rstrip()
- flags = line[ofs_flags:ofs_align].rstrip()
- headers.append((typ, flags))
+ splitline = [x.strip() for x in line.split()]
+ typ = splitline[ofs_typ]
+ if not typ.startswith('[R'): # skip [Requesting ...]
+ splitline = [x.strip() for x in line.split()]
+ flags = splitline[ofs_flags]
+ # check for 'R', ' E'
+ if splitline[ofs_flags + 1] is 'E':
+ flags += ' E'
+ headers.append((typ, flags, []))
+ count += 1
+
+ if line.startswith(' Section to Segment mapping:'):
+ in_mapping = True
+ count = 0
+ if line == '':
+ in_mapping = False
+ if in_mapping:
+ if count == 1: # header line
+ ofs_segment = line.find('Segment')
+ ofs_sections = line.find('Sections...')
+ if ofs_segment == -1 or ofs_sections == -1:
+ raise ValueError('Cannot parse elfread -lW output')
+ elif count > 1:
+ segment = int(line[ofs_segment:ofs_sections].strip())
+ sections = line[ofs_sections:].strip().split()
+ headers[segment][2].extend(sections)
count += 1
return headers
@@ -68,7 +91,7 @@ def check_ELF_NX(executable) -> bool:
'''
have_wx = False
have_gnu_stack = False
- for (typ, flags) in get_ELF_program_headers(executable):
+ for (typ, flags, _) in get_ELF_program_headers(executable):
if typ == 'GNU_STACK':
have_gnu_stack = True
if 'W' in flags and 'E' in flags: # section is both writable and executable
@@ -82,7 +105,7 @@ def check_ELF_RELRO(executable) -> bool:
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
- for (typ, flags) in get_ELF_program_headers(executable):
+ for (typ, flags, _) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program
# header are ignored, the PT_LOAD header determines the effective permissions.
@@ -113,6 +136,62 @@ def check_ELF_Canary(executable) -> bool:
ok = True
return ok
+def check_ELF_separate_code(executable) -> bool:
+ '''
+ Check that sections are appropriately separated in virtual memory,
+ based on their permissions. This checks for missing -Wl,-z,separate-code
+ and potentially other problems.
+ '''
+ EXPECTED_FLAGS = {
+ # Read + execute
+ '.init': 'R E',
+ '.plt': 'R E',
+ '.plt.got': 'R E',
+ '.plt.sec': 'R E',
+ '.text': 'R E',
+ '.fini': 'R E',
+ # Read-only data
+ '.interp': 'R',
+ '.note.gnu.property': 'R',
+ '.note.gnu.build-id': 'R',
+ '.note.ABI-tag': 'R',
+ '.gnu.hash': 'R',
+ '.dynsym': 'R',
+ '.dynstr': 'R',
+ '.gnu.version': 'R',
+ '.gnu.version_r': 'R',
+ '.rela.dyn': 'R',
+ '.rela.plt': 'R',
+ '.rodata': 'R',
+ '.eh_frame_hdr': 'R',
+ '.eh_frame': 'R',
+ '.qtmetadata': 'R',
+ '.gcc_except_table': 'R',
+ '.stapsdt.base': 'R',
+ # Writable data
+ '.init_array': 'RW',
+ '.fini_array': 'RW',
+ '.dynamic': 'RW',
+ '.got': 'RW',
+ '.data': 'RW',
+ '.bss': 'RW',
+ }
+ # For all LOAD program headers get mapping to the list of sections,
+ # and for each section, remember the flags of the associated program header.
+ flags_per_section = {}
+ for (typ, flags, sections) in get_ELF_program_headers(executable):
+ if typ == 'LOAD':
+ for section in sections:
+ assert(section not in flags_per_section)
+ flags_per_section[section] = flags
+ # Spot-check ELF LOAD program header flags per section
+ # If these sections exist, check them against the expected R/W/E flags
+ for (section, flags) in flags_per_section.items():
+ if section in EXPECTED_FLAGS:
+ if EXPECTED_FLAGS[section] != flags:
+ return False
+ return True
+
def get_PE_dll_characteristics(executable) -> int:
'''Get PE DllCharacteristics bits'''
stdout = run_command([OBJDUMP_CMD, '-x', executable])
@@ -225,7 +304,8 @@ CHECKS = {
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
- ('Canary', check_ELF_Canary)
+ ('Canary', check_ELF_Canary),
+ ('separate_code', check_ELF_separate_code),
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
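The new separate_code check can be exercised directly against a locally built binary. A minimal sketch, assuming an in-tree build and that readelf is available on PATH (the binary path is an example):

    # Run all hardening checks, including the new separate_code check,
    # against a freshly built bitcoind.
    contrib/devtools/security-check.py src/bitcoind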
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 629eba4f28..ec2d886653 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -31,15 +31,17 @@ class TestSecurityChecks(unittest.TestCase):
cc = 'gcc'
write_testcode(source)
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE NX RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
+ (1, executable+': failed separate_code'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(0, ''))
def test_PE(self):
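The adjusted expectations can be verified with the self-test harness; a sketch, assuming the relevant compilers (gcc, and a mingw cross compiler for the PE cases) are installed:

    # Compiles small test programs with and without each hardening flag and
    # checks that security-check.py reports the expected failures.
    python3 contrib/devtools/test-security-check.py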
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
index 0ed1e16f7e..e86ff83798 100644
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ b/contrib/gitian-descriptors/gitian-linux.yml
@@ -7,31 +7,29 @@ suites:
architectures:
- "amd64"
packages:
-- "curl"
-- "g++-aarch64-linux-gnu"
-- "g++-8-aarch64-linux-gnu"
-- "gcc-8-aarch64-linux-gnu"
-- "binutils-aarch64-linux-gnu"
-- "g++-arm-linux-gnueabihf"
-- "g++-8-arm-linux-gnueabihf"
-- "gcc-8-arm-linux-gnueabihf"
-- "binutils-arm-linux-gnueabihf"
-- "g++-riscv64-linux-gnu"
-- "g++-8-riscv64-linux-gnu"
-- "gcc-8-riscv64-linux-gnu"
-- "binutils-riscv64-linux-gnu"
-- "g++-8-multilib"
-- "gcc-8-multilib"
-- "binutils-gold"
-- "git"
-- "pkg-config"
+# Common dependencies.
- "autoconf"
-- "libtool"
- "automake"
-- "faketime"
+- "binutils"
- "bsdmainutils"
- "ca-certificates"
+- "curl"
+- "faketime"
+- "git"
+- "libtool"
+- "patch"
+- "pkg-config"
- "python3"
+# Cross compilation HOSTS:
+# - arm-linux-gnueabihf
+- "binutils-arm-linux-gnueabihf"
+- "g++-8-arm-linux-gnueabihf"
+# - aarch64-linux-gnu
+- "binutils-aarch64-linux-gnu"
+- "g++-8-aarch64-linux-gnu"
+# - riscv64-linux-gnu
+- "binutils-riscv64-linux-gnu"
+- "g++-8-riscv64-linux-gnu"
remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
@@ -93,45 +91,11 @@ script: |
create_per-host_faketime_wrappers "2000-01-01 12:00:00"
export PATH=${WRAP_DIR}:${PATH}
- EXTRA_INCLUDES_BASE=$WRAP_DIR/extra_includes
- mkdir -p $EXTRA_INCLUDES_BASE
-
- # x86 needs /usr/include/i386-linux-gnu/asm pointed to /usr/include/x86_64-linux-gnu/asm,
- # but we can't write there. Instead, create a link here and force it to be included in the
- # search paths by wrapping gcc/g++.
-
- mkdir -p $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu
- rm -f $WRAP_DIR/extra_includes/i686-pc-linux-gnu/asm
- ln -s /usr/include/x86_64-linux-gnu/asm $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu/asm
-
- for prog in gcc g++; do
- rm -f ${WRAP_DIR}/${prog}
- cat << EOF > ${WRAP_DIR}/${prog}
- #!/usr/bin/env bash
- REAL="$(which -a ${prog}-8 | grep -v ${WRAP_DIR}/${prog} | head -1)"
- for var in "\$@"
- do
- if [ "\$var" = "-m32" ]; then
- export C_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu"
- export CPLUS_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu"
- break
- fi
- done
- \$REAL \$@
- EOF
- chmod +x ${WRAP_DIR}/${prog}
- done
-
cd bitcoin
BASEPREFIX="${PWD}/depends"
# Build dependencies for each host
for i in $HOSTS; do
- EXTRA_INCLUDES="$EXTRA_INCLUDES_BASE/$i"
- if [ -d "$EXTRA_INCLUDES" ]; then
- export HOST_ID_SALT="$EXTRA_INCLUDES"
- fi
make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}"
- unset HOST_ID_SALT
done
# Faketime for binaries
diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp
index d2652119b4..ece02dc24e 100644
--- a/contrib/valgrind.supp
+++ b/contrib/valgrind.supp
@@ -123,7 +123,6 @@
Memcheck:Cond
...
fun:_ZN5boost10filesystem6detail11unique_pathERKNS0_4pathEPNS_6system10error_codeE
- fun:unique_path
}
{
Suppress boost warning
diff --git a/depends/README.md b/depends/README.md
index 11733024b1..2356e8be59 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -110,9 +110,9 @@ The following can be set when running make: `make FOO=bar`
<dt>BUILD_ID_SALT</dt>
<dd>Optional salt to use when generating build package ids</dd>
<dt>FORCE_USE_SYSTEM_CLANG</dt>
-<dd>(EXPERTS ONLY) When cross-compiling for macOS, use clang found in the
-system's <code>$PATH</code> rather than the default prebuilt release of clang
-from llvm.org</dd>
+<dd>(EXPERTS ONLY) When cross-compiling for macOS, use Clang found in the
+system's <code>$PATH</code> rather than the default prebuilt release of Clang
+from llvm.org. Clang 8 or later is required.</dd>
</dl>
If some packages are not built, for example `make NO_WALLET=1`, the appropriate
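For instance, a depends build that opts into the system toolchain could look like the following sketch; the HOST triple is an example, not a requirement:

    # Build macOS dependencies with a system Clang (8 or later) instead of the
    # prebuilt llvm.org release; adjust HOST for your target.
    make -C depends HOST=x86_64-apple-darwin16 FORCE_USE_SYSTEM_CLANG=1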
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk
index b679438c6f..06cf974f75 100644
--- a/depends/packages/bdb.mk
+++ b/depends/packages/bdb.mk
@@ -4,6 +4,7 @@ $(package)_download_path=https://download.oracle.com/berkeley-db
$(package)_file_name=db-$($(package)_version).NC.tar.gz
$(package)_sha256_hash=12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef
$(package)_build_subdir=build_unix
+$(package)_patches=clang_cxx_11.patch
define $(package)_set_vars
$(package)_config_opts=--disable-shared --enable-cxx --disable-replication --enable-option-checking
@@ -14,8 +15,7 @@ $(package)_cppflags_mingw32=-DUNICODE -D_UNICODE
endef
define $(package)_preprocess_cmds
- sed -i.old 's/__atomic_compare_exchange/__atomic_compare_exchange_db/' dbinc/atomic.h && \
- sed -i.old 's/atomic_init/atomic_init_db/' dbinc/atomic.h mp/mp_region.c mp/mp_mvcc.c mp/mp_fget.c mutex/mut_method.c mutex/mut_tas.c && \
+ patch -p1 < $($(package)_patch_dir)/clang_cxx_11.patch && \
cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub dist
endef
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index 3a7e605b4f..d8bce108b1 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -3,6 +3,7 @@ $(package)_version=1_70_0
$(package)_download_path=https://dl.bintray.com/boostorg/release/1.70.0/source/
$(package)_file_name=boost_$($(package)_version).tar.bz2
$(package)_sha256_hash=430ae8354789de4fd19ee52f3b1f739e1fba576f0aded0897c3c2bc00fb38778
+$(package)_patches=unused_var_in_process.patch
define $(package)_set_vars
$(package)_config_opts_release=variant=release
@@ -32,6 +33,7 @@ $(package)_cxxflags_android=-fPIC
endef
define $(package)_preprocess_cmds
+ patch -p1 < $($(package)_patch_dir)/unused_var_in_process.patch && \
echo "using $($(package)_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$($(package)_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam
endef
diff --git a/depends/packages/fontconfig.mk b/depends/packages/fontconfig.mk
index 128599ba77..0d5f94f380 100644
--- a/depends/packages/fontconfig.mk
+++ b/depends/packages/fontconfig.mk
@@ -4,23 +4,23 @@ $(package)_download_path=https://www.freedesktop.org/software/fontconfig/release
$(package)_file_name=$(package)-$($(package)_version).tar.bz2
$(package)_sha256_hash=b449a3e10c47e1d1c7a6ec6e2016cca73d3bd68fbbd4f0ae5cc6b573f7d6c7f3
$(package)_dependencies=freetype expat
+$(package)_patches=remove_char_width_usage.patch gperf_header_regen.patch
define $(package)_set_vars
$(package)_config_opts=--disable-docs --disable-static --disable-libxml2 --disable-iconv
$(package)_config_opts += --disable-dependency-tracking --enable-option-checking
endef
+define $(package)_preprocess_cmds
+ patch -p1 < $($(package)_patch_dir)/remove_char_width_usage.patch && \
+ patch -p1 < $($(package)_patch_dir)/gperf_header_regen.patch
+endef
+
define $(package)_config_cmds
$($(package)_autoconf)
endef
-# 2.12.1 uses CHAR_WIDTH which is reserved and clashes with some glibc versions, but newer versions of fontconfig
-# have broken makefiles which needlessly attempt to re-generate headers with gperf.
-# Instead, change all uses of CHAR_WIDTH, and disable the rule that forces header re-generation.
-# This can be removed once the upstream build is fixed.
define $(package)_build_cmds
- sed -i 's/CHAR_WIDTH/CHARWIDTH/g' fontconfig/fontconfig.h src/fcobjshash.gperf src/fcobjs.h src/fcobjshash.h && \
- sed -i 's/fcobjshash.h: fcobjshash.gperf/fcobjshash.h:/' src/Makefile && \
$(MAKE)
endef
diff --git a/depends/packages/miniupnpc.mk b/depends/packages/miniupnpc.mk
index fdbe22cda6..49a584e462 100644
--- a/depends/packages/miniupnpc.mk
+++ b/depends/packages/miniupnpc.mk
@@ -3,6 +3,7 @@ $(package)_version=2.0.20180203
$(package)_download_path=https://miniupnp.tuxfamily.org/files/
$(package)_file_name=$(package)-$($(package)_version).tar.gz
$(package)_sha256_hash=90dda8c7563ca6cd4a83e23b3c66dbbea89603a1675bfdb852897c2c9cc220b7
+$(package)_patches=dont_use_wingen.patch
define $(package)_set_vars
$(package)_build_opts=CC="$($(package)_cc)"
@@ -14,7 +15,7 @@ endef
define $(package)_preprocess_cmds
mkdir dll && \
sed -e 's|MINIUPNPC_VERSION_STRING \"version\"|MINIUPNPC_VERSION_STRING \"$($(package)_version)\"|' -e 's|OS/version|$(host)|' miniupnpcstrings.h.in > miniupnpcstrings.h && \
- sed -i.old "s|miniupnpcstrings.h: miniupnpcstrings.h.in wingenminiupnpcstrings|miniupnpcstrings.h: miniupnpcstrings.h.in|" Makefile.mingw
+ patch -p1 < $($(package)_patch_dir)/dont_use_wingen.patch
endef
define $(package)_build_cmds
diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk
index 072d3828a6..d56b636695 100644
--- a/depends/packages/native_cctools.mk
+++ b/depends/packages/native_cctools.mk
@@ -1,9 +1,11 @@
package=native_cctools
-$(package)_version=4da2f3b485bcf4cef526f30c0b8c0bcda99cdbb4
+$(package)_version=55562e4073dea0fbfd0b20e0bf69ffe6390c7f97
$(package)_download_path=https://github.com/tpoechtrager/cctools-port/archive
$(package)_file_name=$($(package)_version).tar.gz
-$(package)_sha256_hash=a2d491c0981cef72fee2b833598f20f42a6c44a7614a61c439bda93d56446fec
+$(package)_sha256_hash=e51995a843533a3dac155dd0c71362dd471597a2d23f13dff194c6285362f875
$(package)_build_subdir=cctools
+$(package)_patches=ld64_disable_threading.patch
+
ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),)
$(package)_clang_version=8.0.0
$(package)_clang_download_path=https://releases.llvm.org/$($(package)_clang_version)
@@ -63,9 +65,10 @@ endef
endif
define $(package)_set_vars
- $(package)_config_opts=--target=$(host) --disable-lto-support --with-libtapi=$($(package)_extract_dir)
+ $(package)_config_opts=--target=$(host) --with-libtapi=$($(package)_extract_dir)
$(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib
ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),)
+ $(package)_config_opts+=--enable-lto-support --with-llvm-config=$($(package)_extract_dir)/toolchain/bin/llvm-config
$(package)_cc=$($(package)_extract_dir)/toolchain/bin/clang
$(package)_cxx=$($(package)_extract_dir)/toolchain/bin/clang++
else
@@ -77,7 +80,7 @@ endef
define $(package)_preprocess_cmds
CC=$($(package)_cc) CXX=$($(package)_cxx) INSTALLPREFIX=$($(package)_extract_dir) ./libtapi/build.sh && \
CC=$($(package)_cc) CXX=$($(package)_cxx) INSTALLPREFIX=$($(package)_extract_dir) ./libtapi/install.sh && \
- sed -i.old "/define HAVE_PTHREADS/d" $($(package)_build_subdir)/ld64/src/ld/InputFiles.h
+ patch -p1 < $($(package)_patch_dir)/ld64_disable_threading.patch
endef
define $(package)_config_cmds
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 500881e442..f560099b6a 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -8,7 +8,10 @@ $(package)_dependencies=zlib
$(package)_linux_dependencies=freetype fontconfig libxcb
$(package)_build_subdir=qtbase
$(package)_qt_libs=corelib network widgets gui plugins testlib
-$(package)_patches=fix_qt_pkgconfig.patch mac-qmake.conf fix_configure_mac.patch fix_no_printer.patch fix_rcc_determinism.patch fix_riscv64_arch.patch xkb-default.patch no-xlib.patch fix_android_qmake_conf.patch fix_android_jni_static.patch
+$(package)_patches=fix_qt_pkgconfig.patch mac-qmake.conf fix_configure_mac.patch fix_no_printer.patch
+$(package)_patches+= fix_rcc_determinism.patch fix_riscv64_arch.patch xkb-default.patch no-xlib.patch
+$(package)_patches+= fix_android_qmake_conf.patch fix_android_jni_static.patch dont_hardcode_pwd.patch
+$(package)_patches+= freetype_back_compat.patch drop_lrelease_dependency.patch
# Update OSX_QT_TRANSLATIONS when this is updated
$(package)_qttranslations_file_name=qttranslations-$($(package)_suffix)
@@ -190,11 +193,10 @@ define $(package)_extract_cmds
endef
define $(package)_preprocess_cmds
- sed -i.old "s|FT_Get_Font_Format|FT_Get_X11_Font_Format|" qtbase/src/platformsupport/fontdatabases/freetype/qfontengine_ft.cpp && \
+ patch -p1 -i $($(package)_patch_dir)/freetype_back_compat.patch && \
sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \
- sed -i.old "/updateqm.depends =/d" qttranslations/translations/translations.pro && \
- sed -i.old "s/src_plugins.depends = src_sql src_network/src_plugins.depends = src_network/" qtbase/src/src.pro && \
- sed -i.old -e 's/if \[ "$$$$XPLATFORM_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/if \[ "$$$$BUILD_ON_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/' -e 's|/bin/pwd|pwd|' qtbase/configure && \
+ patch -p1 -i $($(package)_patch_dir)/drop_lrelease_dependency.patch && \
+ patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch &&\
mkdir -p qtbase/mkspecs/macx-clang-linux &&\
cp -f qtbase/mkspecs/macx-clang/Info.plist.lib qtbase/mkspecs/macx-clang-linux/ &&\
cp -f qtbase/mkspecs/macx-clang/Info.plist.app qtbase/mkspecs/macx-clang-linux/ &&\
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index 6f35ede248..c93aa1a74d 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -3,7 +3,7 @@ $(package)_version=4.3.1
$(package)_download_path=https://github.com/zeromq/libzmq/releases/download/v$($(package)_version)/
$(package)_file_name=$(package)-$($(package)_version).tar.gz
$(package)_sha256_hash=bcbabe1e2c7d0eec4ed612e10b94b112dd5f06fcefa994a0c79a45d835cd21eb
-$(package)_patches=0001-fix-build-with-older-mingw64.patch 0002-disable-pthread_set_name_np.patch
+$(package)_patches=remove_libstd_link.patch
define $(package)_set_vars
$(package)_config_opts=--without-docs --disable-shared --disable-curve --disable-curve-keygen --disable-perf
@@ -16,9 +16,8 @@ define $(package)_set_vars
endef
define $(package)_preprocess_cmds
- patch -p1 < $($(package)_patch_dir)/0001-fix-build-with-older-mingw64.patch && \
- patch -p1 < $($(package)_patch_dir)/0002-disable-pthread_set_name_np.patch && \
- cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config
+ patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config
endef
define $(package)_config_cmds
@@ -34,6 +33,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- sed -i.old "s/ -lstdc++//" lib/pkgconfig/libzmq.pc && \
rm -rf bin share lib/*.la
endef
diff --git a/depends/patches/bdb/clang_cxx_11.patch b/depends/patches/bdb/clang_cxx_11.patch
new file mode 100644
index 0000000000..58f7ddc7d5
--- /dev/null
+++ b/depends/patches/bdb/clang_cxx_11.patch
@@ -0,0 +1,147 @@
+commit 3311d68f11d1697565401eee6efc85c34f022ea7
+Author: fanquake <fanquake@gmail.com>
+Date: Mon Aug 17 20:03:56 2020 +0800
+
+ Fix C++11 compatibility
+
+diff --git a/dbinc/atomic.h b/dbinc/atomic.h
+index 0034dcc..7c11d4a 100644
+--- a/dbinc/atomic.h
++++ b/dbinc/atomic.h
+@@ -70,7 +70,7 @@ typedef struct {
+ * These have no memory barriers; the caller must include them when necessary.
+ */
+ #define atomic_read(p) ((p)->value)
+-#define atomic_init(p, val) ((p)->value = (val))
++#define atomic_init_db(p, val) ((p)->value = (val))
+
+ #ifdef HAVE_ATOMIC_SUPPORT
+
+@@ -144,7 +144,7 @@ typedef LONG volatile *interlocked_val;
+ #define atomic_inc(env, p) __atomic_inc(p)
+ #define atomic_dec(env, p) __atomic_dec(p)
+ #define atomic_compare_exchange(env, p, o, n) \
+- __atomic_compare_exchange((p), (o), (n))
++ __atomic_compare_exchange_db((p), (o), (n))
+ static inline int __atomic_inc(db_atomic_t *p)
+ {
+ int temp;
+@@ -176,7 +176,7 @@ static inline int __atomic_dec(db_atomic_t *p)
+ * http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
+ * which configure could be changed to use.
+ */
+-static inline int __atomic_compare_exchange(
++static inline int __atomic_compare_exchange_db(
+ db_atomic_t *p, atomic_value_t oldval, atomic_value_t newval)
+ {
+ atomic_value_t was;
+@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange(
+ #define atomic_dec(env, p) (--(p)->value)
+ #define atomic_compare_exchange(env, p, oldval, newval) \
+ (DB_ASSERT(env, atomic_read(p) == (oldval)), \
+- atomic_init(p, (newval)), 1)
++ atomic_init_db(p, (newval)), 1)
+ #else
+ #define atomic_inc(env, p) __atomic_inc(env, p)
+ #define atomic_dec(env, p) __atomic_dec(env, p)
+diff --git a/mp/mp_fget.c b/mp/mp_fget.c
+index 5fdee5a..0b75f57 100644
+--- a/mp/mp_fget.c
++++ b/mp/mp_fget.c
+@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */
+
+ /* Initialize enough so we can call __memp_bhfree. */
+ alloc_bhp->flags = 0;
+- atomic_init(&alloc_bhp->ref, 1);
++ atomic_init_db(&alloc_bhp->ref, 1);
+ #ifdef DIAGNOSTIC
+ if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
+ __db_errx(env,
+@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. */
+ MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize,
+ PROT_READ);
+
+- atomic_init(&alloc_bhp->ref, 1);
++ atomic_init_db(&alloc_bhp->ref, 1);
+ MUTEX_LOCK(env, alloc_bhp->mtx_buf);
+ alloc_bhp->priority = bhp->priority;
+ alloc_bhp->pgno = bhp->pgno;
+diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c
+index 34467d2..f05aa0c 100644
+--- a/mp/mp_mvcc.c
++++ b/mp/mp_mvcc.c
+@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp)
+ #else
+ memcpy(frozen_bhp, bhp, SSZA(BH, buf));
+ #endif
+- atomic_init(&frozen_bhp->ref, 0);
++ atomic_init_db(&frozen_bhp->ref, 0);
+ if (mutex != MUTEX_INVALID)
+ frozen_bhp->mtx_buf = mutex;
+ else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH,
+@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp)
+ #endif
+ alloc_bhp->mtx_buf = mutex;
+ MUTEX_LOCK(env, alloc_bhp->mtx_buf);
+- atomic_init(&alloc_bhp->ref, 1);
++ atomic_init_db(&alloc_bhp->ref, 1);
+ F_CLR(alloc_bhp, BH_FROZEN);
+ }
+
+diff --git a/mp/mp_region.c b/mp/mp_region.c
+index e6cece9..ddbe906 100644
+--- a/mp/mp_region.c
++++ b/mp/mp_region.c
+@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
+ MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0)
+ return (ret);
+ SH_TAILQ_INIT(&htab[i].hash_bucket);
+- atomic_init(&htab[i].hash_page_dirty, 0);
++ atomic_init_db(&htab[i].hash_page_dirty, 0);
+ }
+
+ /*
+@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
+ hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? MUTEX_INVALID :
+ mtx_base + i;
+ SH_TAILQ_INIT(&hp->hash_bucket);
+- atomic_init(&hp->hash_page_dirty, 0);
++ atomic_init_db(&hp->hash_page_dirty, 0);
+ #ifdef HAVE_STATISTICS
+ hp->hash_io_wait = 0;
+ hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0;
+diff --git a/mutex/mut_method.c b/mutex/mut_method.c
+index 2588763..5c6d516 100644
+--- a/mutex/mut_method.c
++++ b/mutex/mut_method.c
+@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval)
+ MUTEX_LOCK(env, mtx);
+ ret = atomic_read(v) == oldval;
+ if (ret)
+- atomic_init(v, newval);
++ atomic_init_db(v, newval);
+ MUTEX_UNLOCK(env, mtx);
+
+ return (ret);
+diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c
+index f3922e0..e40fcdf 100644
+--- a/mutex/mut_tas.c
++++ b/mutex/mut_tas.c
+@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags)
+
+ #ifdef HAVE_SHARED_LATCHES
+ if (F_ISSET(mutexp, DB_MUTEX_SHARED))
+- atomic_init(&mutexp->sharecount, 0);
++ atomic_init_db(&mutexp->sharecount, 0);
+ else
+ #endif
+ if (MUTEX_INIT(&mutexp->tas)) {
+@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex)
+ F_CLR(mutexp, DB_MUTEX_LOCKED);
+ /* Flush flag update before zeroing count */
+ MEMBAR_EXIT();
+- atomic_init(&mutexp->sharecount, 0);
++ atomic_init_db(&mutexp->sharecount, 0);
+ } else {
+ DB_ASSERT(env, sharecount > 0);
+ MEMBAR_EXIT();
diff --git a/depends/patches/boost/unused_var_in_process.patch b/depends/patches/boost/unused_var_in_process.patch
new file mode 100644
index 0000000000..722f7bb5ea
--- /dev/null
+++ b/depends/patches/boost/unused_var_in_process.patch
@@ -0,0 +1,22 @@
+commit dbd95cdaefdea95307d004f019a1c394cf9389f0
+Author: fanquake <fanquake@gmail.com>
+Date: Mon Aug 17 20:15:17 2020 +0800
+
+ Remove unused variable in Boost Process
+
+ This causes issues with our linters / CI.
+
+ Can be removed once depends Boost is 1.71.0 or later.
+
+diff --git a/boost/process/detail/posix/wait_group.hpp b/boost/process/detail/posix/wait_group.hpp
+index 9dc249803..2502d9772 100644
+--- a/boost/process/detail/posix/wait_group.hpp
++++ b/boost/process/detail/posix/wait_group.hpp
+@@ -137,7 +137,6 @@ inline bool wait_until(
+
+ do
+ {
+- int ret_sig = 0;
+ int status;
+ if ((::waitpid(timeout_pid, &status, WNOHANG) != 0)
+ && (WIFEXITED(status) || WIFSIGNALED(status)))
diff --git a/depends/patches/fontconfig/gperf_header_regen.patch b/depends/patches/fontconfig/gperf_header_regen.patch
new file mode 100644
index 0000000000..7401b83d84
--- /dev/null
+++ b/depends/patches/fontconfig/gperf_header_regen.patch
@@ -0,0 +1,24 @@
+commit 7b6eb33ecd88768b28c67ce5d2d68a7eed5936b6
+Author: fanquake <fanquake@gmail.com>
+Date: Tue Aug 25 14:34:53 2020 +0800
+
+    Remove rule that causes inadvertent header regeneration
+
+ Otherwise the makefile will needlessly attempt to re-generate the
+ headers with gperf. This can be dropped once the upstream build is fixed.
+
+ See #10851.
+
+diff --git a/src/Makefile.in b/src/Makefile.in
+index f4626ad..4ae1b00 100644
+--- a/src/Makefile.in
++++ b/src/Makefile.in
+@@ -903,7 +903,7 @@ fcobjshash.gperf: fcobjshash.gperf.h fcobjs.h
+ ' - > $@.tmp && \
+ mv -f $@.tmp $@ || ( $(RM) $@.tmp && false )
+
+-fcobjshash.h: fcobjshash.gperf
++fcobjshash.h:
+ $(AM_V_GEN) $(GPERF) -m 100 $< > $@.tmp && \
+ mv -f $@.tmp $@ || ( $(RM) $@.tmp && false )
+
diff --git a/depends/patches/fontconfig/remove_char_width_usage.patch b/depends/patches/fontconfig/remove_char_width_usage.patch
new file mode 100644
index 0000000000..9f69081890
--- /dev/null
+++ b/depends/patches/fontconfig/remove_char_width_usage.patch
@@ -0,0 +1,62 @@
+commit 28165a9b078583dc8e9e5c344510e37582284cef
+Author: fanquake <fanquake@gmail.com>
+Date: Mon Aug 17 20:35:42 2020 +0800
+
+ Remove usage of CHAR_WIDTH
+
+    CHAR_WIDTH is a reserved identifier and clashes with glibc 2.25+.
+
+ See #10851.
+
+diff --git a/fontconfig/fontconfig.h b/fontconfig/fontconfig.h
+index 5c72b22..843c532 100644
+--- a/fontconfig/fontconfig.h
++++ b/fontconfig/fontconfig.h
+@@ -128,7 +128,7 @@ typedef int FcBool;
+ #define FC_USER_CACHE_FILE ".fonts.cache-" FC_CACHE_VERSION
+
+ /* Adjust outline rasterizer */
+-#define FC_CHAR_WIDTH "charwidth" /* Int */
++#define FC_CHARWIDTH "charwidth" /* Int */
+ #define FC_CHAR_HEIGHT "charheight"/* Int */
+ #define FC_MATRIX "matrix" /* FcMatrix */
+
+diff --git a/src/fcobjs.h b/src/fcobjs.h
+index 1fc4f65..d27864b 100644
+--- a/src/fcobjs.h
++++ b/src/fcobjs.h
+@@ -51,7 +51,7 @@ FC_OBJECT (DPI, FcTypeDouble, NULL)
+ FC_OBJECT (RGBA, FcTypeInteger, NULL)
+ FC_OBJECT (SCALE, FcTypeDouble, NULL)
+ FC_OBJECT (MINSPACE, FcTypeBool, NULL)
+-FC_OBJECT (CHAR_WIDTH, FcTypeInteger, NULL)
++FC_OBJECT (CHARWIDTH, FcTypeInteger, NULL)
+ FC_OBJECT (CHAR_HEIGHT, FcTypeInteger, NULL)
+ FC_OBJECT (MATRIX, FcTypeMatrix, NULL)
+ FC_OBJECT (CHARSET, FcTypeCharSet, FcCompareCharSet)
+diff --git a/src/fcobjshash.gperf b/src/fcobjshash.gperf
+index 80a0237..eb4ad84 100644
+--- a/src/fcobjshash.gperf
++++ b/src/fcobjshash.gperf
+@@ -44,7 +44,7 @@ int id;
+ "rgba",FC_RGBA_OBJECT
+ "scale",FC_SCALE_OBJECT
+ "minspace",FC_MINSPACE_OBJECT
+-"charwidth",FC_CHAR_WIDTH_OBJECT
++"charwidth",FC_CHARWIDTH_OBJECT
+ "charheight",FC_CHAR_HEIGHT_OBJECT
+ "matrix",FC_MATRIX_OBJECT
+ "charset",FC_CHARSET_OBJECT
+diff --git a/src/fcobjshash.h b/src/fcobjshash.h
+index 5a4d1ea..4e66bb0 100644
+--- a/src/fcobjshash.h
++++ b/src/fcobjshash.h
+@@ -284,7 +284,7 @@ FcObjectTypeLookup (register const char *str, register unsigned int len)
+ {(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str43,FC_CHARSET_OBJECT},
+ {-1},
+ #line 47 "fcobjshash.gperf"
+- {(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str45,FC_CHAR_WIDTH_OBJECT},
++ {(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str45,FC_CHARWIDTH_OBJECT},
+ #line 48 "fcobjshash.gperf"
+ {(int)(long)&((struct FcObjectTypeNamePool_t *)0)->FcObjectTypeNamePool_str46,FC_CHAR_HEIGHT_OBJECT},
+ #line 55 "fcobjshash.gperf"
diff --git a/depends/patches/miniupnpc/dont_use_wingen.patch b/depends/patches/miniupnpc/dont_use_wingen.patch
new file mode 100644
index 0000000000..a1cc9b50d1
--- /dev/null
+++ b/depends/patches/miniupnpc/dont_use_wingen.patch
@@ -0,0 +1,26 @@
+commit e8077044df239bcf0d9e9980b0e1afb9f1f5c446
+Author: fanquake <fanquake@gmail.com>
+Date: Tue Aug 18 20:50:19 2020 +0800
+
+ Don't use wingenminiupnpcstrings when generating miniupnpcstrings.h
+
+ The wingenminiupnpcstrings tool is used on Windows to generate version
+ information. This information is irrelevant for us, and trying to use
+ wingenminiupnpcstrings would cause builds to fail, so just don't use it.
+
+ We should be able to drop this once we are using 2.1 or later. See
+ upstream commit: 9663c55c61408fdcc39a82987d2243f816b22932.
+
+diff --git a/Makefile.mingw b/Makefile.mingw
+index 574720e..fcc17bb 100644
+--- a/Makefile.mingw
++++ b/Makefile.mingw
+@@ -74,7 +74,7 @@ wingenminiupnpcstrings: wingenminiupnpcstrings.o
+
+ wingenminiupnpcstrings.o: wingenminiupnpcstrings.c
+
+-miniupnpcstrings.h: miniupnpcstrings.h.in wingenminiupnpcstrings
++miniupnpcstrings.h: miniupnpcstrings.h.in
+ wingenminiupnpcstrings $< $@
+
+ minixml.o: minixml.c minixml.h
diff --git a/depends/patches/native_cctools/ld64_disable_threading.patch b/depends/patches/native_cctools/ld64_disable_threading.patch
new file mode 100644
index 0000000000..d6c58c102f
--- /dev/null
+++ b/depends/patches/native_cctools/ld64_disable_threading.patch
@@ -0,0 +1,26 @@
+commit 584668415039adeed073decee7e04de28248afd3
+Author: fanquake <fanquake@gmail.com>
+Date: Tue Aug 18 01:20:24 2020 +0000
+
+ Disable threading to fix non-determinism
+
+ A bug in the file parser can cause dependencies to be calculated
+ differently based on which files have already been parsed. This is more
+ likely to occur on systems with more CPUs.
+
+    Just disable threading for now. There is no noticeable slowdown.
+
+ See #9891.
+
+diff --git a/cctools/ld64/src/ld/InputFiles.h b/cctools/ld64/src/ld/InputFiles.h
+index ef9c756..90a70b6 100644
+--- a/cctools/ld64/src/ld/InputFiles.h
++++ b/cctools/ld64/src/ld/InputFiles.h
+@@ -25,7 +25,6 @@
+ #ifndef __INPUT_FILES_H__
+ #define __INPUT_FILES_H__
+
+-#define HAVE_PTHREADS 1
+
+ #include <stdlib.h>
+ #include <sys/types.h>
diff --git a/depends/patches/qt/dont_hardcode_pwd.patch b/depends/patches/qt/dont_hardcode_pwd.patch
new file mode 100644
index 0000000000..a74e9cb098
--- /dev/null
+++ b/depends/patches/qt/dont_hardcode_pwd.patch
@@ -0,0 +1,27 @@
+commit 0e953866fc4672486e29e1ba6d83b4207e7b2f0b
+Author: fanquake <fanquake@gmail.com>
+Date: Tue Aug 18 15:09:06 2020 +0800
+
+ Don't hardcode pwd path
+
+    Let a man use his builtins if he wants to! This also removes the unnecessary
+    assumption that pwd lives at /bin/pwd.
+
+ See #15581.
+
+diff --git a/qtbase/configure b/qtbase/configure
+index 08b49a8d..faea5b55 100755
+--- a/qtbase/configure
++++ b/qtbase/configure
+@@ -36,9 +36,9 @@
+ relconf=`basename $0`
+ # the directory of this script is the "source tree"
+ relpath=`dirname $0`
+-relpath=`(cd "$relpath"; /bin/pwd)`
++relpath=`(cd "$relpath"; pwd)`
+ # the current directory is the "build tree" or "object tree"
+-outpath=`/bin/pwd`
++outpath=`pwd`
+
+ WHICH="which"
+
diff --git a/depends/patches/qt/drop_lrelease_dependency.patch b/depends/patches/qt/drop_lrelease_dependency.patch
new file mode 100644
index 0000000000..f6b2c9fc80
--- /dev/null
+++ b/depends/patches/qt/drop_lrelease_dependency.patch
@@ -0,0 +1,20 @@
+commit 67b3ed7406e1d0762188dbad2c44a06824ba0778
+Author: fanquake <fanquake@gmail.com>
+Date: Tue Aug 18 15:24:01 2020 +0800
+
+ Drop dependency on lrelease
+
+    Qt's build system insists on using the installed lrelease, but gets
+ confused about how to find it. Since we manually control the build
+ order, just drop the dependency.
+
+ See #9469
+
+diff --git a/qttranslations/translations/translations.pro b/qttranslations/translations/translations.pro
+index 694544c..eff339d 100644
+--- a/qttranslations/translations/translations.pro
++++ b/qttranslations/translations/translations.pro
+@@ -109,3 +109,2 @@ updateqm.commands = $$LRELEASE ${QMAKE_FILE_IN} -qm ${QMAKE_FILE_OUT}
+ silent:updateqm.commands = @echo lrelease ${QMAKE_FILE_IN} && $$updateqm.commands
+-updateqm.depends = $$LRELEASE_EXE
+ updateqm.name = LRELEASE ${QMAKE_FILE_IN}
diff --git a/depends/patches/qt/freetype_back_compat.patch b/depends/patches/qt/freetype_back_compat.patch
new file mode 100644
index 0000000000..b0f1c98aa6
--- /dev/null
+++ b/depends/patches/qt/freetype_back_compat.patch
@@ -0,0 +1,28 @@
+commit 14bc77db61bf9d56f9b6c8b84aa02573605c19c6
+Author: fanquake <fanquake@gmail.com>
+Date: Tue Aug 18 15:15:08 2020 +0800
+
+ Fix backwards compatibility with older Freetype versions at runtime
+
+ A few years ago, libfreetype introduced FT_Get_Font_Format() as an alias
+    for FT_Get_X11_Font_Format(), but FT_Get_X11_Font_Format() was kept for ABI
+ backwards-compatibility.
+
+ Qt 5.9 introduced a call to FT_Get_Font_Format(). Replace it with FT_Get_X11_Font_Format()
+    in order to remain compatible with older freetype, which is still used by e.g. Ubuntu Trusty.
+
+ See #14348.
+
+diff --git a/qtbase/src/platformsupport/fontdatabases/freetype/qfontengine_ft.cpp b/qtbase/src/platformsupport/fontdatabases/freetype/qfontengine_ft.cpp
+index 3f543755..8ecc1c8c 100644
+--- a/qtbase/src/platformsupport/fontdatabases/freetype/qfontengine_ft.cpp
++++ b/qtbase/src/platformsupport/fontdatabases/freetype/qfontengine_ft.cpp
+@@ -898,7 +898,7 @@ bool QFontEngineFT::init(FaceId faceId, bool antialias, GlyphFormat format,
+ }
+ }
+ #if defined(FT_FONT_FORMATS_H)
+- const char *fmt = FT_Get_Font_Format(face);
++ const char *fmt = FT_Get_X11_Font_Format(face);
+ if (fmt && qstrncmp(fmt, "CFF", 4) == 0) {
+ FT_Bool no_stem_darkening = true;
+ FT_Error err = FT_Property_Get(qt_getFreetype(), "cff", "no-stem-darkening", &no_stem_darkening);
diff --git a/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch b/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch
deleted file mode 100644
index b911ac5672..0000000000
--- a/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From f6866b0f166ad168618aae64c7fbee8775d3eb23 Mon Sep 17 00:00:00 2001
-From: mruddy <6440430+mruddy@users.noreply.github.com>
-Date: Sat, 30 Jun 2018 09:44:58 -0400
-Subject: [PATCH] fix build with older mingw64
-
----
- src/windows.hpp | 7 +++++++
- 1 file changed, 7 insertions(+)
-
-diff --git a/src/windows.hpp b/src/windows.hpp
-index 6c3839fd..2c32ec79 100644
---- a/src/windows.hpp
-+++ b/src/windows.hpp
-@@ -58,6 +58,13 @@
- #include <winsock2.h>
- #include <windows.h>
- #include <mswsock.h>
-+
-+#if defined __MINGW64_VERSION_MAJOR && __MINGW64_VERSION_MAJOR < 4
-+// Workaround for mingw-w64 < v4.0 which did not include ws2ipdef.h in iphlpapi.h.
-+// Fixed in mingw-w64 by 9bd8fe9148924840d315b4c915dd099955ea89d1.
-+#include <ws2def.h>
-+#include <ws2ipdef.h>
-+#endif
- #include <iphlpapi.h>
-
- #if !defined __MINGW32__
---
-2.17.1
-
diff --git a/depends/patches/zeromq/0002-disable-pthread_set_name_np.patch b/depends/patches/zeromq/0002-disable-pthread_set_name_np.patch
deleted file mode 100644
index b1c6f78a70..0000000000
--- a/depends/patches/zeromq/0002-disable-pthread_set_name_np.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From c9bbdd6581d07acfe8971e4bcebe278a3676cf03 Mon Sep 17 00:00:00 2001
-From: mruddy <6440430+mruddy@users.noreply.github.com>
-Date: Sat, 30 Jun 2018 09:57:18 -0400
-Subject: [PATCH] disable pthread_set_name_np
-
-pthread_set_name_np adds a Glibc requirement on >= 2.12.
----
- src/thread.cpp | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/src/thread.cpp b/src/thread.cpp
-index a1086b0c..9943f354 100644
---- a/src/thread.cpp
-+++ b/src/thread.cpp
-@@ -308,7 +308,7 @@ void zmq::thread_t::setThreadName (const char *name_)
- */
- if (!name_)
- return;
--
-+#if 0
- #if defined(ZMQ_HAVE_PTHREAD_SETNAME_1)
- int rc = pthread_setname_np (name_);
- if (rc)
-@@ -324,6 +324,8 @@ void zmq::thread_t::setThreadName (const char *name_)
- #elif defined(ZMQ_HAVE_PTHREAD_SET_NAME)
- pthread_set_name_np (_descriptor, name_);
- #endif
-+#endif
-+ return;
- }
-
- #endif
---
-2.17.1
-
diff --git a/depends/patches/zeromq/remove_libstd_link.patch b/depends/patches/zeromq/remove_libstd_link.patch
new file mode 100644
index 0000000000..ddf91e6abf
--- /dev/null
+++ b/depends/patches/zeromq/remove_libstd_link.patch
@@ -0,0 +1,25 @@
+commit 47d4cd12a2c051815ddda78adebdb3923b260d8a
+Author: fanquake <fanquake@gmail.com>
+Date: Tue Aug 18 14:45:40 2020 +0800
+
+ Remove needless linking against libstdc++
+
+ This is broken for a number of reasons, including:
+    - g++ understands "-static-libstdc++ -lstdc++" to mean "link against
+ whatever libstdc++ exists, probably shared", which in itself is buggy.
+ - another stdlib (libc++ for example) may be in use
+
+ See #11981.
+
+diff --git a/src/libzmq.pc.in b/src/libzmq.pc.in
+index 233bc3a..3c2bf0d 100644
+--- a/src/libzmq.pc.in
++++ b/src/libzmq.pc.in
+@@ -7,6 +7,6 @@ Name: libzmq
+ Description: 0MQ c++ library
+ Version: @VERSION@
+ Libs: -L${libdir} -lzmq
+-Libs.private: -lstdc++ @pkg_config_libs_private@
++Libs.private: @pkg_config_libs_private@
+ Requires.private: @pkg_config_names_private@
+ Cflags: -I${includedir} @pkg_config_defines@
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 7e307ab7c8..2f79168212 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -2073,7 +2073,7 @@ INCLUDE_FILE_PATTERNS =
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-PREDEFINED =
+PREDEFINED = HAVE_BOOST_PROCESS
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md
index a0cfe84a3e..40d8e330e2 100644
--- a/doc/JSON-RPC-interface.md
+++ b/doc/JSON-RPC-interface.md
@@ -60,7 +60,7 @@ RPC interface will be abused.
are sent as clear text that can be read by anyone on your network
path. Additionally, the RPC interface has not been hardened to
withstand arbitrary Internet traffic, so changing the above settings
- to expose it to the Internet (even using something like a Tor hidden
+ to expose it to the Internet (even using something like a Tor onion
service) could expose you to unconsidered vulnerabilities. See
`bitcoind -help` for more information about these settings and other
settings described in this document.
diff --git a/doc/benchmarking.md b/doc/benchmarking.md
index b1a06009b5..b6cd86eafe 100644
--- a/doc/benchmarking.md
+++ b/doc/benchmarking.md
@@ -19,8 +19,10 @@ After compiling bitcoin-core, the benchmarks can be run with:
The output will look similar to:
```
-# Benchmark, evals, iterations, total, min, max, median
-AssembleBlock, 5, 700, 1.79954, 0.000510913, 0.000517018, 0.000514497
+| ns/byte | byte/s | error % | benchmark
+|--------------------:|--------------------:|--------:|:----------------------------------------------
+| 64.13 | 15,592,356.01 | 0.1% | `Base58CheckEncode`
+| 24.56 | 40,722,672.68 | 0.2% | `Base58Decode`
...
```
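A single benchmark can be selected from the new output by name; a sketch, assuming the `-filter` option of `bench_bitcoin` accepts a regular expression over the benchmark names shown above:

    # Run only the Base58 benchmarks (the regex is an example).
    src/bench/bench_bitcoin -filter='Base58.*'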
diff --git a/doc/bips.md b/doc/bips.md
index b96862297f..456fea7a5a 100644
--- a/doc/bips.md
+++ b/doc/bips.md
@@ -1,4 +1,4 @@
-BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.19.0**):
+BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**):
* [`BIP 9`](https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki): The changes allowing multiple soft-forks to be deployed in parallel have been implemented since **v0.12.1** ([PR #7575](https://github.com/bitcoin/bitcoin/pull/7575))
* [`BIP 11`](https://github.com/bitcoin/bips/blob/master/bip-0011.mediawiki): Multisig outputs are standard since **v0.6.0** ([PR #669](https://github.com/bitcoin/bitcoin/pull/669)).
@@ -42,3 +42,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.19.0**):
* [`BIP 173`](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki): Bech32 addresses for native Segregated Witness outputs are supported as of **v0.16.0** ([PR 11167](https://github.com/bitcoin/bitcoin/pull/11167)). Bech32 addresses are generated by default as of **v0.20.0** ([PR 16884](https://github.com/bitcoin/bitcoin/pull/16884)).
* [`BIP 174`](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki): RPCs to operate on Partially Signed Bitcoin Transactions (PSBT) are present as of **v0.17.0** ([PR 13557](https://github.com/bitcoin/bitcoin/pull/13557)).
* [`BIP 176`](https://github.com/bitcoin/bips/blob/master/bip-0176.mediawiki): Bits Denomination [QT only] is supported as of **v0.16.0** ([PR 12035](https://github.com/bitcoin/bitcoin/pull/12035)).
+* [`BIP 339`](https://github.com/bitcoin/bips/blob/master/bip-0339.mediawiki): Relay of transactions by wtxid is supported as of **v0.21.0** ([PR 18044](https://github.com/bitcoin/bitcoin/pull/18044)).
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index 53c647ae34..584ee43d48 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -1,6 +1,6 @@
OpenBSD build guide
======================
-(updated for OpenBSD 6.4)
+(updated for OpenBSD 6.7)
This guide describes how to build bitcoind and command-line utilities on OpenBSD.
@@ -15,7 +15,7 @@ Run the following as root to install the base dependencies for building:
pkg_add git gmake libevent libtool boost
pkg_add autoconf # (select highest version, e.g. 2.69)
pkg_add automake # (select highest version, e.g. 1.16)
-pkg_add python # (select highest version, e.g. 3.6)
+pkg_add python # (select highest version, e.g. 3.8)
git clone https://github.com/bitcoin/bitcoin.git
```
@@ -23,10 +23,10 @@ git clone https://github.com/bitcoin/bitcoin.git
See [dependencies.md](dependencies.md) for a complete overview.
**Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is
-part of the base image, and while building it is necessary to make sure that this
-compiler is used and not ancient g++ 4.2.1. This is done by appending
-`CC=cc CXX=c++` to configuration commands. Mixing different compilers
-within the same executable will result in linker errors.
+part of the base image, and while building it is necessary to make sure that
+this compiler is used and not ancient g++ 4.2.1. This is done by appending
+`CC=cc CC_FOR_BUILD=cc CXX=c++` to configuration commands. Mixing different
+compilers within the same executable will result in errors.
### Building BerkeleyDB
@@ -77,7 +77,7 @@ To configure with wallet:
To configure without wallet:
```bash
-./configure --disable-wallet --with-gui=no CC=cc CXX=c++ MAKE=gmake
+./configure --disable-wallet --with-gui=no CC=cc CC_FOR_BUILD=cc CXX=c++ MAKE=gmake
```
Build and run the tests:
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 0cb5311e8b..92dea65309 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -6,7 +6,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| Dependency | Version used | Minimum required | CVEs | Shared | [Bundled Qt library](https://doc.qt.io/qt-5/configure-options.html#third-party-libraries) |
| --- | --- | --- | --- | --- | --- |
| Berkeley DB | [4.8.30](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.x | No | | |
-| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.47.0](https://github.com/bitcoin/bitcoin/pull/8920) | No | | |
+| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.58.0](https://github.com/bitcoin/bitcoin/pull/19667) | No | | |
| Clang | | [3.3+](https://releases.llvm.org/download.html) (C++11 support) | | | |
| Expat | [2.2.7](https://libexpat.github.io/) | | No | Yes | |
| fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | |
diff --git a/doc/files.md b/doc/files.md
index 5475826329..52e094a60b 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -56,7 +56,7 @@ Subdirectory | File(s) | Description
`./` | `fee_estimates.dat` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation
`./` | `guisettings.ini.bak` | Backup of former [GUI settings](#gui-settings) after `-resetguisettings` option is used
`./` | `mempool.dat` | Dump of the mempool's transactions
-`./` | `onion_private_key` | Cached Tor hidden service private key for `-listenonion` option
+`./` | `onion_private_key` | Cached Tor onion service private key for `-listenonion` option
`./` | `peers.dat` | Peer IP address database (custom format)
`./` | `settings.json` | Read-write settings set through GUI or RPC interfaces, augmenting manual settings from [bitcoin.conf](bitcoin-conf.md). File is created automatically if read-write settings storage is not disabled with `-nosettings` option. Path can be specified with `-settings` option
`./` | `.cookie` | Session RPC authentication cookie; if used, created at start and deleted on shutdown; can be specified by `-rpccookiefile` option
diff --git a/doc/release-notes-14582.md b/doc/release-notes-14582.md
new file mode 100644
index 0000000000..28b0abecd7
--- /dev/null
+++ b/doc/release-notes-14582.md
@@ -0,0 +1,14 @@
+Configuration
+-------------
+
+A new configuration flag `-maxapsfee` has been added, which sets the max allowed
+avoid partial spends (APS) fee. It defaults to 0 (i.e. fee is the same with
+and without APS). Setting it to -1 will disable APS, unless `-avoidpartialspends`
+is set. (#14582)
+
+Wallet
+------
+
+The wallet will now avoid partial spends (APS) by default, if this does not result
+in a difference in fees compared to the non-APS variant. The allowed fee threshold
+can be adjusted using the new `-maxapsfee` configuration option. (#14582)
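As an illustration only (the values are examples taken from the description above, not recommendations), the option can be supplied on the command line or in bitcoin.conf:

    # Allow avoiding partial spends only when it adds no fee at all (the default):
    bitcoind -maxapsfee=0
    # Disable APS unless -avoidpartialspends is also set:
    bitcoind -maxapsfee=-1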
diff --git a/doc/release-notes-15937.md b/doc/release-notes-15937.md
new file mode 100644
index 0000000000..ec7d355dfa
--- /dev/null
+++ b/doc/release-notes-15937.md
@@ -0,0 +1,12 @@
+Configuration
+-------------
+
+The `createwallet`, `loadwallet`, and `unloadwallet` RPCs now accept
+`load_on_startup` options that modify bitcoin's dynamic configuration in
+`\<datadir\>/settings.json`, and can add or remove a wallet from the list of
+wallets automatically loaded at startup. Unless these options are explicitly
+set to true or false, the load on startup wallet list is not modified, so this
+change is backwards compatible.
+
+In the future, the GUI will start updating the same startup wallet list as the
+RPCs to automatically reopen wallets previously opened in the GUI.
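A minimal sketch of the new option via bitcoin-cli; the wallet name is illustrative:

    # Load a wallet now and add it to the startup list:
    bitcoin-cli -named loadwallet filename="mywallet" load_on_startup=true
    # Unload it and remove it from the startup list:
    bitcoin-cli -named unloadwallet wallet_name="mywallet" load_on_startup=false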
diff --git a/doc/release-notes-19731.md b/doc/release-notes-19731.md
new file mode 100644
index 0000000000..abe38e06af
--- /dev/null
+++ b/doc/release-notes-19731.md
@@ -0,0 +1,6 @@
+Updated RPCs
+------------
+
+- The `getpeerinfo` RPC now has additional `last_block` and `last_transaction`
+ fields that return the UNIX epoch time of the last block and the last valid
+ transaction received from each peer. (#19731)
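The new fields appear in each entry of the ordinary `getpeerinfo` result; a sketch:

    # Each peer entry now includes last_block and last_transaction timestamps
    # (UNIX epoch seconds).
    bitcoin-cli getpeerinfo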
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 23983dcd7b..a8bd68370d 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -115,6 +115,10 @@ Changes to Wallet or GUI related RPCs can be found in the GUI or Wallet section
New RPCs
--------
+- The `getindexinfo` RPC returns the actively running indices of the node,
+ including their current sync status and height. It also accepts an `index_name`
+ to specify returning only the status of that index. (#19550)
+
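A short usage sketch for the new RPC; the index name shown is an example and depends on which indexes are enabled:

    # Status of all running indexes:
    bitcoin-cli getindexinfo
    # Status of a single index:
    bitcoin-cli getindexinfo txindex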
Build System
------------
@@ -135,6 +139,10 @@ Updated settings
in future releases. Refer to the help of the affected settings `-whitebind`
and `-whitelist` for more details. (#19191)
+- Netmasks that contain 1-bits after 0-bits (the 1-bits are not contiguous on
+ the left side, e.g. 255.0.255.255) are no longer accepted. They are invalid
+ according to RFC 4632.
+
Changes to Wallet or GUI related settings can be found in the GUI or Wallet section below.
Tools and Utilities
diff --git a/doc/release-notes/release-notes-0.20.1.md b/doc/release-notes/release-notes-0.20.1.md
new file mode 100644
index 0000000000..9fbb29cb82
--- /dev/null
+++ b/doc/release-notes/release-notes-0.20.1.md
@@ -0,0 +1,158 @@
+0.20.1 Release Notes
+====================
+
+Bitcoin Core version 0.20.1 is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-0.20.1/>
+
+This minor release includes various bug fixes and performance
+improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes in some cases), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+Upgrading directly from a version of Bitcoin Core that has reached its EOL is
+possible, but it might take some time if the data directory needs to be migrated. Old
+wallet versions of Bitcoin Core are generally supported.
+
+Compatibility
+==============
+
+Bitcoin Core is supported and extensively tested on operating systems
+using the Linux kernel, macOS 10.12+, and Windows 7 and newer. Bitcoin
+Core should also work on most other Unix-like systems but is not as
+frequently tested on them. It is not recommended to use Bitcoin Core on
+unsupported systems.
+
+From Bitcoin Core 0.20.0 onwards, macOS versions earlier than 10.12 are no
+longer supported. Additionally, Bitcoin Core does not yet change appearance
+when macOS "dark mode" is activated.
+
+Known Bugs
+==========
+
+The process for generating the source code release ("tarball") has changed in an
+effort to make it more complete; however, there are a few regressions in
+this release:
+
+- The generated `configure` script is currently missing, and you will need to
+ install autotools and run `./autogen.sh` before you can run
+ `./configure`. This is the same as when checking out from git.
+
+- Instead of running `make` by itself, you should run
+ `BITCOIN_GENBUILD_NO_GIT=1 make`.
+
+Notable changes
+===============
+
+Changes regarding misbehaving peers
+-----------------------------------
+
+Peers that misbehave (e.g. send us invalid blocks) are now referred to as
+discouraged nodes in log output, as they're not (and weren't) strictly banned:
+incoming connections are still allowed from them, but they're preferred for
+eviction.
+
+Furthermore, a few additional changes are introduced to how discouraged
+addresses are treated:
+
+- Discouraging an address does not time out automatically after 24 hours
+ (or the `-bantime` setting). Depending on traffic from other peers,
+ discouragement may time out at an indeterminate time.
+
+- Discouragement is not persisted over restarts.
+
+- There is no method to list discouraged addresses. They are not returned by
+ the `listbanned` RPC. That RPC also no longer reports the `ban_reason`
+ field, as `"manually added"` is the only remaining option.
+
+- Discouragement cannot be removed with the `setban remove` RPC command.
+ If you need to remove a discouragement, you can remove all discouragements by
+ stop-starting your node.
+
+Notification changes
+--------------------
+
+`-walletnotify` notifications are now sent for wallet transactions that are
+removed from the mempool because they conflict with a new block. These
+notifications were sent previously before the v0.19 release, but had been
+broken since that release (bug
+[#18325](https://github.com/bitcoin/bitcoin/issues/18325)).
+
+PSBT changes
+------------
+
+PSBTs will contain both the non-witness utxo and the witness utxo for segwit
+inputs in order to restore compatibility with wallet software that are now
+requiring the full previous transaction for segwit inputs. The witness utxo
+is still provided to maintain compatibility with software which relied on its
+existence to determine whether an input was segwit.
+
+0.20.1 change log
+=================
+
+### Mining
+- #19019 Fix GBT: Restore "!segwit" and "csv" to "rules" key (luke-jr)
+
+### P2P protocol and network code
+- #19219 Replace automatic bans with discouragement filter (sipa)
+
+### Wallet
+- #19300 Handle concurrent wallet loading (promag)
+- #18982 Minimal fix to restore conflicted transaction notifications (ryanofsky)
+
+### RPC and other APIs
+- #19524 Increment input value sum only once per UTXO in decodepsbt (fanquake)
+- #19517 psbt: Increment input value sum only once per UTXO in decodepsbt (achow101)
+- #19215 psbt: Include and allow both non_witness_utxo and witness_utxo for segwit inputs (achow101)
+
+### GUI
+- #19097 Add missing QPainterPath include (achow101)
+- #19059 update Qt base translations for macOS release (fanquake)
+
+### Build system
+- #19152 improve build OS configure output (skmcontrib)
+- #19536 qt, build: Fix QFileDialog for static builds (hebasto)
+
+### Tests and QA
+- #19444 Remove cached directories and associated script blocks from appveyor config (sipsorcery)
+- #18640 appveyor: Remove clcache (MarcoFalke)
+
+### Miscellaneous
+- #19194 util: Don't reference errno when pthread fails (miztake)
+- #18700 Fix locking on WSL using flock instead of fcntl (meshcollider)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+- Aaron Clauson
+- Andrew Chow
+- fanquake
+- Hennadii Stepanov
+- João Barbosa
+- Luke Dashjr
+- MarcoFalke
+- MIZUTA Takeshi
+- Pieter Wuille
+- Russell Yanofsky
+- sachinkm77
+- Samuel Dobson
+- Wladimir J. van der Laan
+
+As well as to everyone that helped with translations on
+[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
diff --git a/doc/tor.md b/doc/tor.md
index 2c54e32f84..17807856e5 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -1,6 +1,6 @@
# TOR SUPPORT IN BITCOIN
-It is possible to run Bitcoin Core as a Tor hidden service, and connect to such services.
+It is possible to run Bitcoin Core as a Tor onion service, and connect to such services.
The following directions assume you have a Tor proxy running on port 9050. Many distributions default to having a SOCKS proxy listening on port 9050, but others may not. In particular, the Tor Browser Bundle defaults to listening on port 9150. See [Tor Project FAQ:TBBSocksPort](https://www.torproject.org/docs/faq.html.en#TBBSocksPort) for how to properly
configure Tor.
@@ -14,12 +14,12 @@ outgoing connections, but more is possible.
-proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy
server will be used to try to reach .onion addresses as well.
- -onion=ip:port Set the proxy server to use for Tor hidden services. You do not
+ -onion=ip:port Set the proxy server to use for Tor onion services. You do not
need to set this if it's the same as -proxy. You can use -noonion
- to explicitly disable access to hidden services.
+ to explicitly disable access to onion services.
-listen When using -proxy, listening is disabled by default. If you want
- to run a hidden service (see next section), you'll need to enable
+ to run an onion service (see next section), you'll need to enable
it explicitly.
-connect=X When behind a Tor proxy, you can specify .onion addresses instead
@@ -94,11 +94,11 @@ for normal IPv4/IPv6 communication, use:
## 3. Automatically listen on Tor
Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket
-API, to create and destroy 'ephemeral' hidden services programmatically.
+API, to create and destroy 'ephemeral' onion services programmatically.
Bitcoin Core has been updated to make use of this.
This means that if Tor is running (and proper authentication has been configured),
-Bitcoin Core automatically creates a hidden service to listen on. This will positively
+Bitcoin Core automatically creates an onion service to listen on. This will positively
affect the number of available .onion nodes.
This new feature is enabled by default if Bitcoin Core is listening (`-listen`), and
@@ -110,7 +110,7 @@ Connecting to Tor's control socket API requires one of two authentication method
configured. It also requires the control socket to be enabled, e.g. put `ControlPort 9051`
in `torrc` config file. For cookie authentication the user running bitcoind must have read
access to the `CookieAuthFile` specified in Tor configuration. In some cases this is
-preconfigured and the creation of a hidden service is automatic. If permission problems
+preconfigured and the creation of an onion service is automatic. If permission problems
are seen with `-debug=tor` they can be resolved by adding both the user running Tor and
the user running bitcoind to the same group and setting permissions appropriately. On
Debian-based systems the user running bitcoind can be added to the debian-tor group,
@@ -127,8 +127,8 @@ in the tor configuration file. The hashed password can be obtained with the comm
## 4. Privacy recommendations
-- Do not add anything but Bitcoin Core ports to the hidden service created in section 2.
- If you run a web service too, create a new hidden service for that.
+- Do not add anything but Bitcoin Core ports to the onion service created in section 2.
+ If you run a web service too, create a new onion service for that.
Otherwise it is trivial to link them, which may reduce privacy. Hidden
services created automatically (as in section 3) always have only one port
open.
diff --git a/src/Makefile.am b/src/Makefile.am
index cd3cc95707..175501d4a6 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -140,6 +140,7 @@ BITCOIN_CORE_H = \
httpserver.h \
index/base.h \
index/blockfilterindex.h \
+ index/disktxpos.h \
index/txindex.h \
indirectmap.h \
init.h \
@@ -676,12 +677,18 @@ CLEANFILES = $(EXTRA_LIBRARIES)
CLEANFILES += *.gcda *.gcno
CLEANFILES += compat/*.gcda compat/*.gcno
CLEANFILES += consensus/*.gcda consensus/*.gcno
+CLEANFILES += crc32c/src/*.gcda crc32c/src/*.gcno
CLEANFILES += crypto/*.gcda crypto/*.gcno
+CLEANFILES += index/*.gcda index/*.gcno
+CLEANFILES += interfaces/*.gcda interfaces/*.gcno
+CLEANFILES += node/*.gcda node/*.gcno
CLEANFILES += policy/*.gcda policy/*.gcno
CLEANFILES += primitives/*.gcda primitives/*.gcno
+CLEANFILES += rpc/*.gcda rpc/*.gcno
CLEANFILES += script/*.gcda script/*.gcno
CLEANFILES += support/*.gcda support/*.gcno
CLEANFILES += univalue/*.gcda univalue/*.gcno
+CLEANFILES += util/*.gcda util/*.gcno
CLEANFILES += wallet/*.gcda wallet/*.gcno
CLEANFILES += wallet/test/*.gcda wallet/test/*.gcno
CLEANFILES += zmq/*.gcda zmq/*.gcno
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 93b5156af3..c224ca7bf6 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -33,6 +33,8 @@ bench_bench_bitcoin_SOURCES = \
bench/merkle_root.cpp \
bench/mempool_eviction.cpp \
bench/mempool_stress.cpp \
+ bench/nanobench.h \
+ bench/nanobench.cpp \
bench/rpc_blockchain.cpp \
bench/rpc_mempool.cpp \
bench/util_time.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 637d1d2f6e..0068c94070 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -265,6 +265,7 @@ BITCOIN_TESTS =\
test/skiplist_tests.cpp \
test/streams_tests.cpp \
test/sync_tests.cpp \
+ test/system_tests.cpp \
test/util_threadnames_tests.cpp \
test/timedata_tests.cpp \
test/torcontrol_tests.cpp \
@@ -275,6 +276,7 @@ BITCOIN_TESTS =\
test/uint256_tests.cpp \
test/util_tests.cpp \
test/validation_block_tests.cpp \
+ test/validation_chainstate_tests.cpp \
test/validation_chainstatemanager_tests.cpp \
test/validation_flush_tests.cpp \
test/validationinterface_tests.cpp \
@@ -1206,7 +1208,7 @@ nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
$(BITCOIN_TESTS): $(GENERATED_TEST_FILES)
-CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log)
+CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno test/fuzz/*.gcda test/fuzz/*.gcno test/util/*.gcda test/util/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log)
CLEANFILES += $(CLEAN_BITCOIN_TEST)
@@ -1236,8 +1238,8 @@ endif
if TARGET_WINDOWS
else
if ENABLE_BENCH
- @echo "Running bench/bench_bitcoin -evals=1 -scaling=0..."
- $(BENCH_BINARY) -evals=1 -scaling=0 > /dev/null
+ @echo "Running bench/bench_bitcoin ..."
+ $(BENCH_BINARY) > /dev/null
endif
endif
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C secp256k1 check
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 7aba340d9d..7636c6bad2 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -479,11 +479,15 @@ int CAddrMan::Check_()
}
#endif
-void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr)
+void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct)
{
- unsigned int nNodes = ADDRMAN_GETADDR_MAX_PCT * vRandom.size() / 100;
- if (nNodes > ADDRMAN_GETADDR_MAX)
- nNodes = ADDRMAN_GETADDR_MAX;
+ size_t nNodes = vRandom.size();
+ if (max_pct != 0) {
+ nNodes = max_pct * nNodes / 100;
+ }
+ if (max_addresses != 0) {
+ nNodes = std::min(nNodes, max_addresses);
+ }
// gather a list of random nodes, skipping those of low quality
for (unsigned int n = 0; n < vRandom.size(); n++) {
diff --git a/src/addrman.h b/src/addrman.h
index 8e82020df0..ca045b91cd 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -153,12 +153,6 @@ public:
//! how recent a successful connection should be before we allow an address to be evicted from tried
#define ADDRMAN_REPLACEMENT_HOURS 4
-//! the maximum percentage of nodes to return in a getaddr call
-#define ADDRMAN_GETADDR_MAX_PCT 23
-
-//! the maximum number of nodes to return in a getaddr call
-#define ADDRMAN_GETADDR_MAX 2500
-
//! Convenience
#define ADDRMAN_TRIED_BUCKET_COUNT (1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2)
#define ADDRMAN_NEW_BUCKET_COUNT (1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2)
@@ -261,7 +255,7 @@ protected:
#endif
//! Select several addresses at once.
- void GetAddr_(std::vector<CAddress> &vAddr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void GetAddr_(std::vector<CAddress> &vAddr, size_t max_addresses, size_t max_pct) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Mark an entry as currently-connected-to.
void Connected_(const CService &addr, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -638,13 +632,13 @@ public:
}
//! Return a bunch of addresses, selected at random.
- std::vector<CAddress> GetAddr()
+ std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct)
{
Check();
std::vector<CAddress> vAddr;
{
LOCK(cs);
- GetAddr_(vAddr);
+ GetAddr_(vAddr, max_addresses, max_pct);
}
Check();
return vAddr;
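A usage sketch of the new signature: the limits that used to be the ADDRMAN_GETADDR_MAX / ADDRMAN_GETADDR_MAX_PCT constants are now supplied by the caller, and, per GetAddr_ above, passing 0 disables the corresponding cap.

```cpp
#include <addrman.h>

#include <vector>

// Sketch of a caller after this change: ask for at most 2500 addresses and at
// most 23% of the table, matching the previously hard-coded limits; passing 0
// for either argument would mean "no cap".
std::vector<CAddress> GetCappedAddresses(CAddrMan& addrman)
{
    return addrman.GetAddr(/*max_addresses=*/2500, /*max_pct=*/23);
}
```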
diff --git a/src/base58.cpp b/src/base58.cpp
index 6a9e21ffc2..18cd2090e0 100644
--- a/src/base58.cpp
+++ b/src/base58.cpp
@@ -84,21 +84,21 @@ bool DecodeBase58(const char* psz, std::vector<unsigned char>& vch, int max_ret_
return true;
}
-std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend)
+std::string EncodeBase58(Span<const unsigned char> input)
{
// Skip & count leading zeroes.
int zeroes = 0;
int length = 0;
- while (pbegin != pend && *pbegin == 0) {
- pbegin++;
+ while (input.size() > 0 && input[0] == 0) {
+ input = input.subspan(1);
zeroes++;
}
// Allocate enough space in big-endian base58 representation.
- int size = (pend - pbegin) * 138 / 100 + 1; // log(256) / log(58), rounded up.
+ int size = input.size() * 138 / 100 + 1; // log(256) / log(58), rounded up.
std::vector<unsigned char> b58(size);
// Process the bytes.
- while (pbegin != pend) {
- int carry = *pbegin;
+ while (input.size() > 0) {
+ int carry = input[0];
int i = 0;
// Apply "b58 = b58 * 256 + ch".
for (std::vector<unsigned char>::reverse_iterator it = b58.rbegin(); (carry != 0 || i < length) && (it != b58.rend()); it++, i++) {
@@ -109,7 +109,7 @@ std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend)
assert(carry == 0);
length = i;
- pbegin++;
+ input = input.subspan(1);
}
// Skip leading zeroes in base58 result.
std::vector<unsigned char>::iterator it = b58.begin() + (size - length);
@@ -124,11 +124,6 @@ std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend)
return str;
}
-std::string EncodeBase58(const std::vector<unsigned char>& vch)
-{
- return EncodeBase58(vch.data(), vch.data() + vch.size());
-}
-
bool DecodeBase58(const std::string& str, std::vector<unsigned char>& vchRet, int max_ret_len)
{
if (!ValidAsCString(str)) {
@@ -137,11 +132,11 @@ bool DecodeBase58(const std::string& str, std::vector<unsigned char>& vchRet, in
return DecodeBase58(str.c_str(), vchRet, max_ret_len);
}
-std::string EncodeBase58Check(const std::vector<unsigned char>& vchIn)
+std::string EncodeBase58Check(Span<const unsigned char> input)
{
// add 4-byte hash check to the end
- std::vector<unsigned char> vch(vchIn);
- uint256 hash = Hash(vch.begin(), vch.end());
+ std::vector<unsigned char> vch(input.begin(), input.end());
+ uint256 hash = Hash(vch);
vch.insert(vch.end(), (unsigned char*)&hash, (unsigned char*)&hash + 4);
return EncodeBase58(vch);
}
@@ -154,7 +149,7 @@ bool DecodeBase58Check(const char* psz, std::vector<unsigned char>& vchRet, int
return false;
}
// re-calculate the checksum, ensure it matches the included 4-byte checksum
- uint256 hash = Hash(vchRet.begin(), vchRet.end() - 4);
+ uint256 hash = Hash(MakeSpan(vchRet).first(vchRet.size() - 4));
if (memcmp(&hash, &vchRet[vchRet.size() - 4], 4) != 0) {
vchRet.clear();
return false;
diff --git a/src/base58.h b/src/base58.h
index 042ad671d3..b87664b78b 100644
--- a/src/base58.h
+++ b/src/base58.h
@@ -15,20 +15,15 @@
#define BITCOIN_BASE58_H
#include <attributes.h>
+#include <span.h>
#include <string>
#include <vector>
/**
- * Encode a byte sequence as a base58-encoded string.
- * pbegin and pend cannot be nullptr, unless both are.
+ * Encode a byte span as a base58-encoded string
*/
-std::string EncodeBase58(const unsigned char* pbegin, const unsigned char* pend);
-
-/**
- * Encode a byte vector as a base58-encoded string
- */
-std::string EncodeBase58(const std::vector<unsigned char>& vch);
+std::string EncodeBase58(Span<const unsigned char> input);
/**
* Decode a base58-encoded string (psz) into a byte vector (vchRet).
@@ -44,9 +39,9 @@ NODISCARD bool DecodeBase58(const char* psz, std::vector<unsigned char>& vchRet,
NODISCARD bool DecodeBase58(const std::string& str, std::vector<unsigned char>& vchRet, int max_ret_len);
/**
- * Encode a byte vector into a base58-encoded string, including checksum
+ * Encode a byte span into a base58-encoded string, including checksum
*/
-std::string EncodeBase58Check(const std::vector<unsigned char>& vchIn);
+std::string EncodeBase58Check(Span<const unsigned char> input);
/**
* Decode a base58-encoded string (psz) that includes a checksum into a byte
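As a usage sketch, assuming only what the new declarations above state: any contiguous byte container that converts to Span<const unsigned char>, such as std::vector or std::array, can now be passed directly.

```cpp
#include <base58.h>

#include <array>
#include <string>
#include <vector>

// Sketch of call sites after the Span change: the old
// EncodeBase58(v.data(), v.data() + v.size()) form is gone, and both vectors
// and arrays convert implicitly to Span<const unsigned char>.
std::string EncodeExamples()
{
    const std::vector<unsigned char> payload{0x00, 0x01, 0x02, 0x03};
    const std::array<unsigned char, 4> fixed{{0x00, 0x01, 0x02, 0x03}};
    const std::string plain = EncodeBase58(payload);
    const std::string with_checksum = EncodeBase58Check(fixed);
    return plain + " " + with_checksum;
}
```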
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
index cc260df2b8..ebdad5a4b8 100644
--- a/src/bench/addrman.cpp
+++ b/src/bench/addrman.cpp
@@ -67,52 +67,52 @@ static void FillAddrMan(CAddrMan& addrman)
/* Benchmarks */
-static void AddrManAdd(benchmark::State& state)
+static void AddrManAdd(benchmark::Bench& bench)
{
CreateAddresses();
CAddrMan addrman;
- while (state.KeepRunning()) {
+ bench.run([&] {
AddAddressesToAddrMan(addrman);
addrman.Clear();
- }
+ });
}
-static void AddrManSelect(benchmark::State& state)
+static void AddrManSelect(benchmark::Bench& bench)
{
CAddrMan addrman;
FillAddrMan(addrman);
- while (state.KeepRunning()) {
+ bench.run([&] {
const auto& address = addrman.Select();
assert(address.GetPort() > 0);
- }
+ });
}
-static void AddrManGetAddr(benchmark::State& state)
+static void AddrManGetAddr(benchmark::Bench& bench)
{
CAddrMan addrman;
FillAddrMan(addrman);
- while (state.KeepRunning()) {
- const auto& addresses = addrman.GetAddr();
+ bench.run([&] {
+ const auto& addresses = addrman.GetAddr(2500, 23);
assert(addresses.size() > 0);
- }
+ });
}
-static void AddrManGood(benchmark::State& state)
+static void AddrManGood(benchmark::Bench& bench)
{
/* Create many CAddrMan objects - one to be modified at each loop iteration.
* This is necessary because the CAddrMan::Good() method modifies the
* object, affecting the timing of subsequent calls to the same method and
* we want to do the same amount of work in every loop iteration. */
- const uint64_t numLoops = state.m_num_iters * state.m_num_evals;
+ bench.epochs(5).epochIterations(1);
- std::vector<CAddrMan> addrmans(numLoops);
+ std::vector<CAddrMan> addrmans(bench.epochs() * bench.epochIterations());
for (auto& addrman : addrmans) {
FillAddrMan(addrman);
}
@@ -128,13 +128,13 @@ static void AddrManGood(benchmark::State& state)
};
uint64_t i = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
markSomeAsGood(addrmans.at(i));
++i;
- }
+ });
}
-BENCHMARK(AddrManAdd, 5);
-BENCHMARK(AddrManSelect, 1000000);
-BENCHMARK(AddrManGetAddr, 500);
-BENCHMARK(AddrManGood, 2);
+BENCHMARK(AddrManAdd);
+BENCHMARK(AddrManSelect);
+BENCHMARK(AddrManGetAddr);
+BENCHMARK(AddrManGood);
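The AddrManGood conversion above also shows the pattern for benchmarks whose timed work mutates its input: pin the epoch and iteration counts, pre-build one object per iteration, and advance an index inside run(). A self-contained sketch of that pattern, with a deliberately trivial placeholder state:

```cpp
#include <bench/bench.h>

#include <cstdint>
#include <numeric>
#include <vector>

// Generic sketch of the "one pre-built object per iteration" pattern used by
// AddrManGood above: the state is modified by the timed work, so one copy is
// prepared per iteration up front. ExpensiveState is a hypothetical stand-in.
struct ExpensiveState { std::vector<int> values = std::vector<int>(1000, 1); };

static void ConsumeState(ExpensiveState& s)
{
    s.values.push_back(std::accumulate(s.values.begin(), s.values.end(), 0)); // mutates the state
}

static void MutatingBench(benchmark::Bench& bench)
{
    bench.epochs(5).epochIterations(1); // fixes the total number of iterations
    std::vector<ExpensiveState> states(bench.epochs() * bench.epochIterations());
    uint64_t i = 0;
    bench.run([&] {
        ConsumeState(states.at(i)); // each iteration gets its own, untouched state
        ++i;
    });
}

BENCHMARK(MutatingBench);
```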
diff --git a/src/bench/base58.cpp b/src/bench/base58.cpp
index 0690483d50..18cb5de196 100644
--- a/src/bench/base58.cpp
+++ b/src/bench/base58.cpp
@@ -10,7 +10,7 @@
#include <vector>
-static void Base58Encode(benchmark::State& state)
+static void Base58Encode(benchmark::Bench& bench)
{
static const std::array<unsigned char, 32> buff = {
{
@@ -19,13 +19,13 @@ static void Base58Encode(benchmark::State& state)
200, 24
}
};
- while (state.KeepRunning()) {
- EncodeBase58(buff.data(), buff.data() + buff.size());
- }
+ bench.batch(buff.size()).unit("byte").run([&] {
+ EncodeBase58(buff);
+ });
}
-static void Base58CheckEncode(benchmark::State& state)
+static void Base58CheckEncode(benchmark::Bench& bench)
{
static const std::array<unsigned char, 32> buff = {
{
@@ -34,24 +34,22 @@ static void Base58CheckEncode(benchmark::State& state)
200, 24
}
};
- std::vector<unsigned char> vch;
- vch.assign(buff.begin(), buff.end());
- while (state.KeepRunning()) {
- EncodeBase58Check(vch);
- }
+ bench.batch(buff.size()).unit("byte").run([&] {
+ EncodeBase58Check(buff);
+ });
}
-static void Base58Decode(benchmark::State& state)
+static void Base58Decode(benchmark::Bench& bench)
{
const char* addr = "17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem";
std::vector<unsigned char> vch;
- while (state.KeepRunning()) {
+ bench.batch(strlen(addr)).unit("byte").run([&] {
(void) DecodeBase58(addr, vch, 64);
- }
+ });
}
-BENCHMARK(Base58Encode, 470 * 1000);
-BENCHMARK(Base58CheckEncode, 320 * 1000);
-BENCHMARK(Base58Decode, 800 * 1000);
+BENCHMARK(Base58Encode);
+BENCHMARK(Base58CheckEncode);
+BENCHMARK(Base58Decode);
diff --git a/src/bench/bech32.cpp b/src/bench/bech32.cpp
index 2107840a3a..c74d8d51b3 100644
--- a/src/bench/bech32.cpp
+++ b/src/bench/bech32.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
+#include <bench/nanobench.h>
#include <bech32.h>
#include <util/strencodings.h>
@@ -11,26 +12,26 @@
#include <vector>
-static void Bech32Encode(benchmark::State& state)
+static void Bech32Encode(benchmark::Bench& bench)
{
std::vector<uint8_t> v = ParseHex("c97f5a67ec381b760aeaf67573bc164845ff39a3bb26a1cee401ac67243b48db");
std::vector<unsigned char> tmp = {0};
tmp.reserve(1 + 32 * 8 / 5);
ConvertBits<8, 5, true>([&](unsigned char c) { tmp.push_back(c); }, v.begin(), v.end());
- while (state.KeepRunning()) {
+ bench.batch(v.size()).unit("byte").run([&] {
bech32::Encode("bc", tmp);
- }
+ });
}
-static void Bech32Decode(benchmark::State& state)
+static void Bech32Decode(benchmark::Bench& bench)
{
std::string addr = "bc1qkallence7tjawwvy0dwt4twc62qjgaw8f4vlhyd006d99f09";
- while (state.KeepRunning()) {
+ bench.batch(addr.size()).unit("byte").run([&] {
bech32::Decode(addr);
- }
+ });
}
-BENCHMARK(Bech32Encode, 800 * 1000);
-BENCHMARK(Bech32Decode, 800 * 1000);
+BENCHMARK(Bech32Encode);
+BENCHMARK(Bech32Decode);
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index 7b93ef688d..01466d0b6f 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -8,141 +8,73 @@
#include <test/util/setup_common.h>
#include <validation.h>
-#include <algorithm>
-#include <assert.h>
-#include <iomanip>
-#include <iostream>
-#include <numeric>
#include <regex>
const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
-void benchmark::ConsolePrinter::header()
-{
- std::cout << "# Benchmark, evals, iterations, total, min, max, median" << std::endl;
-}
+namespace {
-void benchmark::ConsolePrinter::result(const State& state)
+void GenerateTemplateResults(const std::vector<ankerl::nanobench::Result>& benchmarkResults, const std::string& filename, const char* tpl)
{
- auto results = state.m_elapsed_results;
- std::sort(results.begin(), results.end());
-
- double total = state.m_num_iters * std::accumulate(results.begin(), results.end(), 0.0);
-
- double front = 0;
- double back = 0;
- double median = 0;
-
- if (!results.empty()) {
- front = results.front();
- back = results.back();
-
- size_t mid = results.size() / 2;
- median = results[mid];
- if (0 == results.size() % 2) {
- median = (results[mid] + results[mid + 1]) / 2;
- }
+ if (benchmarkResults.empty() || filename.empty()) {
+ // nothing to write, bail out
+ return;
}
-
- std::cout << std::setprecision(6);
- std::cout << state.m_name << ", " << state.m_num_evals << ", " << state.m_num_iters << ", " << total << ", " << front << ", " << back << ", " << median << std::endl;
-}
-
-void benchmark::ConsolePrinter::footer() {}
-benchmark::PlotlyPrinter::PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height)
- : m_plotly_url(plotly_url), m_width(width), m_height(height)
-{
-}
-
-void benchmark::PlotlyPrinter::header()
-{
- std::cout << "<html><head>"
- << "<script src=\"" << m_plotly_url << "\"></script>"
- << "</head><body><div id=\"myDiv\" style=\"width:" << m_width << "px; height:" << m_height << "px\"></div>"
- << "<script> var data = ["
- << std::endl;
-}
-
-void benchmark::PlotlyPrinter::result(const State& state)
-{
- std::cout << "{ " << std::endl
- << " name: '" << state.m_name << "', " << std::endl
- << " y: [";
-
- const char* prefix = "";
- for (const auto& e : state.m_elapsed_results) {
- std::cout << prefix << std::setprecision(6) << e;
- prefix = ", ";
+ std::ofstream fout(filename);
+ if (fout.is_open()) {
+ ankerl::nanobench::render(tpl, benchmarkResults, fout);
+ std::cout << "Created '" << filename << "'" << std::endl;
+ } else {
+ std::cout << "Could not write to file '" << filename << "'" << std::endl;
+ }
- std::cout << "]," << std::endl
- << " boxpoints: 'all', jitter: 0.3, pointpos: 0, type: 'box',"
- << std::endl
- << "}," << std::endl;
-}
-void benchmark::PlotlyPrinter::footer()
-{
- std::cout << "]; var layout = { showlegend: false, yaxis: { rangemode: 'tozero', autorange: true } };"
- << "Plotly.newPlot('myDiv', data, layout);"
- << "</script></body></html>";
}
+} // namespace
benchmark::BenchRunner::BenchmarkMap& benchmark::BenchRunner::benchmarks()
{
- static std::map<std::string, Bench> benchmarks_map;
+ static std::map<std::string, BenchFunction> benchmarks_map;
return benchmarks_map;
}
-benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func, uint64_t num_iters_for_one_second)
+benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func)
{
- benchmarks().insert(std::make_pair(name, Bench{func, num_iters_for_one_second}));
+ benchmarks().insert(std::make_pair(name, func));
}
-void benchmark::BenchRunner::RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only)
+void benchmark::BenchRunner::RunAll(const Args& args)
{
- if (!std::ratio_less_equal<benchmark::clock::period, std::micro>::value) {
- std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n";
- }
-#ifdef DEBUG
- std::cerr << "WARNING: This is a debug build - may result in slower benchmarks.\n";
-#endif
-
- std::regex reFilter(filter);
+ std::regex reFilter(args.regex_filter);
std::smatch baseMatch;
- printer.header();
-
+ std::vector<ankerl::nanobench::Result> benchmarkResults;
for (const auto& p : benchmarks()) {
if (!std::regex_match(p.first, baseMatch, reFilter)) {
continue;
}
- uint64_t num_iters = static_cast<uint64_t>(p.second.num_iters_for_one_second * scaling);
- if (0 == num_iters) {
- num_iters = 1;
- }
- State state(p.first, num_evals, num_iters, printer);
- if (!is_list_only) {
- p.second.func(state);
+ if (args.is_list_only) {
+ std::cout << p.first << std::endl;
+ continue;
}
- printer.result(state);
- }
-
- printer.footer();
-}
-
-bool benchmark::State::UpdateTimer(const benchmark::time_point current_time)
-{
- if (m_start_time != time_point()) {
- std::chrono::duration<double> diff = current_time - m_start_time;
- m_elapsed_results.push_back(diff.count() / m_num_iters);
- if (m_elapsed_results.size() == m_num_evals) {
- return false;
+ Bench bench;
+ bench.name(p.first);
+ if (args.asymptote.empty()) {
+ p.second(bench);
+ } else {
+ for (auto n : args.asymptote) {
+ bench.complexityN(n);
+ p.second(bench);
+ }
+ std::cout << bench.complexityBigO() << std::endl;
}
+ benchmarkResults.push_back(bench.results().back());
}
- m_num_iters_left = m_num_iters - 1;
- return true;
+ GenerateTemplateResults(benchmarkResults, args.output_csv, "# Benchmark, evals, iterations, total, min, max, median\n"
+ "{{#result}}{{name}}, {{epochs}}, {{average(iterations)}}, {{sumProduct(iterations, elapsed)}}, {{minimum(elapsed)}}, {{maximum(elapsed)}}, {{median(elapsed)}}\n"
+ "{{/result}}");
+ GenerateTemplateResults(benchmarkResults, args.output_json, ankerl::nanobench::templates::json());
}
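For reference, a minimal sketch of the render call that GenerateTemplateResults wraps, using only the two nanobench entry points already used above (render() and templates::json()); the output path is a hypothetical example.

```cpp
#include <bench/nanobench.h>

#include <fstream>
#include <vector>

// Sketch: write all collected results as JSON, the same way
// GenerateTemplateResults does for -output_json.
void WriteResultsAsJson(const std::vector<ankerl::nanobench::Result>& results)
{
    std::ofstream fout("results.json"); // hypothetical output path
    if (fout.is_open()) {
        ankerl::nanobench::render(ankerl::nanobench::templates::json(), results, fout);
    }
}
```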
diff --git a/src/bench/bench.h b/src/bench/bench.h
index 629bca9a73..bafc7f8716 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -11,131 +11,53 @@
#include <string>
#include <vector>
+#include <bench/nanobench.h>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/stringize.hpp>
-// Simple micro-benchmarking framework; API mostly matches a subset of the Google Benchmark
-// framework (see https://github.com/google/benchmark)
-// Why not use the Google Benchmark framework? Because adding Yet Another Dependency
-// (that uses cmake as its build system and has lots of features we don't need) isn't
-// worth it.
-
/*
* Usage:
-static void CODE_TO_TIME(benchmark::State& state)
+static void CODE_TO_TIME(benchmark::Bench& bench)
{
... do any setup needed...
- while (state.KeepRunning()) {
+ bench.run([&] {
... do stuff you want to time...
- }
+ });
... do any cleanup needed...
}
-// default to running benchmark for 5000 iterations
-BENCHMARK(CODE_TO_TIME, 5000);
+BENCHMARK(CODE_TO_TIME);
*/
namespace benchmark {
-// In case high_resolution_clock is steady, prefer that, otherwise use steady_clock.
-struct best_clock {
- using hi_res_clock = std::chrono::high_resolution_clock;
- using steady_clock = std::chrono::steady_clock;
- using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type;
-};
-using clock = best_clock::type;
-using time_point = clock::time_point;
-using duration = clock::duration;
-
-class Printer;
-
-class State
-{
-public:
- std::string m_name;
- uint64_t m_num_iters_left;
- const uint64_t m_num_iters;
- const uint64_t m_num_evals;
- std::vector<double> m_elapsed_results;
- time_point m_start_time;
- bool UpdateTimer(time_point finish_time);
+using ankerl::nanobench::Bench;
- State(std::string name, uint64_t num_evals, double num_iters, Printer& printer) : m_name(name), m_num_iters_left(0), m_num_iters(num_iters), m_num_evals(num_evals)
- {
- }
+typedef std::function<void(Bench&)> BenchFunction;
- inline bool KeepRunning()
- {
- if (m_num_iters_left--) {
- return true;
- }
-
- bool result = UpdateTimer(clock::now());
- // measure again so runtime of UpdateTimer is not included
- m_start_time = clock::now();
- return result;
- }
+struct Args {
+ std::string regex_filter;
+ bool is_list_only;
+ std::vector<double> asymptote;
+ std::string output_csv;
+ std::string output_json;
};
-typedef std::function<void(State&)> BenchFunction;
-
class BenchRunner
{
- struct Bench {
- BenchFunction func;
- uint64_t num_iters_for_one_second;
- };
- typedef std::map<std::string, Bench> BenchmarkMap;
+ typedef std::map<std::string, BenchFunction> BenchmarkMap;
static BenchmarkMap& benchmarks();
public:
- BenchRunner(std::string name, BenchFunction func, uint64_t num_iters_for_one_second);
-
- static void RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only);
-};
+ BenchRunner(std::string name, BenchFunction func);
-// interface to output benchmark results.
-class Printer
-{
-public:
- virtual ~Printer() {}
- virtual void header() = 0;
- virtual void result(const State& state) = 0;
- virtual void footer() = 0;
-};
-
-// default printer to console, shows min, max, median.
-class ConsolePrinter : public Printer
-{
-public:
- void header() override;
- void result(const State& state) override;
- void footer() override;
-};
-
-// creates box plot with plotly.js
-class PlotlyPrinter : public Printer
-{
-public:
- PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height);
- void header() override;
- void result(const State& state) override;
- void footer() override;
-
-private:
- std::string m_plotly_url;
- int64_t m_width;
- int64_t m_height;
+ static void RunAll(const Args& args);
};
}
-
-
-// BENCHMARK(foo, num_iters_for_one_second) expands to: benchmark::BenchRunner bench_11foo("foo", num_iterations);
-// Choose a num_iters_for_one_second that takes roughly 1 second. The goal is that all benchmarks should take approximately
-// the same time, and scaling factor can be used that the total time is appropriate for your system.
-#define BENCHMARK(n, num_iters_for_one_second) \
- benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n, (num_iters_for_one_second));
+// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo");
+#define BENCHMARK(n) \
+ benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n);
#endif // BITCOIN_BENCH_BENCH_H
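To make the new interface concrete, here is a minimal benchmark written against it; a sketch assuming nothing beyond the header above. run() replaces the old KeepRunning() loop, while the optional batch()/unit() calls only change how throughput is reported.

```cpp
#include <bench/bench.h>

#include <vector>

volatile unsigned int g_sum = 0; // volatile, global so the work is not optimized away

// Minimal example benchmark for the nanobench-backed interface: throughput is
// reported in bytes per second because of batch()/unit().
static void SumBytes(benchmark::Bench& bench)
{
    std::vector<unsigned char> data(1000, 1);
    bench.batch(data.size()).unit("byte").run([&] {
        unsigned int sum = 0;
        for (unsigned char b : data) sum += b; // the timed work
        g_sum += sum;
    });
}

BENCHMARK(SumBytes);
```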
diff --git a/src/bench/bench_bitcoin.cpp b/src/bench/bench_bitcoin.cpp
index 1f872ce700..135659f87f 100644
--- a/src/bench/bench_bitcoin.cpp
+++ b/src/bench/bench_bitcoin.cpp
@@ -10,26 +10,30 @@
#include <memory>
-static const int64_t DEFAULT_BENCH_EVALUATIONS = 5;
static const char* DEFAULT_BENCH_FILTER = ".*";
-static const char* DEFAULT_BENCH_SCALING = "1.0";
-static const char* DEFAULT_BENCH_PRINTER = "console";
-static const char* DEFAULT_PLOT_PLOTLYURL = "https://cdn.plot.ly/plotly-latest.min.js";
-static const int64_t DEFAULT_PLOT_WIDTH = 1024;
-static const int64_t DEFAULT_PLOT_HEIGHT = 768;
static void SetupBenchArgs(ArgsManager& argsman)
{
SetupHelpOptions(argsman);
- argsman.AddArg("-list", "List benchmarks without executing them. Can be combined with -scaling and -filter", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-evals=<n>", strprintf("Number of measurement evaluations to perform. (default: %u)", DEFAULT_BENCH_EVALUATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-filter=<regex>", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-scaling=<n>", strprintf("Scaling factor for benchmark's runtime (default: %u)", DEFAULT_BENCH_SCALING), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-printer=(console|plot)", strprintf("Choose printer format. console: print data to console. plot: Print results as HTML graph (default: %s)", DEFAULT_BENCH_PRINTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-plot-plotlyurl=<uri>", strprintf("URL to use for plotly.js (default: %s)", DEFAULT_PLOT_PLOTLYURL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-plot-width=<x>", strprintf("Plot width in pixel (default: %u)", DEFAULT_PLOT_WIDTH), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-plot-height=<x>", strprintf("Plot height in pixel (default: %u)", DEFAULT_PLOT_HEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-asymptote=n1,n2,n3,...", strprintf("Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-output_csv=<output.csv>", "Generate CSV file with the most important benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-output_json=<output.json>", "Generate JSON file with all benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+}
+
+// parses a comma-separated list like "10,20,30,50"
+static std::vector<double> parseAsymptote(const std::string& str) {
+ std::stringstream ss(str);
+ std::vector<double> numbers;
+ double d;
+ char c;
+ while (ss >> d) {
+ numbers.push_back(d);
+ ss >> c;
+ }
+ return numbers;
}
int main(int argc, char** argv)
@@ -49,34 +53,14 @@ int main(int argc, char** argv)
return EXIT_SUCCESS;
}
- int64_t evaluations = argsman.GetArg("-evals", DEFAULT_BENCH_EVALUATIONS);
- std::string regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
- std::string scaling_str = argsman.GetArg("-scaling", DEFAULT_BENCH_SCALING);
- bool is_list_only = argsman.GetBoolArg("-list", false);
-
- if (evaluations == 0) {
- return EXIT_SUCCESS;
- } else if (evaluations < 0) {
- tfm::format(std::cerr, "Error parsing evaluations argument: %d\n", evaluations);
- return EXIT_FAILURE;
- }
-
- double scaling_factor;
- if (!ParseDouble(scaling_str, &scaling_factor)) {
- tfm::format(std::cerr, "Error parsing scaling factor as double: %s\n", scaling_str);
- return EXIT_FAILURE;
- }
-
- std::unique_ptr<benchmark::Printer> printer = MakeUnique<benchmark::ConsolePrinter>();
- std::string printer_arg = argsman.GetArg("-printer", DEFAULT_BENCH_PRINTER);
- if ("plot" == printer_arg) {
- printer.reset(new benchmark::PlotlyPrinter(
- argsman.GetArg("-plot-plotlyurl", DEFAULT_PLOT_PLOTLYURL),
- argsman.GetArg("-plot-width", DEFAULT_PLOT_WIDTH),
- argsman.GetArg("-plot-height", DEFAULT_PLOT_HEIGHT)));
- }
+ benchmark::Args args;
+ args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
+ args.is_list_only = argsman.GetBoolArg("-list", false);
+ args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", ""));
+ args.output_csv = argsman.GetArg("-output_csv", "");
+ args.output_json = argsman.GetArg("-output_json", "");
- benchmark::BenchRunner::RunAll(*printer, evaluations, scaling_factor, regex_filter, is_list_only);
+ benchmark::BenchRunner::RunAll(args);
return EXIT_SUCCESS;
}
diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp
index 268f67cada..3f15f3f856 100644
--- a/src/bench/block_assemble.cpp
+++ b/src/bench/block_assemble.cpp
@@ -14,7 +14,7 @@
#include <vector>
-static void AssembleBlock(benchmark::State& state)
+static void AssembleBlock(benchmark::Bench& bench)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -54,9 +54,9 @@ static void AssembleBlock(benchmark::State& state)
}
}
- while (state.KeepRunning()) {
+ bench.run([&] {
PrepareBlock(test_setup.m_node, SCRIPT_PUB);
- }
+ });
}
-BENCHMARK(AssembleBlock, 700);
+BENCHMARK(AssembleBlock);
diff --git a/src/bench/ccoins_caching.cpp b/src/bench/ccoins_caching.cpp
index 86f9a0bf67..116de98b14 100644
--- a/src/bench/ccoins_caching.cpp
+++ b/src/bench/ccoins_caching.cpp
@@ -16,7 +16,7 @@
// characteristics than e.g. reindex timings. But that's not a requirement of
// every benchmark."
// (https://github.com/bitcoin/bitcoin/issues/7883#issuecomment-224807484)
-static void CCoinsCaching(benchmark::State& state)
+static void CCoinsCaching(benchmark::Bench& bench)
{
const ECCVerifyHandle verify_handle;
ECC_Start();
@@ -44,11 +44,11 @@ static void CCoinsCaching(benchmark::State& state)
// Benchmark.
const CTransaction tx_1(t1);
- while (state.KeepRunning()) {
+ bench.run([&] {
bool success = AreInputsStandard(tx_1, coins);
assert(success);
- }
+ });
ECC_Stop();
}
-BENCHMARK(CCoinsCaching, 170 * 1000);
+BENCHMARK(CCoinsCaching);
diff --git a/src/bench/chacha20.cpp b/src/bench/chacha20.cpp
index f1b0a9a989..913e0f8d57 100644
--- a/src/bench/chacha20.cpp
+++ b/src/bench/chacha20.cpp
@@ -11,7 +11,7 @@ static const uint64_t BUFFER_SIZE_TINY = 64;
static const uint64_t BUFFER_SIZE_SMALL = 256;
static const uint64_t BUFFER_SIZE_LARGE = 1024*1024;
-static void CHACHA20(benchmark::State& state, size_t buffersize)
+static void CHACHA20(benchmark::Bench& bench, size_t buffersize)
{
std::vector<uint8_t> key(32,0);
ChaCha20 ctx(key.data(), key.size());
@@ -19,26 +19,26 @@ static void CHACHA20(benchmark::State& state, size_t buffersize)
ctx.Seek(0);
std::vector<uint8_t> in(buffersize,0);
std::vector<uint8_t> out(buffersize,0);
- while (state.KeepRunning()) {
+ bench.batch(in.size()).unit("byte").run([&] {
ctx.Crypt(in.data(), out.data(), in.size());
- }
+ });
}
-static void CHACHA20_64BYTES(benchmark::State& state)
+static void CHACHA20_64BYTES(benchmark::Bench& bench)
{
- CHACHA20(state, BUFFER_SIZE_TINY);
+ CHACHA20(bench, BUFFER_SIZE_TINY);
}
-static void CHACHA20_256BYTES(benchmark::State& state)
+static void CHACHA20_256BYTES(benchmark::Bench& bench)
{
- CHACHA20(state, BUFFER_SIZE_SMALL);
+ CHACHA20(bench, BUFFER_SIZE_SMALL);
}
-static void CHACHA20_1MB(benchmark::State& state)
+static void CHACHA20_1MB(benchmark::Bench& bench)
{
- CHACHA20(state, BUFFER_SIZE_LARGE);
+ CHACHA20(bench, BUFFER_SIZE_LARGE);
}
-BENCHMARK(CHACHA20_64BYTES, 500000);
-BENCHMARK(CHACHA20_256BYTES, 250000);
-BENCHMARK(CHACHA20_1MB, 340);
+BENCHMARK(CHACHA20_64BYTES);
+BENCHMARK(CHACHA20_256BYTES);
+BENCHMARK(CHACHA20_1MB);
diff --git a/src/bench/chacha_poly_aead.cpp b/src/bench/chacha_poly_aead.cpp
index df10f27d03..3b1d3e697a 100644
--- a/src/bench/chacha_poly_aead.cpp
+++ b/src/bench/chacha_poly_aead.cpp
@@ -21,7 +21,7 @@ static const unsigned char k2[32] = {0};
static ChaCha20Poly1305AEAD aead(k1, 32, k2, 32);
-static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, bool include_decryption)
+static void CHACHA20_POLY1305_AEAD(benchmark::Bench& bench, size_t buffersize, bool include_decryption)
{
std::vector<unsigned char> in(buffersize + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0);
std::vector<unsigned char> out(buffersize + CHACHA20_POLY1305_AEAD_AAD_LEN + POLY1305_TAGLEN, 0);
@@ -29,7 +29,7 @@ static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, b
uint64_t seqnr_aad = 0;
int aad_pos = 0;
uint32_t len = 0;
- while (state.KeepRunning()) {
+ bench.batch(buffersize).unit("byte").run([&] {
// encrypt or decrypt the buffer with a static key
assert(aead.Crypt(seqnr_payload, seqnr_aad, aad_pos, out.data(), out.size(), in.data(), buffersize, true));
@@ -53,70 +53,71 @@ static void CHACHA20_POLY1305_AEAD(benchmark::State& state, size_t buffersize, b
seqnr_aad = 0;
aad_pos = 0;
}
- }
+ });
}
-static void CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_TINY, false);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_TINY, false);
}
-static void CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_SMALL, false);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_SMALL, false);
}
-static void CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_LARGE, false);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_LARGE, false);
}
-static void CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_TINY, true);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_TINY, true);
}
-static void CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_SMALL, true);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_SMALL, true);
}
-static void CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT(benchmark::State& state)
+static void CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT(benchmark::Bench& bench)
{
- CHACHA20_POLY1305_AEAD(state, BUFFER_SIZE_LARGE, true);
+ CHACHA20_POLY1305_AEAD(bench, BUFFER_SIZE_LARGE, true);
}
// Add Hash() (dbl-sha256) bench for comparison
-static void HASH(benchmark::State& state, size_t buffersize)
+static void HASH(benchmark::Bench& bench, size_t buffersize)
{
uint8_t hash[CHash256::OUTPUT_SIZE];
std::vector<uint8_t> in(buffersize,0);
- while (state.KeepRunning())
- CHash256().Write(in.data(), in.size()).Finalize(hash);
+ bench.batch(in.size()).unit("byte").run([&] {
+ CHash256().Write(in).Finalize(hash);
+ });
}
-static void HASH_64BYTES(benchmark::State& state)
+static void HASH_64BYTES(benchmark::Bench& bench)
{
- HASH(state, BUFFER_SIZE_TINY);
+ HASH(bench, BUFFER_SIZE_TINY);
}
-static void HASH_256BYTES(benchmark::State& state)
+static void HASH_256BYTES(benchmark::Bench& bench)
{
- HASH(state, BUFFER_SIZE_SMALL);
+ HASH(bench, BUFFER_SIZE_SMALL);
}
-static void HASH_1MB(benchmark::State& state)
+static void HASH_1MB(benchmark::Bench& bench)
{
- HASH(state, BUFFER_SIZE_LARGE);
+ HASH(bench, BUFFER_SIZE_LARGE);
}
-BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT, 500000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT, 250000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT, 340);
-BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT, 500000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT, 250000);
-BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT, 340);
-BENCHMARK(HASH_64BYTES, 500000);
-BENCHMARK(HASH_256BYTES, 250000);
-BENCHMARK(HASH_1MB, 340);
+BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ONLY_ENCRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ONLY_ENCRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ONLY_ENCRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_64BYTES_ENCRYPT_DECRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_256BYTES_ENCRYPT_DECRYPT);
+BENCHMARK(CHACHA20_POLY1305_AEAD_1MB_ENCRYPT_DECRYPT);
+BENCHMARK(HASH_64BYTES);
+BENCHMARK(HASH_256BYTES);
+BENCHMARK(HASH_1MB);
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index 2b2c78905e..dc0aa4031c 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -14,21 +14,21 @@
// a block off the wire, but before we can relay the block on to peers using
// compact block relay.
-static void DeserializeBlockTest(benchmark::State& state)
+static void DeserializeBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
char a = '\0';
stream.write(&a, 1); // Prevent compaction
- while (state.KeepRunning()) {
+ bench.unit("block").run([&] {
CBlock block;
stream >> block;
bool rewound = stream.Rewind(benchmark::data::block413567.size());
assert(rewound);
- }
+ });
}
-static void DeserializeAndCheckBlockTest(benchmark::State& state)
+static void DeserializeAndCheckBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
char a = '\0';
@@ -36,7 +36,7 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
const auto chainParams = CreateChainParams(CBaseChainParams::MAIN);
- while (state.KeepRunning()) {
+ bench.unit("block").run([&] {
CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here
stream >> block;
bool rewound = stream.Rewind(benchmark::data::block413567.size());
@@ -45,8 +45,8 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
BlockValidationState validationState;
bool checked = CheckBlock(block, validationState, chainParams->GetConsensus());
assert(checked);
- }
+ });
}
-BENCHMARK(DeserializeBlockTest, 130);
-BENCHMARK(DeserializeAndCheckBlockTest, 160);
+BENCHMARK(DeserializeBlockTest);
+BENCHMARK(DeserializeAndCheckBlockTest);
diff --git a/src/bench/checkqueue.cpp b/src/bench/checkqueue.cpp
index e052681181..19d7bc0dbc 100644
--- a/src/bench/checkqueue.cpp
+++ b/src/bench/checkqueue.cpp
@@ -24,7 +24,7 @@ static const unsigned int QUEUE_BATCH_SIZE = 128;
// This Benchmark tests the CheckQueue with a slightly realistic workload,
// where checks all contain a prevector that is indirect 50% of the time
// and there is a little bit of work done between calls to Add.
-static void CCheckQueueSpeedPrevectorJob(benchmark::State& state)
+static void CCheckQueueSpeedPrevectorJob(benchmark::Bench& bench)
{
const ECCVerifyHandle verify_handle;
ECC_Start();
@@ -47,23 +47,28 @@ static void CCheckQueueSpeedPrevectorJob(benchmark::State& state)
for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) {
tg.create_thread([&]{queue.Thread();});
}
- while (state.KeepRunning()) {
+
+ // create all the data once, then submit copies in the benchmark.
+ FastRandomContext insecure_rand(true);
+ std::vector<std::vector<PrevectorJob>> vBatches(BATCHES);
+ for (auto& vChecks : vBatches) {
+ vChecks.reserve(BATCH_SIZE);
+ for (size_t x = 0; x < BATCH_SIZE; ++x)
+ vChecks.emplace_back(insecure_rand);
+ }
+
+ bench.minEpochIterations(10).batch(BATCH_SIZE * BATCHES).unit("job").run([&] {
// Make insecure_rand here so that each iteration is identical.
- FastRandomContext insecure_rand(true);
CCheckQueueControl<PrevectorJob> control(&queue);
- std::vector<std::vector<PrevectorJob>> vBatches(BATCHES);
- for (auto& vChecks : vBatches) {
- vChecks.reserve(BATCH_SIZE);
- for (size_t x = 0; x < BATCH_SIZE; ++x)
- vChecks.emplace_back(insecure_rand);
+ for (auto vChecks : vBatches) {
control.Add(vChecks);
}
// control waits for completion by RAII, but
// it is done explicitly here for clarity
control.Wait();
- }
+ });
tg.interrupt_all();
tg.join_all();
ECC_Stop();
}
-BENCHMARK(CCheckQueueSpeedPrevectorJob, 1400);
+BENCHMARK(CCheckQueueSpeedPrevectorJob);
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index f2d12531d7..3a71a6ca03 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -27,7 +27,7 @@ static void addCoin(const CAmount& nValue, const CWallet& wallet, std::vector<st
// same one over and over isn't too useful. Generating random isn't useful
// either for measurements."
// (https://github.com/bitcoin/bitcoin/issues/7883#issuecomment-224807484)
-static void CoinSelection(benchmark::State& state)
+static void CoinSelection(benchmark::Bench& bench)
{
NodeContext node;
auto chain = interfaces::MakeChain(node);
@@ -51,7 +51,7 @@ static void CoinSelection(benchmark::State& state)
const CoinEligibilityFilter filter_standard(1, 6, 0);
const CoinSelectionParams coin_selection_params(true, 34, 148, CFeeRate(0), 0);
- while (state.KeepRunning()) {
+ bench.run([&] {
std::set<CInputCoin> setCoinsRet;
CAmount nValueRet;
bool bnb_used;
@@ -59,7 +59,7 @@ static void CoinSelection(benchmark::State& state)
assert(success);
assert(nValueRet == 1003 * COIN);
assert(setCoinsRet.size() == 2);
- }
+ });
}
typedef std::set<CInputCoin> CoinSet;
@@ -91,7 +91,7 @@ static CAmount make_hard_case(int utxos, std::vector<OutputGroup>& utxo_pool)
return target;
}
-static void BnBExhaustion(benchmark::State& state)
+static void BnBExhaustion(benchmark::Bench& bench)
{
// Setup
testWallet.SetupLegacyScriptPubKeyMan();
@@ -100,7 +100,7 @@ static void BnBExhaustion(benchmark::State& state)
CAmount value_ret = 0;
CAmount not_input_fees = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
// Benchmark
CAmount target = make_hard_case(17, utxo_pool);
SelectCoinsBnB(utxo_pool, target, 0, selection, value_ret, not_input_fees); // Should exhaust
@@ -108,8 +108,8 @@ static void BnBExhaustion(benchmark::State& state)
// Cleanup
utxo_pool.clear();
selection.clear();
- }
+ });
}
-BENCHMARK(CoinSelection, 650);
-BENCHMARK(BnBExhaustion, 650);
+BENCHMARK(CoinSelection);
+BENCHMARK(BnBExhaustion);
diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp
index ddcef5121e..36be86bcc8 100644
--- a/src/bench/crypto_hash.cpp
+++ b/src/bench/crypto_hash.cpp
@@ -16,88 +16,92 @@
/* Number of bytes to hash per iteration */
static const uint64_t BUFFER_SIZE = 1000*1000;
-static void RIPEMD160(benchmark::State& state)
+static void RIPEMD160(benchmark::Bench& bench)
{
uint8_t hash[CRIPEMD160::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CRIPEMD160().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SHA1(benchmark::State& state)
+static void SHA1(benchmark::Bench& bench)
{
uint8_t hash[CSHA1::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA1().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SHA256(benchmark::State& state)
+static void SHA256(benchmark::Bench& bench)
{
uint8_t hash[CSHA256::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA256().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SHA256_32b(benchmark::State& state)
+static void SHA256_32b(benchmark::Bench& bench)
{
std::vector<uint8_t> in(32,0);
- while (state.KeepRunning()) {
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA256()
.Write(in.data(), in.size())
.Finalize(in.data());
- }
+ });
}
-static void SHA256D64_1024(benchmark::State& state)
+static void SHA256D64_1024(benchmark::Bench& bench)
{
std::vector<uint8_t> in(64 * 1024, 0);
- while (state.KeepRunning()) {
+ bench.batch(in.size()).unit("byte").run([&] {
SHA256D64(in.data(), in.data(), 1024);
- }
+ });
}
-static void SHA512(benchmark::State& state)
+static void SHA512(benchmark::Bench& bench)
{
uint8_t hash[CSHA512::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
CSHA512().Write(in.data(), in.size()).Finalize(hash);
+ });
}
-static void SipHash_32b(benchmark::State& state)
+static void SipHash_32b(benchmark::Bench& bench)
{
uint256 x;
uint64_t k1 = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
*((uint64_t*)x.begin()) = SipHashUint256(0, ++k1, x);
- }
+ });
}
-static void FastRandom_32bit(benchmark::State& state)
+static void FastRandom_32bit(benchmark::Bench& bench)
{
FastRandomContext rng(true);
- while (state.KeepRunning()) {
+ bench.run([&] {
rng.rand32();
- }
+ });
}
-static void FastRandom_1bit(benchmark::State& state)
+static void FastRandom_1bit(benchmark::Bench& bench)
{
FastRandomContext rng(true);
- while (state.KeepRunning()) {
+ bench.run([&] {
rng.randbool();
- }
+ });
}
-BENCHMARK(RIPEMD160, 440);
-BENCHMARK(SHA1, 570);
-BENCHMARK(SHA256, 340);
-BENCHMARK(SHA512, 330);
+BENCHMARK(RIPEMD160);
+BENCHMARK(SHA1);
+BENCHMARK(SHA256);
+BENCHMARK(SHA512);
-BENCHMARK(SHA256_32b, 4700 * 1000);
-BENCHMARK(SipHash_32b, 40 * 1000 * 1000);
-BENCHMARK(SHA256D64_1024, 7400);
-BENCHMARK(FastRandom_32bit, 110 * 1000 * 1000);
-BENCHMARK(FastRandom_1bit, 440 * 1000 * 1000);
+BENCHMARK(SHA256_32b);
+BENCHMARK(SipHash_32b);
+BENCHMARK(SHA256D64_1024);
+BENCHMARK(FastRandom_32bit);
+BENCHMARK(FastRandom_1bit);
diff --git a/src/bench/duplicate_inputs.cpp b/src/bench/duplicate_inputs.cpp
index e87f15042b..5745e4276c 100644
--- a/src/bench/duplicate_inputs.cpp
+++ b/src/bench/duplicate_inputs.cpp
@@ -12,7 +12,7 @@
#include <validation.h>
-static void DuplicateInputs(benchmark::State& state)
+static void DuplicateInputs(benchmark::Bench& bench)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -61,11 +61,11 @@ static void DuplicateInputs(benchmark::State& state)
block.hashMerkleRoot = BlockMerkleRoot(block);
- while (state.KeepRunning()) {
+ bench.run([&] {
BlockValidationState cvstate{};
assert(!CheckBlock(block, cvstate, chainparams.GetConsensus(), false, false));
assert(cvstate.GetRejectReason() == "bad-txns-inputs-duplicate");
- }
+ });
}
-BENCHMARK(DuplicateInputs, 10);
+BENCHMARK(DuplicateInputs);
diff --git a/src/bench/examples.cpp b/src/bench/examples.cpp
index f88150200a..dcd615b9da 100644
--- a/src/bench/examples.cpp
+++ b/src/bench/examples.cpp
@@ -3,31 +3,19 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
-#include <util/time.h>
-
-// Sanity test: this should loop ten times, and
-// min/max/average should be close to 100ms.
-static void Sleep100ms(benchmark::State& state)
-{
- while (state.KeepRunning()) {
- UninterruptibleSleep(std::chrono::milliseconds{100});
- }
-}
-
-BENCHMARK(Sleep100ms, 10);
// Extremely fast-running benchmark:
#include <math.h>
volatile double sum = 0.0; // volatile, global so not optimized away
-static void Trig(benchmark::State& state)
+static void Trig(benchmark::Bench& bench)
{
double d = 0.01;
- while (state.KeepRunning()) {
+ bench.run([&] {
sum += sin(d);
d += 0.000001;
- }
+ });
}
-BENCHMARK(Trig, 12 * 1000 * 1000);
+BENCHMARK(Trig);
diff --git a/src/bench/gcs_filter.cpp b/src/bench/gcs_filter.cpp
index 535ad35571..ef83242e41 100644
--- a/src/bench/gcs_filter.cpp
+++ b/src/bench/gcs_filter.cpp
@@ -5,7 +5,7 @@
#include <bench/bench.h>
#include <blockfilter.h>
-static void ConstructGCSFilter(benchmark::State& state)
+static void ConstructGCSFilter(benchmark::Bench& bench)
{
GCSFilter::ElementSet elements;
for (int i = 0; i < 10000; ++i) {
@@ -16,14 +16,14 @@ static void ConstructGCSFilter(benchmark::State& state)
}
uint64_t siphash_k0 = 0;
- while (state.KeepRunning()) {
+ bench.batch(elements.size()).unit("elem").run([&] {
GCSFilter filter({siphash_k0, 0, 20, 1 << 20}, elements);
siphash_k0++;
- }
+ });
}
-static void MatchGCSFilter(benchmark::State& state)
+static void MatchGCSFilter(benchmark::Bench& bench)
{
GCSFilter::ElementSet elements;
for (int i = 0; i < 10000; ++i) {
@@ -34,10 +34,10 @@ static void MatchGCSFilter(benchmark::State& state)
}
GCSFilter filter({0, 0, 20, 1 << 20}, elements);
- while (state.KeepRunning()) {
+ bench.unit("elem").run([&] {
filter.Match(GCSFilter::Element());
- }
+ });
}
-BENCHMARK(ConstructGCSFilter, 1000);
-BENCHMARK(MatchGCSFilter, 50 * 1000);
+BENCHMARK(ConstructGCSFilter);
+BENCHMARK(MatchGCSFilter);
diff --git a/src/bench/hashpadding.cpp b/src/bench/hashpadding.cpp
index 985be8bdba..309cae3723 100644
--- a/src/bench/hashpadding.cpp
+++ b/src/bench/hashpadding.cpp
@@ -8,7 +8,7 @@
#include <uint256.h>
-static void PrePadded(benchmark::State& state)
+static void PrePadded(benchmark::Bench& bench)
{
CSHA256 hasher;
@@ -18,30 +18,30 @@ static void PrePadded(benchmark::State& state)
hasher.Write(nonce.begin(), 32);
hasher.Write(nonce.begin(), 32);
uint256 data = GetRandHash();
- while (state.KeepRunning()) {
+ bench.run([&] {
unsigned char out[32];
CSHA256 h = hasher;
h.Write(data.begin(), 32);
h.Finalize(out);
- }
+ });
}
-BENCHMARK(PrePadded, 10000);
+BENCHMARK(PrePadded);
-static void RegularPadded(benchmark::State& state)
+static void RegularPadded(benchmark::Bench& bench)
{
CSHA256 hasher;
// Setup the salted hasher
uint256 nonce = GetRandHash();
uint256 data = GetRandHash();
- while (state.KeepRunning()) {
+ bench.run([&] {
unsigned char out[32];
CSHA256 h = hasher;
h.Write(nonce.begin(), 32);
h.Write(data.begin(), 32);
h.Finalize(out);
- }
+ });
}
-BENCHMARK(RegularPadded, 10000);
+BENCHMARK(RegularPadded);
diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp
index 5d943810df..32b060a15a 100644
--- a/src/bench/lockedpool.cpp
+++ b/src/bench/lockedpool.cpp
@@ -9,10 +9,9 @@
#include <vector>
#define ASIZE 2048
-#define BITER 5000
#define MSIZE 2048
-static void BenchLockedPool(benchmark::State& state)
+static void BenchLockedPool(benchmark::Bench& bench)
{
void *synth_base = reinterpret_cast<void*>(0x08000000);
const size_t synth_size = 1024*1024;
@@ -22,24 +21,22 @@ static void BenchLockedPool(benchmark::State& state)
for (int x=0; x<ASIZE; ++x)
addr.push_back(nullptr);
uint32_t s = 0x12345678;
- while (state.KeepRunning()) {
- for (int x=0; x<BITER; ++x) {
- int idx = s & (addr.size()-1);
- if (s & 0x80000000) {
- b.free(addr[idx]);
- addr[idx] = nullptr;
- } else if(!addr[idx]) {
- addr[idx] = b.alloc((s >> 16) & (MSIZE-1));
- }
- bool lsb = s & 1;
- s >>= 1;
- if (lsb)
- s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
+ bench.run([&] {
+ int idx = s & (addr.size() - 1);
+ if (s & 0x80000000) {
+ b.free(addr[idx]);
+ addr[idx] = nullptr;
+ } else if (!addr[idx]) {
+ addr[idx] = b.alloc((s >> 16) & (MSIZE - 1));
}
- }
+ bool lsb = s & 1;
+ s >>= 1;
+ if (lsb)
+ s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
+ });
for (void *ptr: addr)
b.free(ptr);
addr.clear();
}
-BENCHMARK(BenchLockedPool, 1300);
+BENCHMARK(BenchLockedPool);
diff --git a/src/bench/mempool_eviction.cpp b/src/bench/mempool_eviction.cpp
index 69483f2914..1b9e428c9d 100644
--- a/src/bench/mempool_eviction.cpp
+++ b/src/bench/mempool_eviction.cpp
@@ -23,7 +23,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& nFee, CTxMemPool& po
// Right now this is only testing eviction performance in an extremely small
// mempool. Code needs to be written to generate a much wider variety of
// unique transactions for a more meaningful performance measurement.
-static void MempoolEviction(benchmark::State& state)
+static void MempoolEviction(benchmark::Bench& bench)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -125,7 +125,7 @@ static void MempoolEviction(benchmark::State& state)
const CTransactionRef tx6_r{MakeTransactionRef(tx6)};
const CTransactionRef tx7_r{MakeTransactionRef(tx7)};
- while (state.KeepRunning()) {
+ bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
AddTx(tx1_r, 10000LL, pool);
AddTx(tx2_r, 5000LL, pool);
AddTx(tx3_r, 20000LL, pool);
@@ -135,7 +135,7 @@ static void MempoolEviction(benchmark::State& state)
AddTx(tx7_r, 9000LL, pool);
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
pool.TrimToSize(GetVirtualTransactionSize(*tx1_r));
- }
+ });
}
-BENCHMARK(MempoolEviction, 41000);
+BENCHMARK(MempoolEviction);
diff --git a/src/bench/mempool_stress.cpp b/src/bench/mempool_stress.cpp
index 38d8632318..89233e390c 100644
--- a/src/bench/mempool_stress.cpp
+++ b/src/bench/mempool_stress.cpp
@@ -26,8 +26,13 @@ struct Available {
Available(CTransactionRef& ref, size_t tx_count) : ref(ref), tx_count(tx_count){}
};
-static void ComplexMemPool(benchmark::State& state)
+static void ComplexMemPool(benchmark::Bench& bench)
{
+ int childTxs = 800;
+ if (bench.complexityN() > 1) {
+ childTxs = static_cast<int>(bench.complexityN());
+ }
+
FastRandomContext det_rand{true};
std::vector<Available> available_coins;
std::vector<CTransactionRef> ordered_coins;
@@ -46,7 +51,7 @@ static void ComplexMemPool(benchmark::State& state)
ordered_coins.emplace_back(MakeTransactionRef(tx));
available_coins.emplace_back(ordered_coins.back(), tx_counter++);
}
- for (auto x = 0; x < 800 && !available_coins.empty(); ++x) {
+ for (auto x = 0; x < childTxs && !available_coins.empty(); ++x) {
CMutableTransaction tx = CMutableTransaction();
size_t n_ancestors = det_rand.randrange(10)+1;
for (size_t ancestor = 0; ancestor < n_ancestors && !available_coins.empty(); ++ancestor){
@@ -77,13 +82,13 @@ static void ComplexMemPool(benchmark::State& state)
TestingSetup test_setup;
CTxMemPool pool;
LOCK2(cs_main, pool.cs);
- while (state.KeepRunning()) {
+ bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
for (auto& tx : ordered_coins) {
AddTx(tx, pool);
}
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
pool.TrimToSize(GetVirtualTransactionSize(*ordered_coins.front()));
- }
+ });
}
-BENCHMARK(ComplexMemPool, 1);
+BENCHMARK(ComplexMemPool);
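ComplexMemPool above is the first benchmark wired up to the new -asymptote option: RunAll sets complexityN() to each requested value and prints the fitted complexityBigO() afterwards. A self-contained sketch of the same pattern, using a deliberately trivial O(n) workload:

```cpp
#include <bench/bench.h>

#include <vector>

volatile long long g_total = 0; // volatile, global so the loop is not optimized away

// Sketch of an asymptote-aware benchmark: with -asymptote=100,200,400 the
// harness sets complexityN() to each value in turn and estimates how the
// runtime grows with it.
static void GrowsWithN(benchmark::Bench& bench)
{
    size_t n = 100;
    if (bench.complexityN() > 1) {
        n = static_cast<size_t>(bench.complexityN());
    }
    std::vector<int> items(n, 1);
    bench.run([&] {
        long long sum = 0;
        for (int v : items) sum += v; // trivially O(n) work
        g_total += sum;
    });
}

BENCHMARK(GrowsWithN);
```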
diff --git a/src/bench/merkle_root.cpp b/src/bench/merkle_root.cpp
index e84f92feae..ba6629b9f0 100644
--- a/src/bench/merkle_root.cpp
+++ b/src/bench/merkle_root.cpp
@@ -8,7 +8,7 @@
#include <random.h>
#include <uint256.h>
-static void MerkleRoot(benchmark::State& state)
+static void MerkleRoot(benchmark::Bench& bench)
{
FastRandomContext rng(true);
std::vector<uint256> leaves;
@@ -16,11 +16,11 @@ static void MerkleRoot(benchmark::State& state)
for (auto& item : leaves) {
item = rng.rand256();
}
- while (state.KeepRunning()) {
+ bench.batch(leaves.size()).unit("leaf").run([&] {
bool mutation = false;
uint256 hash = ComputeMerkleRoot(std::vector<uint256>(leaves), &mutation);
leaves[mutation] = hash;
- }
+ });
}
-BENCHMARK(MerkleRoot, 800);
+BENCHMARK(MerkleRoot);
diff --git a/src/bench/nanobench.cpp b/src/bench/nanobench.cpp
new file mode 100644
index 0000000000..fcdd86495a
--- /dev/null
+++ b/src/bench/nanobench.cpp
@@ -0,0 +1,6 @@
+// Copyright (c) 2019-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#define ANKERL_NANOBENCH_IMPLEMENT
+#include <bench/nanobench.h>
diff --git a/src/bench/nanobench.h b/src/bench/nanobench.h
new file mode 100644
index 0000000000..c5379e7fd4
--- /dev/null
+++ b/src/bench/nanobench.h
@@ -0,0 +1,3225 @@
+// __ _ _______ __ _ _____ ______ _______ __ _ _______ _ _
+// | \ | |_____| | \ | | | |_____] |______ | \ | | |_____|
+// | \_| | | | \_| |_____| |_____] |______ | \_| |_____ | |
+//
+// Microbenchmark framework for C++11/14/17/20
+// https://github.com/martinus/nanobench
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2019-2020 Martin Ankerl <martin.ankerl@gmail.com>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ANKERL_NANOBENCH_H_INCLUDED
+#define ANKERL_NANOBENCH_H_INCLUDED
+
+// see https://semver.org/
+#define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes
+#define ANKERL_NANOBENCH_VERSION_MINOR 0 // backwards-compatible changes
+#define ANKERL_NANOBENCH_VERSION_PATCH 0 // backwards-compatible bug fixes
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// public facing api - as minimal as possible
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include <chrono> // high_resolution_clock
+#include <cstring> // memcpy
+#include <iosfwd> // for std::ostream* custom output target in Config
+#include <string> // all names
+#include <vector> // holds all results
+
+#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
+
+#define ANKERL_NANOBENCH_PRIVATE_CXX() __cplusplus
+#define ANKERL_NANOBENCH_PRIVATE_CXX98() 199711L
+#define ANKERL_NANOBENCH_PRIVATE_CXX11() 201103L
+#define ANKERL_NANOBENCH_PRIVATE_CXX14() 201402L
+#define ANKERL_NANOBENCH_PRIVATE_CXX17() 201703L
+
+#if ANKERL_NANOBENCH(CXX) >= ANKERL_NANOBENCH(CXX17)
+# define ANKERL_NANOBENCH_PRIVATE_NODISCARD() [[nodiscard]]
+#else
+# define ANKERL_NANOBENCH_PRIVATE_NODISCARD()
+#endif
+
+#if defined(__clang__)
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wpadded\"")
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() _Pragma("clang diagnostic pop")
+#else
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH()
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP()
+#endif
+
+#if defined(__GNUC__)
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Weffc++\"")
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() _Pragma("GCC diagnostic pop")
+#else
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH()
+# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP()
+#endif
+
+#if defined(ANKERL_NANOBENCH_LOG_ENABLED)
+# include <iostream>
+# define ANKERL_NANOBENCH_LOG(x) std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl
+#else
+# define ANKERL_NANOBENCH_LOG(x)
+#endif
+
+#if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS)
+# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 1
+#else
+# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0
+#endif
+
+#if defined(__clang__)
+# define ANKERL_NANOBENCH_NO_SANITIZE(...) __attribute__((no_sanitize(__VA_ARGS__)))
+#else
+# define ANKERL_NANOBENCH_NO_SANITIZE(...)
+#endif
+
+#if defined(_MSC_VER)
+# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __declspec(noinline)
+#else
+# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __attribute__((noinline))
+#endif
+
+// workaround missing "is_trivially_copyable" in g++ < 5.0
+// See https://stackoverflow.com/a/31798726/48181
+#if defined(__GNUC__) && __GNUC__ < 5
+# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
+#else
+# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
+#endif
+
+// declarations ///////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+using Clock = std::conditional<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock,
+ std::chrono::steady_clock>::type;
+class Bench;
+struct Config;
+class Result;
+class Rng;
+class BigO;
+
+/**
+ * @brief Renders output from a mustache-like template and benchmark results.
+ *
+ * The templating facility here is heavily inspired by [mustache - logic-less templates](https://mustache.github.io/).
+ * It adds a few more features that are necessary to get all of the captured data out of nanobench. Please read the
+ * excellent [mustache manual](https://mustache.github.io/mustache.5.html) to see what this is all about.
+ *
+ * nanobench output has two nested layers, *result* and *measurement*. Here is a hierarchy of the allowed tags:
+ *
+ * * `{{#result}}` Marks the begin of the result layer. Whatever comes after this will be instantiated as often as
+ * a benchmark result is available. Within it, you can use these tags:
+ *
+ * * `{{title}}` See Bench::title().
+ *
+ * * `{{name}}` Benchmark name, usually directly provided with Bench::run(), but can also be set with Bench::name().
+ *
+ * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::unit().
+ *
+ * * `{{batch}}` Batch size, see Bench::batch().
+ *
+ * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN().
+ *
+ * * `{{epochs}}` Number of epochs, see Bench::epochs().
+ *
+ * * `{{clockResolution}}` Accuracy of the clock, i.e. what's the smallest time possible to measure with the clock.
+ * For modern systems, this can be around 20 ns. This value is automatically determined by nanobench at the first
+ * benchmark that is run, and used as a static variable throughout the application's runtime.
+ *
+ * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. See Bench::clockResolutionMultiple().
+ * This is the target runtime for each measurement (epoch). That means the more accurate your clock is, the faster
+ * the benchmark will be. Basing the measurement's runtime on the clock resolution is the main reason why nanobench is so fast.
+ *
+ * * `{{maxEpochTime}}` Configuration for a maximum time each measurement (epoch) is allowed to take. Note that at least
+ * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime().
+ *
+ * * `{{minEpochTime}}` Minimum epoch time, usually not set. See Bench::minEpochTime().
+ *
+ * * `{{minEpochIterations}}` See Bench::minEpochIterations().
+ *
+ * * `{{epochIterations}}` See Bench::epochIterations().
+ *
+ * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup().
+ *
+ * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative().
+ *
+ * Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations
+ * are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`. If performance counters
+ * are available (currently only on Linux), you also have `pagefaults`, `cpucycles`,
+ * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. All the measures (except `iterations`) are
+ * provided for a single iteration (so `elapsed` is the time a single iteration took). The following tags are available:
+ *
+ * * `{{median(<name>)}}` Calculates the median of a measurement data set, e.g. `{{median(elapsed)}}`.
+ *
+ * * `{{average(<name>)}}` Average (mean) calculation.
+ *
+ * * `{{medianAbsolutePercentError(<name>)}}` Calculates MdAPE, the Median Absolute Percentage Error. The MdAPE is an excellent
+ * metric for the variation of measurements. It is more robust to outliers than the
+ * [Mean absolute percentage error (M-APE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error).
+ * @f[
+ * \mathrm{medianAbsolutePercentError}(e) = \mathrm{median}\{| \frac{e_i - \mathrm{median}\{e\}}{e_i}| \}
+ * @f]
+ * E.g. for *elapsed*: First, @f$ \mathrm{median}\{elapsed\} @f$ is calculated. This is used to calculate the absolute percentage
+ * error to this median for each measurement, as in @f$ | \frac{e_i - \mathrm{median}\{e\}}{e_i}| @f$. All these results
+ * are sorted, and the middle value is chosen as the median absolute percent error.
+ *
+ * This measurement is a bit hard to interpret, but it is very robust against outliers. E.g. a value of 5% means that half of the
+ * measurements deviate less than 5% from the median, and the other half deviates more than 5% from the median.
+ *
+ * * `{{sum(<name>)}}` Sum of all measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations
+ * measured in this benchmark.
+ *
+ * * `{{minimum(<name>)}}` Minimum of all measurements.
+ *
+ * * `{{maximum(<name>)}}` Maximum of all measurements.
+ *
+ * * `{{sumProduct(<first>, <second>)}}` Calculates the sum of the products of corresponding measures:
+ * @f[
+ * \mathrm{sumProduct}(a,b) = \sum_{i=1}^{n}a_i\cdot b_i
+ * @f]
+ * E.g. to calculate total runtime of the benchmark, you multiply iterations with elapsed time for each measurement, and
+ * sum these results up:
+ * `{{sumProduct(iterations, elapsed)}}`.
+ *
+ * * `{{#measurement}}` To access individual measurement results, open the begin tag for measurements.
+ *
+ * * `{{elapsed}}` Average elapsed time per iteration, in seconds.
+ *
+ * * `{{iterations}}` Number of iterations in the measurement. The number of iterations will fluctuate due
+ * to some applied randomness, to enhance accuracy.
+ *
+ * * `{{pagefaults}}` Average number of pagefaults per iteration.
+ *
+ * * `{{cpucycles}}` Average number of CPU cycles processed per iteration.
+ *
+ * * `{{contextswitches}}` Average number of context switches per iteration.
+ *
+ * * `{{instructions}}` Average number of retired instructions per iteration.
+ *
+ * * `{{branchinstructions}}` Average number of branches executed per iteration.
+ *
+ * * `{{branchmisses}}` Average number of branches that were missed per iteration.
+ *
+ * * `{{/measurement}}` Ends the measurement tag.
+ *
+ * * `{{/result}}` Marks the end of the result layer. This is the end marker for the template part that will be instantiated
+ * for each benchmark result.
+ *
+ *
+ * For the layer tags *result* and *measurement* you additionally can use these special markers:
+ *
+ * * ``{{#-first}}`` - Begin marker of a template that will be instantiated *only for the first* entry in the layer. Use is only
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-first}}``.
+ *
+ * * ``{{^-first}}`` - Begin marker of a template that will be instantiated *for each except the first* entry in the layer. This
+ * is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer.
+ * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
+ *
+ * * ``{{/-first}}`` - End marker for either ``{{#-first}}`` or ``{{^-first}}``.
+ *
+ * * ``{{#-last}}`` - Begin marker of a template that will be instantiated *only for the last* entry in the layer. Use is only
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-last}}``.
+ *
+ * * ``{{^-last}}`` - Begin marker of a template that will be instantiated *for each except the last* entry in the layer. This
+ * is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer.
+ * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
+ *
+ * * ``{{/-last}}`` - End marker for either ``{{#-last}}`` or ``{{^-last}}``.
+ *
+ @verbatim embed:rst
+
+ For an overview of all the possible data you can get out of nanobench, please see the tutorial at :ref:`tutorial-template-json`.
+
+ The templates that ship with nanobench are:
+
+ * :cpp:func:`templates::csv() <ankerl::nanobench::templates::csv()>`
+ * :cpp:func:`templates::json() <ankerl::nanobench::templates::json()>`
+ * :cpp:func:`templates::htmlBoxplot() <ankerl::nanobench::templates::htmlBoxplot()>`
+
+ @endverbatim
+ *
+ * @param mustacheTemplate The template.
+ * @param bench Benchmark, containing all the results.
+ * @param out Output for the generated output.
+ */
+void render(char const* mustacheTemplate, Bench const& bench, std::ostream& out);
+
+/**
+ * Same as render(char const* mustacheTemplate, Bench const& bench, std::ostream& out), but for when
+ * you only have results available.
+ *
+ * @param mustacheTemplate The template.
+ * @param results All the results to be used for rendering.
+ * @param out Output for the generated output.
+ */
+void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out);
+
+// Contains mustache-like templates
+namespace templates {
+
+/*!
+ @brief CSV data for the benchmark results.
+
+ Generates a comma-separated values dataset. First line is the header, each following line is a summary of each benchmark run.
+
+ @verbatim embed:rst
+ See the tutorial at :ref:`tutorial-template-csv` for an example.
+ @endverbatim
+ */
+char const* csv() noexcept;
+
+/*!
+ @brief HTML output that uses plotly to generate an interactive boxplot chart. See the tutorial for an example output.
+
+ The output uses only the elapsed time, and displays each epoch as a single dot.
+ @verbatim embed:rst
+ See the tutorial at :ref:`tutorial-template-html` for an example.
+ @endverbatim
+
+ @see ankerl::nanobench::render()
+ */
+char const* htmlBoxplot() noexcept;
+
+/*!
+ @brief Template to generate JSON data.
+
+ The generated JSON data contains *all* data that has been generated. All times are as double values, in seconds. The output can get
+ quite large.
+ @verbatim embed:rst
+ See the tutorial at :ref:`tutorial-template-json` for an example.
+ @endverbatim
+ */
+char const* json() noexcept;
+
+} // namespace templates
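A minimal, self-contained usage sketch (hypothetical, not part of this patch) tying render() and the bundled CSV template together; in this tree the header is included as <bench/nanobench.h>:

    #include <bench/nanobench.h>

    #include <cstdint>
    #include <iostream>

    int main()
    {
        ankerl::nanobench::Bench bench;
        uint64_t x = 1;
        bench.run("shift left", [&] {
            x <<= 1;
            ankerl::nanobench::doNotOptimizeAway(x); // keep the work observable
        });
        // Write a CSV summary of all collected results to stdout.
        ankerl::nanobench::render(ankerl::nanobench::templates::csv(), bench, std::cout);
    }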
+
+namespace detail {
+
+template <typename T>
+struct PerfCountSet;
+
+class IterationLogic;
+class PerformanceCounters;
+
+#if ANKERL_NANOBENCH(PERF_COUNTERS)
+class LinuxPerformanceCounters;
+#endif
+
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+// definitions ////////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+namespace detail {
+
+template <typename T>
+struct PerfCountSet {
+ T pageFaults{};
+ T cpuCycles{};
+ T contextSwitches{};
+ T instructions{};
+ T branchInstructions{};
+ T branchMisses{};
+};
+
+} // namespace detail
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+struct Config {
+ // actual benchmark config
+ std::string mBenchmarkTitle = "benchmark";
+ std::string mBenchmarkName = "noname";
+ std::string mUnit = "op";
+ double mBatch = 1.0;
+ double mComplexityN = -1.0;
+ size_t mNumEpochs = 11;
+ size_t mClockResolutionMultiple = static_cast<size_t>(1000);
+ std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100);
+ std::chrono::nanoseconds mMinEpochTime{};
+ uint64_t mMinEpochIterations{1};
+ uint64_t mEpochIterations{0}; // If not 0, run *exactly* these number of iterations per epoch.
+ uint64_t mWarmup = 0;
+ std::ostream* mOut = nullptr;
+ bool mShowPerformanceCounters = true;
+ bool mIsRelative = false;
+
+ Config();
+ ~Config();
+ Config& operator=(Config const&);
+ Config& operator=(Config&&);
+ Config(Config const&);
+ Config(Config&&) noexcept;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// Result returned after a benchmark has finished. Can be used as a baseline for relative().
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class Result {
+public:
+ enum class Measure : size_t {
+ elapsed,
+ iterations,
+ pagefaults,
+ cpucycles,
+ contextswitches,
+ instructions,
+ branchinstructions,
+ branchmisses,
+ _size
+ };
+
+ explicit Result(Config const& benchmarkConfig);
+
+ ~Result();
+ Result& operator=(Result const&);
+ Result& operator=(Result&&);
+ Result(Result const&);
+ Result(Result&&) noexcept;
+
+ // adds new measurement results
+ // all values are scaled by iters (except iters...)
+ void add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc);
+
+ ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept;
+
+ ANKERL_NANOBENCH(NODISCARD) double median(Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) double medianAbsolutePercentError(Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) double average(Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) double sum(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept;
+
+ ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const;
+ ANKERL_NANOBENCH(NODISCARD) bool empty() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) size_t size() const noexcept;
+
+ // Finds string, if not found, returns _size.
+ static Measure fromString(std::string const& str);
+
+private:
+ Config mConfig{};
+ std::vector<std::vector<double>> mNameToMeasurements{};
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+/**
+ * An extremely fast random generator. Currently, this implements *RomuDuoJr*, developed by Mark Overton. Source:
+ * http://www.romu-random.org/
+ *
+ * RomuDuoJr is extremely fast and provides reasonably good randomness. Not enough for large jobs, but definitely
+ * good enough for a benchmarking framework.
+ *
+ * * Estimated capacity: @f$ 2^{51} @f$ bytes
+ * * Register pressure: 4
+ * * State size: 128 bits
+ *
+ * This random generator is a drop-in replacement for the generators supplied by ``<random>``. It is not
+ * cryptographically secure. Its intended purpose is to be very fast so that benchmarks that make use
+ * of randomness are not distorted too much by the random generator.
+ *
+ * Rng also provides a few non-standard helpers, optimized for speed.
+ */
+class Rng final {
+public:
+ /**
+ * @brief This RNG provides 64bit randomness.
+ */
+ using result_type = uint64_t;
+
+ static constexpr uint64_t(min)();
+ static constexpr uint64_t(max)();
+
+ /**
+ * As a safety precaution, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the
+ * same sequence, which is generally not what one wants. Instead, create a new Rng with the default constructor Rng(), which is
+ * automatically seeded from `std::random_device`. If you really need a copy, use copy().
+ */
+ Rng(Rng const&) = delete;
+
+ /**
+ * Same as Rng(Rng const&), we don't allow assignment. If you need a new Rng create one with the default constructor Rng().
+ */
+ Rng& operator=(Rng const&) = delete;
+
+ // moving is ok
+ Rng(Rng&&) noexcept = default;
+ Rng& operator=(Rng&&) noexcept = default;
+ ~Rng() noexcept = default;
+
+ /**
+ * @brief Creates a new Random generator with random seed.
+ *
+ * Instead of a default seed (as the random generators from the standard library), this properly seeds the random generator from
+ * `std::random_device`. It guarantees correct seeding. Note that seeding can be relatively slow, depending on the source of
+ * randomness used. So it is best to create a Rng once and use it for all your randomness purposes.
+ */
+ Rng();
+
+ /*!
+ Creates a new Rng that is seeded with a specific seed. Each Rng created from the same seed will produce the same randomness
+ sequence. This can be useful for deterministic behavior.
+
+ @verbatim embed:rst
+ .. note::
+
+ The random algorithm might change between nanobench releases. Whenever a faster and/or better random
+ generator becomes available, I will switch the implementation.
+ @endverbatim
+
+ As per the Romu paper, this seeds the Rng with splitMix64 algorithm and performs 10 initial rounds for further mixing up of the
+ internal state.
+
+ @param seed The 64bit seed. All values are allowed, even 0.
+ */
+ explicit Rng(uint64_t seed) noexcept;
+ Rng(uint64_t x, uint64_t y) noexcept;
+
+ /**
+ * Creates a copy of the Rng, thus the copy provides exactly the same random sequence as the original.
+ */
+ ANKERL_NANOBENCH(NODISCARD) Rng copy() const noexcept;
+
+ /**
+ * @brief Produces a 64bit random value. This should be very fast, thus it is marked as inline. In my benchmark, this is ~46 times
+ * faster than `std::default_random_engine` for producing 64bit random values. It seems that the fastest std contender is
+ * `std::mt19937_64`. Still, this RNG is 2-3 times as fast.
+ *
+ * @return uint64_t The next 64 bit random value.
+ */
+ inline uint64_t operator()() noexcept;
+
+ // Note: the result is slightly biased; see the explanation below.
+
+ /**
+ * Generates a random number between 0 and range (excluding range).
+ *
+ * The algorithm only produces 32bit numbers, and is slightly biased. The effect is quite small unless your range is close to the
+ * maximum value of an integer. It is possible to correct the bias with rejection sampling (see
+ * [here](https://lemire.me/blog/2016/06/30/fast-random-shuffling/)), but this is most likely irrelevant in practice for the
+ * purposes of this Rng.
+ *
+ * See Daniel Lemire's blog post [A fast alternative to the modulo
+ * reduction](https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/)
+ *
+ * @param range Upper exclusive range. E.g. a value of 3 will generate random numbers 0, 1, 2.
+ * @return uint32_t Generated random values in range [0, range(.
+ */
+ inline uint32_t bounded(uint32_t range) noexcept;
+
+ // random double in range [0, 1(
+ // see http://prng.di.unimi.it/
+
+ /**
+ * Provides a random uniform double value between 0 and 1. This uses the method described in [Generating uniform doubles in the
+ * unit interval](http://prng.di.unimi.it/), and is extremely fast.
+ *
+ * @return double Uniformly distributed double value in range [0,1(, excluding 1.
+ */
+ inline double uniform01() noexcept;
+
+ /**
+ * Shuffles all entries in the given container. Although this has a slight bias due to the implementation of bounded(), this is
+ * preferable to `std::shuffle` because it is over 5 times faster. See Daniel Lemire's blog post [Fast random
+ * shuffling](https://lemire.me/blog/2016/06/30/fast-random-shuffling/).
+ *
+ * @param container The whole container will be shuffled.
+ */
+ template <typename Container>
+ void shuffle(Container& container) noexcept;
+
+private:
+ static constexpr uint64_t rotl(uint64_t x, unsigned k) noexcept;
+
+ uint64_t mX;
+ uint64_t mY;
+};
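A small usage sketch for Rng (hypothetical, not code from this patch), exercising the helpers declared above:

    #include <bench/nanobench.h>

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        ankerl::nanobench::Rng rng(12345);  // fixed seed -> reproducible sequence
        uint64_t raw = rng();               // full 64 bit random value
        uint32_t die = rng.bounded(6);      // value in [0, 6)
        double u = rng.uniform01();         // double in [0, 1)

        std::vector<int> v{1, 2, 3, 4, 5};
        rng.shuffle(v);                     // in-place shuffle using bounded()

        std::cout << raw << ' ' << die << ' ' << u << ' ' << v.front() << '\n';
    }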
+
+/**
+ * @brief Main entry point to nanobench's benchmarking facility.
+ *
+ * It holds configuration and results from one or more benchmark runs. Usually it is used in a single line, where the object is
+ * constructed, configured, and then a benchmark is run. E.g. like this:
+ *
+ * ankerl::nanobench::Bench().unit("byte").batch(1000).run("random fluctuations", [&] {
+ * // here be the benchmark code
+ * });
+ *
+ * In that example Bench() constructs the benchmark, it is then configured with unit() and batch(), and after configuration a
+ * benchmark is executed with run(). Once run() has finished, it prints the result to `std::cout`. It also stores the results
+ * in the Bench instance, but in this case the object is immediately destroyed, so they are not available any more.
+ */
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class Bench {
+public:
+ /**
+ * @brief Creates a new benchmark for configuration and running of benchmarks.
+ */
+ Bench();
+
+ Bench(Bench&& other);
+ Bench& operator=(Bench&& other);
+ Bench(Bench const& other);
+ Bench& operator=(Bench const& other);
+ ~Bench() noexcept;
+
+ /*!
+ @brief Repeatedly calls `op()` based on the configuration, and performs measurements.
+
+ This call is marked with `noinline` to prevent the compiler from optimizing across different benchmarks. This can have quite a big
+ effect on benchmark accuracy.
+
+ @verbatim embed:rst
+ .. note::
+
+ Each call to your lambda must have a side effect that the compiler can't simply optimize away. E.g. add the result to an
+ externally defined counter, and finally call `doNotOptimizeAway` on the variables the compiler
+ must not remove. You can also use :cpp:func:`ankerl::nanobench::doNotOptimizeAway` directly in the lambda, but be aware that
+ this has a small overhead.
+
+ @endverbatim
+
+ @tparam Op The code to benchmark.
+ */
+ template <typename Op>
+ ANKERL_NANOBENCH(NOINLINE)
+ Bench& run(char const* benchmarkName, Op&& op);
+
+ template <typename Op>
+ ANKERL_NANOBENCH(NOINLINE)
+ Bench& run(std::string const& benchmarkName, Op&& op);
+
+ /**
+ * @brief Same as run(char const* benchmarkName, Op op), but instead uses the previously set name.
+ * @tparam Op The code to benchmark.
+ */
+ template <typename Op>
+ ANKERL_NANOBENCH(NOINLINE)
+ Bench& run(Op&& op);
+
+ /**
+ * @brief Title of the benchmark, will be shown in the table header. Changing the title will start a new markdown table.
+ *
+ * @param benchmarkTitle The title of the benchmark.
+ */
+ Bench& title(char const* benchmarkTitle);
+ Bench& title(std::string const& benchmarkTitle);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& title() const noexcept;
+
+ /// Name of the benchmark, will be shown in the table row.
+ Bench& name(char const* benchmarkName);
+ Bench& name(std::string const& benchmarkName);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
+
+ /**
+ * @brief Sets the batch size.
+ *
+ * E.g. the number of processed bytes, or some other metric for the size of the processed data in each iteration. If you benchmark
+ * hashing of a 1000 byte long string and want byte/sec as a result, you can specify 1000 as the batch size.
+ *
+ * @tparam T Any input type is internally cast to `double`.
+ * @param b batch size
+ */
+ template <typename T>
+ Bench& batch(T b) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double batch() const noexcept;
+
+ /**
+ * @brief Sets the operation unit.
+ *
+ * Defaults to "op". Could be e.g. "byte" for string processing. This is used for the table header, e.g. to show `ns/byte`. Use
+ * singular (*byte*, not *bytes*). A change clears the currently collected results.
+ *
+ * @param unit The unit name.
+ */
+ Bench& unit(char const* unit);
+ Bench& unit(std::string const& unit);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& unit() const noexcept;
+
+ /**
+ * @brief Sets the output stream to which the resulting markdown table will be printed.
+ *
+ * The default is `&std::cout`. You can disable all output by setting `nullptr`.
+ *
+ * @param outstream Pointer to output stream, can be `nullptr`.
+ */
+ Bench& output(std::ostream* outstream) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::ostream* output() const noexcept;
+
+ /**
+ * Modern processors have a very accurate clock, being able to measure as low as 20 nanoseconds. This is the main trick nanobench uses to
+ * be so fast: we find out how accurate the clock is, then run the benchmark only so often that the clock's accuracy is good enough
+ * for accurate measurements.
+ *
+ * The default is to run one epoch for 1000 times the clock resolution. So for 20ns resolution and 11 epochs, this gives a total
+ * runtime of
+ *
+ * @f[
+ * 20ns * 1000 * 11 \approx 0.2ms
+ * @f]
+ *
+ * To be precise, nanobench adds a 0-20% random noise to each evaluation. This is to prevent any aliasing effects, and further
+ * improves accuracy.
+ *
+ * Total runtime will be higher though: Some initial time is needed to find out the target number of iterations for each epoch, and
+ * there is some overhead involved in starting & stopping timers, calculating the resulting statistics, and writing the output.
+ *
+ * @param multiple Target multiple of the clock resolution. Usually 1000 is a good compromise between runtime and accuracy.
+ */
+ Bench& clockResolutionMultiple(size_t multiple) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) size_t clockResolutionMultiple() const noexcept;
+
+ /**
+ * @brief Controls number of epochs, the number of measurements to perform.
+ *
+ * The reported result will be the median of the evaluations of all epochs. The higher you choose this, the more
+ * deterministic the result will be and outliers will be more easily removed. Also the `err%` will be more accurate the higher this
+ * number is. Note that the `err%` will not necessarily decrease when number of epochs is increased. But it will be a more accurate
+ * representation of the benchmarked code's runtime stability.
+ *
+ * Choose the value wisely. In practice, 11 has been shown to be a reasonable choice between runtime performance and accuracy.
+ * This setting goes hand in hand with minEpochIterations() (or minEpochTime()). If you are more interested in *median* runtime, you
+ * might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase minEpochIterations()
+ * instead.
+ *
+ * @param numEpochs Number of epochs.
+ */
+ Bench& epochs(size_t numEpochs) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) size_t epochs() const noexcept;
+
+ /**
+ * @brief Upper limit for the runtime of each epoch.
+ *
+ * As a safety precaution if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per
+ * epoch. Default is 100ms. At least a single evaluation of the benchmark is performed.
+ *
+ * @see minEpochTime(), minEpochIterations()
+ *
+ * @param t Maximum target runtime for a single epoch.
+ */
+ Bench& maxEpochTime(std::chrono::nanoseconds t) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds maxEpochTime() const noexcept;
+
+ /**
+ * @brief Minimum time each epoch should take.
+ *
+ * Default is zero, so we are fully relying on clockResolutionMultiple(). In most cases this is exactly what you want. If you see
+ * that the evaluation is unreliable with a high `err%`, you can increase either minEpochTime() or minEpochIterations().
+ *
+ * @see maxEpochTime(), minEpochIterations()
+ *
+ * @param t Minimum time each epoch should take.
+ */
+ Bench& minEpochTime(std::chrono::nanoseconds t) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds minEpochTime() const noexcept;
+
+ /**
+ * @brief Sets the minimum number of iterations each epoch should take.
+ *
+ * Default is 1, and we rely on clockResolutionMultiple(). If the `err%` is high and you want a smoother result, you might want
+ * to increase the minimum number of iterations, or increase the minEpochTime().
+ *
+ * @see minEpochTime(), maxEpochTime(), minEpochIterations()
+ *
+ * @param numIters Minimum number of iterations per epoch.
+ */
+ Bench& minEpochIterations(uint64_t numIters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) uint64_t minEpochIterations() const noexcept;
+
+ /**
+ * Sets exactly the number of iterations for each epoch. Ignores all other epoch limits. This forces nanobench to use exactly
+ * the given number of iterations for each epoch, not more and not less. Default is 0 (disabled).
+ *
+ * @param numIters Exact number of iterations to use. Set to 0 to disable.
+ */
+ Bench& epochIterations(uint64_t numIters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) uint64_t epochIterations() const noexcept;
+
+ /**
+ * @brief Sets a number of iterations that are initially performed without any measurements.
+ *
+ * Some benchmarks need a few evaluations to warm up caches / database / whatever access. Normally this should not be needed, since
+ * we show the median result so initial outliers will be filtered away automatically. If the warmup effect is large though, you
+ * might want to set it. Default is 0.
+ *
+ * @param numWarmupIters Number of warmup iterations.
+ */
+ Bench& warmup(uint64_t numWarmupIters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) uint64_t warmup() const noexcept;
+
+ /**
+ * @brief Marks the next run as the baseline.
+ *
+ * Call `relative(true)` to mark the run as the baseline. Successive runs will be compared to this run. It is calculated by
+ *
+ * @f[
+ * 100\% * \frac{baseline}{runtime}
+ * @f]
+ *
+ * * 100% means it is exactly as fast as the baseline
+ * * >100% means it is faster than the baseline. E.g. 200% means the current run is twice as fast as the baseline.
+ * * <100% means it is slower than the baseline. E.g. 50% means it is twice as slow as the baseline.
+ *
+ * See the tutorial section "Comparing Results" for example usage.
+ *
+ * @param isRelativeEnabled True to enable relative processing.
+ */
+ Bench& relative(bool isRelativeEnabled) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) bool relative() const noexcept;
+
+ /**
+ * @brief Enables/disables performance counters.
+ *
+ * On Linux nanobench has a powerful feature to use performance counters. This enables counting of retired instructions,
+ * number of branches, missed branches, etc. By default this is enabled, but you can disable it if you don't need that feature.
+ *
+ * @param showPerformanceCounters True to enable, false to disable.
+ */
+ Bench& performanceCounters(bool showPerformanceCounters) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) bool performanceCounters() const noexcept;
+
+ /**
+ * @brief Retrieves all benchmark results collected by the bench object so far.
+ *
+ * Each call to run() generates a Result that is stored within the Bench instance. This is mostly for advanced users who want to
+ * see all the nitty-gritty details.
+ *
+ * @return All results collected so far.
+ */
+ ANKERL_NANOBENCH(NODISCARD) std::vector<Result> const& results() const noexcept;
+
+ /*!
+ @verbatim embed:rst
+
+ Convenience shortcut to :cpp:func:`ankerl::nanobench::doNotOptimizeAway`.
+
+ @endverbatim
+ */
+ template <typename Arg>
+ Bench& doNotOptimizeAway(Arg&& arg);
+
+ /*!
+ @verbatim embed:rst
+
+ Sets N for asymptotic complexity calculation, so it becomes possible to calculate `Big O
+ <https://en.wikipedia.org/wiki/Big_O_notation>`_ from multiple benchmark evaluations.
+
+ Use :cpp:func:`ankerl::nanobench::Bench::complexityBigO` when the evaluation has finished. See the tutorial
+ :ref:`asymptotic-complexity` for details.
+
+ @endverbatim
+
+ @tparam T Any type is cast to `double`.
+ @param b Value of N for the next benchmark run, so it is possible to calculate `bigO`.
+ */
+ template <typename T>
+ Bench& complexityN(T b) noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double complexityN() const noexcept;
+
+ /*!
+ Calculates [Big O](https://en.wikipedia.org/wiki/Big_O_notation) of the results with all preconfigured complexity functions.
+ Currently these complexity functions are fitted into the benchmark results:
+
+ @f$ \mathcal{O}(1) @f$,
+ @f$ \mathcal{O}(n) @f$,
+ @f$ \mathcal{O}(\log{}n) @f$,
+ @f$ \mathcal{O}(n\log{}n) @f$,
+ @f$ \mathcal{O}(n^2) @f$,
+ @f$ \mathcal{O}(n^3) @f$.
+
+ If we e.g. evaluate the complexity of `std::sort`, this is the result of `std::cout << bench.complexityBigO()`:
+
+ ```
+ | coefficient | err% | complexity
+ |--------------:|-------:|------------
+ | 5.08935e-09 | 2.6% | O(n log n)
+ | 6.10608e-08 | 8.0% | O(n)
+ | 1.29307e-11 | 47.2% | O(n^2)
+ | 2.48677e-15 | 69.6% | O(n^3)
+ | 9.88133e-06 | 132.3% | O(log n)
+ | 5.98793e-05 | 162.5% | O(1)
+ ```
+
+ So in this case @f$ \mathcal{O}(n\log{}n) @f$ provides the best approximation.
+
+ @verbatim embed:rst
+ See the tutorial :ref:`asymptotic-complexity` for details.
+ @endverbatim
+ @return Evaluation results, which can be printed or otherwise inspected.
+ */
+ std::vector<BigO> complexityBigO() const;
+
+ /**
+ * @brief Calculates bigO for a custom function.
+ *
+ * E.g. to calculate the mean squared error for @f$ \mathcal{O}(\log{}\log{}n) @f$, which is not part of the default set of
+ * complexityBigO(), you can do this:
+ *
+ * ```
+ * auto logLogN = bench.complexityBigO("O(log log n)", [](double n) {
+ * return std::log2(std::log2(n));
+ * });
+ * ```
+ *
+ * The resulting mean squared error can be printed with `std::cout << logLogN`. E.g. it prints something like this:
+ *
+ * ```text
+ * 2.46985e-05 * O(log log n), rms=1.48121
+ * ```
+ *
+ * @tparam Op Type of mapping operation.
+ * @param name Name for the function, e.g. "O(log log n)"
+ * @param op Op's operator() maps a `double` with the desired complexity function, e.g. `log2(log2(n))`.
+ * @return BigO Error calculation, which is streamable to std::cout.
+ */
+ template <typename Op>
+ BigO complexityBigO(char const* name, Op op) const;
+
+ template <typename Op>
+ BigO complexityBigO(std::string const& name, Op op) const;
+
+ /*!
+ @verbatim embed:rst
+
+ Convenience shortcut to :cpp:func:`ankerl::nanobench::render`.
+
+ @endverbatim
+ */
+ Bench& render(char const* templateContent, std::ostream& os);
+
+ Bench& config(Config const& benchmarkConfig);
+ ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept;
+
+private:
+ Config mConfig{};
+ std::vector<Result> mResults{};
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
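A configuration sketch (hypothetical, not code from this patch) showing how the setters above chain and how relative() makes the first run the baseline for the following ones; other knobs such as epochs() or minEpochIterations() chain the same way:

    #include <bench/nanobench.h>

    #include <cstddef>
    #include <string>

    int main()
    {
        std::string haystack(1000, 'x');
        haystack += 'y';

        ankerl::nanobench::Bench bench;
        bench.title("find last byte").unit("byte").batch(haystack.size()).relative(true);

        // First run is the baseline; the second row is reported relative to it.
        bench.run("std::string::find", [&] {
            ankerl::nanobench::doNotOptimizeAway(haystack.find('y'));
        });
        bench.run("manual loop", [&] {
            std::size_t i = 0;
            while (i < haystack.size() && haystack[i] != 'y') ++i;
            ankerl::nanobench::doNotOptimizeAway(i);
        });
    }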
+
+/**
+ * @brief Makes sure none of the given arguments are optimized away by the compiler.
+ *
+ * @tparam Arg Type of the argument that shouldn't be optimized away.
+ * @param arg The input that we mark as being used, even though we don't do anything with it.
+ */
+template <typename Arg>
+void doNotOptimizeAway(Arg&& arg);
+
+namespace detail {
+
+#if defined(_MSC_VER)
+void doNotOptimizeAwaySink(void const*);
+
+template <typename T>
+void doNotOptimizeAway(T const& val);
+
+#else
+
+// see folly's Benchmark.h
+template <typename T>
+constexpr bool doNotOptimizeNeedsIndirect() {
+ using Decayed = typename std::decay<T>::type;
+ return !ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(Decayed) || sizeof(Decayed) > sizeof(long) || std::is_pointer<Decayed>::value;
+}
+
+template <typename T>
+typename std::enable_if<!doNotOptimizeNeedsIndirect<T>()>::type doNotOptimizeAway(T const& val) {
+ // NOLINTNEXTLINE(hicpp-no-assembler)
+ asm volatile("" ::"r"(val));
+}
+
+template <typename T>
+typename std::enable_if<doNotOptimizeNeedsIndirect<T>()>::type doNotOptimizeAway(T const& val) {
+ // NOLINTNEXTLINE(hicpp-no-assembler)
+ asm volatile("" ::"m"(val) : "memory");
+}
+#endif
+
+// internally used, but visible because run() is templated.
+// Not movable/copy-able, so we simply use a pointer instead of unique_ptr. This saves us from
+// having to include <memory>, and the template instantiation overhead of unique_ptr which is unfortunately quite significant.
+ANKERL_NANOBENCH(IGNORE_EFFCPP_PUSH)
+class IterationLogic {
+public:
+ explicit IterationLogic(Bench const& config) noexcept;
+ ~IterationLogic();
+
+ ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept;
+ void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept;
+ void moveResultTo(std::vector<Result>& results) noexcept;
+
+private:
+ struct Impl;
+ Impl* mPimpl;
+};
+ANKERL_NANOBENCH(IGNORE_EFFCPP_POP)
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class PerformanceCounters {
+public:
+ PerformanceCounters(PerformanceCounters const&) = delete;
+ PerformanceCounters& operator=(PerformanceCounters const&) = delete;
+
+ PerformanceCounters();
+ ~PerformanceCounters();
+
+ void beginMeasure();
+ void endMeasure();
+ void updateResults(uint64_t numIters);
+
+ ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& val() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& has() const noexcept;
+
+private:
+#if ANKERL_NANOBENCH(PERF_COUNTERS)
+ LinuxPerformanceCounters* mPc = nullptr;
+#endif
+ PerfCountSet<uint64_t> mVal{};
+ PerfCountSet<bool> mHas{};
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// Gets the singleton
+PerformanceCounters& performanceCounters();
+
+} // namespace detail
+
+class BigO {
+public:
+ using RangeMeasure = std::vector<std::pair<double, double>>;
+
+ template <typename Op>
+ static RangeMeasure mapRangeMeasure(RangeMeasure data, Op op) {
+ for (auto& rangeMeasure : data) {
+ rangeMeasure.first = op(rangeMeasure.first);
+ }
+ return data;
+ }
+
+ static RangeMeasure collectRangeMeasure(std::vector<Result> const& results);
+
+ template <typename Op>
+ BigO(char const* bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
+ : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
+
+ template <typename Op>
+ BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
+ : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
+
+ BigO(char const* bigOName, RangeMeasure const& scaledRangeMeasure);
+ BigO(std::string const& bigOName, RangeMeasure const& scaledRangeMeasure);
+ ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double constant() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) double normalizedRootMeanSquare() const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) bool operator<(BigO const& other) const noexcept;
+
+private:
+ std::string mName{};
+ double mConstant{};
+ double mNormalizedRootMeanSquare{};
+};
+std::ostream& operator<<(std::ostream& os, BigO const& bigO);
+std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs);
+
+} // namespace nanobench
+} // namespace ankerl
+
+// implementation /////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+constexpr uint64_t(Rng::min)() {
+ return 0;
+}
+
+constexpr uint64_t(Rng::max)() {
+ return (std::numeric_limits<uint64_t>::max)();
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+uint64_t Rng::operator()() noexcept {
+ auto x = mX;
+
+ mX = UINT64_C(15241094284759029579) * mY;
+ mY = rotl(mY - x, 27);
+
+ return x;
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+uint32_t Rng::bounded(uint32_t range) noexcept {
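+ // Lemire's multiply-shift reduction: multiply the 32 bit random value by range and take the upper 32 bits, mapping into [0, range) without a modulo.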
+ uint64_t r32 = static_cast<uint32_t>(operator()());
+ auto multiresult = r32 * range;
+ return static_cast<uint32_t>(multiresult >> 32U);
+}
+
+double Rng::uniform01() noexcept {
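+ // Set the exponent to 0x3ff and fill the mantissa with random bits: this yields a double in [1, 2), so subtracting 1.0 gives [0, 1).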
+ auto i = (UINT64_C(0x3ff) << 52U) | (operator()() >> 12U);
+ // can't use union in c++ here for type punning, it's undefined behavior.
+ // std::memcpy is optimized anyway.
+ double d;
+ std::memcpy(&d, &i, sizeof(double));
+ return d - 1.0;
+}
+
+template <typename Container>
+void Rng::shuffle(Container& container) noexcept {
+ auto size = static_cast<uint32_t>(container.size());
+ for (auto i = size; i > 1U; --i) {
+ using std::swap;
+ auto p = bounded(i); // number in [0, i)
+ swap(container[i - 1], container[p]);
+ }
+}
+
+constexpr uint64_t Rng::rotl(uint64_t x, unsigned k) noexcept {
+ return (x << k) | (x >> (64U - k));
+}
+
+template <typename Op>
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+Bench& Bench::run(Op&& op) {
+ // It is important that this method is kept short so the compiler can do better optimizations / inlining of op()
+ detail::IterationLogic iterationLogic(*this);
+ auto& pc = detail::performanceCounters();
+
+ while (auto n = iterationLogic.numIters()) {
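+ // Each pass of this loop is one measurement (epoch): n iterations of op() are timed as a block and recorded via iterationLogic.add().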
+ pc.beginMeasure();
+ Clock::time_point before = Clock::now();
+ while (n-- > 0) {
+ op();
+ }
+ Clock::time_point after = Clock::now();
+ pc.endMeasure();
+ pc.updateResults(iterationLogic.numIters());
+ iterationLogic.add(after - before, pc);
+ }
+ iterationLogic.moveResultTo(mResults);
+ return *this;
+}
+
+// Performs all evaluations.
+template <typename Op>
+Bench& Bench::run(char const* benchmarkName, Op&& op) {
+ name(benchmarkName);
+ return run(std::forward<Op>(op));
+}
+
+template <typename Op>
+Bench& Bench::run(std::string const& benchmarkName, Op&& op) {
+ name(benchmarkName);
+ return run(std::forward<Op>(op));
+}
+
+template <typename Op>
+BigO Bench::complexityBigO(char const* benchmarkName, Op op) const {
+ return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op);
+}
+
+template <typename Op>
+BigO Bench::complexityBigO(std::string const& benchmarkName, Op op) const {
+ return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op);
+}
+
+// Set the batch size, e.g. number of processed bytes, or some other metric for the size of the processed data in each iteration.
+// Any argument is cast to double.
+template <typename T>
+Bench& Bench::batch(T b) noexcept {
+ mConfig.mBatch = static_cast<double>(b);
+ return *this;
+}
+
+// Sets the computation complexity of the next run. Any argument is cast to double.
+template <typename T>
+Bench& Bench::complexityN(T n) noexcept {
+ mConfig.mComplexityN = static_cast<double>(n);
+ return *this;
+}
+
+// Convenience: makes sure none of the given arguments are optimized away by the compiler.
+template <typename Arg>
+Bench& Bench::doNotOptimizeAway(Arg&& arg) {
+ detail::doNotOptimizeAway(std::forward<Arg>(arg));
+ return *this;
+}
+
+// Makes sure none of the given arguments are optimized away by the compiler.
+template <typename Arg>
+void doNotOptimizeAway(Arg&& arg) {
+ detail::doNotOptimizeAway(std::forward<Arg>(arg));
+}
+
+namespace detail {
+
+#if defined(_MSC_VER)
+template <typename T>
+void doNotOptimizeAway(T const& val) {
+ doNotOptimizeAwaySink(&val);
+}
+
+#endif
+
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+#if defined(ANKERL_NANOBENCH_IMPLEMENT)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// implementation part - only visible in .cpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+# include <algorithm> // sort, reverse
+# include <atomic> // compare_exchange_strong in loop overhead
+# include <cstdlib> // getenv
+# include <cstring> // strstr, strncmp
+# include <fstream> // ifstream to parse proc files
+# include <iomanip> // setw, setprecision
+# include <iostream> // cout
+# include <numeric> // accumulate
+# include <random> // random_device
+# include <sstream> // to_s in Number
+# include <stdexcept> // throw for rendering templates
+# include <tuple> // std::tie
+# if defined(__linux__)
+# include <unistd.h> //sysconf
+# endif
+# if ANKERL_NANOBENCH(PERF_COUNTERS)
+# include <map> // map
+
+# include <linux/perf_event.h>
+# include <sys/ioctl.h>
+# include <sys/syscall.h>
+# include <unistd.h>
+# endif
+
+// declarations ///////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+// helper stuff that is only intended to be used internally
+namespace detail {
+
+struct TableInfo;
+
+// formatting utilities
+namespace fmt {
+
+class NumSep;
+class StreamStateRestorer;
+class Number;
+class MarkDownColumn;
+class MarkDownCode;
+
+} // namespace fmt
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+// definitions ////////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+uint64_t splitMix64(uint64_t& state) noexcept;
+
+namespace detail {
+
+// helpers to get double values
+template <typename T>
+inline double d(T t) noexcept {
+ return static_cast<double>(t);
+}
+inline double d(Clock::duration duration) noexcept {
+ return std::chrono::duration_cast<std::chrono::duration<double>>(duration).count();
+}
+
+// Calculates clock resolution once, and remembers the result
+inline Clock::duration clockResolution() noexcept;
+
+} // namespace detail
+
+namespace templates {
+
+char const* csv() noexcept {
+ return R"DELIM("title";"name";"unit";"batch";"elapsed";"error %";"instructions";"branches";"branch misses";"total"
+{{#result}}"{{title}}";"{{name}}";"{{unit}}";{{batch}};{{median(elapsed)}};{{medianAbsolutePercentError(elapsed)}};{{median(instructions)}};{{median(branchinstructions)}};{{median(branchmisses)}};{{sumProduct(iterations, elapsed)}}
+{{/result}})DELIM";
+}
+
+char const* htmlBoxplot() noexcept {
+ return R"DELIM(<html>
+
+<head>
+ <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
+</head>
+
+<body>
+ <div id="myDiv"></div>
+ <script>
+ var data = [
+ {{#result}}{
+ name: '{{name}}',
+ y: [{{#measurement}}{{elapsed}}{{^-last}}, {{/-last}}{{/measurement}}],
+ },
+ {{/result}}
+ ];
+ var title = '{{title}}';
+
+ data = data.map(a => Object.assign(a, { boxpoints: 'all', pointpos: 0, type: 'box' }));
+ var layout = { title: { text: title }, showlegend: false, yaxis: { title: 'time per unit', rangemode: 'tozero', autorange: true } }; Plotly.newPlot('myDiv', data, layout, {responsive: true});
+ </script>
+</body>
+
+</html>)DELIM";
+}
+
+char const* json() noexcept {
+ return R"DELIM({
+ "results": [
+{{#result}} {
+ "title": "{{title}}",
+ "name": "{{name}}",
+ "unit": "{{unit}}",
+ "batch": {{batch}},
+ "complexityN": {{complexityN}},
+ "epochs": {{epochs}},
+ "clockResolution": {{clockResolution}},
+ "clockResolutionMultiple": {{clockResolutionMultiple}},
+ "maxEpochTime": {{maxEpochTime}},
+ "minEpochTime": {{minEpochTime}},
+ "minEpochIterations": {{minEpochIterations}},
+ "epochIterations": {{epochIterations}},
+ "warmup": {{warmup}},
+ "relative": {{relative}},
+ "median(elapsed)": {{median(elapsed)}},
+ "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}},
+ "median(instructions)": {{median(instructions)}},
+ "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}},
+ "median(cpucycles)": {{median(cpucycles)}},
+ "median(contextswitches)": {{median(contextswitches)}},
+ "median(pagefaults)": {{median(pagefaults)}},
+ "median(branchinstructions)": {{median(branchinstructions)}},
+ "median(branchmisses)": {{median(branchmisses)}},
+ "totalTime": {{sumProduct(iterations, elapsed)}},
+ "measurements": [
+{{#measurement}} {
+ "iterations": {{iterations}},
+ "elapsed": {{elapsed}},
+ "pagefaults": {{pagefaults}},
+ "cpucycles": {{cpucycles}},
+ "contextswitches": {{contextswitches}},
+ "instructions": {{instructions}},
+ "branchinstructions": {{branchinstructions}},
+ "branchmisses": {{branchmisses}}
+ }{{^-last}},{{/-last}}
+{{/measurement}} ]
+ }{{^-last}},{{/-last}}
+{{/result}} ]
+})DELIM";
+}
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+struct Node {
+ enum class Type { tag, content, section, inverted_section };
+
+ char const* begin;
+ char const* end;
+ std::vector<Node> children;
+ Type type;
+
+ template <size_t N>
+ // NOLINTNEXTLINE(hicpp-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
+ bool operator==(char const (&str)[N]) const noexcept {
+ return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1);
+ }
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
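+// Recursively parses a mustache-like template into a node tree: plain text becomes content nodes, {{#...}} and {{^...}} open
+// (inverted) sections that are parsed recursively, {{/...}} closes the current section, and anything else becomes a tag node.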
+static std::vector<Node> parseMustacheTemplate(char const** tpl) {
+ std::vector<Node> nodes;
+
+ while (true) {
+ auto begin = std::strstr(*tpl, "{{");
+ auto end = begin;
+ if (begin != nullptr) {
+ begin += 2;
+ end = std::strstr(begin, "}}");
+ }
+
+ if (begin == nullptr || end == nullptr) {
+ // nothing found, finish node
+ nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content});
+ return nodes;
+ }
+
+ nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content});
+
+ // we found a tag
+ *tpl = end + 2;
+ switch (*begin) {
+ case '/':
+ // finished! bail out
+ return nodes;
+
+ case '#':
+ nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section});
+ break;
+
+ case '^':
+ nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section});
+ break;
+
+ default:
+ nodes.emplace_back(Node{begin, end, std::vector<Node>{}, Node::Type::tag});
+ break;
+ }
+ }
+}
+
+static bool generateFirstLast(Node const& n, size_t idx, size_t size, std::ostream& out) {
+ bool matchFirst = n == "-first";
+ bool matchLast = n == "-last";
+ if (!matchFirst && !matchLast) {
+ return false;
+ }
+
+ bool doWrite = false;
+ if (n.type == Node::Type::section) {
+ doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1);
+ } else if (n.type == Node::Type::inverted_section) {
+ doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1);
+ }
+
+ if (doWrite) {
+ for (auto const& child : n.children) {
+ if (child.type == Node::Type::content) {
+ out.write(child.begin, std::distance(child.begin, child.end));
+ }
+ }
+ }
+ return true;
+}
+
+static bool matchCmdArgs(std::string const& str, std::vector<std::string>& matchResult) {
+ matchResult.clear();
+ auto idxOpen = str.find('(');
+ auto idxClose = str.find(')', idxOpen);
+ if (idxClose == std::string::npos) {
+ return false;
+ }
+
+ matchResult.emplace_back(str.substr(0, idxOpen));
+
+ // split by comma
+ matchResult.emplace_back(std::string{});
+ for (size_t i = idxOpen + 1; i != idxClose; ++i) {
+ if (str[i] == ' ' || str[i] == '\t') {
+ // skip whitespace
+ continue;
+ }
+ if (str[i] == ',') {
+ // got a comma => new string
+ matchResult.emplace_back(std::string{});
+ continue;
+ }
+ // no whitespace no comma, append
+ matchResult.back() += str[i];
+ }
+ return true;
+}
+
+static bool generateConfigTag(Node const& n, Config const& config, std::ostream& out) {
+ using detail::d;
+
+ if (n == "title") {
+ out << config.mBenchmarkTitle;
+ return true;
+ } else if (n == "name") {
+ out << config.mBenchmarkName;
+ return true;
+ } else if (n == "unit") {
+ out << config.mUnit;
+ return true;
+ } else if (n == "batch") {
+ out << config.mBatch;
+ return true;
+ } else if (n == "complexityN") {
+ out << config.mComplexityN;
+ return true;
+ } else if (n == "epochs") {
+ out << config.mNumEpochs;
+ return true;
+ } else if (n == "clockResolution") {
+ out << d(detail::clockResolution());
+ return true;
+ } else if (n == "clockResolutionMultiple") {
+ out << config.mClockResolutionMultiple;
+ return true;
+ } else if (n == "maxEpochTime") {
+ out << d(config.mMaxEpochTime);
+ return true;
+ } else if (n == "minEpochTime") {
+ out << d(config.mMinEpochTime);
+ return true;
+ } else if (n == "minEpochIterations") {
+ out << config.mMinEpochIterations;
+ return true;
+ } else if (n == "epochIterations") {
+ out << config.mEpochIterations;
+ return true;
+ } else if (n == "warmup") {
+ out << config.mWarmup;
+ return true;
+ } else if (n == "relative") {
+ out << config.mIsRelative;
+ return true;
+ }
+ return false;
+}
+
+static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostream& out) {
+ if (generateConfigTag(n, r.config(), out)) {
+ return out;
+ }
+ // match e.g. "median(elapsed)"
+ // g++ 4.8 doesn't implement std::regex :(
+ // static std::regex const regOpArg1("^([a-zA-Z]+)\\(([a-zA-Z]*)\\)$");
+ // std::cmatch matchResult;
+ // if (std::regex_match(n.begin, n.end, matchResult, regOpArg1)) {
+ std::vector<std::string> matchResult;
+ if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
+ if (matchResult.size() == 2) {
+ auto m = Result::fromString(matchResult[1]);
+ if (m == Result::Measure::_size) {
+ return out << 0.0;
+ }
+
+ if (matchResult[0] == "median") {
+ return out << r.median(m);
+ }
+ if (matchResult[0] == "average") {
+ return out << r.average(m);
+ }
+ if (matchResult[0] == "medianAbsolutePercentError") {
+ return out << r.medianAbsolutePercentError(m);
+ }
+ if (matchResult[0] == "sum") {
+ return out << r.sum(m);
+ }
+ if (matchResult[0] == "minimum") {
+ return out << r.minimum(m);
+ }
+ if (matchResult[0] == "maximum") {
+ return out << r.maximum(m);
+ }
+ } else if (matchResult.size() == 3) {
+ auto m1 = Result::fromString(matchResult[1]);
+ auto m2 = Result::fromString(matchResult[2]);
+ if (m1 == Result::Measure::_size || m2 == Result::Measure::_size) {
+ return out << 0.0;
+ }
+
+ if (matchResult[0] == "sumProduct") {
+ return out << r.sumProduct(m1, m2);
+ }
+ }
+ }
+
+ // match e.g. "sumProduct(elapsed, iterations)"
+ // static std::regex const regOpArg2("^([a-zA-Z]+)\\(([a-zA-Z]*)\\s*,\\s+([a-zA-Z]*)\\)$");
+
+ // nothing matches :(
+ throw std::runtime_error("command '" + std::string(n.begin, n.end) + "' not understood");
+}
+
+static void generateResultMeasurement(std::vector<Node> const& nodes, size_t idx, Result const& r, std::ostream& out) {
+ for (auto const& n : nodes) {
+ if (!generateFirstLast(n, idx, r.size(), out)) {
+ ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
+ switch (n.type) {
+ case Node::Type::content:
+ out.write(n.begin, std::distance(n.begin, n.end));
+ break;
+
+ case Node::Type::inverted_section:
+ throw std::runtime_error("got a inverted section inside measurement");
+
+ case Node::Type::section:
+ throw std::runtime_error("got a section inside measurement");
+
+ case Node::Type::tag: {
+ auto m = Result::fromString(std::string(n.begin, n.end));
+ if (m == Result::Measure::_size || !r.has(m)) {
+ out << 0.0;
+ } else {
+ out << r.get(idx, m);
+ }
+ break;
+ }
+ }
+ }
+ }
+}
+
+static void generateResult(std::vector<Node> const& nodes, size_t idx, std::vector<Result> const& results, std::ostream& out) {
+ auto const& r = results[idx];
+ for (auto const& n : nodes) {
+ if (!generateFirstLast(n, idx, results.size(), out)) {
+ ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
+ switch (n.type) {
+ case Node::Type::content:
+ out.write(n.begin, std::distance(n.begin, n.end));
+ break;
+
+ case Node::Type::inverted_section:
+ throw std::runtime_error("got a inverted section inside result");
+
+ case Node::Type::section:
+ if (n == "measurement") {
+ for (size_t i = 0; i < r.size(); ++i) {
+ generateResultMeasurement(n.children, i, r, out);
+ }
+ } else {
+ throw std::runtime_error("got a section inside result");
+ }
+ break;
+
+ case Node::Type::tag:
+ generateResultTag(n, r, out);
+ break;
+ }
+ }
+ }
+}
+
+} // namespace templates
+
+// helper stuff that is only intended to be used internally
+namespace detail {
+
+char const* getEnv(char const* name);
+bool isEndlessRunning(std::string const& name);
+
+template <typename T>
+T parseFile(std::string const& filename);
+
+void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations);
+void printStabilityInformationOnce(std::ostream* os);
+
+// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry.
+uint64_t& singletonHeaderHash() noexcept;
+
+// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference.
+Clock::duration calcClockResolution(size_t numEvaluations) noexcept;
+
+// formatting utilities
+namespace fmt {
+
+// adds a thousands separator to numbers
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class NumSep : public std::numpunct<char> {
+public:
+ explicit NumSep(char sep);
+ char do_thousands_sep() const override;
+ std::string do_grouping() const override;
+
+private:
+ char mSep;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// RAII to save & restore a stream's state
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class StreamStateRestorer {
+public:
+ explicit StreamStateRestorer(std::ostream& s);
+ ~StreamStateRestorer();
+
+ // sets back all stream info that we remembered at construction
+ void restore();
+
+ // don't allow copying / moving
+ StreamStateRestorer(StreamStateRestorer const&) = delete;
+ StreamStateRestorer& operator=(StreamStateRestorer const&) = delete;
+ StreamStateRestorer(StreamStateRestorer&&) = delete;
+ StreamStateRestorer& operator=(StreamStateRestorer&&) = delete;
+
+private:
+ std::ostream& mStream;
+ std::locale mLocale;
+ std::streamsize const mPrecision;
+ std::streamsize const mWidth;
+ std::ostream::char_type const mFill;
+ std::ostream::fmtflags const mFmtFlags;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+// Number formatter
+class Number {
+public:
+ Number(int width, int precision, double value);
+ Number(int width, int precision, int64_t value);
+ std::string to_s() const;
+
+private:
+ friend std::ostream& operator<<(std::ostream& os, Number const& n);
+ std::ostream& write(std::ostream& os) const;
+
+ int mWidth;
+ int mPrecision;
+ double mValue;
+};
+
+// helper replacement for std::to_string so we are locale independent
+std::string to_s(uint64_t s);
+
+std::ostream& operator<<(std::ostream& os, Number const& n);
+
+class MarkDownColumn {
+public:
+ MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val);
+ std::string title() const;
+ std::string separator() const;
+ std::string invalid() const;
+ std::string value() const;
+
+private:
+ int mWidth;
+ int mPrecision;
+ std::string mTitle;
+ std::string mSuffix;
+ double mValue;
+};
+
+// Formats any text as markdown code, escaping backticks.
+class MarkDownCode {
+public:
+ explicit MarkDownCode(std::string const& what);
+
+private:
+ friend std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode);
+ std::ostream& write(std::ostream& os) const;
+
+ std::string mWhat{};
+};
+
+std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode);
+
+} // namespace fmt
+} // namespace detail
+} // namespace nanobench
+} // namespace ankerl
+
+// implementation /////////////////////////////////////////////////////////////////////////////////
+
+namespace ankerl {
+namespace nanobench {
+
+void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out) {
+ detail::fmt::StreamStateRestorer restorer(out);
+
+ out.precision(std::numeric_limits<double>::digits10);
+ auto nodes = templates::parseMustacheTemplate(&mustacheTemplate);
+
+ for (auto const& n : nodes) {
+ ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
+ switch (n.type) {
+ case templates::Node::Type::content:
+ out.write(n.begin, std::distance(n.begin, n.end));
+ break;
+
+ case templates::Node::Type::inverted_section:
+ throw std::runtime_error("unknown list '" + std::string(n.begin, n.end) + "'");
+
+ case templates::Node::Type::section:
+ if (n == "result") {
+ const size_t nbResults = results.size();
+ for (size_t i = 0; i < nbResults; ++i) {
+ generateResult(n.children, i, results, out);
+ }
+ } else {
+ throw std::runtime_error("unknown section '" + std::string(n.begin, n.end) + "'");
+ }
+ break;
+
+ case templates::Node::Type::tag:
+ // This just uses the last result's config.
+ if (!generateConfigTag(n, results.back().config(), out)) {
+ throw std::runtime_error("unknown tag '" + std::string(n.begin, n.end) + "'");
+ }
+ break;
+ }
+ }
+}
+
+void render(char const* mustacheTemplate, const Bench& bench, std::ostream& out) {
+ render(mustacheTemplate, bench.results(), out);
+}
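+// A minimal usage sketch (the template string here is hypothetical): any mustache-style template with a
+// {{#result}}...{{/result}} section can be rendered from a finished benchmark, e.g.
+//     ankerl::nanobench::render("{{#result}}{{name}}: {{median(elapsed)}}\n{{/result}}", bench, std::cout);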
+
+namespace detail {
+
+PerformanceCounters& performanceCounters() {
+# if defined(__clang__)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wexit-time-destructors"
+# endif
+ static PerformanceCounters pc;
+# if defined(__clang__)
+# pragma clang diagnostic pop
+# endif
+ return pc;
+}
+
+// Windows version of doNotOptimizeAway
+// see https://github.com/google/benchmark/blob/master/include/benchmark/benchmark.h#L307
+// see https://github.com/facebook/folly/blob/master/folly/Benchmark.h#L280
+// see https://docs.microsoft.com/en-us/cpp/preprocessor/optimize
+# if defined(_MSC_VER)
+# pragma optimize("", off)
+void doNotOptimizeAwaySink(void const*) {}
+# pragma optimize("", on)
+# endif
+
+template <typename T>
+T parseFile(std::string const& filename) {
+ std::ifstream fin(filename);
+ T num{};
+ fin >> num;
+ return num;
+}
+
+char const* getEnv(char const* name) {
+# if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4996) // 'getenv': This function or variable may be unsafe.
+# endif
+ return std::getenv(name);
+# if defined(_MSC_VER)
+# pragma warning(pop)
+# endif
+}
+
+bool isEndlessRunning(std::string const& name) {
+ auto endless = getEnv("NANOBENCH_ENDLESS");
+ return nullptr != endless && endless == name;
+}
+
+void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations) {
+ warnings.clear();
+ recommendations.clear();
+
+ bool recommendCheckFlags = false;
+
+# if defined(DEBUG)
+ warnings.emplace_back("DEBUG defined");
+ recommendCheckFlags = true;
+# endif
+
+ bool recommendPyPerf = false;
+# if defined(__linux__)
+ auto nprocs = sysconf(_SC_NPROCESSORS_CONF);
+ if (nprocs <= 0) {
+ warnings.emplace_back("couldn't figure out number of processors - no governor, turbo check possible");
+ } else {
+
+ // check frequency scaling
+ for (long id = 0; id < nprocs; ++id) {
+ auto idStr = detail::fmt::to_s(static_cast<uint64_t>(id));
+ auto sysCpu = "/sys/devices/system/cpu/cpu" + idStr;
+ auto minFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_min_freq");
+ auto maxFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_max_freq");
+ if (minFreq != maxFreq) {
+ auto minMHz = static_cast<double>(minFreq) / 1000.0;
+ auto maxMHz = static_cast<double>(maxFreq) / 1000.0;
+ warnings.emplace_back("CPU frequency scaling enabled: CPU " + idStr + " between " +
+ detail::fmt::Number(1, 1, minMHz).to_s() + " and " + detail::fmt::Number(1, 1, maxMHz).to_s() +
+ " MHz");
+ recommendPyPerf = true;
+ break;
+ }
+ }
+
+ auto currentGovernor = parseFile<std::string>("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
+ if ("performance" != currentGovernor) {
+ warnings.emplace_back("CPU governor is '" + currentGovernor + "' but should be 'performance'");
+ recommendPyPerf = true;
+ }
+
+ if (0 == parseFile<int>("/sys/devices/system/cpu/intel_pstate/no_turbo")) {
+ warnings.emplace_back("Turbo is enabled, CPU frequency will fluctuate");
+ recommendPyPerf = true;
+ }
+ }
+# endif
+
+ if (recommendCheckFlags) {
+ recommendations.emplace_back("Make sure you compile for Release");
+ }
+ if (recommendPyPerf) {
+ recommendations.emplace_back("Use 'pyperf system tune' before benchmarking. See https://github.com/vstinner/pyperf");
+ }
+}
+
+void printStabilityInformationOnce(std::ostream* outStream) {
+ static bool shouldPrint = true;
+ if (shouldPrint && outStream) {
+ auto& os = *outStream;
+ shouldPrint = false;
+ std::vector<std::string> warnings;
+ std::vector<std::string> recommendations;
+ gatherStabilityInformation(warnings, recommendations);
+ if (warnings.empty()) {
+ return;
+ }
+
+ os << "Warning, results might be unstable:" << std::endl;
+ for (auto const& w : warnings) {
+ os << "* " << w << std::endl;
+ }
+
+ os << std::endl << "Recommendations" << std::endl;
+ for (auto const& r : recommendations) {
+ os << "* " << r << std::endl;
+ }
+ }
+}
+
+// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry.
+uint64_t& singletonHeaderHash() noexcept {
+ static uint64_t sHeaderHash{};
+ return sHeaderHash;
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+inline uint64_t fnv1a(std::string const& str) noexcept {
+ auto val = UINT64_C(14695981039346656037);
+ for (auto c : str) {
+ val = (val ^ static_cast<uint8_t>(c)) * UINT64_C(1099511628211);
+ }
+ return val;
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+inline uint64_t hash_combine(uint64_t seed, uint64_t val) {
+ return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U));
+}
+
+// determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference.
+Clock::duration calcClockResolution(size_t numEvaluations) noexcept {
+ auto bestDuration = Clock::duration::max();
+ Clock::time_point tBegin;
+ Clock::time_point tEnd;
+ for (size_t i = 0; i < numEvaluations; ++i) {
+ tBegin = Clock::now();
+ do {
+ tEnd = Clock::now();
+ } while (tBegin == tEnd);
+ bestDuration = (std::min)(bestDuration, tEnd - tBegin);
+ }
+ return bestDuration;
+}
+
+// Calculates clock resolution once, and remembers the result
+Clock::duration clockResolution() noexcept {
+ static Clock::duration sResolution = calcClockResolution(20);
+ return sResolution;
+}
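+// For a rough sense of scale (numbers are illustrative): with a ~20 ns clock resolution and the default
+// clockResolutionMultiple() of 1000, the target runtime per epoch works out to about 20'000 ns, clamped
+// between minEpochTime() and maxEpochTime() in IterationLogic::Impl below.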
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+struct IterationLogic::Impl {
+ enum class State { warmup, upscaling_runtime, measuring, endless };
+
+ explicit Impl(Bench const& bench)
+ : mBench(bench)
+ , mResult(bench.config()) {
+ printStabilityInformationOnce(mBench.output());
+
+ // determine target runtime per epoch
+ mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple();
+ if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) {
+ mTargetRuntimePerEpoch = mBench.maxEpochTime();
+ }
+ if (mTargetRuntimePerEpoch < mBench.minEpochTime()) {
+ mTargetRuntimePerEpoch = mBench.minEpochTime();
+ }
+
+ if (isEndlessRunning(mBench.name())) {
+ std::cerr << "NANOBENCH_ENDLESS set: running '" << mBench.name() << "' endlessly" << std::endl;
+ mNumIters = (std::numeric_limits<uint64_t>::max)();
+ mState = State::endless;
+ } else if (0 != mBench.warmup()) {
+ mNumIters = mBench.warmup();
+ mState = State::warmup;
+ } else if (0 != mBench.epochIterations()) {
+ // exact number of iterations
+ mNumIters = mBench.epochIterations();
+ mState = State::measuring;
+ } else {
+ mNumIters = mBench.minEpochIterations();
+ mState = State::upscaling_runtime;
+ }
+ }
+
+ // directly calculates the new number of iterations from elapsed time & iteration count, and adds up to 20% random noise (10% on average). Never goes below minEpochIterations.
+ ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters) noexcept {
+ auto doubleElapsed = d(elapsed);
+ auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch);
+ auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters);
+
+ auto doubleMinEpochIters = d(mBench.minEpochIterations());
+ if (doubleNewIters < doubleMinEpochIters) {
+ doubleNewIters = doubleMinEpochIters;
+ }
+ doubleNewIters *= 1.0 + 0.2 * mRng.uniform01();
+
+ // +0.5 for correct rounding when casting
+ // NOLINTNEXTLINE(bugprone-incorrect-roundings)
+ return static_cast<uint64_t>(doubleNewIters + 0.5);
+ }
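+ // A worked example for calcBestNumIters() with made-up numbers: a target of 1'000'000 ns and an epoch that
+ // took 100'000 ns for 2'000 iterations scales to 1'000'000 / 100'000 * 2'000 = 20'000 iterations, which is
+ // then inflated by up to 20% random noise and rounded to the nearest integer.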
+
+ ANKERL_NANOBENCH_NO_SANITIZE("integer") void upscale(std::chrono::nanoseconds elapsed) {
+ if (elapsed * 10 < mTargetRuntimePerEpoch) {
+ // we are far below the target runtime. Multiply iterations by 10 (with overflow check)
+ if (mNumIters * 10 < mNumIters) {
+ // overflow :-(
+ showResult("iterations overflow. Maybe your code got optimized away?");
+ mNumIters = 0;
+ return;
+ }
+ mNumIters *= 10;
+ } else {
+ mNumIters = calcBestNumIters(elapsed, mNumIters);
+ }
+ }
+
+ void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept {
+# if defined(ANKERL_NANOBENCH_LOG_ENABLED)
+ auto oldIters = mNumIters;
+# endif
+
+ switch (mState) {
+ case State::warmup:
+ if (isCloseEnoughForMeasurements(elapsed)) {
+ // if elapsed is close enough, we can skip upscaling and go right to measurements
+ // still, we don't add the result to the measurements.
+ mState = State::measuring;
+ mNumIters = calcBestNumIters(elapsed, mNumIters);
+ } else {
+ // not close enough: switch to upscaling
+ mState = State::upscaling_runtime;
+ upscale(elapsed);
+ }
+ break;
+
+ case State::upscaling_runtime:
+ if (isCloseEnoughForMeasurements(elapsed)) {
+ // if we are close enough, add measurement and switch to always measuring
+ mState = State::measuring;
+ mTotalElapsed += elapsed;
+ mTotalNumIters += mNumIters;
+ mResult.add(elapsed, mNumIters, pc);
+ mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
+ } else {
+ upscale(elapsed);
+ }
+ break;
+
+ case State::measuring:
+ // just add the measurement - no questions asked, even when the runtime is low. If we ignored
+ // that fluctuation, we would bias the result.
+ mTotalElapsed += elapsed;
+ mTotalNumIters += mNumIters;
+ mResult.add(elapsed, mNumIters, pc);
+ if (0 != mBench.epochIterations()) {
+ mNumIters = mBench.epochIterations();
+ } else {
+ mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
+ }
+ break;
+
+ case State::endless:
+ mNumIters = (std::numeric_limits<uint64_t>::max)();
+ break;
+ }
+
+ if (static_cast<uint64_t>(mResult.size()) == mBench.epochs()) {
+ // we got all the results that we need, finish it
+ showResult("");
+ mNumIters = 0;
+ }
+
+ ANKERL_NANOBENCH_LOG(mBench.name() << ": " << detail::fmt::Number(20, 3, static_cast<double>(elapsed.count())) << " elapsed, "
+ << detail::fmt::Number(20, 3, static_cast<double>(mTargetRuntimePerEpoch.count()))
+ << " target. oldIters=" << oldIters << ", mNumIters=" << mNumIters
+ << ", mState=" << static_cast<int>(mState));
+ }
+
+ void showResult(std::string const& errorMessage) const {
+ ANKERL_NANOBENCH_LOG(errorMessage);
+
+ if (mBench.output() != nullptr) {
+ // prepare column data ///////
+ std::vector<fmt::MarkDownColumn> columns;
+
+ auto rMedian = mResult.median(Result::Measure::elapsed);
+
+ if (mBench.relative()) {
+ double d = 100.0;
+ if (!mBench.results().empty()) {
+ d = rMedian <= 0.0 ? 0.0 : mBench.results().front().median(Result::Measure::elapsed) / rMedian * 100.0;
+ }
+ columns.emplace_back(11, 1, "relative", "%", d);
+ }
+
+ if (mBench.complexityN() > 0) {
+ columns.emplace_back(14, 0, "complexityN", "", mBench.complexityN());
+ }
+
+ columns.emplace_back(22, 2, "ns/" + mBench.unit(), "", 1e9 * rMedian / mBench.batch());
+ columns.emplace_back(22, 2, mBench.unit() + "/s", "", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian);
+
+ double rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed);
+ columns.emplace_back(10, 1, "err%", "%", rErrorMedian * 100.0);
+
+ double rInsMedian = -1.0;
+ if (mResult.has(Result::Measure::instructions)) {
+ rInsMedian = mResult.median(Result::Measure::instructions);
+ columns.emplace_back(18, 2, "ins/" + mBench.unit(), "", rInsMedian / mBench.batch());
+ }
+
+ double rCycMedian = -1.0;
+ if (mResult.has(Result::Measure::cpucycles)) {
+ rCycMedian = mResult.median(Result::Measure::cpucycles);
+ columns.emplace_back(18, 2, "cyc/" + mBench.unit(), "", rCycMedian / mBench.batch());
+ }
+ if (rInsMedian > 0.0 && rCycMedian > 0.0) {
+ columns.emplace_back(9, 3, "IPC", "", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian);
+ }
+ if (mResult.has(Result::Measure::branchinstructions)) {
+ double rBraMedian = mResult.median(Result::Measure::branchinstructions);
+ columns.emplace_back(17, 2, "bra/" + mBench.unit(), "", rBraMedian / mBench.batch());
+ if (mResult.has(Result::Measure::branchmisses)) {
+ double p = 0.0;
+ if (rBraMedian >= 1e-9) {
+ p = 100.0 * mResult.median(Result::Measure::branchmisses) / rBraMedian;
+ }
+ columns.emplace_back(10, 1, "miss%", "%", p);
+ }
+ }
+
+ columns.emplace_back(12, 2, "total", "", mResult.sum(Result::Measure::elapsed));
+
+ // write everything
+ auto& os = *mBench.output();
+
+ uint64_t hash = 0;
+ hash = hash_combine(fnv1a(mBench.unit()), hash);
+ hash = hash_combine(fnv1a(mBench.title()), hash);
+ hash = hash_combine(mBench.relative(), hash);
+ hash = hash_combine(mBench.performanceCounters(), hash);
+
+ if (hash != singletonHeaderHash()) {
+ singletonHeaderHash() = hash;
+
+ // table settings changed (or this is the first result): print a new header
+ os << std::endl;
+ for (auto const& col : columns) {
+ os << col.title();
+ }
+ os << "| " << mBench.title() << std::endl;
+
+ for (auto const& col : columns) {
+ os << col.separator();
+ }
+ os << "|:" << std::string(mBench.title().size() + 1U, '-') << std::endl;
+ }
+
+ if (!errorMessage.empty()) {
+ for (auto const& col : columns) {
+ os << col.invalid();
+ }
+ os << "| :boom: " << fmt::MarkDownCode(mBench.name()) << " (" << errorMessage << ')' << std::endl;
+ } else {
+ for (auto const& col : columns) {
+ os << col.value();
+ }
+ os << "| ";
+ auto showUnstable = rErrorMedian >= 0.05;
+ if (showUnstable) {
+ os << ":wavy_dash: ";
+ }
+ os << fmt::MarkDownCode(mBench.name());
+ if (showUnstable) {
+ auto avgIters = static_cast<double>(mTotalNumIters) / static_cast<double>(mBench.epochs());
+ // NOLINTNEXTLINE(bugprone-incorrect-roundings)
+ auto suggestedIters = static_cast<uint64_t>(avgIters * 10 + 0.5);
+
+ os << " (Unstable with ~" << detail::fmt::Number(1, 1, avgIters)
+ << " iters. Increase `minEpochIterations` to e.g. " << suggestedIters << ")";
+ }
+ os << std::endl;
+ }
+ }
+ }
+
+ ANKERL_NANOBENCH(NODISCARD) bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed) const noexcept {
+ return elapsed * 3 >= mTargetRuntimePerEpoch * 2;
+ }
+
+ uint64_t mNumIters = 1;
+ Bench const& mBench;
+ std::chrono::nanoseconds mTargetRuntimePerEpoch{};
+ Result mResult;
+ Rng mRng{123};
+ std::chrono::nanoseconds mTotalElapsed{};
+ uint64_t mTotalNumIters = 0;
+
+ State mState = State::upscaling_runtime;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+IterationLogic::IterationLogic(Bench const& bench) noexcept
+ : mPimpl(new Impl(bench)) {}
+
+IterationLogic::~IterationLogic() {
+ if (mPimpl) {
+ delete mPimpl;
+ }
+}
+
+uint64_t IterationLogic::numIters() const noexcept {
+ ANKERL_NANOBENCH_LOG(mPimpl->mBench.name() << ": mNumIters=" << mPimpl->mNumIters);
+ return mPimpl->mNumIters;
+}
+
+void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept {
+ mPimpl->add(elapsed, pc);
+}
+
+void IterationLogic::moveResultTo(std::vector<Result>& results) noexcept {
+ results.emplace_back(std::move(mPimpl->mResult));
+}
+
+# if ANKERL_NANOBENCH(PERF_COUNTERS)
+
+ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
+class LinuxPerformanceCounters {
+public:
+ struct Target {
+ Target(uint64_t* targetValue_, bool correctMeasuringOverhead_, bool correctLoopOverhead_)
+ : targetValue(targetValue_)
+ , correctMeasuringOverhead(correctMeasuringOverhead_)
+ , correctLoopOverhead(correctLoopOverhead_) {}
+
+ uint64_t* targetValue{};
+ bool correctMeasuringOverhead{};
+ bool correctLoopOverhead{};
+ };
+
+ ~LinuxPerformanceCounters();
+
+ // quick operation
+ inline void start() {}
+
+ inline void stop() {}
+
+ bool monitor(perf_sw_ids swId, Target target);
+ bool monitor(perf_hw_id hwId, Target target);
+
+ bool hasError() const noexcept {
+ return mHasError;
+ }
+
+ // Just reading the data is faster than enabling & disabling the counters;
+ // we subtract the measuring overhead ourselves.
+ inline void beginMeasure() {
+ if (mHasError) {
+ return;
+ }
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
+ if (mHasError) {
+ return;
+ }
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
+ }
+
+ inline void endMeasure() {
+ if (mHasError) {
+ return;
+ }
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP));
+ if (mHasError) {
+ return;
+ }
+
+ auto const numBytes = sizeof(uint64_t) * mCounters.size();
+ auto ret = read(mFd, mCounters.data(), numBytes);
+ mHasError = ret != static_cast<ssize_t>(numBytes);
+ }
+
+ void updateResults(uint64_t numIters);
+
+ // rounded integer division
+ template <typename T>
+ static inline T divRounded(T a, T divisor) {
+ return (a + divisor / 2) / divisor;
+ }
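+ // e.g. divRounded(7, 2) == 4 and divRounded(5, 2) == 3, where plain integer division would truncate to 3 and 2.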
+
+ template <typename Op>
+ ANKERL_NANOBENCH_NO_SANITIZE("integer")
+ void calibrate(Op&& op) {
+ // clear current calibration data
+ for (auto& v : mCalibratedOverhead) {
+ v = UINT64_C(0);
+ }
+
+ // create new calibration data
+ auto newCalibration = mCalibratedOverhead;
+ for (auto& v : newCalibration) {
+ v = (std::numeric_limits<uint64_t>::max)();
+ }
+ for (size_t iter = 0; iter < 100; ++iter) {
+ beginMeasure();
+ op();
+ endMeasure();
+ if (mHasError) {
+ return;
+ }
+
+ for (size_t i = 0; i < newCalibration.size(); ++i) {
+ auto diff = mCounters[i];
+ if (newCalibration[i] > diff) {
+ newCalibration[i] = diff;
+ }
+ }
+ }
+
+ mCalibratedOverhead = std::move(newCalibration);
+
+ {
+ // calibrate loop overhead. For branches & instructions this makes sense, not so much for everything else like cycles.
+ // marsaglia's xorshift: mov, sal/shr, xor. Times 3.
+ // This has the nice property that the compiler doesn't seem to be able to optimize multiple calls any further.
+ // see https://godbolt.org/z/49RVQ5
+ uint64_t const numIters = 100000U + (std::random_device{}() & 3);
+ uint64_t n = numIters;
+ uint32_t x = 1234567;
+ auto fn = [&]() {
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ };
+
+ beginMeasure();
+ while (n-- > 0) {
+ fn();
+ }
+ endMeasure();
+ detail::doNotOptimizeAway(x);
+ auto measure1 = mCounters;
+
+ n = numIters;
+ beginMeasure();
+ while (n-- > 0) {
+ // we now run *twice* so we can easily calculate the overhead
+ fn();
+ fn();
+ }
+ endMeasure();
+ detail::doNotOptimizeAway(x);
+ auto measure2 = mCounters;
+
+ for (size_t i = 0; i < mCounters.size(); ++i) {
+ // factor 2 because the second loop calls fn() twice per iteration
+ auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0;
+ auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0;
+ auto overhead = m1 * 2 > m2 ? m1 * 2 - m2 : 0;
+
+ mLoopOverhead[i] = divRounded(overhead, numIters);
+ }
+ }
+ }
+
+private:
+ bool monitor(uint32_t type, uint64_t eventid, Target target);
+
+ std::map<uint64_t, Target> mIdToTarget{};
+
+ // start with minimum size of 3 for read_format
+ std::vector<uint64_t> mCounters{3};
+ std::vector<uint64_t> mCalibratedOverhead{3};
+ std::vector<uint64_t> mLoopOverhead{3};
+
+ uint64_t mTimeEnabledNanos = 0;
+ uint64_t mTimeRunningNanos = 0;
+ int mFd = -1;
+ bool mHasError = false;
+};
+ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+
+LinuxPerformanceCounters::~LinuxPerformanceCounters() {
+ if (-1 != mFd) {
+ close(mFd);
+ }
+}
+
+bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) {
+ return monitor(PERF_TYPE_SOFTWARE, swId, target);
+}
+
+bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) {
+ return monitor(PERF_TYPE_HARDWARE, hwId, target);
+}
+
+// overflow is ok, it's checked
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+void LinuxPerformanceCounters::updateResults(uint64_t numIters) {
+ // clear old data
+ for (auto& id_value : mIdToTarget) {
+ *id_value.second.targetValue = UINT64_C(0);
+ }
+
+ if (mHasError) {
+ return;
+ }
+
+ mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1];
+ mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2];
+
+ for (uint64_t i = 0; i < mCounters[0]; ++i) {
+ auto idx = static_cast<size_t>(3 + i * 2 + 0);
+ auto id = mCounters[idx + 1U];
+
+ auto it = mIdToTarget.find(id);
+ if (it != mIdToTarget.end()) {
+
+ auto& tgt = it->second;
+ *tgt.targetValue = mCounters[idx];
+ if (tgt.correctMeasuringOverhead) {
+ if (*tgt.targetValue >= mCalibratedOverhead[idx]) {
+ *tgt.targetValue -= mCalibratedOverhead[idx];
+ } else {
+ *tgt.targetValue = 0U;
+ }
+ }
+ if (tgt.correctLoopOverhead) {
+ auto correctionVal = mLoopOverhead[idx] * numIters;
+ if (*tgt.targetValue >= correctionVal) {
+ *tgt.targetValue -= correctionVal;
+ } else {
+ *tgt.targetValue = 0U;
+ }
+ }
+ }
+ }
+}
+
+bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) {
+ *target.targetValue = (std::numeric_limits<uint64_t>::max)();
+ if (mHasError) {
+ return false;
+ }
+
+ auto pea = perf_event_attr();
+ std::memset(&pea, 0, sizeof(perf_event_attr));
+ pea.type = type;
+ pea.size = sizeof(perf_event_attr);
+ pea.config = eventid;
+ pea.disabled = 1; // start counter as disabled
+ pea.exclude_kernel = 1;
+ pea.exclude_hv = 1;
+
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+ const int pid = 0; // the current process
+ const int cpu = -1; // all CPUs
+# if defined(PERF_FLAG_FD_CLOEXEC) // since Linux 3.14
+ const unsigned long flags = PERF_FLAG_FD_CLOEXEC;
+# else
+ const unsigned long flags = 0;
+# endif
+
+ auto fd = static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd, flags));
+ if (-1 == fd) {
+ return false;
+ }
+ if (-1 == mFd) {
+ // first call: set to fd, and use this from now on
+ mFd = fd;
+ }
+ uint64_t id = 0;
+ // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &id)) {
+ // couldn't get id
+ return false;
+ }
+
+ // insert into the map, relying on the fact that std::map references stay stable across inserts.
+ mIdToTarget.emplace(id, target);
+
+ // prepare the read_format buffers with the correct size (after the insert)
+ auto size = 3 + 2 * mIdToTarget.size();
+ mCounters.resize(size);
+ mCalibratedOverhead.resize(size);
+ mLoopOverhead.resize(size);
+
+ return true;
+}
+
+PerformanceCounters::PerformanceCounters()
+ : mPc(new LinuxPerformanceCounters())
+ , mVal()
+ , mHas() {
+
+ mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults, true, false));
+ mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles, true, false));
+ mHas.contextSwitches =
+ mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches, true, false));
+ mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions, true, true));
+ mHas.branchInstructions =
+ mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions, true, false));
+ mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses, true, false));
+ // mHas.branchMisses = false;
+
+ mPc->start();
+ mPc->calibrate([] {
+ auto before = ankerl::nanobench::Clock::now();
+ auto after = ankerl::nanobench::Clock::now();
+ (void)before;
+ (void)after;
+ });
+
+ if (mPc->hasError()) {
+ // something failed, don't monitor anything.
+ mHas = PerfCountSet<bool>{};
+ }
+}
+
+PerformanceCounters::~PerformanceCounters() {
+ if (nullptr != mPc) {
+ delete mPc;
+ }
+}
+
+void PerformanceCounters::beginMeasure() {
+ mPc->beginMeasure();
+}
+
+void PerformanceCounters::endMeasure() {
+ mPc->endMeasure();
+}
+
+void PerformanceCounters::updateResults(uint64_t numIters) {
+ mPc->updateResults(numIters);
+}
+
+# else
+
+PerformanceCounters::PerformanceCounters() = default;
+PerformanceCounters::~PerformanceCounters() = default;
+void PerformanceCounters::beginMeasure() {}
+void PerformanceCounters::endMeasure() {}
+void PerformanceCounters::updateResults(uint64_t) {}
+
+# endif
+
+ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& PerformanceCounters::val() const noexcept {
+ return mVal;
+}
+ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& PerformanceCounters::has() const noexcept {
+ return mHas;
+}
+
+// formatting utilities
+namespace fmt {
+
+// adds a thousands separator to numbers
+NumSep::NumSep(char sep)
+ : mSep(sep) {}
+
+char NumSep::do_thousands_sep() const {
+ return mSep;
+}
+
+std::string NumSep::do_grouping() const {
+ return "\003";
+}
+
+// RAII to save & restore a stream's state
+StreamStateRestorer::StreamStateRestorer(std::ostream& s)
+ : mStream(s)
+ , mLocale(s.getloc())
+ , mPrecision(s.precision())
+ , mWidth(s.width())
+ , mFill(s.fill())
+ , mFmtFlags(s.flags()) {}
+
+StreamStateRestorer::~StreamStateRestorer() {
+ restore();
+}
+
+// sets back all stream info that we remembered at construction
+void StreamStateRestorer::restore() {
+ mStream.imbue(mLocale);
+ mStream.precision(mPrecision);
+ mStream.width(mWidth);
+ mStream.fill(mFill);
+ mStream.flags(mFmtFlags);
+}
+
+Number::Number(int width, int precision, int64_t value)
+ : mWidth(width)
+ , mPrecision(precision)
+ , mValue(static_cast<double>(value)) {}
+
+Number::Number(int width, int precision, double value)
+ : mWidth(width)
+ , mPrecision(precision)
+ , mValue(value) {}
+
+std::ostream& Number::write(std::ostream& os) const {
+ StreamStateRestorer restorer(os);
+ os.imbue(std::locale(os.getloc(), new NumSep(',')));
+ os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue;
+ return os;
+}
+
+std::string Number::to_s() const {
+ std::stringstream ss;
+ write(ss);
+ return ss.str();
+}
+
+std::string to_s(uint64_t n) {
+ std::string str;
+ do {
+ str += static_cast<char>('0' + static_cast<char>(n % 10));
+ n /= 10;
+ } while (n != 0);
+ std::reverse(str.begin(), str.end());
+ return str;
+}
+
+std::ostream& operator<<(std::ostream& os, Number const& n) {
+ return n.write(os);
+}
+
+MarkDownColumn::MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val)
+ : mWidth(w)
+ , mPrecision(prec)
+ , mTitle(tit)
+ , mSuffix(suff)
+ , mValue(val) {}
+
+std::string MarkDownColumn::title() const {
+ std::stringstream ss;
+ ss << '|' << std::setw(mWidth - 2) << std::right << mTitle << ' ';
+ return ss.str();
+}
+
+std::string MarkDownColumn::separator() const {
+ std::string sep(static_cast<size_t>(mWidth), '-');
+ sep.front() = '|';
+ sep.back() = ':';
+ return sep;
+}
+
+std::string MarkDownColumn::invalid() const {
+ std::string sep(static_cast<size_t>(mWidth), ' ');
+ sep.front() = '|';
+ sep[sep.size() - 2] = '-';
+ return sep;
+}
+
+std::string MarkDownColumn::value() const {
+ std::stringstream ss;
+ auto width = mWidth - 2 - static_cast<int>(mSuffix.size());
+ ss << '|' << Number(width, mPrecision, mValue) << mSuffix << ' ';
+ return ss.str();
+}
+
+// Formats any text as markdown code, escaping backticks.
+MarkDownCode::MarkDownCode(std::string const& what) {
+ mWhat.reserve(what.size() + 2);
+ mWhat.push_back('`');
+ for (char c : what) {
+ mWhat.push_back(c);
+ if ('`' == c) {
+ mWhat.push_back('`');
+ }
+ }
+ mWhat.push_back('`');
+}
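+// e.g. MarkDownCode("epoch`s") produces the string `epoch``s`, so a stray backtick in a benchmark name
+// cannot break out of the markdown code span.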
+
+std::ostream& MarkDownCode::write(std::ostream& os) const {
+ return os << mWhat;
+}
+
+std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode) {
+ return mdCode.write(os);
+}
+} // namespace fmt
+} // namespace detail
+
+// provide implementation here so it's only generated once
+Config::Config() = default;
+Config::~Config() = default;
+Config& Config::operator=(Config const&) = default;
+Config& Config::operator=(Config&&) = default;
+Config::Config(Config const&) = default;
+Config::Config(Config&&) noexcept = default;
+
+// provide implementation here so it's only generated once
+Result::~Result() = default;
+Result& Result::operator=(Result const&) = default;
+Result& Result::operator=(Result&&) = default;
+Result::Result(Result const&) = default;
+Result::Result(Result&&) noexcept = default;
+
+namespace detail {
+template <typename T>
+inline constexpr typename std::underlying_type<T>::type u(T val) noexcept {
+ return static_cast<typename std::underlying_type<T>::type>(val);
+}
+} // namespace detail
+
+// Result returned after a benchmark has finished. Can be used as a baseline for relative().
+Result::Result(Config const& benchmarkConfig)
+ : mConfig(benchmarkConfig)
+ , mNameToMeasurements{detail::u(Result::Measure::_size)} {}
+
+void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc) {
+ using detail::d;
+ using detail::u;
+
+ double dIters = d(iters);
+ mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters);
+
+ mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters);
+ if (pc.has().pageFaults) {
+ mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters);
+ }
+ if (pc.has().cpuCycles) {
+ mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters);
+ }
+ if (pc.has().contextSwitches) {
+ mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters);
+ }
+ if (pc.has().instructions) {
+ mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters);
+ }
+ if (pc.has().branchInstructions) {
+ double branchInstructions = 0.0;
+ // correcting branches: remove branch introduced by the while (...) loop for each iteration.
+ if (pc.val().branchInstructions > iters + 1U) {
+ branchInstructions = d(pc.val().branchInstructions - (iters + 1U));
+ }
+ mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters);
+
+ if (pc.has().branchMisses) {
+ // correcting branch misses
+ double branchMisses = d(pc.val().branchMisses);
+ if (branchMisses > branchInstructions) {
+ // can't have more branch misses than branch instructions...
+ branchMisses = branchInstructions;
+ }
+
+ // assuming at least one missed branch for the loop
+ branchMisses -= 1.0;
+ if (branchMisses < 1.0) {
+ branchMisses = 1.0;
+ }
+ mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters);
+ }
+ }
+}
+
+Config const& Result::config() const noexcept {
+ return mConfig;
+}
+
+inline double calcMedian(std::vector<double>& data) {
+ if (data.empty()) {
+ return 0.0;
+ }
+ std::sort(data.begin(), data.end());
+
+ auto midIdx = data.size() / 2U;
+ if (1U == (data.size() & 1U)) {
+ return data[midIdx];
+ }
+ return (data[midIdx - 1U] + data[midIdx]) / 2U;
+}
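+// e.g. for data {1, 2, 10} the median is 2, and for {1, 2, 3, 10} it is 2.5 (the mean of the two middle
+// values); note that the input vector is sorted in place.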
+
+double Result::median(Measure m) const {
+ // create a copy so we can sort
+ auto data = mNameToMeasurements[detail::u(m)];
+ return calcMedian(data);
+}
+
+double Result::average(Measure m) const {
+ using detail::d;
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ if (data.empty()) {
+ return 0.0;
+ }
+
+ // arithmetic mean over all measurements
+ return sum(m) / d(data.size());
+}
+
+double Result::medianAbsolutePercentError(Measure m) const {
+ // create copy
+ auto data = mNameToMeasurements[detail::u(m)];
+
+ // calculates MdAPE which is the median of percentage error
+ // see https://www.spiderfinancial.com/support/documentation/numxl/reference-manual/forecasting-performance/mdape
+ auto med = calcMedian(data);
+
+ // transform the data to absolute percentage error
+ for (auto& x : data) {
+ x = (x - med) / x;
+ if (x < 0) {
+ x = -x;
+ }
+ }
+ return calcMedian(data);
+}
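+// Worked example: for measurements {9, 10, 11, 15} the median is 10.5, the absolute percentage errors are
+// {0.167, 0.05, 0.045, 0.3}, and the MdAPE is their median, ~0.108 (shown as ~10.8% in the err% column).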
+
+double Result::sum(Measure m) const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ return std::accumulate(data.begin(), data.end(), 0.0);
+}
+
+double Result::sumProduct(Measure m1, Measure m2) const noexcept {
+ auto const& data1 = mNameToMeasurements[detail::u(m1)];
+ auto const& data2 = mNameToMeasurements[detail::u(m2)];
+
+ if (data1.size() != data2.size()) {
+ return 0.0;
+ }
+
+ double result = 0.0;
+ for (size_t i = 0, s = data1.size(); i != s; ++i) {
+ result += data1[i] * data2[i];
+ }
+ return result;
+}
+
+bool Result::has(Measure m) const noexcept {
+ return !mNameToMeasurements[detail::u(m)].empty();
+}
+
+double Result::get(size_t idx, Measure m) const {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ return data.at(idx);
+}
+
+bool Result::empty() const noexcept {
+ return 0U == size();
+}
+
+size_t Result::size() const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(Measure::elapsed)];
+ return data.size();
+}
+
+double Result::minimum(Measure m) const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ if (data.empty()) {
+ return 0.0;
+ }
+
+ // here it's safe to assume that at least one element is there
+ return *std::min_element(data.begin(), data.end());
+}
+
+double Result::maximum(Measure m) const noexcept {
+ auto const& data = mNameToMeasurements[detail::u(m)];
+ if (data.empty()) {
+ return 0.0;
+ }
+
+ // here it's safe to assume that at least one element is there
+ return *std::max_element(data.begin(), data.end());
+}
+
+Result::Measure Result::fromString(std::string const& str) {
+ if (str == "elapsed") {
+ return Measure::elapsed;
+ } else if (str == "iterations") {
+ return Measure::iterations;
+ } else if (str == "pagefaults") {
+ return Measure::pagefaults;
+ } else if (str == "cpucycles") {
+ return Measure::cpucycles;
+ } else if (str == "contextswitches") {
+ return Measure::contextswitches;
+ } else if (str == "instructions") {
+ return Measure::instructions;
+ } else if (str == "branchinstructions") {
+ return Measure::branchinstructions;
+ } else if (str == "branchmisses") {
+ return Measure::branchmisses;
+ } else {
+ // not found, return _size
+ return Measure::_size;
+ }
+}
+
+// Configuration of a microbenchmark.
+Bench::Bench() {
+ mConfig.mOut = &std::cout;
+}
+
+Bench::Bench(Bench&&) = default;
+Bench& Bench::operator=(Bench&&) = default;
+Bench::Bench(Bench const&) = default;
+Bench& Bench::operator=(Bench const&) = default;
+Bench::~Bench() noexcept = default;
+
+double Bench::batch() const noexcept {
+ return mConfig.mBatch;
+}
+
+double Bench::complexityN() const noexcept {
+ return mConfig.mComplexityN;
+}
+
+// Enables relative output, using the first benchmark as the baseline: 100% means it is exactly as fast as the
+// baseline, >100% means it is faster, and <100% means it is slower than the baseline.
+Bench& Bench::relative(bool isRelativeEnabled) noexcept {
+ mConfig.mIsRelative = isRelativeEnabled;
+ return *this;
+}
+bool Bench::relative() const noexcept {
+ return mConfig.mIsRelative;
+}
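+// A hypothetical usage sketch for relative(): the first run becomes the 100% baseline and later runs are
+// reported relative to it, e.g.
+//     ankerl::nanobench::Bench bench;
+//     bench.title("encoding").unit("byte").relative(true);
+//     bench.name("base64").run([&] { /* encode with base64 */ });
+//     bench.name("bech32").run([&] { /* encode with bech32 */ });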
+
+Bench& Bench::performanceCounters(bool showPerformanceCounters) noexcept {
+ mConfig.mShowPerformanceCounters = showPerformanceCounters;
+ return *this;
+}
+bool Bench::performanceCounters() const noexcept {
+ return mConfig.mShowPerformanceCounters;
+}
+
+// Operation unit. Defaults to "op", could be e.g. "byte" for string processing.
+// If u differs from currently set unit, the stored results will be cleared.
+// Use singular (byte, not bytes).
+Bench& Bench::unit(char const* u) {
+ if (u != mConfig.mUnit) {
+ mResults.clear();
+ }
+ mConfig.mUnit = u;
+ return *this;
+}
+
+Bench& Bench::unit(std::string const& u) {
+ return unit(u.c_str());
+}
+
+std::string const& Bench::unit() const noexcept {
+ return mConfig.mUnit;
+}
+
+// If benchmarkTitle differs from currently set title, the stored results will be cleared.
+Bench& Bench::title(const char* benchmarkTitle) {
+ if (benchmarkTitle != mConfig.mBenchmarkTitle) {
+ mResults.clear();
+ }
+ mConfig.mBenchmarkTitle = benchmarkTitle;
+ return *this;
+}
+Bench& Bench::title(std::string const& benchmarkTitle) {
+ if (benchmarkTitle != mConfig.mBenchmarkTitle) {
+ mResults.clear();
+ }
+ mConfig.mBenchmarkTitle = benchmarkTitle;
+ return *this;
+}
+
+std::string const& Bench::title() const noexcept {
+ return mConfig.mBenchmarkTitle;
+}
+
+Bench& Bench::name(const char* benchmarkName) {
+ mConfig.mBenchmarkName = benchmarkName;
+ return *this;
+}
+
+Bench& Bench::name(std::string const& benchmarkName) {
+ mConfig.mBenchmarkName = benchmarkName;
+ return *this;
+}
+
+std::string const& Bench::name() const noexcept {
+ return mConfig.mBenchmarkName;
+}
+
+// Number of epochs to evaluate. The reported result is the median over all epochs.
+Bench& Bench::epochs(size_t numEpochs) noexcept {
+ mConfig.mNumEpochs = numEpochs;
+ return *this;
+}
+size_t Bench::epochs() const noexcept {
+ return mConfig.mNumEpochs;
+}
+
+// The desired evaluation time per epoch is a multiple of the clock resolution. By default it is 1000 times the measured resolution.
+Bench& Bench::clockResolutionMultiple(size_t multiple) noexcept {
+ mConfig.mClockResolutionMultiple = multiple;
+ return *this;
+}
+size_t Bench::clockResolutionMultiple() const noexcept {
+ return mConfig.mClockResolutionMultiple;
+}
+
+// Sets the maximum time each epoch should take. Default is 100ms.
+Bench& Bench::maxEpochTime(std::chrono::nanoseconds t) noexcept {
+ mConfig.mMaxEpochTime = t;
+ return *this;
+}
+std::chrono::nanoseconds Bench::maxEpochTime() const noexcept {
+ return mConfig.mMaxEpochTime;
+}
+
+// Sets the minimum time each epoch should take.
+Bench& Bench::minEpochTime(std::chrono::nanoseconds t) noexcept {
+ mConfig.mMinEpochTime = t;
+ return *this;
+}
+std::chrono::nanoseconds Bench::minEpochTime() const noexcept {
+ return mConfig.mMinEpochTime;
+}
+
+Bench& Bench::minEpochIterations(uint64_t numIters) noexcept {
+ mConfig.mMinEpochIterations = (numIters == 0) ? 1 : numIters;
+ return *this;
+}
+uint64_t Bench::minEpochIterations() const noexcept {
+ return mConfig.mMinEpochIterations;
+}
+
+Bench& Bench::epochIterations(uint64_t numIters) noexcept {
+ mConfig.mEpochIterations = numIters;
+ return *this;
+}
+uint64_t Bench::epochIterations() const noexcept {
+ return mConfig.mEpochIterations;
+}
+
+Bench& Bench::warmup(uint64_t numWarmupIters) noexcept {
+ mConfig.mWarmup = numWarmupIters;
+ return *this;
+}
+uint64_t Bench::warmup() const noexcept {
+ return mConfig.mWarmup;
+}
+
+Bench& Bench::config(Config const& benchmarkConfig) {
+ mConfig = benchmarkConfig;
+ return *this;
+}
+Config const& Bench::config() const noexcept {
+ return mConfig;
+}
+
+Bench& Bench::output(std::ostream* outstream) noexcept {
+ mConfig.mOut = outstream;
+ return *this;
+}
+
+ANKERL_NANOBENCH(NODISCARD) std::ostream* Bench::output() const noexcept {
+ return mConfig.mOut;
+}
+
+std::vector<Result> const& Bench::results() const noexcept {
+ return mResults;
+}
+
+Bench& Bench::render(char const* templateContent, std::ostream& os) {
+ ::ankerl::nanobench::render(templateContent, *this, os);
+ return *this;
+}
+
+std::vector<BigO> Bench::complexityBigO() const {
+ std::vector<BigO> bigOs;
+ auto rangeMeasure = BigO::collectRangeMeasure(mResults);
+ bigOs.emplace_back("O(1)", rangeMeasure, [](double) {
+ return 1.0;
+ });
+ bigOs.emplace_back("O(n)", rangeMeasure, [](double n) {
+ return n;
+ });
+ bigOs.emplace_back("O(log n)", rangeMeasure, [](double n) {
+ return std::log2(n);
+ });
+ bigOs.emplace_back("O(n log n)", rangeMeasure, [](double n) {
+ return n * std::log2(n);
+ });
+ bigOs.emplace_back("O(n^2)", rangeMeasure, [](double n) {
+ return n * n;
+ });
+ bigOs.emplace_back("O(n^3)", rangeMeasure, [](double n) {
+ return n * n * n;
+ });
+ std::sort(bigOs.begin(), bigOs.end());
+ return bigOs;
+}
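+// A hedged usage sketch (assumes the complexityN(n) setter declared earlier in this header): run the same
+// benchmark for several problem sizes and let complexityBigO() pick the best-fitting curve:
+//     for (size_t n : {100U, 1000U, 10000U}) {
+//         bench.complexityN(n).run([&] { doWork(n); /* doWork is a placeholder */ });
+//     }
+//     std::cout << bench.complexityBigO() << std::endl;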
+
+Rng::Rng()
+ : mX(0)
+ , mY(0) {
+ std::random_device rd;
+ std::uniform_int_distribution<uint64_t> dist;
+ do {
+ mX = dist(rd);
+ mY = dist(rd);
+ } while (mX == 0 && mY == 0);
+}
+
+ANKERL_NANOBENCH_NO_SANITIZE("integer")
+uint64_t splitMix64(uint64_t& state) noexcept {
+ uint64_t z = (state += UINT64_C(0x9e3779b97f4a7c15));
+ z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9);
+ z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb);
+ return z ^ (z >> 31U);
+}
+
+// Seeded as described in the Romu paper (April 2020 update)
+Rng::Rng(uint64_t seed) noexcept
+ : mX(splitMix64(seed))
+ , mY(splitMix64(seed)) {
+ for (size_t i = 0; i < 10; ++i) {
+ operator()();
+ }
+}
+
+// only used internally to copy the RNG.
+Rng::Rng(uint64_t x, uint64_t y) noexcept
+ : mX(x)
+ , mY(y) {}
+
+Rng Rng::copy() const noexcept {
+ return Rng{mX, mY};
+}
+
+BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result> const& results) {
+ BigO::RangeMeasure rangeMeasure;
+ for (auto const& result : results) {
+ if (result.config().mComplexityN > 0.0) {
+ rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed));
+ }
+ }
+ return rangeMeasure;
+}
+
+BigO::BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure)
+ : mName(bigOName) {
+
+ // estimate the constant factor
+ double sumRangeMeasure = 0.0;
+ double sumRangeRange = 0.0;
+
+ for (size_t i = 0; i < rangeMeasure.size(); ++i) {
+ sumRangeMeasure += rangeMeasure[i].first * rangeMeasure[i].second;
+ sumRangeRange += rangeMeasure[i].first * rangeMeasure[i].first;
+ }
+ mConstant = sumRangeMeasure / sumRangeRange;
+
+ // calculate root mean square
+ double err = 0.0;
+ double sumMeasure = 0.0;
+ for (size_t i = 0; i < rangeMeasure.size(); ++i) {
+ auto diff = mConstant * rangeMeasure[i].first - rangeMeasure[i].second;
+ err += diff * diff;
+
+ sumMeasure += rangeMeasure[i].second;
+ }
+
+ auto n = static_cast<double>(rangeMeasure.size());
+ auto mean = sumMeasure / n;
+ mNormalizedRootMeanSquare = std::sqrt(err / n) / mean;
+}
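+// The constant above is a least-squares fit of the measured times t_i against the complexity term f(n_i):
+// minimizing sum_i (c * f(n_i) - t_i)^2 gives c = sum(f*t) / sum(f*f), which is what the first loop computes.
+// mNormalizedRootMeanSquare then reports the root-mean-square fit error normalized by the mean measurement.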
+
+BigO::BigO(const char* bigOName, RangeMeasure const& rangeMeasure)
+ : BigO(std::string(bigOName), rangeMeasure) {}
+
+std::string const& BigO::name() const noexcept {
+ return mName;
+}
+
+double BigO::constant() const noexcept {
+ return mConstant;
+}
+
+double BigO::normalizedRootMeanSquare() const noexcept {
+ return mNormalizedRootMeanSquare;
+}
+
+bool BigO::operator<(BigO const& other) const noexcept {
+ return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName);
+}
+
+std::ostream& operator<<(std::ostream& os, BigO const& bigO) {
+ return os << bigO.constant() << " * " << bigO.name() << ", rms=" << bigO.normalizedRootMeanSquare();
+}
+
+std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs) {
+ detail::fmt::StreamStateRestorer restorer(os);
+ os << std::endl << "| coefficient | err% | complexity" << std::endl << "|--------------:|-------:|------------" << std::endl;
+ for (auto const& bigO : bigOs) {
+ os << "|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() << " ";
+ os << "|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) << "% ";
+ os << "| " << bigO.name();
+ os << std::endl;
+ }
+ return os;
+}
+
+} // namespace nanobench
+} // namespace ankerl
+
+#endif // ANKERL_NANOBENCH_IMPLEMENT
+#endif // ANKERL_NANOBENCH_H_INCLUDED
diff --git a/src/bench/poly1305.cpp b/src/bench/poly1305.cpp
index 02e5fecc0d..d8db99e7d4 100644
--- a/src/bench/poly1305.cpp
+++ b/src/bench/poly1305.cpp
@@ -11,30 +11,31 @@ static constexpr uint64_t BUFFER_SIZE_TINY = 64;
static constexpr uint64_t BUFFER_SIZE_SMALL = 256;
static constexpr uint64_t BUFFER_SIZE_LARGE = 1024*1024;
-static void POLY1305(benchmark::State& state, size_t buffersize)
+static void POLY1305(benchmark::Bench& bench, size_t buffersize)
{
std::vector<unsigned char> tag(POLY1305_TAGLEN, 0);
std::vector<unsigned char> key(POLY1305_KEYLEN, 0);
std::vector<unsigned char> in(buffersize, 0);
- while (state.KeepRunning())
+ bench.batch(in.size()).unit("byte").run([&] {
poly1305_auth(tag.data(), in.data(), in.size(), key.data());
+ });
}
-static void POLY1305_64BYTES(benchmark::State& state)
+static void POLY1305_64BYTES(benchmark::Bench& bench)
{
- POLY1305(state, BUFFER_SIZE_TINY);
+ POLY1305(bench, BUFFER_SIZE_TINY);
}
-static void POLY1305_256BYTES(benchmark::State& state)
+static void POLY1305_256BYTES(benchmark::Bench& bench)
{
- POLY1305(state, BUFFER_SIZE_SMALL);
+ POLY1305(bench, BUFFER_SIZE_SMALL);
}
-static void POLY1305_1MB(benchmark::State& state)
+static void POLY1305_1MB(benchmark::Bench& bench)
{
- POLY1305(state, BUFFER_SIZE_LARGE);
+ POLY1305(bench, BUFFER_SIZE_LARGE);
}
-BENCHMARK(POLY1305_64BYTES, 500000);
-BENCHMARK(POLY1305_256BYTES, 250000);
-BENCHMARK(POLY1305_1MB, 340);
+BENCHMARK(POLY1305_64BYTES);
+BENCHMARK(POLY1305_256BYTES);
+BENCHMARK(POLY1305_1MB);
diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp
index 42b351a72d..a2dbefa54a 100644
--- a/src/bench/prevector.cpp
+++ b/src/bench/prevector.cpp
@@ -30,51 +30,44 @@ static_assert(IS_TRIVIALLY_CONSTRUCTIBLE<trivial_t>::value,
"expected trivial_t to be trivially constructible");
template <typename T>
-static void PrevectorDestructor(benchmark::State& state)
+static void PrevectorDestructor(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
- for (auto x = 0; x < 1000; ++x) {
- prevector<28, T> t0;
- prevector<28, T> t1;
- t0.resize(28);
- t1.resize(29);
- }
- }
+ bench.batch(2).run([&] {
+ prevector<28, T> t0;
+ prevector<28, T> t1;
+ t0.resize(28);
+ t1.resize(29);
+ });
}
template <typename T>
-static void PrevectorClear(benchmark::State& state)
+static void PrevectorClear(benchmark::Bench& bench)
{
-
- while (state.KeepRunning()) {
- for (auto x = 0; x < 1000; ++x) {
- prevector<28, T> t0;
- prevector<28, T> t1;
- t0.resize(28);
- t0.clear();
- t1.resize(29);
- t1.clear();
- }
- }
+ prevector<28, T> t0;
+ prevector<28, T> t1;
+ bench.batch(2).run([&] {
+ t0.resize(28);
+ t0.clear();
+ t1.resize(29);
+ t1.clear();
+ });
}
template <typename T>
-static void PrevectorResize(benchmark::State& state)
+static void PrevectorResize(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
- prevector<28, T> t0;
- prevector<28, T> t1;
- for (auto x = 0; x < 1000; ++x) {
- t0.resize(28);
- t0.resize(0);
- t1.resize(29);
- t1.resize(0);
- }
- }
+ prevector<28, T> t0;
+ prevector<28, T> t1;
+ bench.batch(4).run([&] {
+ t0.resize(28);
+ t0.resize(0);
+ t1.resize(29);
+ t1.resize(0);
+ });
}
template <typename T>
-static void PrevectorDeserialize(benchmark::State& state)
+static void PrevectorDeserialize(benchmark::Bench& bench)
{
CDataStream s0(SER_NETWORK, 0);
prevector<28, T> t0;
@@ -86,26 +79,28 @@ static void PrevectorDeserialize(benchmark::State& state)
for (auto x = 0; x < 101; ++x) {
s0 << t0;
}
- while (state.KeepRunning()) {
+ bench.batch(1000).run([&] {
prevector<28, T> t1;
for (auto x = 0; x < 1000; ++x) {
s0 >> t1;
}
s0.Init(SER_NETWORK, 0);
- }
+ });
}
-#define PREVECTOR_TEST(name, nontrivops, trivops) \
- static void Prevector ## name ## Nontrivial(benchmark::State& state) { \
- Prevector ## name<nontrivial_t>(state); \
- } \
- BENCHMARK(Prevector ## name ## Nontrivial, nontrivops); \
- static void Prevector ## name ## Trivial(benchmark::State& state) { \
- Prevector ## name<trivial_t>(state); \
- } \
- BENCHMARK(Prevector ## name ## Trivial, trivops);
+#define PREVECTOR_TEST(name) \
+ static void Prevector##name##Nontrivial(benchmark::Bench& bench) \
+ { \
+ Prevector##name<nontrivial_t>(bench); \
+ } \
+ BENCHMARK(Prevector##name##Nontrivial); \
+ static void Prevector##name##Trivial(benchmark::Bench& bench) \
+ { \
+ Prevector##name<trivial_t>(bench); \
+ } \
+ BENCHMARK(Prevector##name##Trivial);
-PREVECTOR_TEST(Clear, 28300, 88600)
-PREVECTOR_TEST(Destructor, 28800, 88900)
-PREVECTOR_TEST(Resize, 28900, 90300)
-PREVECTOR_TEST(Deserialize, 6800, 52000)
+PREVECTOR_TEST(Clear)
+PREVECTOR_TEST(Destructor)
+PREVECTOR_TEST(Resize)
+PREVECTOR_TEST(Deserialize)
diff --git a/src/bench/rollingbloom.cpp b/src/bench/rollingbloom.cpp
index 6cdb4ff0a7..9b43951e6e 100644
--- a/src/bench/rollingbloom.cpp
+++ b/src/bench/rollingbloom.cpp
@@ -6,12 +6,12 @@
#include <bench/bench.h>
#include <bloom.h>
-static void RollingBloom(benchmark::State& state)
+static void RollingBloom(benchmark::Bench& bench)
{
CRollingBloomFilter filter(120000, 0.000001);
std::vector<unsigned char> data(32);
uint32_t count = 0;
- while (state.KeepRunning()) {
+ bench.run([&] {
count++;
data[0] = count;
data[1] = count >> 8;
@@ -24,16 +24,16 @@ static void RollingBloom(benchmark::State& state)
data[2] = count >> 8;
data[3] = count;
filter.contains(data);
- }
+ });
}
-static void RollingBloomReset(benchmark::State& state)
+static void RollingBloomReset(benchmark::Bench& bench)
{
CRollingBloomFilter filter(120000, 0.000001);
- while (state.KeepRunning()) {
+ bench.run([&] {
filter.reset();
- }
+ });
}
-BENCHMARK(RollingBloom, 1500 * 1000);
-BENCHMARK(RollingBloomReset, 20000);
+BENCHMARK(RollingBloom);
+BENCHMARK(RollingBloomReset);
diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp
index 511573abac..4b45264a3c 100644
--- a/src/bench/rpc_blockchain.cpp
+++ b/src/bench/rpc_blockchain.cpp
@@ -11,7 +11,8 @@
#include <univalue.h>
-static void BlockToJsonVerbose(benchmark::State& state) {
+static void BlockToJsonVerbose(benchmark::Bench& bench)
+{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
char a = '\0';
stream.write(&a, 1); // Prevent compaction
@@ -24,9 +25,9 @@ static void BlockToJsonVerbose(benchmark::State& state) {
blockindex.phashBlock = &blockHash;
blockindex.nBits = 403014710;
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)blockToJSON(block, &blockindex, &blockindex, /*verbose*/ true);
- }
+ });
}
-BENCHMARK(BlockToJsonVerbose, 10);
+BENCHMARK(BlockToJsonVerbose);
diff --git a/src/bench/rpc_mempool.cpp b/src/bench/rpc_mempool.cpp
index bf63cccf09..1ff41765cf 100644
--- a/src/bench/rpc_mempool.cpp
+++ b/src/bench/rpc_mempool.cpp
@@ -15,7 +15,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& poo
pool.addUnchecked(CTxMemPoolEntry(tx, fee, /* time */ 0, /* height */ 1, /* spendsCoinbase */ false, /* sigOpCost */ 4, lp));
}
-static void RpcMempool(benchmark::State& state)
+static void RpcMempool(benchmark::Bench& bench)
{
CTxMemPool pool;
LOCK2(cs_main, pool.cs);
@@ -32,9 +32,9 @@ static void RpcMempool(benchmark::State& state)
AddTx(tx_r, /* fee */ i, pool);
}
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)MempoolToJSON(pool, /*verbose*/ true);
- }
+ });
}
-BENCHMARK(RpcMempool, 40);
+BENCHMARK(RpcMempool);
diff --git a/src/bench/util_time.cpp b/src/bench/util_time.cpp
index 72d97354aa..fad179eb87 100644
--- a/src/bench/util_time.cpp
+++ b/src/bench/util_time.cpp
@@ -6,37 +6,37 @@
#include <util/time.h>
-static void BenchTimeDeprecated(benchmark::State& state)
+static void BenchTimeDeprecated(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTime();
- }
+ });
}
-static void BenchTimeMock(benchmark::State& state)
+static void BenchTimeMock(benchmark::Bench& bench)
{
SetMockTime(111);
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTime<std::chrono::seconds>();
- }
+ });
SetMockTime(0);
}
-static void BenchTimeMillis(benchmark::State& state)
+static void BenchTimeMillis(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTime<std::chrono::milliseconds>();
- }
+ });
}
-static void BenchTimeMillisSys(benchmark::State& state)
+static void BenchTimeMillisSys(benchmark::Bench& bench)
{
- while (state.KeepRunning()) {
+ bench.run([&] {
(void)GetTimeMillis();
- }
+ });
}
-BENCHMARK(BenchTimeDeprecated, 100000000);
-BENCHMARK(BenchTimeMillis, 6000000);
-BENCHMARK(BenchTimeMillisSys, 6000000);
-BENCHMARK(BenchTimeMock, 300000000);
+BENCHMARK(BenchTimeDeprecated);
+BENCHMARK(BenchTimeMillis);
+BENCHMARK(BenchTimeMillisSys);
+BENCHMARK(BenchTimeMock);
diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp
index 14bca5f7d1..9af0b502eb 100644
--- a/src/bench/verify_script.cpp
+++ b/src/bench/verify_script.cpp
@@ -16,7 +16,7 @@
// Microbenchmark for verification of a basic P2WPKH script. Can be easily
// modified to measure performance of other types of scripts.
-static void VerifyScriptBench(benchmark::State& state)
+static void VerifyScriptBench(benchmark::Bench& bench)
{
const ECCVerifyHandle verify_handle;
ECC_Start();
@@ -34,7 +34,7 @@ static void VerifyScriptBench(benchmark::State& state)
key.Set(vchKey.begin(), vchKey.end(), false);
CPubKey pubkey = key.GetPubKey();
uint160 pubkeyHash;
- CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(pubkeyHash.begin());
+ CHash160().Write(pubkey).Finalize(pubkeyHash);
// Script.
CScript scriptPubKey = CScript() << witnessversion << ToByteVector(pubkeyHash);
@@ -49,7 +49,7 @@ static void VerifyScriptBench(benchmark::State& state)
witness.stack.push_back(ToByteVector(pubkey));
// Benchmark.
- while (state.KeepRunning()) {
+ bench.run([&] {
ScriptError err;
bool success = VerifyScript(
txSpend.vin[0].scriptSig,
@@ -71,11 +71,12 @@ static void VerifyScriptBench(benchmark::State& state)
(const unsigned char*)stream.data(), stream.size(), 0, flags, nullptr);
assert(csuccess == 1);
#endif
- }
+ });
ECC_Stop();
}
-static void VerifyNestedIfScript(benchmark::State& state) {
+static void VerifyNestedIfScript(benchmark::Bench& bench)
+{
std::vector<std::vector<unsigned char>> stack;
CScript script;
for (int i = 0; i < 100; ++i) {
@@ -87,15 +88,13 @@ static void VerifyNestedIfScript(benchmark::State& state) {
for (int i = 0; i < 100; ++i) {
script << OP_ENDIF;
}
- while (state.KeepRunning()) {
+ bench.run([&] {
auto stack_copy = stack;
ScriptError error;
bool ret = EvalScript(stack_copy, script, 0, BaseSignatureChecker(), SigVersion::BASE, &error);
assert(ret);
- }
+ });
}
-
-BENCHMARK(VerifyScriptBench, 6300);
-
-BENCHMARK(VerifyNestedIfScript, 100);
+BENCHMARK(VerifyScriptBench);
+BENCHMARK(VerifyNestedIfScript);
diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp
index 05cfb3438e..e16182b48e 100644
--- a/src/bench/wallet_balance.cpp
+++ b/src/bench/wallet_balance.cpp
@@ -12,7 +12,7 @@
#include <validationinterface.h>
#include <wallet/wallet.h>
-static void WalletBalance(benchmark::State& state, const bool set_dirty, const bool add_watchonly, const bool add_mine)
+static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const bool add_watchonly, const bool add_mine)
{
TestingSetup test_setup{
CBaseChainParams::REGTEST,
@@ -45,20 +45,20 @@ static void WalletBalance(benchmark::State& state, const bool set_dirty, const b
auto bal = wallet.GetBalance(); // Cache
- while (state.KeepRunning()) {
+ bench.run([&] {
if (set_dirty) wallet.MarkDirty();
bal = wallet.GetBalance();
if (add_mine) assert(bal.m_mine_trusted > 0);
if (add_watchonly) assert(bal.m_watchonly_trusted > 0);
- }
+ });
}
-static void WalletBalanceDirty(benchmark::State& state) { WalletBalance(state, /* set_dirty */ true, /* add_watchonly */ true, /* add_mine */ true); }
-static void WalletBalanceClean(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ true); }
-static void WalletBalanceMine(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ false, /* add_mine */ true); }
-static void WalletBalanceWatch(benchmark::State& state) { WalletBalance(state, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ false); }
+static void WalletBalanceDirty(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ true, /* add_watchonly */ true, /* add_mine */ true); }
+static void WalletBalanceClean(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ true); }
+static void WalletBalanceMine(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ false, /* add_mine */ true); }
+static void WalletBalanceWatch(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_watchonly */ true, /* add_mine */ false); }
-BENCHMARK(WalletBalanceDirty, 2500);
-BENCHMARK(WalletBalanceClean, 8000);
-BENCHMARK(WalletBalanceMine, 16000);
-BENCHMARK(WalletBalanceWatch, 8000);
+BENCHMARK(WalletBalanceDirty);
+BENCHMARK(WalletBalanceClean);
+BENCHMARK(WalletBalanceMine);
+BENCHMARK(WalletBalanceWatch);
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 9afcda4578..cf52b710cb 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -43,32 +43,32 @@ static const int CONTINUE_EXECUTION=-1;
/** Default number of blocks to generate for RPC generatetoaddress. */
static const std::string DEFAULT_NBLOCKS = "1";
-static void SetupCliArgs()
+static void SetupCliArgs(ArgsManager& argsman)
{
- SetupHelpOptions(gArgs);
+ SetupHelpOptions(argsman);
const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN);
const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET);
const auto regtestBaseParams = CreateBaseChainParams(CBaseChainParams::REGTEST);
- gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-generate", strprintf("Generate blocks immediately, equivalent to RPC generatenewaddress followed by RPC generatetoaddress. Optional positional integer arguments are number of blocks to generate (default: %s) and maximum iterations to try (default: %s), equivalent to RPC generatetoaddress nblocks and maxtries arguments. Example: bitcoin-cli -generate 4 1000", DEFAULT_NBLOCKS, DEFAULT_MAX_TRIES), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the results of -getinfo is the result of multiple non-atomic requests. Some entries in the result may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- SetupChainParamsBaseOptions();
- gArgs.AddArg("-named", strprintf("Pass named instead of positional arguments (default: %s)", DEFAULT_NAMED), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcclienttimeout=<n>", strprintf("Timeout in seconds during HTTP requests, or 0 for no timeout. (default: %d)", DEFAULT_HTTP_CLIENT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcconnect=<ip>", strprintf("Send commands to node running on <ip> (default: %s)", DEFAULT_RPCCONNECT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcport=<port>", strprintf("Connect to JSON-RPC on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcwait", "Wait for RPC server to start", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-rpcwallet=<walletname>", "Send RPC for non-default wallet on RPC server (needs to exactly match corresponding -wallet option passed to bitcoind). This changes the RPC endpoint used, e.g. http://127.0.0.1:8332/wallet/<walletname>", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-stdin", "Read extra arguments from standard input, one per line until EOF/Ctrl-D (recommended for sensitive information such as passphrases). When combined with -stdinrpcpass, the first line from standard input is used for the RPC password.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-stdinrpcpass", "Read RPC password from standard input as a single line. When combined with -stdin, the first line from standard input is used for the RPC password. When combined with -stdinwalletpassphrase, -stdinrpcpass consumes the first line, and -stdinwalletpassphrase consumes the second.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-stdinwalletpassphrase", "Read wallet passphrase from standard input as a single line. When combined with -stdin, the first line from standard input is used for the wallet passphrase.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-generate", strprintf("Generate blocks immediately, equivalent to RPC generatenewaddress followed by RPC generatetoaddress. Optional positional integer arguments are number of blocks to generate (default: %s) and maximum iterations to try (default: %s), equivalent to RPC generatetoaddress nblocks and maxtries arguments. Example: bitcoin-cli -generate 4 1000", DEFAULT_NBLOCKS, DEFAULT_MAX_TRIES), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-getinfo", "Get general information from the remote server. Note that unlike server-side RPC calls, the results of -getinfo is the result of multiple non-atomic requests. Some entries in the result may represent results from different states (e.g. wallet balance may be as of a different block from the chain state reported)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ SetupChainParamsBaseOptions(argsman);
+ argsman.AddArg("-named", strprintf("Pass named instead of positional arguments (default: %s)", DEFAULT_NAMED), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcclienttimeout=<n>", strprintf("Timeout in seconds during HTTP requests, or 0 for no timeout. (default: %d)", DEFAULT_HTTP_CLIENT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcconnect=<ip>", strprintf("Send commands to node running on <ip> (default: %s)", DEFAULT_RPCCONNECT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcport=<port>", strprintf("Connect to JSON-RPC on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcwait", "Wait for RPC server to start", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-rpcwallet=<walletname>", "Send RPC for non-default wallet on RPC server (needs to exactly match corresponding -wallet option passed to bitcoind). This changes the RPC endpoint used, e.g. http://127.0.0.1:8332/wallet/<walletname>", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-stdin", "Read extra arguments from standard input, one per line until EOF/Ctrl-D (recommended for sensitive information such as passphrases). When combined with -stdinrpcpass, the first line from standard input is used for the RPC password.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-stdinrpcpass", "Read RPC password from standard input as a single line. When combined with -stdin, the first line from standard input is used for the RPC password. When combined with -stdinwalletpassphrase, -stdinrpcpass consumes the first line, and -stdinwalletpassphrase consumes the second.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-stdinwalletpassphrase", "Read wallet passphrase from standard input as a single line. When combined with -stdin, the first line from standard input is used for the wallet passphrase.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
}
/** libevent event log callback */
@@ -111,7 +111,7 @@ static int AppInitRPC(int argc, char* argv[])
//
// Parameters
//
- SetupCliArgs();
+ SetupCliArgs(gArgs);
std::string error;
if (!gArgs.ParseParameters(argc, argv, error)) {
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error);
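The same pattern repeats for bitcoin-tx and bitcoin-wallet below: each Setup*Args helper now takes the ArgsManager it should populate instead of reaching for the global gArgs, and the existing call site simply passes gArgs in. An illustrative sketch of the shape of the change (SetupExampleArgs and -example are hypothetical names, not from the patch):

    static void SetupExampleArgs(ArgsManager& argsman)
    {
        SetupHelpOptions(argsman);
        // Register options on whichever ArgsManager the caller hands us.
        argsman.AddArg("-example=<n>", "Hypothetical option, for illustration only",
                       ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
    }

    // Existing call sites keep their behaviour by passing the global instance:
    //   SetupExampleArgs(gArgs);
    // while tests could instead pass a local ArgsManager.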
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index f54a299a36..a9119d5144 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -36,40 +36,40 @@ static const int CONTINUE_EXECUTION=-1;
const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr;
-static void SetupBitcoinTxArgs()
+static void SetupBitcoinTxArgs(ArgsManager &argsman)
{
- SetupHelpOptions(gArgs);
-
- gArgs.AddArg("-create", "Create new, empty TX.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-json", "Select JSON output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-txid", "Output only the hex-encoded transaction id of the resultant transaction.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- SetupChainParamsBaseOptions();
-
- gArgs.AddArg("delin=N", "Delete input N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("delout=N", "Delete output N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("in=TXID:VOUT(:SEQUENCE_NUMBER)", "Add input to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("locktime=N", "Set TX lock time to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("nversion=N", "Set TX version to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outaddr=VALUE:ADDRESS", "Add address-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outdata=[VALUE:]DATA", "Add data-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", "Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS. "
+ SetupHelpOptions(argsman);
+
+ argsman.AddArg("-create", "Create new, empty TX.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-json", "Select JSON output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-txid", "Output only the hex-encoded transaction id of the resultant transaction.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ SetupChainParamsBaseOptions(argsman);
+
+ argsman.AddArg("delin=N", "Delete input N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("delout=N", "Delete output N from TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("in=TXID:VOUT(:SEQUENCE_NUMBER)", "Add input to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("locktime=N", "Set TX lock time to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("nversion=N", "Set TX version to N", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("outaddr=VALUE:ADDRESS", "Add address-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("outdata=[VALUE:]DATA", "Add data-based output to TX", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", "Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS. "
"Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output. "
"Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outpubkey=VALUE:PUBKEY[:FLAGS]", "Add pay-to-pubkey output to TX. "
+ argsman.AddArg("outpubkey=VALUE:PUBKEY[:FLAGS]", "Add pay-to-pubkey output to TX. "
"Optionally add the \"W\" flag to produce a pay-to-witness-pubkey-hash output. "
"Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("outscript=VALUE:SCRIPT[:FLAGS]", "Add raw script output to TX. "
+ argsman.AddArg("outscript=VALUE:SCRIPT[:FLAGS]", "Add raw script output to TX. "
"Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output. "
"Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("replaceable(=N)", "Set RBF opt-in sequence number for input N (if not provided, opt-in all available inputs)", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("sign=SIGHASH-FLAGS", "Add zero or more signatures to transaction. "
+ argsman.AddArg("replaceable(=N)", "Set RBF opt-in sequence number for input N (if not provided, opt-in all available inputs)", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("sign=SIGHASH-FLAGS", "Add zero or more signatures to transaction. "
"This command requires JSON registers:"
"prevtxs=JSON object, "
"privatekeys=JSON object. "
"See signrawtransactionwithkey docs for format of sighash flags, JSON objects.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("load=NAME:FILENAME", "Load JSON file FILENAME into register NAME", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
- gArgs.AddArg("set=NAME:JSON-STRING", "Set register NAME to given JSON-STRING", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
+ argsman.AddArg("load=NAME:FILENAME", "Load JSON file FILENAME into register NAME", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
+ argsman.AddArg("set=NAME:JSON-STRING", "Set register NAME to given JSON-STRING", ArgsManager::ALLOW_ANY, OptionsCategory::REGISTER_COMMANDS);
}
//
@@ -81,7 +81,7 @@ static int AppInitRawTx(int argc, char* argv[])
//
// Parameters
//
- SetupBitcoinTxArgs();
+ SetupBitcoinTxArgs(gArgs);
std::string error;
if (!gArgs.ParseParameters(argc, argv, error)) {
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error);
@@ -320,8 +320,8 @@ static void MutateTxAddOutPubKey(CMutableTransaction& tx, const std::string& str
if (!pubkey.IsCompressed()) {
throw std::runtime_error("Uncompressed pubkeys are not useable for SegWit outputs");
}
- // Call GetScriptForWitness() to build a P2WSH scriptPubKey
- scriptPubKey = GetScriptForWitness(scriptPubKey);
+ // Build a P2WPKH script
+ scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(pubkey));
}
if (bScriptHash) {
// Get the ID for the script, and then construct a P2SH destination for it.
@@ -390,8 +390,8 @@ static void MutateTxAddOutMultiSig(CMutableTransaction& tx, const std::string& s
throw std::runtime_error("Uncompressed pubkeys are not useable for SegWit outputs");
}
}
- // Call GetScriptForWitness() to build a P2WSH scriptPubKey
- scriptPubKey = GetScriptForWitness(scriptPubKey);
+ // Build a P2WSH with the multisig script
+ scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(scriptPubKey));
}
if (bScriptHash) {
if (scriptPubKey.size() > MAX_SCRIPT_ELEMENT_SIZE) {
@@ -464,7 +464,7 @@ static void MutateTxAddOutScript(CMutableTransaction& tx, const std::string& str
}
if (bSegWit) {
- scriptPubKey = GetScriptForWitness(scriptPubKey);
+ scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(scriptPubKey));
}
if (bScriptHash) {
if (scriptPubKey.size() > MAX_SCRIPT_ELEMENT_SIZE) {
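The three GetScriptForWitness() replacements above make the intended output type explicit at the call site. A short sketch of the resulting idiom, assuming the script helpers used elsewhere in the codebase (pubkey and redeem_script stand in for whatever the caller has parsed):

    // P2WPKH: the witness program is the 20-byte hash of a compressed pubkey.
    CScript p2wpkh = GetScriptForDestination(WitnessV0KeyHash(pubkey));

    // P2WSH: the witness program is the 32-byte SHA256 of the redeem script.
    CScript p2wsh = GetScriptForDestination(WitnessV0ScriptHash(redeem_script));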
diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp
index b420463c00..06b0c86476 100644
--- a/src/bitcoin-wallet.cpp
+++ b/src/bitcoin-wallet.cpp
@@ -19,24 +19,24 @@
const std::function<std::string(const char*)> G_TRANSLATION_FUN = nullptr;
UrlDecodeFn* const URL_DECODE = nullptr;
-static void SetupWalletToolArgs()
+static void SetupWalletToolArgs(ArgsManager& argsman)
{
- SetupHelpOptions(gArgs);
- SetupChainParamsBaseOptions();
+ SetupHelpOptions(argsman);
+ SetupChainParamsBaseOptions(argsman);
- gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-wallet=<wallet-name>", "Specify wallet name", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-debug=<category>", "Output debugging information (default: 0).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-wallet=<wallet-name>", "Specify wallet name", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-debug=<category>", "Output debugging information (default: 0).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
- gArgs.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
+ argsman.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS);
}
static bool WalletAppInit(int argc, char* argv[])
{
- SetupWalletToolArgs();
+ SetupWalletToolArgs(gArgs);
std::string error_message;
if (!gArgs.ParseParameters(argc, argv, error_message)) {
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error_message);
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index b04cc12059..227626f40f 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -50,28 +50,23 @@ static bool AppInit(int argc, char* argv[])
util::ThreadSetInternalName("init");
- //
- // Parameters
- //
// If Qt is used, parameters/bitcoin.conf are parsed in qt/bitcoin.cpp's main()
SetupServerArgs(node);
+ ArgsManager& args = *Assert(node.args);
std::string error;
- if (!gArgs.ParseParameters(argc, argv, error)) {
+ if (!args.ParseParameters(argc, argv, error)) {
return InitError(Untranslated(strprintf("Error parsing command line arguments: %s\n", error)));
}
// Process help and version before taking care about datadir
- if (HelpRequested(gArgs) || gArgs.IsArgSet("-version")) {
+ if (HelpRequested(args) || args.IsArgSet("-version")) {
std::string strUsage = PACKAGE_NAME " version " + FormatFullVersion() + "\n";
- if (gArgs.IsArgSet("-version"))
- {
+ if (args.IsArgSet("-version")) {
strUsage += FormatParagraph(LicenseInfo()) + "\n";
- }
- else
- {
+ } else {
strUsage += "\nUsage: bitcoind [options] Start " PACKAGE_NAME "\n";
- strUsage += "\n" + gArgs.GetHelpMessage();
+ strUsage += "\n" + args.GetHelpMessage();
}
tfm::format(std::cout, "%s", strUsage);
@@ -82,14 +77,14 @@ static bool AppInit(int argc, char* argv[])
try
{
if (!CheckDataDirOption()) {
- return InitError(Untranslated(strprintf("Specified data directory \"%s\" does not exist.\n", gArgs.GetArg("-datadir", ""))));
+ return InitError(Untranslated(strprintf("Specified data directory \"%s\" does not exist.\n", args.GetArg("-datadir", ""))));
}
- if (!gArgs.ReadConfigFiles(error, true)) {
+ if (!args.ReadConfigFiles(error, true)) {
return InitError(Untranslated(strprintf("Error reading configuration file: %s\n", error)));
}
// Check for -chain, -testnet or -regtest parameter (Params() calls are only valid after this clause)
try {
- SelectParams(gArgs.GetChainName());
+ SelectParams(args.GetChainName());
} catch (const std::exception& e) {
return InitError(Untranslated(strprintf("%s\n", e.what())));
}
@@ -101,23 +96,21 @@ static bool AppInit(int argc, char* argv[])
}
}
- if (!gArgs.InitSettings(error)) {
+ if (!args.InitSettings(error)) {
InitError(Untranslated(error));
return false;
}
// -server defaults to true for bitcoind but not for the GUI so do this here
- gArgs.SoftSetBoolArg("-server", true);
+ args.SoftSetBoolArg("-server", true);
// Set this early so that parameter interactions go to console
- InitLogging();
- InitParameterInteraction();
- if (!AppInitBasicSetup())
- {
+ InitLogging(args);
+ InitParameterInteraction(args);
+ if (!AppInitBasicSetup(args)) {
// InitError will have been called with detailed error, which ends up on console
return false;
}
- if (!AppInitParameterInteraction())
- {
+ if (!AppInitParameterInteraction(args)) {
// InitError will have been called with detailed error, which ends up on console
return false;
}
@@ -126,8 +119,7 @@ static bool AppInit(int argc, char* argv[])
// InitError will have been called with detailed error, which ends up on console
return false;
}
- if (gArgs.GetBoolArg("-daemon", false))
- {
+ if (args.GetBoolArg("-daemon", false)) {
#if HAVE_DECL_DAEMON
#if defined(MAC_OSX)
#pragma GCC diagnostic push
diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp
index 5f5bed5bda..9a6fb4abd0 100644
--- a/src/blockfilter.cpp
+++ b/src/blockfilter.cpp
@@ -291,7 +291,7 @@ uint256 BlockFilter::GetHash() const
const std::vector<unsigned char>& data = GetEncodedFilter();
uint256 result;
- CHash256().Write(data.data(), data.size()).Finalize(result.begin());
+ CHash256().Write(data).Finalize(result);
return result;
}
@@ -301,8 +301,8 @@ uint256 BlockFilter::ComputeHeader(const uint256& prev_header) const
uint256 result;
CHash256()
- .Write(filter_hash.begin(), filter_hash.size())
- .Write(prev_header.begin(), prev_header.size())
- .Finalize(result.begin());
+ .Write(filter_hash)
+ .Write(prev_header)
+ .Finalize(result);
return result;
}
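These two hunks show the new Span-based hasher interface: Write() accepts any object viewable as a span of bytes (a std::vector<unsigned char>, a uint256, ...) and Finalize() writes straight into a uint256 rather than a raw pointer. A minimal usage sketch with made-up values:

    std::vector<unsigned char> encoded_filter{0x01, 0x02, 0x03};
    uint256 filter_hash, prev_header, header;

    CHash256().Write(encoded_filter).Finalize(filter_hash);
    // Calls chain, mirroring ComputeHeader() above.
    CHash256().Write(filter_hash).Write(prev_header).Finalize(header);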
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 092c45e4ce..ffd2076c9a 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -110,7 +110,7 @@ public:
// Note that of those which support the service bits prefix, most only support a subset of
// possible options.
- // This is fine at runtime as we'll fall back to using them as a oneshot if they don't support the
+ // This is fine at runtime as we'll fall back to using them as an addrfetch if they don't support the
// service bits we want, but we should get them updated to support all service bits wanted by any
// release ASAP to avoid it where possible.
vSeeds.emplace_back("seed.bitcoin.sipa.be"); // Pieter Wuille, only supports x1, x5, x9, and xd
@@ -341,8 +341,8 @@ public:
void CRegTestParams::UpdateActivationParametersFromArgs(const ArgsManager& args)
{
- if (gArgs.IsArgSet("-segwitheight")) {
- int64_t height = gArgs.GetArg("-segwitheight", consensus.SegwitHeight);
+ if (args.IsArgSet("-segwitheight")) {
+ int64_t height = args.GetArg("-segwitheight", consensus.SegwitHeight);
if (height < -1 || height >= std::numeric_limits<int>::max()) {
throw std::runtime_error(strprintf("Activation height %ld for segwit is out of valid range. Use -1 to disable segwit.", height));
} else if (height == -1) {
diff --git a/src/chainparamsbase.cpp b/src/chainparamsbase.cpp
index 894b8553c4..1825ced640 100644
--- a/src/chainparamsbase.cpp
+++ b/src/chainparamsbase.cpp
@@ -15,14 +15,14 @@ const std::string CBaseChainParams::MAIN = "main";
const std::string CBaseChainParams::TESTNET = "test";
const std::string CBaseChainParams::REGTEST = "regtest";
-void SetupChainParamsBaseOptions()
+void SetupChainParamsBaseOptions(ArgsManager& argsman)
{
- gArgs.AddArg("-chain=<chain>", "Use the chain <chain> (default: main). Allowed values: main, test, regtest", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
- gArgs.AddArg("-regtest", "Enter regression test mode, which uses a special chain in which blocks can be solved instantly. "
+ argsman.AddArg("-chain=<chain>", "Use the chain <chain> (default: main). Allowed values: main, test, regtest", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
+ argsman.AddArg("-regtest", "Enter regression test mode, which uses a special chain in which blocks can be solved instantly. "
"This is intended for regression testing tools and app development. Equivalent to -chain=regtest.", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
- gArgs.AddArg("-segwitheight=<n>", "Set the activation height of segwit. -1 to disable. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-testnet", "Use the test chain. Equivalent to -chain=test.", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
- gArgs.AddArg("-vbparams=deployment:start:end", "Use given start/end times for specified version bits deployment (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
+ argsman.AddArg("-segwitheight=<n>", "Set the activation height of segwit. -1 to disable. (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-testnet", "Use the test chain. Equivalent to -chain=test.", ArgsManager::ALLOW_ANY, OptionsCategory::CHAINPARAMS);
+ argsman.AddArg("-vbparams=deployment:start:end", "Use given start/end times for specified version bits deployment (regtest-only)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CHAINPARAMS);
}
static std::unique_ptr<CBaseChainParams> globalChainBaseParams;
diff --git a/src/chainparamsbase.h b/src/chainparamsbase.h
index 3c139931ea..1c52d0ea97 100644
--- a/src/chainparamsbase.h
+++ b/src/chainparamsbase.h
@@ -8,6 +8,8 @@
#include <memory>
#include <string>
+class ArgsManager;
+
/**
* CBaseChainParams defines the base parameters (shared between bitcoin-cli and bitcoind)
* of a given instance of the Bitcoin system.
@@ -43,7 +45,7 @@ std::unique_ptr<CBaseChainParams> CreateBaseChainParams(const std::string& chain
/**
*Set the arguments for chainparams
*/
-void SetupChainParamsBaseOptions();
+void SetupChainParamsBaseOptions(ArgsManager& argsman);
/**
* Return the currently selected parameters. This won't change after app
diff --git a/src/coins.cpp b/src/coins.cpp
index 7b76c13f98..5de2ed7810 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -245,6 +245,14 @@ bool CCoinsViewCache::HaveInputs(const CTransaction& tx) const
return true;
}
+void CCoinsViewCache::ReallocateCache()
+{
+ // Cache should be empty when we're calling this.
+ assert(cacheCoins.size() == 0);
+ cacheCoins.~CCoinsMap();
+ ::new (&cacheCoins) CCoinsMap();
+}
+
static const size_t MIN_TRANSACTION_OUTPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxOut(), PROTOCOL_VERSION);
static const size_t MAX_OUTPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_OUTPUT_WEIGHT;
diff --git a/src/coins.h b/src/coins.h
index a3f34bb0ee..a3e241ac90 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -318,6 +318,13 @@ public:
//! Check whether all prevouts of the transaction are present in the UTXO set represented by this view
bool HaveInputs(const CTransaction& tx) const;
+ //! Force a reallocation of the cache map. This is required when downsizing
+ //! the cache because the map's allocator may be hanging onto a lot of
+ //! memory despite having called .clear().
+ //!
+ //! See: https://stackoverflow.com/questions/42114044/how-to-release-unordered-map-memory
+ void ReallocateCache();
+
private:
/**
* @note this is marked const, but may actually append to `cacheCoins`, increasing
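ReallocateCache() uses the explicit-destructor-plus-placement-new idiom to force the node-based map to hand its memory back; as the new comment notes, .clear() alone may leave the allocator holding on to buckets and nodes. A self-contained sketch of the same idiom on a plain std::unordered_map (illustrative only; the patch applies it to CCoinsMap):

    #include <cassert>
    #include <new>
    #include <unordered_map>

    using Map = std::unordered_map<int, int>;

    void ReleaseMapMemory(Map& m)
    {
        assert(m.empty());  // only intended for an already-cleared map
        m.~Map();           // destroy in place, releasing buckets and nodes
        ::new (&m) Map();   // reconstruct an empty map in the same storage
    }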
diff --git a/src/compat.h b/src/compat.h
index 68f6eb692c..0be02cae03 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -11,9 +11,6 @@
#endif
#ifdef WIN32
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN 1
-#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
diff --git a/src/consensus/validation.h b/src/consensus/validation.h
index 8de7a8f2d8..2a93a090d6 100644
--- a/src/consensus/validation.h
+++ b/src/consensus/validation.h
@@ -26,7 +26,8 @@ enum class TxValidationResult {
* is uninteresting.
*/
TX_RECENT_CONSENSUS_CHANGE,
- TX_NOT_STANDARD, //!< didn't meet our local policy rules
+ TX_INPUTS_NOT_STANDARD, //!< inputs (covered by txid) failed policy rules
+ TX_NOT_STANDARD, //!< otherwise didn't meet our local policy rules
TX_MISSING_INPUTS, //!< transaction was missing some of its inputs
TX_PREMATURE_SPEND, //!< transaction spends a coinbase too early, or violates locktime/sequence locks
/**
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 34cfeecc6f..f9d918cb6d 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -48,13 +48,14 @@ std::string FormatScript(const CScript& script)
}
}
if (vch.size() > 0) {
- ret += strprintf("0x%x 0x%x ", HexStr(it2, it - vch.size()), HexStr(it - vch.size(), it));
+ ret += strprintf("0x%x 0x%x ", HexStr(std::vector<uint8_t>(it2, it - vch.size())),
+ HexStr(std::vector<uint8_t>(it - vch.size(), it)));
} else {
- ret += strprintf("0x%x ", HexStr(it2, it));
+ ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, it)));
}
continue;
}
- ret += strprintf("0x%x ", HexStr(it2, script.end()));
+ ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, script.end())));
break;
}
return ret.substr(0, ret.size() - 1);
diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp
index 0f7848bae1..380d4eb8ac 100644
--- a/src/dummywallet.cpp
+++ b/src/dummywallet.cpp
@@ -20,14 +20,14 @@ class DummyWalletInit : public WalletInitInterface {
public:
bool HasWalletSupport() const override {return false;}
- void AddWalletOptions() const override;
+ void AddWalletOptions(ArgsManager& argsman) const override;
bool ParameterInteraction() const override {return true;}
void Construct(NodeContext& node) const override {LogPrintf("No wallet support compiled in!\n");}
};
-void DummyWalletInit::AddWalletOptions() const
+void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const
{
- gArgs.AddHiddenArgs({
+ argsman.AddHiddenArgs({
"-addresstype",
"-avoidpartialspends",
"-changetype",
@@ -35,6 +35,7 @@ void DummyWalletInit::AddWalletOptions() const
"-discardfee=<amt>",
"-fallbackfee=<amt>",
"-keypool=<n>",
+ "-maxapsfee=<n>",
"-maxtxfee=<amt>",
"-mintxfee=<amt>",
"-paytxfee=<amt>",
diff --git a/src/hash.cpp b/src/hash.cpp
index 26150e5ca8..83b90ae063 100644
--- a/src/hash.cpp
+++ b/src/hash.cpp
@@ -12,7 +12,7 @@ inline uint32_t ROTL32(uint32_t x, int8_t r)
return (x << r) | (x >> (32 - r));
}
-unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash)
+unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash)
{
// The following is MurmurHash3 (x86_32), see http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
uint32_t h1 = nHashSeed;
@@ -77,3 +77,10 @@ void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char he
num[3] = (nChild >> 0) & 0xFF;
CHMAC_SHA512(chainCode.begin(), chainCode.size()).Write(&header, 1).Write(data, 32).Write(num, 4).Finalize(output);
}
+
+uint256 SHA256Uint256(const uint256& input)
+{
+ uint256 result;
+ CSHA256().Write(input.begin(), 32).Finalize(result.begin());
+ return result;
+}
diff --git a/src/hash.h b/src/hash.h
index c295568a3e..c16bbb48ce 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -6,6 +6,7 @@
#ifndef BITCOIN_HASH_H
#define BITCOIN_HASH_H
+#include <attributes.h>
#include <crypto/common.h>
#include <crypto/ripemd160.h>
#include <crypto/sha256.h>
@@ -25,14 +26,15 @@ private:
public:
static const size_t OUTPUT_SIZE = CSHA256::OUTPUT_SIZE;
- void Finalize(unsigned char hash[OUTPUT_SIZE]) {
+ void Finalize(Span<unsigned char> output) {
+ assert(output.size() == OUTPUT_SIZE);
unsigned char buf[CSHA256::OUTPUT_SIZE];
sha.Finalize(buf);
- sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash);
+ sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data());
}
- CHash256& Write(const unsigned char *data, size_t len) {
- sha.Write(data, len);
+ CHash256& Write(Span<const unsigned char> input) {
+ sha.Write(input.data(), input.size());
return *this;
}
@@ -49,14 +51,15 @@ private:
public:
static const size_t OUTPUT_SIZE = CRIPEMD160::OUTPUT_SIZE;
- void Finalize(unsigned char hash[OUTPUT_SIZE]) {
+ void Finalize(Span<unsigned char> output) {
+ assert(output.size() == OUTPUT_SIZE);
unsigned char buf[CSHA256::OUTPUT_SIZE];
sha.Finalize(buf);
- CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash);
+ CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data());
}
- CHash160& Write(const unsigned char *data, size_t len) {
- sha.Write(data, len);
+ CHash160& Write(Span<const unsigned char> input) {
+ sha.Write(input.data(), input.size());
return *this;
}
@@ -67,57 +70,36 @@ public:
};
/** Compute the 256-bit hash of an object. */
-template<typename T1>
-inline uint256 Hash(const T1 pbegin, const T1 pend)
+template<typename T>
+inline uint256 Hash(const T& in1)
{
- static const unsigned char pblank[1] = {};
uint256 result;
- CHash256().Write(pbegin == pend ? pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0]))
- .Finalize((unsigned char*)&result);
+ CHash256().Write(MakeUCharSpan(in1)).Finalize(result);
return result;
}
/** Compute the 256-bit hash of the concatenation of two objects. */
template<typename T1, typename T2>
-inline uint256 Hash(const T1 p1begin, const T1 p1end,
- const T2 p2begin, const T2 p2end) {
- static const unsigned char pblank[1] = {};
+inline uint256 Hash(const T1& in1, const T2& in2) {
uint256 result;
- CHash256().Write(p1begin == p1end ? pblank : (const unsigned char*)&p1begin[0], (p1end - p1begin) * sizeof(p1begin[0]))
- .Write(p2begin == p2end ? pblank : (const unsigned char*)&p2begin[0], (p2end - p2begin) * sizeof(p2begin[0]))
- .Finalize((unsigned char*)&result);
+ CHash256().Write(MakeUCharSpan(in1)).Write(MakeUCharSpan(in2)).Finalize(result);
return result;
}
/** Compute the 160-bit hash an object. */
template<typename T1>
-inline uint160 Hash160(const T1 pbegin, const T1 pend)
+inline uint160 Hash160(const T1& in1)
{
- static unsigned char pblank[1] = {};
uint160 result;
- CHash160().Write(pbegin == pend ? pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0]))
- .Finalize((unsigned char*)&result);
+ CHash160().Write(MakeUCharSpan(in1)).Finalize(result);
return result;
}
-/** Compute the 160-bit hash of a vector. */
-inline uint160 Hash160(const std::vector<unsigned char>& vch)
-{
- return Hash160(vch.begin(), vch.end());
-}
-
-/** Compute the 160-bit hash of a vector. */
-template<unsigned int N>
-inline uint160 Hash160(const prevector<N, unsigned char>& vch)
-{
- return Hash160(vch.begin(), vch.end());
-}
-
/** A writer stream (for serialization) that computes a 256-bit hash. */
class CHashWriter
{
private:
- CHash256 ctx;
+ CSHA256 ctx;
const int nType;
const int nVersion;
@@ -132,10 +114,24 @@ public:
ctx.Write((const unsigned char*)pch, size);
}
- // invalidates the object
+ /** Compute the double-SHA256 hash of all data written to this object.
+ *
+ * Invalidates this object.
+ */
uint256 GetHash() {
uint256 result;
- ctx.Finalize((unsigned char*)&result);
+ ctx.Finalize(result.begin());
+ ctx.Reset().Write(result.begin(), CSHA256::OUTPUT_SIZE).Finalize(result.begin());
+ return result;
+ }
+
+ /** Compute the SHA256 hash of all data written to this object.
+ *
+ * Invalidates this object.
+ */
+ uint256 GetSHA256() {
+ uint256 result;
+ ctx.Finalize(result.begin());
return result;
}
@@ -143,9 +139,8 @@ public:
* Returns the first 64 bits from the resulting hash.
*/
inline uint64_t GetCheapHash() {
- unsigned char result[CHash256::OUTPUT_SIZE];
- ctx.Finalize(result);
- return ReadLE64(result);
+ uint256 result = GetHash();
+ return ReadLE64(result.begin());
}
template<typename T>
@@ -200,7 +195,10 @@ uint256 SerializeHash(const T& obj, int nType=SER_GETHASH, int nVersion=PROTOCOL
return ss.GetHash();
}
-unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash);
+/** Single-SHA256 a 32-byte input (represented as uint256). */
+NODISCARD uint256 SHA256Uint256(const uint256& input);
+
+unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash);
void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char header, const unsigned char data[32], unsigned char output[64]);
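In short, Hash() and Hash160() now take whole objects viewable as byte spans instead of begin/end iterator pairs, and CHashWriter keeps a single CSHA256 context so it can hand out either the double-SHA256 digest (GetHash) or the single-SHA256 digest (GetSHA256) of everything serialized into it. A usage sketch (the payload is made up; each Get* call invalidates the writer, so only one is taken per object):

    std::vector<unsigned char> payload{0xde, 0xad, 0xbe, 0xef};

    uint256 dsha = Hash(payload);     // SHA256(SHA256(payload))
    uint160 h160 = Hash160(payload);  // RIPEMD160(SHA256(payload))

    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    ss << payload;                    // serialized bytes feed the hasher
    uint256 single = ss.GetSHA256();  // single SHA256 of the written data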
diff --git a/src/index/base.cpp b/src/index/base.cpp
index f587205a28..e67b813763 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -319,3 +319,12 @@ void BaseIndex::Stop()
m_thread_sync.join();
}
}
+
+IndexSummary BaseIndex::GetSummary() const
+{
+ IndexSummary summary{};
+ summary.name = GetName();
+ summary.synced = m_synced;
+ summary.best_block_height = m_best_block_index.load()->nHeight;
+ return summary;
+}
diff --git a/src/index/base.h b/src/index/base.h
index 3fab810bb2..8559e3cb64 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -13,6 +13,12 @@
class CBlockIndex;
+struct IndexSummary {
+ std::string name;
+ bool synced{false};
+ int best_block_height{0};
+};
+
/**
* Base class for indices of blockchain data. This implements
* CValidationInterface and ensures blocks are indexed sequentially according
@@ -21,6 +27,13 @@ class CBlockIndex;
class BaseIndex : public CValidationInterface
{
protected:
+ /**
+ * The database stores a block locator of the chain the database is synced to
+ * so that the index can efficiently determine the point it last stopped at.
+ * A locator is used instead of a simple hash of the chain tip because blocks
+ * and block index entries may not be flushed to disk until after this database
+ * is updated.
+ */
class DB : public CDBWrapper
{
public:
@@ -106,6 +119,9 @@ public:
/// Stops the instance from staying in sync with blockchain updates.
void Stop();
+
+ /// Get a summary of the index and its state.
+ IndexSummary GetSummary() const;
};
#endif // BITCOIN_INDEX_BASE_H
diff --git a/src/index/disktxpos.h b/src/index/disktxpos.h
new file mode 100644
index 0000000000..69696b0ec5
--- /dev/null
+++ b/src/index/disktxpos.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_INDEX_DISKTXPOS_H
+#define BITCOIN_INDEX_DISKTXPOS_H
+
+#include <flatfile.h>
+#include <serialize.h>
+
+struct CDiskTxPos : public FlatFilePos
+{
+ unsigned int nTxOffset; // after header
+
+ SERIALIZE_METHODS(CDiskTxPos, obj)
+ {
+ READWRITEAS(FlatFilePos, obj);
+ READWRITE(VARINT(obj.nTxOffset));
+ }
+
+ CDiskTxPos(const FlatFilePos &blockIn, unsigned int nTxOffsetIn) : FlatFilePos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
+ }
+
+ CDiskTxPos() {
+ SetNull();
+ }
+
+ void SetNull() {
+ FlatFilePos::SetNull();
+ nTxOffset = 0;
+ }
+};
+
+
+#endif // BITCOIN_INDEX_DISKTXPOS_H
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 64472714cc..462ac5962f 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <index/disktxpos.h>
#include <index/txindex.h>
#include <node/ui_interface.h>
#include <shutdown.h>
@@ -15,38 +16,9 @@ constexpr char DB_TXINDEX_BLOCK = 'T';
std::unique_ptr<TxIndex> g_txindex;
-struct CDiskTxPos : public FlatFilePos
-{
- unsigned int nTxOffset; // after header
-
- SERIALIZE_METHODS(CDiskTxPos, obj)
- {
- READWRITEAS(FlatFilePos, obj);
- READWRITE(VARINT(obj.nTxOffset));
- }
-
- CDiskTxPos(const FlatFilePos &blockIn, unsigned int nTxOffsetIn) : FlatFilePos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
- }
- CDiskTxPos() {
- SetNull();
- }
- void SetNull() {
- FlatFilePos::SetNull();
- nTxOffset = 0;
- }
-};
-
-/**
- * Access to the txindex database (indexes/txindex/)
- *
- * The database stores a block locator of the chain the database is synced to
- * so that the TxIndex can efficiently determine the point it last stopped at.
- * A locator is used instead of a simple hash of the chain tip because blocks
- * and block index entries may not be flushed to disk until after this database
- * is updated.
- */
+/** Access to the txindex database (indexes/txindex/) */
class TxIndex::DB : public BaseIndex::DB
{
public:
diff --git a/src/init.cpp b/src/init.cpp
index acf9f8bd91..ecd57960ad 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -24,6 +24,7 @@
#include <index/blockfilterindex.h>
#include <index/txindex.h>
#include <interfaces/chain.h>
+#include <interfaces/node.h>
#include <key.h>
#include <miner.h>
#include <net.h>
@@ -106,14 +107,14 @@ static const char* DEFAULT_ASMAP_FILENAME="ip_asn.map";
*/
static const char* BITCOIN_PID_FILENAME = "bitcoind.pid";
-static fs::path GetPidFile()
+static fs::path GetPidFile(const ArgsManager& args)
{
- return AbsPathForConfigVal(fs::path(gArgs.GetArg("-pid", BITCOIN_PID_FILENAME)));
+ return AbsPathForConfigVal(fs::path(args.GetArg("-pid", BITCOIN_PID_FILENAME)));
}
-NODISCARD static bool CreatePidFile()
+NODISCARD static bool CreatePidFile(const ArgsManager& args)
{
- fsbridge::ofstream file{GetPidFile()};
+ fsbridge::ofstream file{GetPidFile(args)};
if (file) {
#ifdef WIN32
tfm::format(file, "%d\n", GetCurrentProcessId());
@@ -122,7 +123,7 @@ NODISCARD static bool CreatePidFile()
#endif
return true;
} else {
- return InitError(strprintf(_("Unable to create the PID file '%s': %s"), GetPidFile().string(), std::strerror(errno)));
+ return InitError(strprintf(_("Unable to create the PID file '%s': %s"), GetPidFile(args).string(), std::strerror(errno)));
}
}
@@ -179,6 +180,7 @@ void Shutdown(NodeContext& node)
TRY_LOCK(g_shutdown_mutex, lock_shutdown);
if (!lock_shutdown) return;
LogPrintf("%s: In progress...\n", __func__);
+ Assert(node.args);
/// Note: Shutdown() must be able to handle cases in which initialization failed part of the way,
/// for example if the data directory was found to be locked.
@@ -229,7 +231,7 @@ void Shutdown(NodeContext& node)
node.connman.reset();
node.banman.reset();
- if (::mempool.IsLoaded() && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
+ if (::mempool.IsLoaded() && node.args->GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
DumpMempool(::mempool);
}
@@ -300,19 +302,19 @@ void Shutdown(NodeContext& node)
GetMainSignals().UnregisterBackgroundSignalScheduler();
globalVerifyHandle.reset();
ECC_Stop();
- node.args = nullptr;
node.mempool = nullptr;
node.chainman = nullptr;
node.scheduler.reset();
try {
- if (!fs::remove(GetPidFile())) {
+ if (!fs::remove(GetPidFile(*node.args))) {
LogPrintf("%s: Unable to remove PID file: File does not exist\n", __func__);
}
} catch (const fs::filesystem_error& e) {
LogPrintf("%s: Unable to remove PID file: %s\n", __func__, fsbridge::get_filesystem_error_message(e));
}
+ node.args = nullptr;
LogPrintf("%s: done\n", __func__);
}
@@ -369,9 +371,10 @@ void SetupServerArgs(NodeContext& node)
{
assert(!node.args);
node.args = &gArgs;
+ ArgsManager& argsman = *node.args;
- SetupHelpOptions(gArgs);
- gArgs.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now
+ SetupHelpOptions(argsman);
+ argsman.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now
const auto defaultBaseParams = CreateBaseChainParams(CBaseChainParams::MAIN);
const auto testnetBaseParams = CreateBaseChainParams(CBaseChainParams::TESTNET);
@@ -386,109 +389,109 @@ void SetupServerArgs(NodeContext& node)
// GUI args. These will be overwritten by SetupUIArgs for the GUI
"-choosedatadir", "-lang=<lang>", "-min", "-resetguisettings", "-splash", "-uiplatform"};
- gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#if HAVE_SYSTEM
- gArgs.AddArg("-alertnotify=<cmd>", "Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-alertnotify=<cmd>", "Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#endif
- gArgs.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#if HAVE_SYSTEM
- gArgs.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#endif
- gArgs.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-debuglogfile=<file>", strprintf("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (-nodebuglogfile to disable; default: %s)", DEFAULT_DEBUGLOGFILE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-par=<n>", strprintf("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)",
+ argsman.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-debuglogfile=<file>", strprintf("Specify location of debug log file. Relative paths will be prefixed by a net-specific datadir location. (-nodebuglogfile to disable; default: %s)", DEFAULT_DEBUGLOGFILE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-mempoolexpiry=<n>", strprintf("Do not keep transactions in the mempool longer than <n> hours (default: %u)", DEFAULT_MEMPOOL_EXPIRY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-minimumchainwork=<hex>", strprintf("Minimum work assumed to exist on a valid chain in hex (default: %s, testnet: %s)", defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(), testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-par=<n>", strprintf("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)",
-GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-persistmempool", strprintf("Whether to save the mempool on shutdown and load on restart (default: %u)", DEFAULT_PERSIST_MEMPOOL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-pid=<file>", strprintf("Specify pid file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)", BITCOIN_PID_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. "
+ argsman.AddArg("-persistmempool", strprintf("Whether to save the mempool on shutdown and load on restart (default: %u)", DEFAULT_PERSIST_MEMPOOL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-pid=<file>", strprintf("Specify pid file. Relative paths will be prefixed by a net-specific datadir location. (default: %s)", BITCOIN_PID_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks, and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex and -rescan. "
"Warning: Reverting this setting requires re-downloading the entire blockchain. "
"(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)", MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#ifndef WIN32
- gArgs.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#else
hidden_args.emplace_back("-sysperms");
#endif
- gArgs.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-blockfilterindex=<type>",
+ argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-blockfilterindex=<type>",
strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
" If <type> is not supplied or if <type> = 1, indexes for all known types are enabled.",
ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- gArgs.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-bantime=<n>", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-bind=<addr>", "Bind to given address and always listen on it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-listenonion", strprintf("Automatically create Tor hidden service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-port=<port>", strprintf("Listen for connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
- gArgs.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION);
+ argsman.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-bantime=<n>", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-bind=<addr>", "Bind to given address and always listen on it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-port=<port>", strprintf("Listen for connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
+ argsman.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION);
#ifdef USE_UPNP
#if USE_UPNP
- gArgs.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
#else
- gArgs.AddArg("-upnp", strprintf("Use UPnP to map the listening port (default: %u)", 0), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-upnp", strprintf("Use UPnP to map the listening port (default: %u)", 0), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
#endif
#else
hidden_args.emplace_back("-upnp");
#endif
- gArgs.AddArg("-whitebind=<[permissions@]addr>", "Bind to the given address and add permission flags to the peers connecting to it. "
+ argsman.AddArg("-whitebind=<[permissions@]addr>", "Bind to the given address and add permission flags to the peers connecting to it. "
"Use [host]:port notation for IPv6. Allowed permissions: " + Join(NET_PERMISSIONS_DOC, ", ") + ". "
"Specify multiple permissions separated by commas (default: download,noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-whitelist=<[permissions@]IP address or network>", "Add permission flags to the peers connecting from the given IP address (e.g. 1.2.3.4) or "
+ argsman.AddArg("-whitelist=<[permissions@]IP address or network>", "Add permission flags to the peers connecting from the given IP address (e.g. 1.2.3.4) or "
"CIDR-notated network (e.g. 1.2.3.0/24). Uses the same permissions as "
"-whitebind. Can be specified multiple times." , ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- g_wallet_init_interface.AddWalletOptions();
+ g_wallet_init_interface.AddWalletOptions(argsman);
#if ENABLE_ZMQ
- gArgs.AddArg("-zmqpubhashblock=<address>", "Enable publish hash block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawtx=<address>", "Enable publish raw transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubhashblockhwm=<n>", strprintf("Set publish hash block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubhashtxhwm=<n>", strprintf("Set publish hash transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawblockhwm=<n>", strprintf("Set publish raw block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
- gArgs.AddArg("-zmqpubrawtxhwm=<n>", strprintf("Set publish raw transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashblock=<address>", "Enable publish hash block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawtx=<address>", "Enable publish raw transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashblockhwm=<n>", strprintf("Set publish hash block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubhashtxhwm=<n>", strprintf("Set publish hash transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawblockhwm=<n>", strprintf("Set publish raw block outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
+ argsman.AddArg("-zmqpubrawtxhwm=<n>", strprintf("Set publish raw transaction outbound message high water mark (default: %d)", CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM), ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
#else
hidden_args.emplace_back("-zmqpubhashblock=<address>");
hidden_args.emplace_back("-zmqpubhashtx=<address>");
@@ -500,82 +503,82 @@ void SetupServerArgs(NodeContext& node)
hidden_args.emplace_back("-zmqpubrawtxhwm=<n>");
#endif
- gArgs.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checklevel=<n>", strprintf("How thorough the block verification of -checkblocks is: %s (0-4, default: %u)", Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checkblockindex", strprintf("Do a consistency check for the block tree, chainstate, and other validation data structures occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block 295000 (default: %u)", DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). "
+ argsman.AddArg("-checkblocks=<n>", strprintf("How many blocks to check at startup (default: %u, 0 = all)", DEFAULT_CHECKBLOCKS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checklevel=<n>", strprintf("How thorough the block verification of -checkblocks is: %s (0-4, default: %u)", Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkblockindex", strprintf("Do a consistency check for the block tree, chainstate, and other validation data structures occasionally. (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u, regtest: %u)", defaultChainParams->DefaultConsistencyChecks(), regtestChainParams->DefaultConsistencyChecks()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block 295000 (default: %u)", DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-deprecatedrpc=<method>", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). "
"If <category> is not supplied or if <category> = 1, output all debugging information. <category> can be: " + LogInstance().LogCategoriesString() + ".",
ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
#ifdef HAVE_THREAD_LOCAL
- gArgs.AddArg("-logthreadnames", strprintf("Prepend debug output with name of the originating thread (only available on platforms supporting thread_local) (default: %u)", DEFAULT_LOGTHREADNAMES), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-logthreadnames", strprintf("Prepend debug output with name of the originating thread (only available on platforms supporting thread_local) (default: %u)", DEFAULT_LOGTHREADNAMES), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
#else
hidden_args.emplace_back("-logthreadnames");
#endif
- gArgs.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
-
- SetupChainParamsBaseOptions();
-
- gArgs.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
+ argsman.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+ argsman.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
+
+ SetupChainParamsBaseOptions(argsman);
+
+ argsman.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define cost of relay, used for mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
-
-
- gArgs.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
- gArgs.AddArg("-blockmintxfee=<amt>", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
- gArgs.AddArg("-blockversion=<n>", "Override block version to test forking scenarios", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::BLOCK_CREATION);
-
- gArgs.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
- gArgs.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
- gArgs.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
- gArgs.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ argsman.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+
+
+ argsman.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
+ argsman.AddArg("-blockmintxfee=<amt>", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
+ argsman.AddArg("-blockversion=<n>", "Override block version to test forking scenarios", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::BLOCK_CREATION);
+
+ argsman.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ argsman.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
+ argsman.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
+ argsman.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
#if HAVE_DECL_DAEMON
- gArgs.AddArg("-daemon", "Run in the background as a daemon and accept commands", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-daemon", "Run in the background as a daemon and accept commands", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#else
hidden_args.emplace_back("-daemon");
#endif
// Add the hidden options
- gArgs.AddHiddenArgs(hidden_args);
+ argsman.AddHiddenArgs(hidden_args);
}
std::string LicenseInfo()
@@ -597,21 +600,6 @@ std::string LicenseInfo()
"\n";
}
-#if HAVE_SYSTEM
-static void BlockNotifyCallback(SynchronizationState sync_state, const CBlockIndex* pBlockIndex)
-{
- if (sync_state != SynchronizationState::POST_INIT || !pBlockIndex)
- return;
-
- std::string strCmd = gArgs.GetArg("-blocknotify", "");
- if (!strCmd.empty()) {
- boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex());
- std::thread t(runCommand, strCmd);
- t.detach(); // thread runs free
- }
-}
-#endif
-
static bool fHaveGenesis = false;
static Mutex g_genesis_wait_mutex;
static std::condition_variable g_genesis_wait_cv;
@@ -682,7 +670,7 @@ static void CleanupBlockRevFiles()
}
}
-static void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles)
+static void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles, const ArgsManager& args)
{
const CChainParams& chainparams = Params();
ScheduleBatchPriority();
@@ -744,13 +732,13 @@ static void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImp
}
}
- if (gArgs.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) {
+ if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) {
LogPrintf("Stopping after block import\n");
StartShutdown();
return;
}
} // End scope of CImportingNow
- if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
+ if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
LoadMempool(::mempool);
}
::mempool.SetIsLoaded(!ShutdownRequested());
@@ -778,6 +766,7 @@ static bool InitSanityCheck()
static bool AppInitServers(const util::Ref& context, NodeContext& node)
{
+ const ArgsManager& args = *Assert(node.args);
RPCServer::OnStarted(&OnRPCStarted);
RPCServer::OnStopped(&OnRPCStopped);
if (!InitHTTPServer())
@@ -786,71 +775,71 @@ static bool AppInitServers(const util::Ref& context, NodeContext& node)
node.rpc_interruption_point = RpcInterruptionPoint;
if (!StartHTTPRPC(context))
return false;
- if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) StartREST(context);
+ if (args.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) StartREST(context);
StartHTTPServer();
return true;
}
// Parameter interaction based on rules
-void InitParameterInteraction()
+void InitParameterInteraction(ArgsManager& args)
{
// when specifying an explicit binding address, you want to listen on it
// even when -connect or -proxy is specified
- if (gArgs.IsArgSet("-bind")) {
- if (gArgs.SoftSetBoolArg("-listen", true))
+ if (args.IsArgSet("-bind")) {
+ if (args.SoftSetBoolArg("-listen", true))
LogPrintf("%s: parameter interaction: -bind set -> setting -listen=1\n", __func__);
}
- if (gArgs.IsArgSet("-whitebind")) {
- if (gArgs.SoftSetBoolArg("-listen", true))
+ if (args.IsArgSet("-whitebind")) {
+ if (args.SoftSetBoolArg("-listen", true))
LogPrintf("%s: parameter interaction: -whitebind set -> setting -listen=1\n", __func__);
}
- if (gArgs.IsArgSet("-connect")) {
+ if (args.IsArgSet("-connect")) {
// when only connecting to trusted nodes, do not seed via DNS, or listen by default
- if (gArgs.SoftSetBoolArg("-dnsseed", false))
+ if (args.SoftSetBoolArg("-dnsseed", false))
LogPrintf("%s: parameter interaction: -connect set -> setting -dnsseed=0\n", __func__);
- if (gArgs.SoftSetBoolArg("-listen", false))
+ if (args.SoftSetBoolArg("-listen", false))
LogPrintf("%s: parameter interaction: -connect set -> setting -listen=0\n", __func__);
}
- if (gArgs.IsArgSet("-proxy")) {
+ if (args.IsArgSet("-proxy")) {
// to protect privacy, do not listen by default if a default proxy server is specified
- if (gArgs.SoftSetBoolArg("-listen", false))
+ if (args.SoftSetBoolArg("-listen", false))
LogPrintf("%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__);
// to protect privacy, do not use UPNP when a proxy is set. The user may still specify -listen=1
// to listen locally, so don't rely on this happening through -listen below.
- if (gArgs.SoftSetBoolArg("-upnp", false))
+ if (args.SoftSetBoolArg("-upnp", false))
LogPrintf("%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__);
// to protect privacy, do not discover addresses by default
- if (gArgs.SoftSetBoolArg("-discover", false))
+ if (args.SoftSetBoolArg("-discover", false))
LogPrintf("%s: parameter interaction: -proxy set -> setting -discover=0\n", __func__);
}
- if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
+ if (!args.GetBoolArg("-listen", DEFAULT_LISTEN)) {
// do not map ports or try to retrieve public IP when not listening (pointless)
- if (gArgs.SoftSetBoolArg("-upnp", false))
+ if (args.SoftSetBoolArg("-upnp", false))
LogPrintf("%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__);
- if (gArgs.SoftSetBoolArg("-discover", false))
+ if (args.SoftSetBoolArg("-discover", false))
LogPrintf("%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__);
- if (gArgs.SoftSetBoolArg("-listenonion", false))
+ if (args.SoftSetBoolArg("-listenonion", false))
LogPrintf("%s: parameter interaction: -listen=0 -> setting -listenonion=0\n", __func__);
}
- if (gArgs.IsArgSet("-externalip")) {
+ if (args.IsArgSet("-externalip")) {
// if an explicit public IP is specified, do not try to find others
- if (gArgs.SoftSetBoolArg("-discover", false))
+ if (args.SoftSetBoolArg("-discover", false))
LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__);
}
// disable whitelistrelay in blocksonly mode
- if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
- if (gArgs.SoftSetBoolArg("-whitelistrelay", false))
+ if (args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
+ if (args.SoftSetBoolArg("-whitelistrelay", false))
LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__);
}
// Forcing relay from whitelisted hosts implies we will accept relays from them in the first place.
- if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
- if (gArgs.SoftSetBoolArg("-whitelistrelay", true))
+ if (args.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
+ if (args.SoftSetBoolArg("-whitelistrelay", true))
LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__);
}
}
@@ -861,18 +850,18 @@ void InitParameterInteraction()
* Note that this is called very early in the process lifetime, so you should be
* careful about what global state you rely on here.
*/
-void InitLogging()
+void InitLogging(const ArgsManager& args)
{
- LogInstance().m_print_to_file = !gArgs.IsArgNegated("-debuglogfile");
- LogInstance().m_file_path = AbsPathForConfigVal(gArgs.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE));
- LogInstance().m_print_to_console = gArgs.GetBoolArg("-printtoconsole", !gArgs.GetBoolArg("-daemon", false));
- LogInstance().m_log_timestamps = gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
- LogInstance().m_log_time_micros = gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
+ LogInstance().m_print_to_file = !args.IsArgNegated("-debuglogfile");
+ LogInstance().m_file_path = AbsPathForConfigVal(args.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE));
+ LogInstance().m_print_to_console = args.GetBoolArg("-printtoconsole", !args.GetBoolArg("-daemon", false));
+ LogInstance().m_log_timestamps = args.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
+ LogInstance().m_log_time_micros = args.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
#ifdef HAVE_THREAD_LOCAL
- LogInstance().m_log_threadnames = gArgs.GetBoolArg("-logthreadnames", DEFAULT_LOGTHREADNAMES);
+ LogInstance().m_log_threadnames = args.GetBoolArg("-logthreadnames", DEFAULT_LOGTHREADNAMES);
#endif
- fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS);
+ fLogIPs = args.GetBoolArg("-logips", DEFAULT_LOGIPS);
std::string version_string = FormatFullVersion();
#ifdef DEBUG
@@ -907,7 +896,7 @@ std::set<BlockFilterType> g_enabled_filter_types;
std::terminate();
};
-bool AppInitBasicSetup()
+bool AppInitBasicSetup(ArgsManager& args)
{
// ********************************************************* Step 1: setup
#ifdef _MSC_VER
@@ -927,7 +916,7 @@ bool AppInitBasicSetup()
}
#ifndef WIN32
- if (!gArgs.GetBoolArg("-sysperms", false)) {
+ if (!args.GetBoolArg("-sysperms", false)) {
umask(077);
}
@@ -949,7 +938,7 @@ bool AppInitBasicSetup()
return true;
}
-bool AppInitParameterInteraction()
+bool AppInitParameterInteraction(const ArgsManager& args)
{
const CChainParams& chainparams = Params();
// ********************************************************* Step 2: parameter interactions
@@ -959,9 +948,9 @@ bool AppInitParameterInteraction()
// Error if network-specific options (-addnode, -connect, etc) are
// specified in default section of config file, but not overridden
// on the command line or in this network's section of the config file.
- std::string network = gArgs.GetChainName();
+ std::string network = args.GetChainName();
bilingual_str errors;
- for (const auto& arg : gArgs.GetUnsuitableSectionOnlyArgs()) {
+ for (const auto& arg : args.GetUnsuitableSectionOnlyArgs()) {
errors += strprintf(_("Config setting for %s only applied on %s network when in [%s] section.") + Untranslated("\n"), arg, network, network);
}
@@ -971,7 +960,7 @@ bool AppInitParameterInteraction()
// Warn if unrecognized section name are present in the config file.
bilingual_str warnings;
- for (const auto& section : gArgs.GetUnrecognizedSections()) {
+ for (const auto& section : args.GetUnrecognizedSections()) {
warnings += strprintf(Untranslated("%s:%i ") + _("Section [%s] is not recognized.") + Untranslated("\n"), section.m_file, section.m_line, section.m_name);
}
@@ -980,15 +969,15 @@ bool AppInitParameterInteraction()
}
if (!fs::is_directory(GetBlocksDir())) {
- return InitError(strprintf(_("Specified blocks directory \"%s\" does not exist."), gArgs.GetArg("-blocksdir", "")));
+ return InitError(strprintf(_("Specified blocks directory \"%s\" does not exist."), args.GetArg("-blocksdir", "")));
}
// parse and validate enabled filter types
- std::string blockfilterindex_value = gArgs.GetArg("-blockfilterindex", DEFAULT_BLOCKFILTERINDEX);
+ std::string blockfilterindex_value = args.GetArg("-blockfilterindex", DEFAULT_BLOCKFILTERINDEX);
if (blockfilterindex_value == "" || blockfilterindex_value == "1") {
g_enabled_filter_types = AllBlockFilterTypes();
} else if (blockfilterindex_value != "0") {
- const std::vector<std::string> names = gArgs.GetArgs("-blockfilterindex");
+ const std::vector<std::string> names = args.GetArgs("-blockfilterindex");
for (const auto& name : names) {
BlockFilterType filter_type;
if (!BlockFilterTypeByName(name, filter_type)) {
@@ -998,16 +987,18 @@ bool AppInitParameterInteraction()
}
}
- // Basic filters are the only supported filters. The basic filters index must be enabled
- // to serve compact filters
- if (gArgs.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS) &&
- g_enabled_filter_types.count(BlockFilterType::BASIC) != 1) {
- return InitError(_("Cannot set -peerblockfilters without -blockfilterindex."));
+ // Signal NODE_COMPACT_FILTERS if peerblockfilters and basic filters index are both enabled.
+ if (args.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS)) {
+ if (g_enabled_filter_types.count(BlockFilterType::BASIC) != 1) {
+ return InitError(_("Cannot set -peerblockfilters without -blockfilterindex."));
+ }
+
+ nLocalServices = ServiceFlags(nLocalServices | NODE_COMPACT_FILTERS);
}
// if using block pruning, then disallow txindex
- if (gArgs.GetArg("-prune", 0)) {
- if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX))
+ if (args.GetArg("-prune", 0)) {
+ if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX))
return InitError(_("Prune mode is incompatible with -txindex."));
if (!g_enabled_filter_types.empty()) {
return InitError(_("Prune mode is incompatible with -blockfilterindex."));
@@ -1015,14 +1006,14 @@ bool AppInitParameterInteraction()
}
// -bind and -whitebind can't be set when not listening
- size_t nUserBind = gArgs.GetArgs("-bind").size() + gArgs.GetArgs("-whitebind").size();
- if (nUserBind != 0 && !gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
+ size_t nUserBind = args.GetArgs("-bind").size() + args.GetArgs("-whitebind").size();
+ if (nUserBind != 0 && !args.GetBoolArg("-listen", DEFAULT_LISTEN)) {
return InitError(Untranslated("Cannot set -bind or -whitebind together with -listen=0"));
}
// Make sure enough file descriptors are available
int nBind = std::max(nUserBind, size_t(1));
- nUserMaxConnections = gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
+ nUserMaxConnections = args.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
nMaxConnections = std::max(nUserMaxConnections, 0);
// Trim requested connection counts, to fit into system limitations
@@ -1042,9 +1033,9 @@ bool AppInitParameterInteraction()
InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, because of system limitations."), nUserMaxConnections, nMaxConnections));
// ********************************************************* Step 3: parameter-to-internal-flags
- if (gArgs.IsArgSet("-debug")) {
+ if (args.IsArgSet("-debug")) {
// Special-case: if -debug=0/-nodebug is set, turn off debugging messages
- const std::vector<std::string> categories = gArgs.GetArgs("-debug");
+ const std::vector<std::string> categories = args.GetArgs("-debug");
if (std::none_of(categories.begin(), categories.end(),
[](std::string cat){return cat == "0" || cat == "none";})) {
@@ -1057,28 +1048,28 @@ bool AppInitParameterInteraction()
}
// Now remove the logging categories which were explicitly excluded
- for (const std::string& cat : gArgs.GetArgs("-debugexclude")) {
+ for (const std::string& cat : args.GetArgs("-debugexclude")) {
if (!LogInstance().DisableCategory(cat)) {
InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat));
}
}
// Checkmempool and checkblockindex default to true in regtest mode
- int ratio = std::min<int>(std::max<int>(gArgs.GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0), 1000000);
+ int ratio = std::min<int>(std::max<int>(args.GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0), 1000000);
if (ratio != 0) {
mempool.setSanityCheck(1.0 / ratio);
}
- fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks());
- fCheckpointsEnabled = gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED);
+ fCheckBlockIndex = args.GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks());
+ fCheckpointsEnabled = args.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED);
- hashAssumeValid = uint256S(gArgs.GetArg("-assumevalid", chainparams.GetConsensus().defaultAssumeValid.GetHex()));
+ hashAssumeValid = uint256S(args.GetArg("-assumevalid", chainparams.GetConsensus().defaultAssumeValid.GetHex()));
if (!hashAssumeValid.IsNull())
LogPrintf("Assuming ancestors of block %s have valid signatures.\n", hashAssumeValid.GetHex());
else
LogPrintf("Validating signatures for all blocks.\n");
- if (gArgs.IsArgSet("-minimumchainwork")) {
- const std::string minChainWorkStr = gArgs.GetArg("-minimumchainwork", "");
+ if (args.IsArgSet("-minimumchainwork")) {
+ const std::string minChainWorkStr = args.GetArg("-minimumchainwork", "");
if (!IsHexNumber(minChainWorkStr)) {
return InitError(strprintf(Untranslated("Invalid non-hex (%s) minimum chain work value specified"), minChainWorkStr));
}
@@ -1092,22 +1083,21 @@ bool AppInitParameterInteraction()
}
// mempool limits
- int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t nMempoolSizeMin = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40;
+ int64_t nMempoolSizeMax = args.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
+ int64_t nMempoolSizeMin = args.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40;
if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin)
return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0)));
// incremental relay fee sets the minimum feerate increase necessary for BIP 125 replacement in the mempool
// and the amount the mempool min fee increases above the feerate of txs evicted due to mempool limiting.
- if (gArgs.IsArgSet("-incrementalrelayfee"))
- {
+ if (args.IsArgSet("-incrementalrelayfee")) {
CAmount n = 0;
- if (!ParseMoney(gArgs.GetArg("-incrementalrelayfee", ""), n))
- return InitError(AmountErrMsg("incrementalrelayfee", gArgs.GetArg("-incrementalrelayfee", "")));
+ if (!ParseMoney(args.GetArg("-incrementalrelayfee", ""), n))
+ return InitError(AmountErrMsg("incrementalrelayfee", args.GetArg("-incrementalrelayfee", "")));
incrementalRelayFee = CFeeRate(n);
}
// block pruning; get the amount of disk space (in MiB) to allot for block & undo files
- int64_t nPruneArg = gArgs.GetArg("-prune", 0);
+ int64_t nPruneArg = args.GetArg("-prune", 0);
if (nPruneArg < 0) {
return InitError(_("Prune cannot be configured with a negative value."));
}
@@ -1124,20 +1114,20 @@ bool AppInitParameterInteraction()
fPruneMode = true;
}
- nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
+ nConnectTimeout = args.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
if (nConnectTimeout <= 0) {
nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
}
- peer_connect_timeout = gArgs.GetArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
+ peer_connect_timeout = args.GetArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
if (peer_connect_timeout <= 0) {
return InitError(Untranslated("peertimeout cannot be configured with a negative value."));
}
- if (gArgs.IsArgSet("-minrelaytxfee")) {
+ if (args.IsArgSet("-minrelaytxfee")) {
CAmount n = 0;
- if (!ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n)) {
- return InitError(AmountErrMsg("minrelaytxfee", gArgs.GetArg("-minrelaytxfee", "")));
+ if (!ParseMoney(args.GetArg("-minrelaytxfee", ""), n)) {
+ return InitError(AmountErrMsg("minrelaytxfee", args.GetArg("-minrelaytxfee", "")));
}
// High fee check is done afterward in CWallet::CreateWalletFromFile()
::minRelayTxFee = CFeeRate(n);
@@ -1149,48 +1139,46 @@ bool AppInitParameterInteraction()
// Sanity check argument for min fee for including tx in block
// TODO: Harmonize which arguments need sanity checking and where that happens
- if (gArgs.IsArgSet("-blockmintxfee"))
- {
+ if (args.IsArgSet("-blockmintxfee")) {
CAmount n = 0;
- if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n))
- return InitError(AmountErrMsg("blockmintxfee", gArgs.GetArg("-blockmintxfee", "")));
+ if (!ParseMoney(args.GetArg("-blockmintxfee", ""), n))
+ return InitError(AmountErrMsg("blockmintxfee", args.GetArg("-blockmintxfee", "")));
}
// Feerate used to define dust. Shouldn't be changed lightly as old
// implementations may inadvertently create non-standard transactions
- if (gArgs.IsArgSet("-dustrelayfee"))
- {
+ if (args.IsArgSet("-dustrelayfee")) {
CAmount n = 0;
- if (!ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n))
- return InitError(AmountErrMsg("dustrelayfee", gArgs.GetArg("-dustrelayfee", "")));
+ if (!ParseMoney(args.GetArg("-dustrelayfee", ""), n))
+ return InitError(AmountErrMsg("dustrelayfee", args.GetArg("-dustrelayfee", "")));
dustRelayFee = CFeeRate(n);
}
- fRequireStandard = !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
+ fRequireStandard = !args.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
if (!chainparams.IsTestChain() && !fRequireStandard) {
return InitError(strprintf(Untranslated("acceptnonstdtxn is not currently supported for %s chain"), chainparams.NetworkIDString()));
}
- nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp);
+ nBytesPerSigOp = args.GetArg("-bytespersigop", nBytesPerSigOp);
if (!g_wallet_init_interface.ParameterInteraction()) return false;
- fIsBareMultisigStd = gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG);
- fAcceptDatacarrier = gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER);
- nMaxDatacarrierBytes = gArgs.GetArg("-datacarriersize", nMaxDatacarrierBytes);
+ fIsBareMultisigStd = args.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG);
+ fAcceptDatacarrier = args.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER);
+ nMaxDatacarrierBytes = args.GetArg("-datacarriersize", nMaxDatacarrierBytes);
// Option to startup with mocktime set (used for regression testing):
- SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
+ SetMockTime(args.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
- if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
+ if (args.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
- if (gArgs.GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) < 0)
+ if (args.GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) < 0)
return InitError(Untranslated("rpcserialversion must be non-negative."));
- if (gArgs.GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) > 1)
+ if (args.GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) > 1)
return InitError(Untranslated("Unknown rpcserialversion requested."));
- nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
+ nMaxTipAge = args.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
return true;
}
@@ -1241,16 +1229,17 @@ bool AppInitLockDataDirectory()
return true;
}
-bool AppInitMain(const util::Ref& context, NodeContext& node)
+bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
{
+ const ArgsManager& args = *Assert(node.args);
const CChainParams& chainparams = Params();
// ********************************************************* Step 4a: application initialization
- if (!CreatePidFile()) {
+ if (!CreatePidFile(args)) {
// Detailed error printed inside CreatePidFile().
return false;
}
if (LogInstance().m_print_to_file) {
- if (gArgs.GetBoolArg("-shrinkdebugfile", LogInstance().DefaultShrinkDebugFile())) {
+ if (args.GetBoolArg("-shrinkdebugfile", LogInstance().DefaultShrinkDebugFile())) {
// Do this first since it both loads a bunch of debug.log into memory,
// and because this needs to happen before any other debug.log printing
LogInstance().ShrinkDebugFile();
@@ -1267,10 +1256,10 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
LogPrintf("Using data directory %s\n", GetDataDir().string());
// Only log conf file usage message if conf file actually exists.
- fs::path config_file_path = GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME));
+ fs::path config_file_path = GetConfigFile(args.GetArg("-conf", BITCOIN_CONF_FILENAME));
if (fs::exists(config_file_path)) {
LogPrintf("Config file: %s\n", config_file_path.string());
- } else if (gArgs.IsArgSet("-conf")) {
+ } else if (args.IsArgSet("-conf")) {
// Warn if no conf file exists at path provided by user
InitWarning(strprintf(_("The specified config file %s does not exist\n"), config_file_path.string()));
} else {
@@ -1279,23 +1268,23 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
}
// Log the config arguments to debug.log
- gArgs.LogArgs();
+ args.LogArgs();
LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD);
// Warn about relative -datadir path.
- if (gArgs.IsArgSet("-datadir") && !fs::path(gArgs.GetArg("-datadir", "")).is_absolute()) {
+ if (args.IsArgSet("-datadir") && !fs::path(args.GetArg("-datadir", "")).is_absolute()) {
LogPrintf("Warning: relative datadir option '%s' specified, which will be interpreted relative to the " /* Continued */
"current working directory '%s'. This is fragile, because if bitcoin is started in the future "
"from a different location, it will be unable to locate the current data files. There could "
"also be data loss if bitcoin is started while in a temporary directory.\n",
- gArgs.GetArg("-datadir", ""), fs::current_path().string());
+ args.GetArg("-datadir", ""), fs::current_path().string());
}
InitSignatureCache();
InitScriptExecutionCache();
- int script_threads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
+ int script_threads = args.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
if (script_threads <= 0) {
// -par=0 means autodetect (number of cores - 1 script threads)
// -par=-n means "leave n cores free" (number of cores - n - 1 script threads)
@@ -1351,8 +1340,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
* that the server is there and will be ready later). Warmup mode will
* be disabled when initialisation is finished.
*/
- if (gArgs.GetBoolArg("-server", false))
- {
+ if (args.GetBoolArg("-server", false)) {
uiInterface.InitMessage_connect(SetRPCWarmupStatus);
if (!AppInitServers(context, node))
return InitError(_("Unable to start HTTP server. See debug log for details."));
@@ -1372,9 +1360,9 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
// need to reindex later.
assert(!node.banman);
- node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", &uiInterface, gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
+ node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", &uiInterface, args.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
assert(!node.connman);
- node.connman = MakeUnique<CConnman>(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()), gArgs.GetBoolArg("-networkactive", true));
+ node.connman = MakeUnique<CConnman>(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()), args.GetBoolArg("-networkactive", true));
// Make mempool generally available in the node context. For example the connection manager, wallet, or RPC threads,
// which are all started after this, may use it from the node context.
assert(!node.mempool);
@@ -1383,12 +1371,12 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
node.chainman = &g_chainman;
ChainstateManager& chainman = *Assert(node.chainman);
- node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), *node.scheduler, chainman, *node.mempool));
+ node.peer_logic.reset(new PeerLogicValidation(*node.connman, node.banman.get(), *node.scheduler, chainman, *node.mempool));
RegisterValidationInterface(node.peer_logic.get());
// sanitize comments per BIP-0014, format user agent and check total size
std::vector<std::string> uacomments;
- for (const std::string& cmt : gArgs.GetArgs("-uacomment")) {
+ for (const std::string& cmt : args.GetArgs("-uacomment")) {
if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
uacomments.push_back(cmt);
@@ -1399,9 +1387,9 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
strSubVersion.size(), MAX_SUBVERSION_LENGTH));
}
- if (gArgs.IsArgSet("-onlynet")) {
+ if (args.IsArgSet("-onlynet")) {
std::set<enum Network> nets;
- for (const std::string& snet : gArgs.GetArgs("-onlynet")) {
+ for (const std::string& snet : args.GetArgs("-onlynet")) {
enum Network net = ParseNetwork(snet);
if (net == NET_UNROUTABLE)
return InitError(strprintf(_("Unknown network specified in -onlynet: '%s'"), snet));
@@ -1415,12 +1403,12 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
}
// Check for host lookup allowed before parsing any network related parameters
- fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
+ fNameLookup = args.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
- bool proxyRandomize = gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
+ bool proxyRandomize = args.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
// -proxy sets a proxy for all outgoing network traffic
// -noproxy (or -proxy=0) as well as the empty string can be used to not set a proxy, this is the default
- std::string proxyArg = gArgs.GetArg("-proxy", "");
+ std::string proxyArg = args.GetArg("-proxy", "");
SetReachable(NET_ONION, false);
if (proxyArg != "" && proxyArg != "0") {
CService proxyAddr;
@@ -1442,7 +1430,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
// -onion can be used to set only a proxy for .onion, or override normal proxy for .onion addresses
// -noonion (or -onion=0) disables connecting to .onion entirely
// An empty string is used to not override the onion proxy (in which case it defaults to -proxy set above, or none)
- std::string onionArg = gArgs.GetArg("-onion", "");
+ std::string onionArg = args.GetArg("-onion", "");
if (onionArg != "") {
if (onionArg == "0") { // Handle -noonion/-onion=0
SetReachable(NET_ONION, false);
@@ -1460,11 +1448,11 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
}
// see Step 2: parameter interactions for more information about these
- fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN);
- fDiscover = gArgs.GetBoolArg("-discover", true);
- g_relay_txes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
+ fListen = args.GetBoolArg("-listen", DEFAULT_LISTEN);
+ fDiscover = args.GetBoolArg("-discover", true);
+ g_relay_txes = !args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
- for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
+ for (const std::string& strAddr : args.GetArgs("-externalip")) {
CService addrLocal;
if (Lookup(strAddr, addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
AddLocal(addrLocal, LOCAL_MANUAL);
@@ -1473,8 +1461,8 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
}
// Read asmap file if configured
- if (gArgs.IsArgSet("-asmap")) {
- fs::path asmap_path = fs::path(gArgs.GetArg("-asmap", ""));
+ if (args.IsArgSet("-asmap")) {
+ fs::path asmap_path = fs::path(args.GetArg("-asmap", ""));
if (asmap_path.empty()) {
asmap_path = DEFAULT_ASMAP_FILENAME;
}
@@ -1507,22 +1495,22 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
uint64_t nMaxOutboundLimit = 0; //unlimited unless -maxuploadtarget is set
uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME;
- if (gArgs.IsArgSet("-maxuploadtarget")) {
- nMaxOutboundLimit = gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET)*1024*1024;
+ if (args.IsArgSet("-maxuploadtarget")) {
+ nMaxOutboundLimit = args.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 * 1024;
}
// ********************************************************* Step 7: load block chain
- fReindex = gArgs.GetBoolArg("-reindex", false);
- bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false);
+ fReindex = args.GetBoolArg("-reindex", false);
+ bool fReindexChainState = args.GetBoolArg("-reindex-chainstate", false);
// cache size calculations
- int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20);
+ int64_t nTotalCache = (args.GetArg("-dbcache", nDefaultDbCache) << 20);
nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); // total cache cannot be greater than nMaxDbcache
int64_t nBlockTreeDBCache = std::min(nTotalCache / 8, nMaxBlockDBCache << 20);
nTotalCache -= nBlockTreeDBCache;
- int64_t nTxIndexCache = std::min(nTotalCache / 8, gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX) ? nMaxTxIndexCache << 20 : 0);
+ int64_t nTxIndexCache = std::min(nTotalCache / 8, args.GetBoolArg("-txindex", DEFAULT_TXINDEX) ? nMaxTxIndexCache << 20 : 0);
nTotalCache -= nTxIndexCache;
int64_t filter_index_cache = 0;
if (!g_enabled_filter_types.empty()) {
@@ -1534,11 +1522,11 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // use 25%-50% of the remainder for disk cache
nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); // cap total coins db cache
nTotalCache -= nCoinDBCache;
- nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache
- int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
+ int64_t nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache
+ int64_t nMempoolSizeMax = args.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
LogPrintf("Cache configuration:\n");
LogPrintf("* Using %.1f MiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024));
- if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
+ if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
LogPrintf("* Using %.1f MiB for transaction index database\n", nTxIndexCache * (1.0 / 1024 / 1024));
}
for (BlockFilterType filter_type : g_enabled_filter_types) {
@@ -1563,7 +1551,10 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
try {
LOCK(cs_main);
chainman.InitializeChainstate();
- UnloadBlockIndex();
+ chainman.m_total_coinstip_cache = nCoinCacheUsage;
+ chainman.m_total_coinsdb_cache = nCoinDBCache;
+
+ UnloadBlockIndex(node.mempool);
// new CBlockTreeDB tries to delete the existing file, which
// fails if it's still open from the previous loop. Close it first:
@@ -1646,7 +1637,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
}
// The on-disk coinsdb is now in a good state, create the cache
- chainstate->InitCoinsCache();
+ chainstate->InitCoinsCache(nCoinCacheUsage);
assert(chainstate->CanFlushToDisk());
if (!is_coinsview_empty(chainstate)) {
@@ -1700,7 +1691,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
for (CChainState* chainstate : chainman.GetAll()) {
if (!is_coinsview_empty(chainstate)) {
uiInterface.InitMessage(_("Verifying blocks...").translated);
- if (fHavePruned && gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) {
+ if (fHavePruned && args.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) {
LogPrintf("Prune: pruned datadir may not have more than %d blocks; only checking available blocks\n",
MIN_BLOCKS_TO_KEEP);
}
@@ -1718,10 +1709,10 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
// Only verify the DB of the active chainstate. This is fixed in later
// work when we allow VerifyDB to be parameterized by chainstate.
if (&::ChainstateActive() == chainstate &&
- !CVerifyDB().VerifyDB(
+ !CVerifyDB().VerifyDB(
chainparams, &chainstate->CoinsDB(),
- gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
- gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) {
+ args.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
+ args.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) {
strLoadError = _("Corrupted block database detected");
failed_verification = true;
break;
@@ -1777,7 +1768,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
fFeeEstimatesInitialized = true;
// ********************************************************* Step 8: start indexers
- if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
+ if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
g_txindex = MakeUnique<TxIndex>(nTxIndexCache, false, fReindex);
g_txindex->Start();
}
@@ -1837,16 +1828,31 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
}
#if HAVE_SYSTEM
- if (gArgs.IsArgSet("-blocknotify"))
+ if (args.IsArgSet("-blocknotify")) {
+ const std::string block_notify = args.GetArg("-blocknotify", "");
+ const auto BlockNotifyCallback = [block_notify](SynchronizationState sync_state, const CBlockIndex* pBlockIndex) {
+ if (sync_state != SynchronizationState::POST_INIT || !pBlockIndex)
+ return;
+
+ std::string strCmd = block_notify;
+ if (!strCmd.empty()) {
+ boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex());
+ std::thread t(runCommand, strCmd);
+ t.detach(); // thread runs free
+ }
+ };
uiInterface.NotifyBlockTip_connect(BlockNotifyCallback);
+ }
#endif
std::vector<fs::path> vImportFiles;
- for (const std::string& strFile : gArgs.GetArgs("-loadblock")) {
+ for (const std::string& strFile : args.GetArgs("-loadblock")) {
vImportFiles.push_back(strFile);
}
- g_load_block = std::thread(&TraceThread<std::function<void()>>, "loadblk", [=, &chainman]{ ThreadImport(chainman, vImportFiles); });
+ g_load_block = std::thread(&TraceThread<std::function<void()>>, "loadblk", [=, &chainman, &args] {
+ ThreadImport(chainman, vImportFiles, args);
+ });
// Wait for genesis block to be processed
{
@@ -1873,16 +1879,25 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
LOCK(cs_main);
LogPrintf("block tree size = %u\n", chainman.BlockIndex().size());
chain_active_height = chainman.ActiveChain().Height();
+ if (tip_info) {
+ tip_info->block_height = chain_active_height;
+ tip_info->block_time = chainman.ActiveChain().Tip() ? chainman.ActiveChain().Tip()->GetBlockTime() : Params().GenesisBlock().GetBlockTime();
+ tip_info->verification_progress = GuessVerificationProgress(Params().TxData(), chainman.ActiveChain().Tip());
+ }
+ if (tip_info && ::pindexBestHeader) {
+ tip_info->header_height = ::pindexBestHeader->nHeight;
+ tip_info->header_time = ::pindexBestHeader->GetBlockTime();
+ }
}
LogPrintf("nBestHeight = %d\n", chain_active_height);
- if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION))
+ if (args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION))
StartTorControl();
Discover();
// Map ports with UPnP
- if (gArgs.GetBoolArg("-upnp", DEFAULT_UPNP)) {
+ if (args.GetBoolArg("-upnp", DEFAULT_UPNP)) {
StartMapPort();
}
@@ -1897,41 +1912,41 @@ bool AppInitMain(const util::Ref& context, NodeContext& node)
connOptions.uiInterface = &uiInterface;
connOptions.m_banman = node.banman.get();
connOptions.m_msgproc = node.peer_logic.get();
- connOptions.nSendBufferMaxSize = 1000*gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
- connOptions.nReceiveFloodSize = 1000*gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
- connOptions.m_added_nodes = gArgs.GetArgs("-addnode");
+ connOptions.nSendBufferMaxSize = 1000 * args.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
+ connOptions.nReceiveFloodSize = 1000 * args.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
+ connOptions.m_added_nodes = args.GetArgs("-addnode");
connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe;
connOptions.nMaxOutboundLimit = nMaxOutboundLimit;
connOptions.m_peer_connect_timeout = peer_connect_timeout;
- for (const std::string& strBind : gArgs.GetArgs("-bind")) {
+ for (const std::string& strBind : args.GetArgs("-bind")) {
CService addrBind;
if (!Lookup(strBind, addrBind, GetListenPort(), false)) {
return InitError(ResolveErrMsg("bind", strBind));
}
connOptions.vBinds.push_back(addrBind);
}
- for (const std::string& strBind : gArgs.GetArgs("-whitebind")) {
+ for (const std::string& strBind : args.GetArgs("-whitebind")) {
NetWhitebindPermissions whitebind;
bilingual_str error;
if (!NetWhitebindPermissions::TryParse(strBind, whitebind, error)) return InitError(error);
connOptions.vWhiteBinds.push_back(whitebind);
}
- for (const auto& net : gArgs.GetArgs("-whitelist")) {
+ for (const auto& net : args.GetArgs("-whitelist")) {
NetWhitelistPermissions subnet;
bilingual_str error;
if (!NetWhitelistPermissions::TryParse(net, subnet, error)) return InitError(error);
connOptions.vWhitelistedRange.push_back(subnet);
}
- connOptions.vSeedNodes = gArgs.GetArgs("-seednode");
+ connOptions.vSeedNodes = args.GetArgs("-seednode");
// Initiate outbound connections unless connect=0
- connOptions.m_use_addrman_outgoing = !gArgs.IsArgSet("-connect");
+ connOptions.m_use_addrman_outgoing = !args.IsArgSet("-connect");
if (!connOptions.m_use_addrman_outgoing) {
- const auto connect = gArgs.GetArgs("-connect");
+ const auto connect = args.GetArgs("-connect");
if (connect.size() != 1 || connect[0] != "0") {
connOptions.m_specified_outgoing = connect;
}
diff --git a/src/init.h b/src/init.h
index 33fe96e8ea..ce12a80dc7 100644
--- a/src/init.h
+++ b/src/init.h
@@ -8,9 +8,12 @@
#include <memory>
#include <string>
-#include <util/system.h>
+class ArgsManager;
struct NodeContext;
+namespace interfaces {
+struct BlockAndHeaderTipInfo;
+}
namespace boost {
class thread_group;
} // namespace boost
@@ -22,21 +25,21 @@ class Ref;
void Interrupt(NodeContext& node);
void Shutdown(NodeContext& node);
//!Initialize the logging infrastructure
-void InitLogging();
+void InitLogging(const ArgsManager& args);
//!Parameter interaction: change current parameters depending on various rules
-void InitParameterInteraction();
+void InitParameterInteraction(ArgsManager& args);
/** Initialize bitcoin core: Basic context setup.
* @note This can be done before daemonization. Do not call Shutdown() if this function fails.
* @pre Parameters should be parsed and config file should be read.
*/
-bool AppInitBasicSetup();
+bool AppInitBasicSetup(ArgsManager& args);
/**
* Initialization: parameter interaction.
* @note This can be done before daemonization. Do not call Shutdown() if this function fails.
* @pre Parameters should be parsed and config file should be read, AppInitBasicSetup should have been called.
*/
-bool AppInitParameterInteraction();
+bool AppInitParameterInteraction(const ArgsManager& args);
/**
* Initialization sanity checks: ecc init, sanity checks, dir lock.
* @note This can be done before daemonization. Do not call Shutdown() if this function fails.
@@ -54,7 +57,7 @@ bool AppInitLockDataDirectory();
* @note This should only be done after daemonization. Call Shutdown() if this function fails.
* @pre Parameters should be parsed and config file should be read, AppInitLockDataDirectory should have been called.
*/
-bool AppInitMain(const util::Ref& context, NodeContext& node);
+bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info = nullptr);
/**
* Register all arguments with the ArgsManager
diff --git a/src/interfaces/chain.cpp b/src/interfaces/chain.cpp
index d49e4454af..313c1265de 100644
--- a/src/interfaces/chain.cpp
+++ b/src/interfaces/chain.cpp
@@ -372,6 +372,27 @@ public:
RPCRunLater(name, std::move(fn), seconds);
}
int rpcSerializationFlags() override { return RPCSerializationFlags(); }
+ util::SettingsValue getRwSetting(const std::string& name) override
+ {
+ util::SettingsValue result;
+ gArgs.LockSettings([&](const util::Settings& settings) {
+ if (const util::SettingsValue* value = util::FindKey(settings.rw_settings, name)) {
+ result = *value;
+ }
+ });
+ return result;
+ }
+ bool updateRwSetting(const std::string& name, const util::SettingsValue& value) override
+ {
+ gArgs.LockSettings([&](util::Settings& settings) {
+ if (value.isNull()) {
+ settings.rw_settings.erase(name);
+ } else {
+ settings.rw_settings[name] = value;
+ }
+ });
+ return gArgs.WriteSettingsFile();
+ }
void requestMempoolTransactions(Notifications& notifications) override
{
LOCK2(::cs_main, ::mempool.cs);
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index bbeb0fa801..053d40335f 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -7,6 +7,7 @@
#include <optional.h> // For Optional and nullopt
#include <primitives/transaction.h> // For CTransactionRef
+#include <util/settings.h> // For util::SettingsValue
#include <functional>
#include <memory>
@@ -269,6 +270,12 @@ public:
//! Current RPC serialization flags.
virtual int rpcSerializationFlags() = 0;
+ //! Return <datadir>/settings.json setting value.
+ virtual util::SettingsValue getRwSetting(const std::string& name) = 0;
+
+ //! Write a setting to <datadir>/settings.json.
+ virtual bool updateRwSetting(const std::string& name, const util::SettingsValue& value) = 0;
+
//! Synchronously send transactionAddedToMempool notifications about all
//! current mempool transactions to the specified handler and return after
//! the last one is sent. These notifications aren't coordinated with async
diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp
index 33f0dac263..206262eb03 100644
--- a/src/interfaces/node.cpp
+++ b/src/interfaces/node.cpp
@@ -56,38 +56,25 @@ namespace {
class NodeImpl : public Node
{
public:
- void initError(const bilingual_str& message) override { InitError(message); }
- bool parseParameters(int argc, const char* const argv[], std::string& error) override
- {
- return gArgs.ParseParameters(argc, argv, error);
- }
- bool readConfigFiles(std::string& error) override { return gArgs.ReadConfigFiles(error, true); }
- void forceSetArg(const std::string& arg, const std::string& value) override { gArgs.ForceSetArg(arg, value); }
- bool softSetArg(const std::string& arg, const std::string& value) override { return gArgs.SoftSetArg(arg, value); }
- bool softSetBoolArg(const std::string& arg, bool value) override { return gArgs.SoftSetBoolArg(arg, value); }
- void selectParams(const std::string& network) override { SelectParams(network); }
- bool initSettings(std::string& error) override { return gArgs.InitSettings(error); }
- uint64_t getAssumedBlockchainSize() override { return Params().AssumedBlockchainSize(); }
- uint64_t getAssumedChainStateSize() override { return Params().AssumedChainStateSize(); }
- std::string getNetwork() override { return Params().NetworkIDString(); }
- void initLogging() override { InitLogging(); }
- void initParameterInteraction() override { InitParameterInteraction(); }
+ NodeImpl(NodeContext* context) { setContext(context); }
+ void initLogging() override { InitLogging(*Assert(m_context->args)); }
+ void initParameterInteraction() override { InitParameterInteraction(*Assert(m_context->args)); }
bilingual_str getWarnings() override { return GetWarnings(true); }
uint32_t getLogCategories() override { return LogInstance().GetCategoryMask(); }
bool baseInitialize() override
{
- return AppInitBasicSetup() && AppInitParameterInteraction() && AppInitSanityChecks() &&
+ return AppInitBasicSetup(gArgs) && AppInitParameterInteraction(gArgs) && AppInitSanityChecks() &&
AppInitLockDataDirectory();
}
- bool appInitMain() override
+ bool appInitMain(interfaces::BlockAndHeaderTipInfo* tip_info) override
{
- m_context.chain = MakeChain(m_context);
- return AppInitMain(m_context_ref, m_context);
+ m_context->chain = MakeChain(*m_context);
+ return AppInitMain(m_context_ref, *m_context, tip_info);
}
void appShutdown() override
{
- Interrupt(m_context);
- Shutdown(m_context);
+ Interrupt(*m_context);
+ Shutdown(*m_context);
}
void startShutdown() override
{
@@ -108,19 +95,18 @@ public:
StopMapPort();
}
}
- void setupServerArgs() override { return SetupServerArgs(m_context); }
bool getProxy(Network net, proxyType& proxy_info) override { return GetProxy(net, proxy_info); }
size_t getNodeCount(CConnman::NumConnections flags) override
{
- return m_context.connman ? m_context.connman->GetNodeCount(flags) : 0;
+ return m_context->connman ? m_context->connman->GetNodeCount(flags) : 0;
}
bool getNodesStats(NodesStats& stats) override
{
stats.clear();
- if (m_context.connman) {
+ if (m_context->connman) {
std::vector<CNodeStats> stats_temp;
- m_context.connman->GetNodeStats(stats_temp);
+ m_context->connman->GetNodeStats(stats_temp);
stats.reserve(stats_temp.size());
for (auto& node_stats_temp : stats_temp) {
@@ -141,46 +127,46 @@ public:
}
bool getBanned(banmap_t& banmap) override
{
- if (m_context.banman) {
- m_context.banman->GetBanned(banmap);
+ if (m_context->banman) {
+ m_context->banman->GetBanned(banmap);
return true;
}
return false;
}
bool ban(const CNetAddr& net_addr, int64_t ban_time_offset) override
{
- if (m_context.banman) {
- m_context.banman->Ban(net_addr, ban_time_offset);
+ if (m_context->banman) {
+ m_context->banman->Ban(net_addr, ban_time_offset);
return true;
}
return false;
}
bool unban(const CSubNet& ip) override
{
- if (m_context.banman) {
- m_context.banman->Unban(ip);
+ if (m_context->banman) {
+ m_context->banman->Unban(ip);
return true;
}
return false;
}
bool disconnectByAddress(const CNetAddr& net_addr) override
{
- if (m_context.connman) {
- return m_context.connman->DisconnectNode(net_addr);
+ if (m_context->connman) {
+ return m_context->connman->DisconnectNode(net_addr);
}
return false;
}
bool disconnectById(NodeId id) override
{
- if (m_context.connman) {
- return m_context.connman->DisconnectNode(id);
+ if (m_context->connman) {
+ return m_context->connman->DisconnectNode(id);
}
return false;
}
- int64_t getTotalBytesRecv() override { return m_context.connman ? m_context.connman->GetTotalBytesRecv() : 0; }
- int64_t getTotalBytesSent() override { return m_context.connman ? m_context.connman->GetTotalBytesSent() : 0; }
- size_t getMempoolSize() override { return m_context.mempool ? m_context.mempool->size() : 0; }
- size_t getMempoolDynamicUsage() override { return m_context.mempool ? m_context.mempool->DynamicMemoryUsage() : 0; }
+ int64_t getTotalBytesRecv() override { return m_context->connman ? m_context->connman->GetTotalBytesRecv() : 0; }
+ int64_t getTotalBytesSent() override { return m_context->connman ? m_context->connman->GetTotalBytesSent() : 0; }
+ size_t getMempoolSize() override { return m_context->mempool ? m_context->mempool->size() : 0; }
+ size_t getMempoolDynamicUsage() override { return m_context->mempool ? m_context->mempool->DynamicMemoryUsage() : 0; }
bool getHeaderTip(int& height, int64_t& block_time) override
{
LOCK(::cs_main);
@@ -223,11 +209,11 @@ public:
bool getImporting() override { return ::fImporting; }
void setNetworkActive(bool active) override
{
- if (m_context.connman) {
- m_context.connman->SetNetworkActive(active);
+ if (m_context->connman) {
+ m_context->connman->SetNetworkActive(active);
}
}
- bool getNetworkActive() override { return m_context.connman && m_context.connman->GetNetworkActive(); }
+ bool getNetworkActive() override { return m_context->connman && m_context->connman->GetNetworkActive(); }
CFeeRate estimateSmartFee(int num_blocks, bool conservative, int* returned_target = nullptr) override
{
FeeCalculation fee_calc;
@@ -269,7 +255,7 @@ public:
std::vector<std::unique_ptr<Wallet>> getWallets() override
{
std::vector<std::unique_ptr<Wallet>> wallets;
- for (auto& client : m_context.chain_clients) {
+ for (auto& client : m_context->chain_clients) {
auto client_wallets = client->getWallets();
std::move(client_wallets.begin(), client_wallets.end(), std::back_inserter(wallets));
}
@@ -277,12 +263,12 @@ public:
}
std::unique_ptr<Wallet> loadWallet(const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings) override
{
- return MakeWallet(LoadWallet(*m_context.chain, name, error, warnings));
+ return MakeWallet(LoadWallet(*m_context->chain, name, error, warnings));
}
std::unique_ptr<Wallet> createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings, WalletCreationStatus& status) override
{
std::shared_ptr<CWallet> wallet;
- status = CreateWallet(*m_context.chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
+ status = CreateWallet(*m_context->chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
return MakeWallet(wallet);
}
std::unique_ptr<Handler> handleInitMessage(InitMessageFn fn) override
@@ -336,13 +322,22 @@ public:
/* verification progress is unused when a header was received */ 0);
}));
}
- NodeContext* context() override { return &m_context; }
- NodeContext m_context;
- util::Ref m_context_ref{m_context};
+ NodeContext* context() override { return m_context; }
+ void setContext(NodeContext* context) override
+ {
+ m_context = context;
+ if (context) {
+ m_context_ref.Set(*context);
+ } else {
+ m_context_ref.Clear();
+ }
+ }
+ NodeContext* m_context{nullptr};
+ util::Ref m_context_ref;
};
} // namespace
-std::unique_ptr<Node> MakeNode() { return MakeUnique<NodeImpl>(); }
+std::unique_ptr<Node> MakeNode(NodeContext* context) { return MakeUnique<NodeImpl>(context); }
} // namespace interfaces
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index a9680c42b5..0cff7ae3a1 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -39,47 +39,22 @@ class Handler;
class Wallet;
struct BlockTip;
+//! Block and header tip information
+struct BlockAndHeaderTipInfo
+{
+ int block_height;
+ int64_t block_time;
+ int header_height;
+ int64_t header_time;
+ double verification_progress;
+};
+
//! Top-level interface for a bitcoin node (bitcoind process).
class Node
{
public:
virtual ~Node() {}
- //! Send init error.
- virtual void initError(const bilingual_str& message) = 0;
-
- //! Set command line arguments.
- virtual bool parseParameters(int argc, const char* const argv[], std::string& error) = 0;
-
- //! Set a command line argument
- virtual void forceSetArg(const std::string& arg, const std::string& value) = 0;
-
- //! Set a command line argument if it doesn't already have a value
- virtual bool softSetArg(const std::string& arg, const std::string& value) = 0;
-
- //! Set a command line boolean argument if it doesn't already have a value
- virtual bool softSetBoolArg(const std::string& arg, bool value) = 0;
-
- //! Load settings from configuration file.
- virtual bool readConfigFiles(std::string& error) = 0;
-
- //! Choose network parameters.
- virtual void selectParams(const std::string& network) = 0;
-
- //! Read and update <datadir>/settings.json file with saved settings. This
- //! needs to be called after selectParams() because the settings file
- //! location is network-specific.
- virtual bool initSettings(std::string& error) = 0;
-
- //! Get the (assumed) blockchain size.
- virtual uint64_t getAssumedBlockchainSize() = 0;
-
- //! Get the (assumed) chain state size.
- virtual uint64_t getAssumedChainStateSize() = 0;
-
- //! Get network name.
- virtual std::string getNetwork() = 0;
-
//! Init logging.
virtual void initLogging() = 0;
@@ -96,7 +71,7 @@ public:
virtual bool baseInitialize() = 0;
//! Start node.
- virtual bool appInitMain() = 0;
+ virtual bool appInitMain(interfaces::BlockAndHeaderTipInfo* tip_info = nullptr) = 0;
//! Stop node.
virtual void appShutdown() = 0;
@@ -107,9 +82,6 @@ public:
//! Return whether shutdown was requested.
virtual bool shutdownRequested() = 0;
- //! Setup arguments
- virtual void setupServerArgs() = 0;
-
//! Map port.
virtual void mapPort(bool use_upnp) = 0;
@@ -268,12 +240,14 @@ public:
std::function<void(SynchronizationState, interfaces::BlockTip tip, double verification_progress)>;
virtual std::unique_ptr<Handler> handleNotifyHeaderTip(NotifyHeaderTipFn fn) = 0;
- //! Return pointer to internal chain interface, useful for testing.
+ //! Get and set internal node context. Useful for testing, but not
+ //! accessible across processes.
virtual NodeContext* context() { return nullptr; }
+ virtual void setContext(NodeContext* context) { }
};
//! Return implementation of Node interface.
-std::unique_ptr<Node> MakeNode();
+std::unique_ptr<Node> MakeNode(NodeContext* context = nullptr);
//! Block tip (could be a header or not, depends on the subscribed signal).
struct BlockTip {
diff --git a/src/interfaces/wallet.cpp b/src/interfaces/wallet.cpp
index 7fd24425cf..937e602fb0 100644
--- a/src/interfaces/wallet.cpp
+++ b/src/interfaces/wallet.cpp
@@ -37,6 +37,7 @@ namespace {
//! Construct wallet tx struct.
WalletTx MakeWalletTx(CWallet& wallet, const CWalletTx& wtx)
{
+ LOCK(wallet.cs_wallet);
WalletTx result;
result.tx = wtx.tx;
result.txin_is_mine.reserve(wtx.tx->vin.size());
@@ -132,7 +133,11 @@ public:
{
return m_wallet->SignMessage(message, pkhash, str_sig);
}
- bool isSpendable(const CTxDestination& dest) override { return m_wallet->IsMine(dest) & ISMINE_SPENDABLE; }
+ bool isSpendable(const CTxDestination& dest) override
+ {
+ LOCK(m_wallet->cs_wallet);
+ return m_wallet->IsMine(dest) & ISMINE_SPENDABLE;
+ }
bool haveWatchOnly() override
{
auto spk_man = m_wallet->GetLegacyScriptPubKeyMan();
diff --git a/src/key.cpp b/src/key.cpp
index 7eecc6e083..4ed74a39b1 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -237,7 +237,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const {
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd, sizeof(rnd));
uint256 hash;
- CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin());
+ CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
std::vector<unsigned char> vchSig;
Sign(hash, vchSig);
return pubkey.Verify(hash, vchSig);
diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp
index 8072b12119..b571d463c9 100644
--- a/src/merkleblock.cpp
+++ b/src/merkleblock.cpp
@@ -70,7 +70,7 @@ uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::ve
else
right = left;
// combine subhashes
- return Hash(left.begin(), left.end(), right.begin(), right.end());
+ return Hash(left, right);
}
}
@@ -126,7 +126,7 @@ uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, uns
right = left;
}
// and combine them before returning
- return Hash(left.begin(), left.end(), right.begin(), right.end());
+ return Hash(left, right);
}
}
diff --git a/src/net.cpp b/src/net.cpp
index 0c56cddbdc..883e57bdf0 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -105,10 +105,10 @@ std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {};
std::string strSubVersion;
-void CConnman::AddOneShot(const std::string& strDest)
+void CConnman::AddAddrFetch(const std::string& strDest)
{
- LOCK(cs_vOneShots);
- vOneShots.push_back(strDest);
+ LOCK(m_addr_fetches_mutex);
+ m_addr_fetches.push_back(strDest);
}
uint16_t GetListenPort()
@@ -346,7 +346,7 @@ bool CConnman::CheckIncomingNonce(uint64_t nonce)
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce)
+ if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() && pnode->GetLocalNonce() == nonce)
return false;
}
return true;
@@ -368,8 +368,10 @@ static CAddress GetBindAddress(SOCKET sock)
return addr_bind;
}
-CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only)
+CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type)
{
+ assert(conn_type != ConnectionType::INBOUND);
+
if (pszDest == nullptr) {
if (IsLocal(addrConnect))
return nullptr;
@@ -432,7 +434,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
if (hSocket == INVALID_SOCKET) {
return nullptr;
}
- connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, manual_connection);
+ connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, conn_type == ConnectionType::MANUAL);
}
if (!proxyConnectionFailed) {
// If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to
@@ -459,7 +461,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
CAddress addr_bind = GetBindAddress(hSocket);
- CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", false, block_relay_only);
+ CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type);
pnode->AddRef();
// We're making a new connection, harvest entropy from the time (and our peer count)
@@ -528,6 +530,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
}
X(nLastSend);
X(nLastRecv);
+ X(nLastTXTime);
+ X(nLastBlockTime);
X(nTimeConnected);
X(nTimeOffset);
stats.addrName = GetAddrName();
@@ -536,8 +540,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
LOCK(cs_SubVer);
X(cleanSubVer);
}
- X(fInbound);
- X(m_manual_connection);
+ stats.fInbound = IsInboundConn();
+ stats.m_manual_connection = IsManualConn();
X(nStartingHeight);
{
LOCK(cs_vSend);
@@ -685,7 +689,7 @@ int V1TransportDeserializer::readData(const char *pch, unsigned int nBytes)
vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024));
}
- hasher.Write((const unsigned char*)pch, nCopy);
+ hasher.Write({(const unsigned char*)pch, nCopy});
memcpy(&vRecv[nDataPos], pch, nCopy);
nDataPos += nCopy;
@@ -696,7 +700,7 @@ const uint256& V1TransportDeserializer::GetMessageHash() const
{
assert(Complete());
if (data_hash.IsNull())
- hasher.Finalize(data_hash.begin());
+ hasher.Finalize(data_hash);
return data_hash;
}
@@ -722,8 +726,8 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta
if (!msg.m_valid_checksum) {
LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s\n",
SanitizeString(msg.m_command), msg.m_message_size,
- HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
- HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
+ HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)),
+ HexStr(hdr.pchChecksum));
}
// store receive time
@@ -736,7 +740,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta
void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) {
// create dbl-sha256 checksum
- uint256 hash = Hash(msg.data.begin(), msg.data.end());
+ uint256 hash = Hash(msg.data);
// create header
CMessageHeader hdr(Params().MessageStart(), msg.m_type.c_str(), msg.data.size());
@@ -872,7 +876,7 @@ bool CConnman::AttemptToEvictConnection()
for (const CNode* node : vNodes) {
if (node->HasPermission(PF_NOBAN))
continue;
- if (!node->fInbound)
+ if (!node->IsInboundConn())
continue;
if (node->fDisconnect)
continue;
@@ -983,7 +987,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (pnode->fInbound) nInbound++;
+ if (pnode->IsInboundConn()) nInbound++;
}
}
@@ -1048,7 +1052,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) {
nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM);
}
- CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", true);
+ CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND);
pnode->AddRef();
pnode->m_permissionFlags = permissionFlags;
// If this flag is present, the user probably expects that RPC and QT report it as whitelisted (backward compatibility)
@@ -1646,7 +1650,7 @@ void CConnman::ThreadDNSAddressSeed()
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- nRelevant += pnode->fSuccessfullyConnected && !pnode->fFeeler && !pnode->fOneShot && !pnode->m_manual_connection && !pnode->fInbound;
+ if (pnode->fSuccessfullyConnected && pnode->IsOutboundOrBlockRelayConn()) ++nRelevant;
}
}
if (nRelevant >= 2) {
@@ -1674,7 +1678,7 @@ void CConnman::ThreadDNSAddressSeed()
LogPrintf("Loading addresses from DNS seed %s\n", seed);
if (HaveNameProxy()) {
- AddOneShot(seed);
+ AddAddrFetch(seed);
} else {
std::vector<CNetAddr> vIPs;
std::vector<CAddress> vAdd;
@@ -1696,8 +1700,8 @@ void CConnman::ThreadDNSAddressSeed()
addrman.Add(vAdd, resolveSource);
} else {
// We now avoid directly using results from DNS Seeds which do not support service bit filtering,
- // instead using them as a oneshot to get nodes with our desired service bits.
- AddOneShot(seed);
+ // instead using them as an addrfetch to get nodes with our desired service bits.
+ AddAddrFetch(seed);
}
}
--seeds_right_now;
@@ -1705,17 +1709,6 @@ void CConnman::ThreadDNSAddressSeed()
LogPrintf("%d addresses found from DNS seeds\n", found);
}
-
-
-
-
-
-
-
-
-
-
-
void CConnman::DumpAddresses()
{
int64_t nStart = GetTimeMillis();
@@ -1727,20 +1720,20 @@ void CConnman::DumpAddresses()
addrman.size(), GetTimeMillis() - nStart);
}
-void CConnman::ProcessOneShot()
+void CConnman::ProcessAddrFetch()
{
std::string strDest;
{
- LOCK(cs_vOneShots);
- if (vOneShots.empty())
+ LOCK(m_addr_fetches_mutex);
+ if (m_addr_fetches.empty())
return;
- strDest = vOneShots.front();
- vOneShots.pop_front();
+ strDest = m_addr_fetches.front();
+ m_addr_fetches.pop_front();
}
CAddress addr;
CSemaphoreGrant grant(*semOutbound, true);
if (grant) {
- OpenNetworkConnection(addr, false, &grant, strDest.c_str(), true);
+ OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH);
}
}
@@ -1767,7 +1760,7 @@ int CConnman::GetExtraOutboundCount()
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (!pnode->fInbound && !pnode->m_manual_connection && !pnode->fFeeler && !pnode->fDisconnect && !pnode->fOneShot && pnode->fSuccessfullyConnected) {
+ if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) {
++nOutbound;
}
}
@@ -1782,11 +1775,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
{
for (int64_t nLoop = 0;; nLoop++)
{
- ProcessOneShot();
+ ProcessAddrFetch();
for (const std::string& strAddr : connect)
{
CAddress addr(CService(), NODE_NONE);
- OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), false, false, true);
+ OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL);
for (int i = 0; i < 10 && i < nLoop; i++)
{
if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
@@ -1805,7 +1798,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL);
while (!interruptNet)
{
- ProcessOneShot();
+ ProcessAddrFetch();
if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
return;
@@ -1838,21 +1831,27 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
int nOutboundFullRelay = 0;
int nOutboundBlockRelay = 0;
std::set<std::vector<unsigned char> > setConnected;
+
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (!pnode->fInbound && !pnode->m_manual_connection) {
- // Netgroups for inbound and addnode peers are not excluded because our goal here
- // is to not use multiple of our limited outbound slots on a single netgroup
- // but inbound and addnode peers do not use our outbound slots. Inbound peers
- // also have the added issue that they're attacker controlled and could be used
- // to prevent us from connecting to particular hosts if we used them here.
- setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
- if (pnode->m_tx_relay == nullptr) {
- nOutboundBlockRelay++;
- } else if (!pnode->fFeeler) {
- nOutboundFullRelay++;
- }
+ if (pnode->IsFullOutboundConn()) nOutboundFullRelay++;
+ if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++;
+
+ // Netgroups for inbound and manual peers are not excluded because our goal here
+ // is to not use multiple of our limited outbound slots on a single netgroup
+ // but inbound and manual peers do not use our outbound slots. Inbound peers
+ // also have the added issue that they could be attacker controlled and used
+ // to prevent us from connecting to particular hosts if we used them here.
+ switch(pnode->m_conn_type){
+ case ConnectionType::INBOUND:
+ case ConnectionType::MANUAL:
+ break;
+ case ConnectionType::OUTBOUND:
+ case ConnectionType::BLOCK_RELAY:
+ case ConnectionType::ADDR_FETCH:
+ case ConnectionType::FEELER:
+ setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
}
}
}
@@ -1945,14 +1944,24 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString());
}
- // Open this connection as block-relay-only if we're already at our
- // full-relay capacity, but not yet at our block-relay peer limit.
- // (It should not be possible for fFeeler to be set if we're not
- // also at our block-relay peer limit, but check against that as
- // well for sanity.)
- bool block_relay_only = nOutboundBlockRelay < m_max_outbound_block_relay && !fFeeler && nOutboundFullRelay >= m_max_outbound_full_relay;
+ ConnectionType conn_type;
+ // Determine what type of connection to open. If fFeeler is not
+ // set, open OUTBOUND connections until we meet our full-relay
+ // capacity. Then open BLOCK_RELAY connections until we hit our
+ // block-relay peer limit. Otherwise, default to opening an
+ // OUTBOUND connection.
+ if (fFeeler) {
+ conn_type = ConnectionType::FEELER;
+ } else if (nOutboundFullRelay < m_max_outbound_full_relay) {
+ conn_type = ConnectionType::OUTBOUND;
+ } else if (nOutboundBlockRelay < m_max_outbound_block_relay) {
+ conn_type = ConnectionType::BLOCK_RELAY;
+ } else {
+ // GetTryNewOutboundPeer() is true
+ conn_type = ConnectionType::OUTBOUND;
+ }
- OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, false, fFeeler, false, block_relay_only);
+ OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, conn_type);
}
}
}
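The conn_type selection above reads naturally as a small pure function. The sketch below restates it with hypothetical names purely for illustration (it is not part of the patch, and it assumes the ConnectionType enum added to src/net.h later in this diff): feelers take priority, then full-relay slots are filled, then block-relay slots, and the final branch, reachable only when GetTryNewOutboundPeer() permitted an extra attempt, opens one more OUTBOUND connection.

    // Hypothetical restatement of the selection logic above; illustration only.
    ConnectionType SelectAutomaticConnType(bool feeler,
                                           int full_relay, int max_full_relay,
                                           int block_relay, int max_block_relay)
    {
        if (feeler) return ConnectionType::FEELER;
        if (full_relay < max_full_relay) return ConnectionType::OUTBOUND;
        if (block_relay < max_block_relay) return ConnectionType::BLOCK_RELAY;
        // Only reached when GetTryNewOutboundPeer() allowed an extra outbound peer.
        return ConnectionType::OUTBOUND;
    }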
@@ -1976,11 +1985,11 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo()
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
if (pnode->addr.IsValid()) {
- mapConnected[pnode->addr] = pnode->fInbound;
+ mapConnected[pnode->addr] = pnode->IsInboundConn();
}
std::string addrName = pnode->GetAddrName();
if (!addrName.empty()) {
- mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->fInbound, static_cast<const CService&>(pnode->addr));
+ mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast<const CService&>(pnode->addr));
}
}
}
@@ -2027,7 +2036,7 @@ void CConnman::ThreadOpenAddedConnections()
}
tried = true;
CAddress addr(CService(), NODE_NONE);
- OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), false, false, true);
+ OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL);
if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
return;
}
@@ -2039,8 +2048,10 @@ void CConnman::ThreadOpenAddedConnections()
}
// if successful, this moves the passed grant to the constructed node
-void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool manual_connection, bool block_relay_only)
+void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type)
{
+ assert(conn_type != ConnectionType::INBOUND);
+
//
// Initiate outbound network connection
//
@@ -2058,18 +2069,12 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai
} else if (FindNode(std::string(pszDest)))
return;
- CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, manual_connection, block_relay_only);
+ CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type);
if (!pnode)
return;
if (grantOutbound)
grantOutbound->MoveTo(pnode->grantOutbound);
- if (fOneShot)
- pnode->fOneShot = true;
- if (fFeeler)
- pnode->fFeeler = true;
- if (manual_connection)
- pnode->m_manual_connection = true;
m_msgproc->InitializeNode(pnode);
{
@@ -2127,11 +2132,6 @@ void CConnman::ThreadMessageHandler()
}
}
-
-
-
-
-
bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, NetPermissionFlags permissions)
{
int nOne = 1;
@@ -2337,7 +2337,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
}
for (const auto& strDest : connOptions.vSeedNodes) {
- AddOneShot(strDest);
+ AddAddrFetch(strDest);
}
if (clientInterface) {
@@ -2390,7 +2390,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
else
threadDNSAddressSeed = std::thread(&TraceThread<std::function<void()> >, "dnsseed", std::function<void()>(std::bind(&CConnman::ThreadDNSAddressSeed, this)));
- // Initiate outbound connections from -addnode
+ // Initiate manual connections
threadOpenAddedConnections = std::thread(&TraceThread<std::function<void()> >, "addcon", std::function<void()>(std::bind(&CConnman::ThreadOpenAddedConnections, this)));
if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) {
@@ -2523,14 +2523,31 @@ void CConnman::MarkAddressGood(const CAddress& addr)
addrman.Good(addr);
}
-void CConnman::AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty)
+bool CConnman::AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty)
{
- addrman.Add(vAddr, addrFrom, nTimePenalty);
+ return addrman.Add(vAddr, addrFrom, nTimePenalty);
}
-std::vector<CAddress> CConnman::GetAddresses()
+std::vector<CAddress> CConnman::GetAddresses(size_t max_addresses, size_t max_pct)
{
- return addrman.GetAddr();
+ std::vector<CAddress> addresses = addrman.GetAddr(max_addresses, max_pct);
+ if (m_banman) {
+ addresses.erase(std::remove_if(addresses.begin(), addresses.end(),
+ [this](const CAddress& addr){return m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr);}),
+ addresses.end());
+ }
+ return addresses;
+}
+
+std::vector<CAddress> CConnman::GetAddresses(Network requestor_network, size_t max_addresses, size_t max_pct)
+{
+ const auto current_time = GetTime<std::chrono::microseconds>();
+ if (m_addr_response_caches.find(requestor_network) == m_addr_response_caches.end() ||
+ m_addr_response_caches[requestor_network].m_update_addr_response < current_time) {
+ m_addr_response_caches[requestor_network].m_addrs_response_cache = GetAddresses(max_addresses, max_pct);
+ m_addr_response_caches[requestor_network].m_update_addr_response = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6));
+ }
+ return m_addr_response_caches[requestor_network].m_addrs_response_cache;
}
bool CConnman::AddNode(const std::string& strNode)
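A quick sketch of which GetAddresses overload a caller would pick under this change (illustration only; the helper names are hypothetical, it assumes src/net.h is included, and it reuses the MAX_ADDR_TO_SEND and MAX_PCT_ADDR_TO_SEND constants introduced elsewhere in this patch). Trusted callers bypass the cache; the per-network overload serves untrusted GETADDR traffic and refreshes its cache roughly every 21 to 27 hours.

    // Illustration only; helper names are hypothetical.
    std::vector<CAddress> AddrsForTrustedCaller(CConnman& connman)
    {
        // e.g. RPC: always fresh, no cache.
        return connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
    }

    std::vector<CAddress> AddrsForPeer(CConnman& connman, Network peer_network)
    {
        // e.g. replying to a peer's GETADDR: served from the per-network cache.
        return connman.GetAddresses(peer_network, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
    }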
@@ -2564,7 +2581,7 @@ size_t CConnman::GetNodeCount(NumConnections flags)
int nNum = 0;
for (const auto& pnode : vNodes) {
- if (flags & (pnode->fInbound ? CONNECTIONS_IN : CONNECTIONS_OUT)) {
+ if (flags & (pnode->IsInboundConn() ? CONNECTIONS_IN : CONNECTIONS_OUT)) {
nNum++;
}
}
@@ -2748,26 +2765,26 @@ int CConnman::GetBestHeight() const
unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; }
-CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, bool fInboundIn, bool block_relay_only)
+CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in)
: nTimeConnected(GetSystemTimeInSeconds()),
addr(addrIn),
addrBind(addrBindIn),
- fInbound(fInboundIn),
nKeyedNetGroup(nKeyedNetGroupIn),
// Don't relay addr messages to peers that we connect to as block-relay-only
// peers (to prevent adversaries from inferring these links from addr
// traffic).
- m_addr_known{block_relay_only ? nullptr : MakeUnique<CRollingBloomFilter>(5000, 0.001)},
id(idIn),
nLocalHostNonce(nLocalHostNonceIn),
+ m_conn_type(conn_type_in),
nLocalServices(nLocalServicesIn),
nMyStartingHeight(nMyStartingHeightIn)
{
hSocket = hSocketIn;
addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn;
hashContinue = uint256();
- if (!block_relay_only) {
+ if (conn_type_in != ConnectionType::BLOCK_RELAY) {
m_tx_relay = MakeUnique<TxRelay>();
+ m_addr_known = MakeUnique<CRollingBloomFilter>(5000, 0.001);
}
for (const std::string &msg : getAllNetMessageTypes())
diff --git a/src/net.h b/src/net.h
index 17d8fda372..c72eada3ff 100644
--- a/src/net.h
+++ b/src/net.h
@@ -27,6 +27,7 @@
#include <atomic>
#include <cstdint>
#include <deque>
+#include <map>
#include <thread>
#include <memory>
#include <condition_variable>
@@ -50,8 +51,8 @@ static const bool DEFAULT_WHITELISTFORCERELAY = false;
static const int TIMEOUT_INTERVAL = 20 * 60;
/** Run the feeler connection loop once every 2 minutes or 120 seconds. **/
static const int FEELER_INTERVAL = 120;
-/** The maximum number of new addresses to accumulate before announcing. */
-static const unsigned int MAX_ADDR_TO_SEND = 1000;
+/** The maximum number of addresses from our addrman to return in response to a getaddr message. */
+static constexpr size_t MAX_ADDR_TO_SEND = 1000;
/** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 4 * 1000 * 1000;
/** Maximum length of the user agent string in `version` message */
@@ -113,6 +114,17 @@ struct CSerializedNetMsg
std::string m_type;
};
+/** Different types of connections to a peer. This enum encapsulates the
+ * information we have available at the time of opening or accepting the
+ * connection. Aside from INBOUND, all types are initiated by us. */
+enum class ConnectionType {
+ INBOUND, /**< peer initiated connections */
+ OUTBOUND, /**< full relay connections (blocks, addrs, txns) made automatically. Addresses selected from AddrMan. */
+ MANUAL, /**< connections to addresses added via addnode or the connect command line argument */
+ FEELER, /**< short lived connections used to test address validity */
+ BLOCK_RELAY, /**< only relay blocks to these automatic outbound connections. Addresses selected from AddrMan. */
+ ADDR_FETCH, /**< short lived connections used to solicit addrs when starting the node without a populated AddrMan */
+};
class NetEventsInterface;
class CConnman
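The enum replaces the stack of fInbound/fFeeler/fOneShot/m_manual_connection/block_relay_only booleans removed later in this patch, and the new CNode helpers switch over it without a default label so that adding a connection type forces every classification to be revisited. A self-contained sketch of that pattern follows (the enum values are copied from the hunk above; the helper name is hypothetical):

    // Self-contained sketch of the switch-without-default pattern used by the
    // new CNode helpers; the helper name here is hypothetical. Omitting
    // `default:` lets -Wswitch flag any ConnectionType value that a future
    // change adds but forgets to handle; the trailing assert guards the
    // fall-through at runtime, mirroring the helpers in this patch.
    #include <cassert>

    enum class ConnectionType { INBOUND, OUTBOUND, MANUAL, FEELER, BLOCK_RELAY, ADDR_FETCH };

    bool UsesOutboundSlot(ConnectionType type)
    {
        switch (type) {
        case ConnectionType::OUTBOUND:
        case ConnectionType::BLOCK_RELAY:
            return true;
        case ConnectionType::INBOUND:
        case ConnectionType::MANUAL:
        case ConnectionType::ADDR_FETCH:
        case ConnectionType::FEELER:
            return false;
        }
        assert(false);
    }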
@@ -197,7 +209,7 @@ public:
bool GetNetworkActive() const { return fNetworkActive; };
bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; };
void SetNetworkActive(bool active);
- void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool manual_connection = false, bool block_relay_only = false);
+ void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, ConnectionType conn_type = ConnectionType::OUTBOUND);
bool CheckIncomingNonce(uint64_t nonce);
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
@@ -249,8 +261,15 @@ public:
// Addrman functions
void SetServices(const CService &addr, ServiceFlags nServices);
void MarkAddressGood(const CAddress& addr);
- void AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0);
- std::vector<CAddress> GetAddresses();
+ bool AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0);
+ std::vector<CAddress> GetAddresses(size_t max_addresses, size_t max_pct);
+ /**
+ * The cache is used to minimize topology leaks, so it should
+ * be used for all non-trusted calls, for example, p2p.
+ * A non-malicious call (from RPC or a peer with addr permission) should
+ * use the overload without the network parameter to avoid the cache.
+ */
+ std::vector<CAddress> GetAddresses(Network requestor_network, size_t max_addresses, size_t max_pct);
// This allows temporarily exceeding m_max_outbound_full_relay, with the goal of finding
// a peer that is better than all our current peers.
@@ -340,8 +359,8 @@ private:
bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions);
bool InitBinds(const std::vector<CService>& binds, const std::vector<NetWhitebindPermissions>& whiteBinds);
void ThreadOpenAddedConnections();
- void AddOneShot(const std::string& strDest);
- void ProcessOneShot();
+ void AddAddrFetch(const std::string& strDest);
+ void ProcessAddrFetch();
void ThreadOpenConnections(std::vector<std::string> connect);
void ThreadMessageHandler();
void AcceptConnection(const ListenSocket& hListenSocket);
@@ -362,7 +381,7 @@ private:
CNode* FindNode(const CService& addr);
bool AttemptToEvictConnection();
- CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only);
+ CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type);
void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const;
void DeleteNode(CNode* pnode);
@@ -405,8 +424,8 @@ private:
std::atomic<bool> fNetworkActive{true};
bool fAddressesInitialized{false};
CAddrMan addrman;
- std::deque<std::string> vOneShots GUARDED_BY(cs_vOneShots);
- RecursiveMutex cs_vOneShots;
+ std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex);
+ RecursiveMutex m_addr_fetches_mutex;
std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes);
RecursiveMutex cs_vAddedNodes;
std::vector<CNode*> vNodes GUARDED_BY(cs_vNodes);
@@ -416,6 +435,29 @@ private:
unsigned int nPrevNodeCount{0};
/**
+ * Cache responses to addr requests to minimize privacy leak.
+ * Attack example: scraping addrs in real-time may allow an attacker
+ * to infer new connections of the victim by detecting new records
+ * with fresh timestamps (per self-announcement).
+ */
+ struct CachedAddrResponse {
+ std::vector<CAddress> m_addrs_response_cache;
+ std::chrono::microseconds m_update_addr_response{0};
+ };
+
+ /**
+ * Addr responses stored in different caches
+ * per network prevent cross-network node identification.
+ * If a node, for example, is multi-homed under Tor and IPv6,
+ * a single cache (or no cache at all) would let an attacker
+ * easily detect that it is the same node by comparing responses.
+ * The memory used is roughly 1000 CAddress records (around 32 bytes each) per
+ * distinct Network (up to 5) we have/had an inbound peer from,
+ * resulting in at most ~160 KB.
+ */
+ std::map<Network, CachedAddrResponse> m_addr_response_caches;
+
+ /**
* Services this instance offers.
*
* This data is replicated in each CNode instance we create during peer
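For the worst-case figure quoted above, the arithmetic is simply records per cache, times bytes per record, times number of networks. A compile-time check of that bound might look like the following (illustration only; the ~32-bytes-per-record figure is an assumption taken from the comment):

    // Back-of-the-envelope bound for m_addr_response_caches; illustrative only.
    #include <cstddef>

    constexpr std::size_t ADDRS_PER_CACHE = 1000;      // MAX_ADDR_TO_SEND
    constexpr std::size_t APPROX_BYTES_PER_ADDR = 32;  // assumed average CAddress footprint
    constexpr std::size_t MAX_CACHED_NETWORKS = 5;
    constexpr std::size_t WORST_CASE_BYTES =
        ADDRS_PER_CACHE * APPROX_BYTES_PER_ADDR * MAX_CACHED_NETWORKS;
    static_assert(WORST_CASE_BYTES == 160000, "roughly 160 KB in the worst case");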
@@ -577,6 +619,8 @@ public:
bool fRelayTxes;
int64_t nLastSend;
int64_t nLastRecv;
+ int64_t nLastTXTime;
+ int64_t nLastBlockTime;
int64_t nTimeConnected;
int64_t nTimeOffset;
std::string addrName;
@@ -764,12 +808,8 @@ public:
}
// This boolean is unused in actual processing, only present for backward compatibility at RPC/QT level
bool m_legacyWhitelisted{false};
- bool fFeeler{false}; // If true this node is being used as a short lived feeler.
- bool fOneShot{false};
- bool m_manual_connection{false};
bool fClient{false}; // set by version message
bool m_limited_node{false}; //after BIP159, set by version message
- const bool fInbound;
std::atomic_bool fSuccessfullyConnected{false};
// Setting fDisconnect to true will cause the node to be disconnected the
// next time DisconnectNodes() runs
@@ -782,6 +822,60 @@ public:
std::atomic_bool fPauseRecv{false};
std::atomic_bool fPauseSend{false};
+ bool IsOutboundOrBlockRelayConn() const {
+ switch(m_conn_type) {
+ case ConnectionType::OUTBOUND:
+ case ConnectionType::BLOCK_RELAY:
+ return true;
+ case ConnectionType::INBOUND:
+ case ConnectionType::MANUAL:
+ case ConnectionType::ADDR_FETCH:
+ case ConnectionType::FEELER:
+ return false;
+ }
+
+ assert(false);
+ }
+
+ bool IsFullOutboundConn() const {
+ return m_conn_type == ConnectionType::OUTBOUND;
+ }
+
+ bool IsManualConn() const {
+ return m_conn_type == ConnectionType::MANUAL;
+ }
+
+ bool IsBlockOnlyConn() const {
+ return m_conn_type == ConnectionType::BLOCK_RELAY;
+ }
+
+ bool IsFeelerConn() const {
+ return m_conn_type == ConnectionType::FEELER;
+ }
+
+ bool IsAddrFetchConn() const {
+ return m_conn_type == ConnectionType::ADDR_FETCH;
+ }
+
+ bool IsInboundConn() const {
+ return m_conn_type == ConnectionType::INBOUND;
+ }
+
+ bool ExpectServicesFromConn() const {
+ switch(m_conn_type) {
+ case ConnectionType::INBOUND:
+ case ConnectionType::MANUAL:
+ case ConnectionType::FEELER:
+ return false;
+ case ConnectionType::OUTBOUND:
+ case ConnectionType::BLOCK_RELAY:
+ case ConnectionType::ADDR_FETCH:
+ return true;
+ }
+
+ assert(false);
+ }
+
protected:
mapMsgCmdSize mapSendBytesPerMsgCmd;
mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv);
@@ -792,7 +886,7 @@ public:
// flood relay
std::vector<CAddress> vAddrToSend;
- const std::unique_ptr<CRollingBloomFilter> m_addr_known;
+ std::unique_ptr<CRollingBloomFilter> m_addr_known = nullptr;
bool fGetAddr{false};
std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0};
std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0};
@@ -856,7 +950,7 @@ public:
std::set<uint256> orphan_work_set;
- CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn = "", bool fInboundIn = false, bool block_relay_only = false);
+ CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in);
~CNode();
CNode(const CNode&) = delete;
CNode& operator=(const CNode&) = delete;
@@ -864,6 +958,7 @@ public:
private:
const NodeId id;
const uint64_t nLocalHostNonce;
+ const ConnectionType m_conn_type;
//! Services offered to this peer.
//!
diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp
index a75838307c..53648deb40 100644
--- a/src/net_permissions.cpp
+++ b/src/net_permissions.cpp
@@ -15,6 +15,7 @@ const std::vector<std::string> NET_PERMISSIONS_DOC{
"relay (relay even in -blocksonly mode)",
"mempool (allow requesting BIP35 mempool contents)",
"download (allow getheaders during IBD, no disconnect after maxuploadtarget limit)",
+ "addr (responses to GETADDR avoid hitting the cache and contain random records with the most up-to-date info)"
};
namespace {
@@ -50,6 +51,7 @@ bool TryParsePermissionFlags(const std::string str, NetPermissionFlags& output,
else if (permission == "download") NetPermissions::AddFlag(flags, PF_DOWNLOAD);
else if (permission == "all") NetPermissions::AddFlag(flags, PF_ALL);
else if (permission == "relay") NetPermissions::AddFlag(flags, PF_RELAY);
+ else if (permission == "addr") NetPermissions::AddFlag(flags, PF_ADDR);
else if (permission.length() == 0); // Allow empty entries
else {
error = strprintf(_("Invalid P2P permission: '%s'"), permission);
@@ -75,6 +77,7 @@ std::vector<std::string> NetPermissions::ToStrings(NetPermissionFlags flags)
if (NetPermissions::HasFlag(flags, PF_RELAY)) strings.push_back("relay");
if (NetPermissions::HasFlag(flags, PF_MEMPOOL)) strings.push_back("mempool");
if (NetPermissions::HasFlag(flags, PF_DOWNLOAD)) strings.push_back("download");
+ if (NetPermissions::HasFlag(flags, PF_ADDR)) strings.push_back("addr");
return strings;
}
diff --git a/src/net_permissions.h b/src/net_permissions.h
index a9633ee2ae..5b68f635a7 100644
--- a/src/net_permissions.h
+++ b/src/net_permissions.h
@@ -29,10 +29,12 @@ enum NetPermissionFlags {
PF_NOBAN = (1U << 4) | PF_DOWNLOAD,
// Can query the mempool
PF_MEMPOOL = (1U << 5),
+ // Can request addrs without hitting a privacy-preserving cache
+ PF_ADDR = (1U << 7),
// True if the user did not specifically set fine grained permissions
PF_ISIMPLICIT = (1U << 31),
- PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL | PF_DOWNLOAD,
+ PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL | PF_DOWNLOAD | PF_ADDR,
};
class NetPermissions
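The permission flags remain plain bit masks, so PF_ADDR composes with the existing AddFlag/HasFlag helpers exactly like the other bits. A minimal, self-contained illustration of that arithmetic (constants copied from the enum above, with the helper calls inlined as raw bit operations):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint32_t PF_MEMPOOL = 1U << 5;
        const uint32_t PF_ADDR = 1U << 7;     // new in this patch
        uint32_t flags = PF_MEMPOOL;
        flags |= PF_ADDR;                     // what NetPermissions::AddFlag does
        assert((flags & PF_ADDR) == PF_ADDR); // what NetPermissions::HasFlag checks
        return 0;
    }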
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 5f1e7318f3..60bdfbe9f5 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -143,6 +143,8 @@ static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
+/** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */
+static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
struct COrphanTx {
// When modifying, adapt the copy of this definition in tests/DoS_tests.
@@ -157,9 +159,6 @@ std::map<uint256, std::map<uint256, COrphanTx>::iterator> g_orphans_by_wtxid GUA
void EraseOrphansFor(NodeId peer);
-/** Increase a node's misbehavior score. */
-void Misbehaving(NodeId nodeid, int howmuch, const std::string& message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
// Internal stuff
namespace {
/** Number of nodes with fSyncStarted. */
@@ -190,7 +189,7 @@ namespace {
* million to make it highly unlikely for users to have issues with this
* filter.
*
- * We only need to add wtxids to this filter. For non-segwit
+ * We typically only add wtxids to this filter. For non-segwit
* transactions, the txid == wtxid, so this only prevents us from
* re-downloading non-segwit transactions when communicating with
* non-wtxidrelay peers -- which is important for avoiding malleation
@@ -199,6 +198,12 @@ namespace {
* the reject filter store wtxids is exactly what we want to avoid
* redownload of a rejected transaction.
*
+ * In cases where we can tell that a segwit transaction will fail
+ * validation no matter the witness, we may add the txid of such
+ * transaction to the filter as well. This can be helpful when
+ * communicating with txid-relay peers or if we were to otherwise fetch a
+ * transaction via txid (e.g. in our orphan handling).
+ *
* Memory used: 1.3 MB
*/
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
@@ -239,7 +244,7 @@ namespace {
/** When our tip was last updated. */
std::atomic<int64_t> g_last_tip_update(0);
- /** Relay map */
+ /** Relay map (txid or wtxid -> CTransactionRef) */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay GUARDED_BY(cs_main);
/** Expiration-time ordered list of (expire time, relay map entry) pairs. */
@@ -401,7 +406,7 @@ struct CNodeState {
/* Track when to attempt download of announced transactions (process
* time in micros -> txid)
*/
- std::multimap<std::chrono::microseconds, uint256> m_tx_process_time;
+ std::multimap<std::chrono::microseconds, GenTxid> m_tx_process_time;
//! Store all the transactions a peer has recently announced
std::set<uint256> m_tx_announced;
@@ -476,7 +481,7 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS
nPreferredDownload -= state->fPreferredDownload;
// Whether this node should be marked as a preferred download node.
- state->fPreferredDownload = (!node.fInbound || node.HasPermission(PF_NOBAN)) && !node.fOneShot && !node.fClient;
+ state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient;
nPreferredDownload += state->fPreferredDownload;
}
@@ -754,34 +759,34 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
}
}
-void EraseTxRequest(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void EraseTxRequest(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- g_already_asked_for.erase(txid);
+ g_already_asked_for.erase(gtxid.GetHash());
}
-std::chrono::microseconds GetTxRequestTime(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+std::chrono::microseconds GetTxRequestTime(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- auto it = g_already_asked_for.find(txid);
+ auto it = g_already_asked_for.find(gtxid.GetHash());
if (it != g_already_asked_for.end()) {
return it->second;
}
return {};
}
-void UpdateTxRequestTime(const uint256& txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void UpdateTxRequestTime(const GenTxid& gtxid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- auto it = g_already_asked_for.find(txid);
+ auto it = g_already_asked_for.find(gtxid.GetHash());
if (it == g_already_asked_for.end()) {
- g_already_asked_for.insert(std::make_pair(txid, request_time));
+ g_already_asked_for.insert(std::make_pair(gtxid.GetHash(), request_time));
} else {
g_already_asked_for.update(it, request_time);
}
}
-std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+std::chrono::microseconds CalculateTxGetDataTime(const GenTxid& gtxid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
std::chrono::microseconds process_time;
- const auto last_request_time = GetTxRequestTime(txid);
+ const auto last_request_time = GetTxRequestTime(gtxid);
// First time requesting this tx
if (last_request_time.count() == 0) {
process_time = current_time;
@@ -800,23 +805,23 @@ std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chron
return process_time;
}
-void RequestTx(CNodeState* state, const uint256& txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void RequestTx(CNodeState* state, const GenTxid& gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
CNodeState::TxDownloadState& peer_download_state = state->m_tx_download;
if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
- peer_download_state.m_tx_announced.count(txid)) {
+ peer_download_state.m_tx_announced.count(gtxid.GetHash())) {
// Too many queued announcements from this peer, or we already have
// this announcement
return;
}
- peer_download_state.m_tx_announced.insert(txid);
+ peer_download_state.m_tx_announced.insert(gtxid.GetHash());
// Calculate the time to try requesting this transaction. Use
// fPreferredDownload as a proxy for outbound peers.
- const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0);
+ const auto process_time = CalculateTxGetDataTime(gtxid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0);
- peer_download_state.m_tx_process_time.emplace(process_time, txid);
+ peer_download_state.m_tx_process_time.emplace(process_time, gtxid);
}
} // namespace
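The hunks above switch the tx-request helpers from bare uint256 txids to GenTxid, whose definition is not shown in this diff. A minimal sketch of the concept, stated purely as an assumption for readers of the patch (the real class lives elsewhere in the codebase and may differ): it pairs the hash with a flag saying whether it is a txid or a wtxid, and GetHash() is all the request-tracking maps above need.

    // Assumed shape of GenTxid for illustration; not the actual definition.
    #include <array>
    #include <cstdint>

    using Hash256 = std::array<uint8_t, 32>; // stand-in for uint256

    class GenTxidSketch
    {
        bool m_is_wtxid;
        Hash256 m_hash;
    public:
        GenTxidSketch(bool is_wtxid, const Hash256& hash) : m_is_wtxid(is_wtxid), m_hash(hash) {}
        bool IsWtxid() const { return m_is_wtxid; }
        const Hash256& GetHash() const { return m_hash; }
    };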
@@ -830,23 +835,16 @@ void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
if (state) state->m_last_block_announcement = time_in_seconds;
}
-// Returns true for outbound peers, excluding manual connections, feelers, and
-// one-shots.
-static bool IsOutboundDisconnectionCandidate(const CNode& node)
-{
- return !(node.fInbound || node.m_manual_connection || node.fFeeler || node.fOneShot);
-}
-
void PeerLogicValidation::InitializeNode(CNode *pnode) {
CAddress addr = pnode->addr;
std::string addrName = pnode->GetAddrName();
NodeId nodeid = pnode->GetId();
{
LOCK(cs_main);
- mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound, pnode->m_manual_connection));
+ mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->IsInboundConn(), pnode->IsManualConn()));
}
- if(!pnode->fInbound)
- PushNodeVersion(*pnode, *connman, GetTime());
+ if(!pnode->IsInboundConn())
+ PushNodeVersion(*pnode, m_connman, GetTime());
}
void PeerLogicValidation::ReattemptInitialBroadcast(CScheduler& scheduler) const
@@ -857,7 +855,7 @@ void PeerLogicValidation::ReattemptInitialBroadcast(CScheduler& scheduler) const
// Sanity check: all unbroadcast txns should exist in the mempool
if (m_mempool.exists(elem.first)) {
LOCK(cs_main);
- RelayTransaction(elem.first, elem.second, *connman);
+ RelayTransaction(elem.first, elem.second, m_connman);
} else {
m_mempool.RemoveUnbroadcastTx(elem.first, true);
}
@@ -1062,23 +1060,22 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
* Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node
* to be discouraged, meaning the peer might be disconnected and added to the discouragement filter.
*/
-void Misbehaving(NodeId pnode, int howmuch, const std::string& message) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- if (howmuch == 0)
- return;
+ assert(howmuch > 0);
- CNodeState *state = State(pnode);
- if (state == nullptr)
- return;
+ CNodeState* const state = State(pnode);
+ if (state == nullptr) return;
state->nMisbehavior += howmuch;
- std::string message_prefixed = message.empty() ? "" : (": " + message);
+ const std::string message_prefixed = message.empty() ? "" : (": " + message);
if (state->nMisbehavior >= DISCOURAGEMENT_THRESHOLD && state->nMisbehavior - howmuch < DISCOURAGEMENT_THRESHOLD)
{
- LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
+ LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, state->nMisbehavior - howmuch, state->nMisbehavior, message_prefixed);
state->m_should_discourage = true;
- } else
- LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d)%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
+ } else {
+ LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, state->nMisbehavior - howmuch, state->nMisbehavior, message_prefixed);
+ }
}
/**
@@ -1165,6 +1162,7 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state,
}
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
+ case TxValidationResult::TX_INPUTS_NOT_STANDARD:
case TxValidationResult::TX_NOT_STANDARD:
case TxValidationResult::TX_MISSING_INPUTS:
case TxValidationResult::TX_PREMATURE_SPEND:
@@ -1199,8 +1197,8 @@ static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Para
(GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
}
-PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
- : connman(connmanIn),
+PeerLogicValidation::PeerLogicValidation(CConnman& connman, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
+ : m_connman(connman),
m_banman(banman),
m_chainman(chainman),
m_mempool(pool),
@@ -1328,7 +1326,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
}
- connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
+ m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
AssertLockHeld(cs_main);
// TODO: Avoid the repeated-serialization here
@@ -1343,7 +1341,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
hashBlock.ToString(), pnode->GetId());
- connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
+ m_connman.PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
state.pindexBestHeaderSent = pindex;
}
});
@@ -1355,7 +1353,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
*/
void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
const int nNewHeight = pindexNew->nHeight;
- connman->SetBestHeight(nNewHeight);
+ m_connman.SetBestHeight(nNewHeight);
SetServiceFlagsIBDCache(!fInitialDownload);
if (!fInitialDownload) {
@@ -1372,7 +1370,7 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB
}
}
// Relay inventory, but don't relay old inventory during initial block download.
- connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
+ m_connman.ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
LOCK(pnode->cs_inventory);
if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
for (const uint256& hash : reverse_iterate(vHashes)) {
@@ -1380,7 +1378,7 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB
}
}
});
- connman->WakeMessageHandler();
+ m_connman.WakeMessageHandler();
}
}
@@ -1411,7 +1409,7 @@ void PeerLogicValidation::BlockChecked(const CBlock& block, const BlockValidatio
!::ChainstateActive().IsInitialBlockDownload() &&
mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
if (it != mapBlockSource.end()) {
- MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, *connman);
+ MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman);
}
}
if (it != mapBlockSource.end())
@@ -1445,9 +1443,9 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO
{
LOCK(g_cs_orphans);
- if (inv.type != MSG_WTX && mapOrphanTransactions.count(inv.hash)) {
+ if (!inv.IsMsgWtx() && mapOrphanTransactions.count(inv.hash)) {
return true;
- } else if (inv.type == MSG_WTX && g_orphans_by_wtxid.count(inv.hash)) {
+ } else if (inv.IsMsgWtx() && g_orphans_by_wtxid.count(inv.hash)) {
return true;
}
}
@@ -1457,8 +1455,7 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO
if (g_recent_confirmed_transactions->contains(inv.hash)) return true;
}
- const bool by_wtxid = (inv.type == MSG_WTX);
- return recentRejects->contains(inv.hash) || mempool.exists(inv.hash, by_wtxid);
+ return recentRejects->contains(inv.hash) || mempool.exists(ToGenTxid(inv));
}
case MSG_BLOCK:
case MSG_WITNESS_BLOCK:
@@ -1676,9 +1673,9 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c
}
//! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed).
-CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid_or_wtxid, bool use_wtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
+CTransactionRef static FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
{
- auto txinfo = mempool.info(txid_or_wtxid, use_wtxid);
+ auto txinfo = mempool.info(gtxid);
if (txinfo.tx) {
// If a TX could have been INVed in reply to a MEMPOOL request,
// or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
@@ -1691,11 +1688,11 @@ CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid_o
{
LOCK(cs_main);
// Otherwise, the transaction must have been announced recently.
- if (State(peer.GetId())->m_recently_announced_invs.contains(txid_or_wtxid)) {
+ if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
// If it was, it can be relayed from either the mempool...
if (txinfo.tx) return std::move(txinfo.tx);
// ... or the relay pool.
- auto mi = mapRelay.find(txid_or_wtxid);
+ auto mi = mapRelay.find(gtxid.GetHash());
if (mi != mapRelay.end()) return mi->second;
}
}
@@ -1719,7 +1716,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
// Process as many TX items from the front of the getdata queue as
// possible, since they're common and it's efficient to batch process
// them.
- while (it != pfrom.vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX || it->type == MSG_WTX)) {
+ while (it != pfrom.vRecvGetData.end() && it->IsGenTxMsg()) {
if (interruptMsgProc) return;
// The send buffer provides backpressure. If there's no space in
// the buffer, pause processing until the next call.
@@ -1732,23 +1729,34 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
continue;
}
- CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, inv.type == MSG_WTX, mempool_req, now);
+ CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now);
if (tx) {
// WTX and WITNESS_TX imply we serialize with witness
- int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
+ int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
mempool.RemoveUnbroadcastTx(tx->GetHash());
// As we're going to send tx, make sure its unconfirmed parents are made requestable.
- for (const auto& txin : tx->vin) {
- auto txinfo = mempool.info(txin.prevout.hash);
- if (txinfo.tx && txinfo.m_time > now - UNCONDITIONAL_RELAY_DELAY) {
- // Relaying a transaction with a recent but unconfirmed parent.
- if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(txin.prevout.hash))) {
- LOCK(cs_main);
- State(pfrom.GetId())->m_recently_announced_invs.insert(txin.prevout.hash);
+ std::vector<uint256> parent_ids_to_add;
+ {
+ LOCK(mempool.cs);
+ auto txiter = mempool.GetIter(tx->GetHash());
+ if (txiter) {
+ const CTxMemPool::setEntries& parents = mempool.GetMemPoolParents(*txiter);
+ parent_ids_to_add.reserve(parents.size());
+ for (CTxMemPool::txiter parent_iter : parents) {
+ if (parent_iter->GetTime() > now - UNCONDITIONAL_RELAY_DELAY) {
+ parent_ids_to_add.push_back(parent_iter->GetTx().GetHash());
+ }
}
}
}
+ for (const uint256& parent_txid : parent_ids_to_add) {
+ // Relaying a transaction with a recent but unconfirmed parent.
+ if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
+ LOCK(cs_main);
+ State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
+ }
+ }
} else {
vNotFound.push_back(inv);
}
@@ -1799,7 +1807,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
for (size_t i = 0; i < req.indexes.size(); i++) {
if (req.indexes[i] >= block.vtx.size()) {
LOCK(cs_main);
- Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom.GetId()));
+ Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices");
return;
}
resp.txn[i] = block.vtx[req.indexes[i]];
@@ -1848,7 +1856,7 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman& connman, ChainstateMan
UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
- Misbehaving(pfrom.GetId(), 20);
+ Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
}
return;
}
@@ -1969,14 +1977,14 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman& connman, ChainstateMan
// until we have a headers chain that has at least
// nMinimumChainWork, even if a peer has a chain past our tip,
// as an anti-DoS measure.
- if (IsOutboundDisconnectionCandidate(pfrom)) {
+ if (pfrom.IsOutboundOrBlockRelayConn()) {
LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
pfrom.fDisconnect = true;
}
}
}
- if (!pfrom.fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) {
+ if (!pfrom.fDisconnect && pfrom.IsOutboundOrBlockRelayConn() && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) {
// If this is an outbound full-relay peer, check to see if we should protect
// it from the bad/lagging chain logic.
// Note that block-relay-only peers are already implicitly protected, so we
@@ -2057,6 +2065,19 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(orphanTx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (orphan_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && orphanTx.GetWitnessHash() != orphanTx.GetHash()) {
+ // We only add the txid if it differs from the wtxid, to
+ // avoid wasting entries in the rolling bloom filter.
+ recentRejects->insert(orphanTx.GetHash());
+ }
}
EraseOrphanTx(orphanHash);
done = true;
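The rule described in the comment above can be restated as a tiny hypothetical helper (illustration only, not part of the patch; it assumes the codebase's uint256.h and bloom.h headers): the wtxid always goes into the reject filter, and the txid is added as well only when the failure is known to be witness-independent and the two hashes actually differ.

    // Hypothetical helper restating the reject-filter rule above.
    void AddToRecentRejects(CRollingBloomFilter& rejects,
                            const uint256& txid, const uint256& wtxid,
                            bool witness_independent_failure)
    {
        rejects.insert(wtxid);
        if (witness_independent_failure && wtxid != txid) {
            rejects.insert(txid); // avoid re-fetching by txid (e.g. via orphan handling)
        }
    }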
@@ -2070,7 +2091,7 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin
*
* May disconnect from the peer in the case of a bad request.
*
- * @param[in] pfrom The peer that we received the request from
+ * @param[in] peer The peer that we received the request from
* @param[in] chain_params Chain parameters
* @param[in] filter_type The filter type the request is for. Must be basic filters.
* @param[in] start_height The start height for the request
@@ -2080,7 +2101,7 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin
* @param[out] filter_index The filter index, if the request can be serviced.
* @return True if the request can be serviced.
*/
-static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_params,
+static bool PrepareBlockFilterRequest(CNode& peer, const CChainParams& chain_params,
BlockFilterType filter_type, uint32_t start_height,
const uint256& stop_hash, uint32_t max_height_diff,
const CBlockIndex*& stop_index,
@@ -2088,11 +2109,11 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa
{
const bool supported_filter_type =
(filter_type == BlockFilterType::BASIC &&
- gArgs.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS));
+ (peer.GetLocalServices() & NODE_COMPACT_FILTERS));
if (!supported_filter_type) {
LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
- pfrom.GetId(), static_cast<uint8_t>(filter_type));
- pfrom.fDisconnect = true;
+ peer.GetId(), static_cast<uint8_t>(filter_type));
+ peer.fDisconnect = true;
return false;
}
@@ -2103,8 +2124,8 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa
// Check that the stop block exists and the peer would be allowed to fetch it.
if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
- pfrom.GetId(), stop_hash.ToString());
- pfrom.fDisconnect = true;
+ peer.GetId(), stop_hash.ToString());
+ peer.fDisconnect = true;
return false;
}
}
@@ -2113,14 +2134,14 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa
if (start_height > stop_height) {
LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
"start height %d and stop height %d\n",
- pfrom.GetId(), start_height, stop_height);
- pfrom.fDisconnect = true;
+ peer.GetId(), start_height, stop_height);
+ peer.fDisconnect = true;
return false;
}
if (stop_height - start_height >= max_height_diff) {
LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
- pfrom.GetId(), stop_height - start_height + 1, max_height_diff);
- pfrom.fDisconnect = true;
+ peer.GetId(), stop_height - start_height + 1, max_height_diff);
+ peer.fDisconnect = true;
return false;
}
@@ -2138,12 +2159,12 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa
*
* May disconnect from the peer in the case of a bad request.
*
- * @param[in] pfrom The peer that we received the request from
+ * @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
* @param[in] chain_params Chain parameters
* @param[in] connman Pointer to the connection manager
*/
-static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params,
+static void ProcessGetCFilters(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
CConnman& connman)
{
uint8_t filter_type_ser;
@@ -2156,13 +2177,12 @@ static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainPar
const CBlockIndex* stop_index;
BlockFilterIndex* filter_index;
- if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, start_height, stop_hash,
+ if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
return;
}
std::vector<BlockFilter> filters;
-
if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
@@ -2170,9 +2190,9 @@ static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainPar
}
for (const auto& filter : filters) {
- CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion())
+ CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion())
.Make(NetMsgType::CFILTER, filter);
- connman.PushMessage(&pfrom, std::move(msg));
+ connman.PushMessage(&peer, std::move(msg));
}
}
@@ -2181,12 +2201,12 @@ static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainPar
*
* May disconnect from the peer in the case of a bad request.
*
- * @param[in] pfrom The peer that we received the request from
+ * @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
* @param[in] chain_params Chain parameters
* @param[in] connman Pointer to the connection manager
*/
-static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params,
+static void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
CConnman& connman)
{
uint8_t filter_type_ser;
@@ -2199,7 +2219,7 @@ static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainPa
const CBlockIndex* stop_index;
BlockFilterIndex* filter_index;
- if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, start_height, stop_hash,
+ if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
return;
}
@@ -2222,13 +2242,13 @@ static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainPa
return;
}
- CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion())
+ CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion())
.Make(NetMsgType::CFHEADERS,
filter_type_ser,
stop_index->GetBlockHash(),
prev_header,
filter_hashes);
- connman.PushMessage(&pfrom, std::move(msg));
+ connman.PushMessage(&peer, std::move(msg));
}
/**
@@ -2236,12 +2256,12 @@ static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainPa
*
* May disconnect from the peer in the case of a bad request.
*
- * @param[in] pfrom The peer that we received the request from
+ * @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
* @param[in] chain_params Chain parameters
* @param[in] connman Pointer to the connection manager
*/
-static void ProcessGetCFCheckPt(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params,
+static void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
CConnman& connman)
{
uint8_t filter_type_ser;
@@ -2253,7 +2273,7 @@ static void ProcessGetCFCheckPt(CNode& pfrom, CDataStream& vRecv, const CChainPa
const CBlockIndex* stop_index;
BlockFilterIndex* filter_index;
- if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, /*start_height=*/0, stop_hash,
+ if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
/*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
stop_index, filter_index)) {
return;
@@ -2274,25 +2294,17 @@ static void ProcessGetCFCheckPt(CNode& pfrom, CDataStream& vRecv, const CChainPa
}
}
- CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion())
+ CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion())
.Make(NetMsgType::CFCHECKPT,
filter_type_ser,
stop_index->GetBlockHash(),
headers);
- connman.PushMessage(&pfrom, std::move(msg));
+ connman.PushMessage(&peer, std::move(msg));
}
-void ProcessMessage(
- CNode& pfrom,
- const std::string& msg_type,
- CDataStream& vRecv,
- const std::chrono::microseconds time_received,
- const CChainParams& chainparams,
- ChainstateManager& chainman,
- CTxMemPool& mempool,
- CConnman& connman,
- BanMan* banman,
- const std::atomic<bool>& interruptMsgProc)
+void PeerLogicValidation::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
+ const std::chrono::microseconds time_received,
+ const CChainParams& chainparams, const std::atomic<bool>& interruptMsgProc)
{
LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
@@ -2307,7 +2319,7 @@ void ProcessMessage(
if (pfrom.nVersion != 0)
{
LOCK(cs_main);
- Misbehaving(pfrom.GetId(), 1);
+ Misbehaving(pfrom.GetId(), 1, "redundant version message");
return;
}
@@ -2326,11 +2338,11 @@ void ProcessMessage(
vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
nServices = ServiceFlags(nServiceInt);
- if (!pfrom.fInbound)
+ if (!pfrom.IsInboundConn())
{
- connman.SetServices(pfrom.addr, nServices);
+ m_connman.SetServices(pfrom.addr, nServices);
}
- if (!pfrom.fInbound && !pfrom.fFeeler && !pfrom.m_manual_connection && !HasAllDesirableServiceFlags(nServices))
+ if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
{
LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
pfrom.fDisconnect = true;
@@ -2357,27 +2369,27 @@ void ProcessMessage(
if (!vRecv.empty())
vRecv >> fRelay;
// Disconnect if we connected to ourself
- if (pfrom.fInbound && !connman.CheckIncomingNonce(nNonce))
+ if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
{
LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
pfrom.fDisconnect = true;
return;
}
- if (pfrom.fInbound && addrMe.IsRoutable())
+ if (pfrom.IsInboundConn() && addrMe.IsRoutable())
{
SeenLocal(addrMe);
}
// Be shy and don't send version until we hear
- if (pfrom.fInbound)
- PushNodeVersion(pfrom, connman, GetAdjustedTime());
+ if (pfrom.IsInboundConn())
+ PushNodeVersion(pfrom, m_connman, GetAdjustedTime());
if (nVersion >= WTXID_RELAY_VERSION) {
- connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::WTXIDRELAY));
+ m_connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::WTXIDRELAY));
}
- connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
+ m_connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
pfrom.nServices = nServices;
pfrom.SetAddrLocal(addrMe);
@@ -2414,7 +2426,7 @@ void ProcessMessage(
UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
}
- if (!pfrom.fInbound && pfrom.IsAddrRelayPeer())
+ if (!pfrom.IsInboundConn() && pfrom.IsAddrRelayPeer())
{
// Advertise our address
if (fListen && !::ChainstateActive().IsInitialBlockDownload())
@@ -2433,9 +2445,9 @@ void ProcessMessage(
}
// Get recent addresses
- connman.PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
+ m_connman.PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
pfrom.fGetAddr = true;
- connman.MarkAddressGood(pfrom.addr);
+ m_connman.MarkAddressGood(pfrom.addr);
}
std::string remoteAddr;
@@ -2454,12 +2466,11 @@ void ProcessMessage(
// If the peer is old enough to have the old alert system, send it the final alert.
if (pfrom.nVersion <= 70012) {
CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
- connman.PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
+ m_connman.PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
}
        // Feeler connections exist only to verify if an address is online.
- if (pfrom.fFeeler) {
- assert(pfrom.fInbound == false);
+ if (pfrom.IsFeelerConn()) {
pfrom.fDisconnect = true;
}
return;
@@ -2468,7 +2479,7 @@ void ProcessMessage(
if (pfrom.nVersion == 0) {
// Must have a version message before anything else
LOCK(cs_main);
- Misbehaving(pfrom.GetId(), 1);
+ Misbehaving(pfrom.GetId(), 1, "non-version message before version handshake");
return;
}
@@ -2479,7 +2490,7 @@ void ProcessMessage(
{
pfrom.SetRecvVersion(std::min(pfrom.nVersion.load(), PROTOCOL_VERSION));
- if (!pfrom.fInbound) {
+ if (!pfrom.IsInboundConn()) {
// Mark this node as currently connected, so we update its timestamp later.
LOCK(cs_main);
State(pfrom.GetId())->fCurrentlyConnected = true;
@@ -2494,7 +2505,7 @@ void ProcessMessage(
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
}
if (pfrom.nVersion >= SHORT_IDS_BLOCKS_VERSION) {
// Tell our peer we are willing to provide version 1 or 2 cmpctblocks
@@ -2505,9 +2516,9 @@ void ProcessMessage(
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 2;
if (pfrom.GetLocalServices() & NODE_WITNESS)
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
nCMPCTBLOCKVersion = 1;
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
}
pfrom.fSuccessfullyConnected = true;
return;
@@ -2535,7 +2546,7 @@ void ProcessMessage(
if (!pfrom.fSuccessfullyConnected) {
// Must have a verack message before anything else
LOCK(cs_main);
- Misbehaving(pfrom.GetId(), 1);
+ Misbehaving(pfrom.GetId(), 1, "non-verack message before version handshake");
return;
}
@@ -2546,7 +2557,7 @@ void ProcessMessage(
if (!pfrom.IsAddrRelayPeer()) {
return;
}
- if (vAddr.size() > 1000)
+ if (vAddr.size() > MAX_ADDR_TO_SEND)
{
LOCK(cs_main);
Misbehaving(pfrom.GetId(), 20, strprintf("addr message size = %u", vAddr.size()));
@@ -2571,7 +2582,7 @@ void ProcessMessage(
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom.AddAddressKnown(addr);
- if (banman && (banman->IsDiscouraged(addr) || banman->IsBanned(addr))) {
+ if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
// Do not process banned/discouraged addresses beyond remembering we received them
continue;
}
@@ -2579,16 +2590,16 @@ void ProcessMessage(
if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
- RelayAddress(addr, fReachable, connman);
+ RelayAddress(addr, fReachable, m_connman);
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
- connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
+ m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom.fGetAddr = false;
- if (pfrom.fOneShot)
+ if (pfrom.IsAddrFetchConn())
pfrom.fDisconnect = true;
return;
}
@@ -2652,17 +2663,19 @@ void ProcessMessage(
if (interruptMsgProc)
return;
- // ignore INVs that don't match wtxidrelay setting
+ // Ignore INVs that don't match wtxidrelay setting.
+ // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
+ // This is fine as no INV messages are involved in that process.
if (State(pfrom.GetId())->m_wtxid_relay) {
- if (inv.type == MSG_TX) continue;
+ if (inv.IsMsgTx()) continue;
} else {
- if (inv.type == MSG_WTX) continue;
+ if (inv.IsMsgWtx()) continue;
}
- bool fAlreadyHave = AlreadyHave(inv, mempool);
+ bool fAlreadyHave = AlreadyHave(inv, m_mempool);
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
- if (inv.type == MSG_TX) {
+ if (inv.IsMsgTx()) {
inv.type |= nFetchFlags;
}
@@ -2682,14 +2695,14 @@ void ProcessMessage(
LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
pfrom.fDisconnect = true;
return;
- } else if (!fAlreadyHave && !chainman.ActiveChainstate().IsInitialBlockDownload()) {
- RequestTx(State(pfrom.GetId()), inv.hash, current_time);
+ } else if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
+ RequestTx(State(pfrom.GetId()), ToGenTxid(inv), current_time);
}
}
}
if (best_block != nullptr) {
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block));
LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId());
}
@@ -2713,7 +2726,7 @@ void ProcessMessage(
}
pfrom.vRecvGetData.insert(pfrom.vRecvGetData.end(), vInv.begin(), vInv.end());
- ProcessGetData(pfrom, chainparams, connman, mempool, interruptMsgProc);
+ ProcessGetData(pfrom, chainparams, m_connman, m_mempool, interruptMsgProc);
return;
}
@@ -2797,7 +2810,7 @@ void ProcessMessage(
// Unlock cs_most_recent_block to avoid cs_main lock inversion
}
if (recent_block) {
- SendBlockTransactions(*recent_block, req, pfrom, connman);
+ SendBlockTransactions(*recent_block, req, pfrom, m_connman);
return;
}
@@ -2830,7 +2843,7 @@ void ProcessMessage(
bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus());
assert(ret);
- SendBlockTransactions(block, req, pfrom, connman);
+ SendBlockTransactions(block, req, pfrom, m_connman);
return;
}
@@ -2897,7 +2910,7 @@ void ProcessMessage(
// will re-announce the new block via headers (or compact blocks again)
// in the SendMessages logic.
nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip();
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
return;
}
@@ -2936,15 +2949,17 @@ void ProcessMessage(
TxValidationState state;
- nodestate->m_tx_download.m_tx_announced.erase(hash);
- nodestate->m_tx_download.m_tx_in_flight.erase(hash);
- EraseTxRequest(hash);
+ for (const GenTxid& gtxid : {GenTxid(false, txid), GenTxid(true, wtxid)}) {
+ nodestate->m_tx_download.m_tx_announced.erase(gtxid.GetHash());
+ nodestate->m_tx_download.m_tx_in_flight.erase(gtxid.GetHash());
+ EraseTxRequest(gtxid);
+ }
std::list<CTransactionRef> lRemovedTxn;
// We do the AlreadyHave() check using wtxid, rather than txid - in the
// absence of witness malleation, this is strictly better, because the
- // recent rejects filter may contain the wtxid but will never contain
+ // recent rejects filter may contain the wtxid but rarely contains
// the txid of a segwit transaction that has been rejected.
// In the presence of witness malleation, it's possible that by only
// doing the check with wtxid, we could overlook a transaction which
@@ -2954,10 +2969,10 @@ void ProcessMessage(
// already; and an adversary can already relay us old transactions
// (older than our recency filter) if trying to DoS us, without any need
// for witness malleation.
- if (!AlreadyHave(CInv(MSG_WTX, wtxid), mempool) &&
- AcceptToMemoryPool(mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
- mempool.check(&::ChainstateActive().CoinsTip());
- RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), connman);
+ if (!AlreadyHave(CInv(MSG_WTX, wtxid), m_mempool) &&
+ AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
+ m_mempool.check(&::ChainstateActive().CoinsTip());
+ RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), m_connman);
for (unsigned int i = 0; i < tx.vout.size(); i++) {
auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
@@ -2972,16 +2987,27 @@ void ProcessMessage(
LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom.GetId(),
tx.GetHash().ToString(),
- mempool.size(), mempool.DynamicMemoryUsage() / 1000);
+ m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
// Recursively process any orphan transactions that depended on this one
- ProcessOrphanTx(connman, mempool, pfrom.orphan_work_set, lRemovedTxn);
+ ProcessOrphanTx(m_connman, m_mempool, pfrom.orphan_work_set, lRemovedTxn);
}
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
{
            bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
+
+ // Deduplicate parent txids, so that we don't have to loop over
+ // the same parent txid more than once down below.
+ std::vector<uint256> unique_parents;
+ unique_parents.reserve(tx.vin.size());
for (const CTxIn& txin : tx.vin) {
- if (recentRejects->contains(txin.prevout.hash)) {
+ // We start with all parents, and then remove duplicates below.
+ unique_parents.push_back(txin.prevout.hash);
+ }
+ std::sort(unique_parents.begin(), unique_parents.end());
+ unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
+ for (const uint256& parent_txid : unique_parents) {
+ if (recentRejects->contains(parent_txid)) {
fRejectedParents = true;
break;
}
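As an illustrative aside (not part of this change), the hunk above deduplicates parent txids with the standard sort-then-unique idiom. A minimal self-contained sketch of that idiom, with int standing in for uint256 so it compiles on its own, might look like:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Sorting groups duplicates together; std::unique then shifts the unique
    // elements to the front and erase() drops the leftover tail.
    std::vector<int> Deduplicate(std::vector<int> v)
    {
        std::sort(v.begin(), v.end());
        v.erase(std::unique(v.begin(), v.end()), v.end());
        return v;
    }

    // Example: {7, 3, 7, 1, 3} becomes {1, 3, 7}.
    void DeduplicateExample()
    {
        assert(Deduplicate({7, 3, 7, 1, 3}) == (std::vector<int>{1, 3, 7}));
    }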
@@ -2990,17 +3016,15 @@ void ProcessMessage(
uint32_t nFetchFlags = GetFetchFlags(pfrom);
const auto current_time = GetTime<std::chrono::microseconds>();
- if (!State(pfrom.GetId())->m_wtxid_relay) {
- for (const CTxIn& txin : tx.vin) {
- // Here, we only have the txid (and not wtxid) of the
- // inputs, so we only request parents from
- // non-wtxid-relay peers.
- // Eventually we should replace this with an improved
- // protocol for getting all unconfirmed parents.
- CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
- pfrom.AddKnownTx(txin.prevout.hash);
- if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), _inv.hash, current_time);
- }
+ for (const uint256& parent_txid : unique_parents) {
+ // Here, we only have the txid (and not wtxid) of the
+ // inputs, so we only request in txid mode, even for
+ // wtxidrelay peers.
+ // Eventually we should replace this with an improved
+ // protocol for getting all unconfirmed parents.
+ CInv _inv(MSG_TX | nFetchFlags, parent_txid);
+ pfrom.AddKnownTx(parent_txid);
+ if (!AlreadyHave(_inv, m_mempool)) RequestTx(State(pfrom.GetId()), ToGenTxid(_inv), current_time);
}
AddOrphanTx(ptx, pfrom.GetId());
@@ -3038,6 +3062,17 @@ void ProcessMessage(
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(tx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
+ recentRejects->insert(tx.GetHash());
+ }
if (RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
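The new reject-filter insertion above relies on the wtxid differing from the txid exactly when a transaction carries witness data. A hedged sketch of that relationship using the existing CTransaction accessors (not part of this change):

    // GetHash() hashes the transaction serialized without witness data (the
    // txid); GetWitnessHash() includes the witness (the wtxid). For a
    // non-segwit transaction the two coincide, so the extra txid insert above
    // only ever happens for segwit transactions.
    bool HasDistinctWtxid(const CTransaction& tx)
    {
        return tx.GetWitnessHash() != tx.GetHash();
    }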
@@ -3050,11 +3085,11 @@ void ProcessMessage(
// if they were already in the mempool,
// allowing the node to function as a gateway for
// nodes hidden behind it.
- if (!mempool.exists(tx.GetHash())) {
+ if (!m_mempool.exists(tx.GetHash())) {
LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
} else {
LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
- RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), connman);
+ RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), m_connman);
}
}
}
@@ -3107,7 +3142,7 @@ void ProcessMessage(
if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
if (!::ChainstateActive().IsInitialBlockDownload())
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
return;
}
@@ -3118,7 +3153,7 @@ void ProcessMessage(
const CBlockIndex *pindex = nullptr;
BlockValidationState state;
- if (!chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
+ if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
if (state.IsInvalid()) {
MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
return;
@@ -3168,7 +3203,7 @@ void ProcessMessage(
// so we just grab the block via normal getdata
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
}
return;
}
@@ -3189,9 +3224,9 @@ void ProcessMessage(
if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
(fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
- if (!MarkBlockAsInFlight(mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
+ if (!MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
if (!(*queuedBlockIt)->partialBlock)
- (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
+ (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
else {
// The block was already in flight using compact blocks from the same peer
LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
@@ -3203,13 +3238,13 @@ void ProcessMessage(
ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status == READ_STATUS_INVALID) {
MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect
- Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us invalid compact block\n", pfrom.GetId()));
+ Misbehaving(pfrom.GetId(), 100, "invalid compact block");
return;
} else if (status == READ_STATUS_FAILED) {
// Duplicate txindexes, the block is now in-flight, so just request it
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return;
}
@@ -3226,7 +3261,7 @@ void ProcessMessage(
fProcessBLOCKTXN = true;
} else {
req.blockhash = pindex->GetBlockHash();
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
}
} else {
// This block is either already in flight from a different
@@ -3234,7 +3269,7 @@ void ProcessMessage(
// download from.
// Optimistically try to reconstruct anyway since we might be
// able to without any round trips.
- PartiallyDownloadedBlock tempBlock(&mempool);
+ PartiallyDownloadedBlock tempBlock(&m_mempool);
ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status != READ_STATUS_OK) {
// TODO: don't ignore failures
@@ -3252,7 +3287,7 @@ void ProcessMessage(
// mempool will probably be useless - request the block normally
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return;
} else {
// If this was an announce-cmpctblock, we want the same treatment as a header message
@@ -3262,7 +3297,7 @@ void ProcessMessage(
} // cs_main
if (fProcessBLOCKTXN)
- return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, chainparams, chainman, mempool, connman, banman, interruptMsgProc);
+ return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, chainparams, interruptMsgProc);
if (fRevertToHeaderProcessing) {
// Headers received from HB compact block peers are permitted to be
@@ -3270,7 +3305,7 @@ void ProcessMessage(
// the peer if the header turns out to be for an invalid block.
// Note that if a peer tries to build on an invalid chain, that
// will be detected and the peer will be disconnected/discouraged.
- return ProcessHeadersMessage(pfrom, connman, chainman, mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
+ return ProcessHeadersMessage(pfrom, m_connman, m_chainman, m_mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
}
if (fBlockReconstructed) {
@@ -3290,7 +3325,7 @@ void ProcessMessage(
// we have a chain with at least nMinimumChainWork), and we ignore
// compact blocks with less work than our tip, it is safe to treat
// reconstructed compact blocks as having been requested.
- chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
+ m_chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom.nLastBlockTime = GetTime();
} else {
@@ -3336,13 +3371,13 @@ void ProcessMessage(
ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
if (status == READ_STATUS_INVALID) {
MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect
- Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom.GetId()));
+ Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions");
return;
} else if (status == READ_STATUS_FAILED) {
// Might have collided, fall back to getdata now :(
std::vector<CInv> invs;
invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
} else {
// Block is either okay, or possibly we received
// READ_STATUS_CHECKBLOCK_FAILED.
@@ -3380,7 +3415,7 @@ void ProcessMessage(
// disk-space attacks), but this should be safe due to the
// protections in the compact block handler -- see related comment
// in compact block optimistic reconstruction handling.
- chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
+ m_chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom.nLastBlockTime = GetTime();
} else {
@@ -3414,7 +3449,7 @@ void ProcessMessage(
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
- return ProcessHeadersMessage(pfrom, connman, chainman, mempool, headers, chainparams, /*via_compact_block=*/false);
+ return ProcessHeadersMessage(pfrom, m_connman, m_chainman, m_mempool, headers, chainparams, /*via_compact_block=*/false);
}
if (msg_type == NetMsgType::BLOCK)
@@ -3443,7 +3478,7 @@ void ProcessMessage(
mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
}
bool fNewBlock = false;
- chainman.ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
+ m_chainman.ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
if (fNewBlock) {
pfrom.nLastBlockTime = GetTime();
} else {
@@ -3459,7 +3494,7 @@ void ProcessMessage(
// to users' AddrMan and later request them by sending getaddr messages.
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
- if (!pfrom.fInbound) {
+ if (!pfrom.IsInboundConn()) {
LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom.GetId());
return;
}
@@ -3477,13 +3512,15 @@ void ProcessMessage(
pfrom.fSentAddr = true;
pfrom.vAddrToSend.clear();
- std::vector<CAddress> vAddr = connman.GetAddresses();
+ std::vector<CAddress> vAddr;
+ if (pfrom.HasPermission(PF_ADDR)) {
+ vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
+ } else {
+ vAddr = m_connman.GetAddresses(pfrom.addr.GetNetwork(), MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
+ }
FastRandomContext insecure_rand;
for (const CAddress &addr : vAddr) {
- bool banned_or_discouraged = banman && (banman->IsDiscouraged(addr) || banman->IsBanned(addr));
- if (!banned_or_discouraged) {
- pfrom.PushAddress(addr, insecure_rand);
- }
+ pfrom.PushAddress(addr, insecure_rand);
}
return;
}
@@ -3499,7 +3536,7 @@ void ProcessMessage(
return;
}
- if (connman.OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL))
+ if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL))
{
if (!pfrom.HasPermission(PF_NOBAN))
{
@@ -3532,7 +3569,7 @@ void ProcessMessage(
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
}
return;
}
@@ -3605,7 +3642,7 @@ void ProcessMessage(
{
// There is no excuse for sending a too-large filter
LOCK(cs_main);
- Misbehaving(pfrom.GetId(), 100);
+ Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
}
else if (pfrom.m_tx_relay != nullptr)
{
@@ -3639,7 +3676,7 @@ void ProcessMessage(
}
if (bad) {
LOCK(cs_main);
- Misbehaving(pfrom.GetId(), 100);
+ Misbehaving(pfrom.GetId(), 100, "bad filteradd message");
}
return;
}
@@ -3672,17 +3709,17 @@ void ProcessMessage(
}
if (msg_type == NetMsgType::GETCFILTERS) {
- ProcessGetCFilters(pfrom, vRecv, chainparams, connman);
+ ProcessGetCFilters(pfrom, vRecv, chainparams, m_connman);
return;
}
if (msg_type == NetMsgType::GETCFHEADERS) {
- ProcessGetCFHeaders(pfrom, vRecv, chainparams, connman);
+ ProcessGetCFHeaders(pfrom, vRecv, chainparams, m_connman);
return;
}
if (msg_type == NetMsgType::GETCFCHECKPT) {
- ProcessGetCFCheckPt(pfrom, vRecv, chainparams, connman);
+ ProcessGetCFCheckPt(pfrom, vRecv, chainparams, m_connman);
return;
}
@@ -3694,7 +3731,7 @@ void ProcessMessage(
vRecv >> vInv;
if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
for (CInv &inv : vInv) {
- if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX || inv.type == MSG_WTX) {
+ if (inv.IsGenTxMsg()) {
// If we receive a NOTFOUND message for a txid we requested, erase
// it from our data structures for this peer.
auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash);
@@ -3716,32 +3753,49 @@ void ProcessMessage(
return;
}
+/** Maybe disconnect a peer and discourage future connections from its address.
+ *
+ * @param[in] pnode The node to check.
+ * @return True if the peer was marked for disconnection in this function
+ */
bool PeerLogicValidation::MaybeDiscourageAndDisconnect(CNode& pnode)
{
- AssertLockHeld(cs_main);
- CNodeState &state = *State(pnode.GetId());
+ const NodeId peer_id{pnode.GetId()};
+ {
+ LOCK(cs_main);
+ CNodeState& state = *State(peer_id);
+
+ // There's nothing to do if the m_should_discourage flag isn't set
+ if (!state.m_should_discourage) return false;
- if (state.m_should_discourage) {
state.m_should_discourage = false;
- if (pnode.HasPermission(PF_NOBAN)) {
- LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode.addr.ToString());
- } else if (pnode.m_manual_connection) {
- LogPrintf("Warning: not punishing manually-connected peer %s!\n", pnode.addr.ToString());
- } else if (pnode.addr.IsLocal()) {
- // Disconnect but don't discourage this local node
- LogPrintf("Warning: disconnecting but not discouraging local peer %s!\n", pnode.addr.ToString());
- pnode.fDisconnect = true;
- } else {
- // Disconnect and discourage all nodes sharing the address
- LogPrintf("Disconnecting and discouraging peer %s!\n", pnode.addr.ToString());
- if (m_banman) {
- m_banman->Discourage(pnode.addr);
- }
- connman->DisconnectNode(pnode.addr);
- }
+ } // cs_main
+
+ if (pnode.HasPermission(PF_NOBAN)) {
+ // We never disconnect or discourage peers for bad behavior if they have the NOBAN permission flag
+ LogPrintf("Warning: not punishing noban peer %d!\n", peer_id);
+ return false;
+ }
+
+ if (pnode.IsManualConn()) {
+ // We never disconnect or discourage manual peers for bad behavior
+ LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id);
+ return false;
+ }
+
+ if (pnode.addr.IsLocal()) {
+ // We disconnect local peers for bad behavior but don't discourage (since that would discourage
+ // all peers on the same local address)
+ LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer_id);
+ pnode.fDisconnect = true;
return true;
}
- return false;
+
+ // Normal case: Disconnect the peer and discourage all nodes sharing the address
+ LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id);
+ if (m_banman) m_banman->Discourage(pnode.addr);
+ m_connman.DisconnectNode(pnode.addr);
+ return true;
}
bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
@@ -3758,12 +3812,12 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter
bool fMoreWork = false;
if (!pfrom->vRecvGetData.empty())
- ProcessGetData(*pfrom, chainparams, *connman, m_mempool, interruptMsgProc);
+ ProcessGetData(*pfrom, chainparams, m_connman, m_mempool, interruptMsgProc);
if (!pfrom->orphan_work_set.empty()) {
std::list<CTransactionRef> removed_txn;
LOCK2(cs_main, g_cs_orphans);
- ProcessOrphanTx(*connman, m_mempool, pfrom->orphan_work_set, removed_txn);
+ ProcessOrphanTx(m_connman, m_mempool, pfrom->orphan_work_set, removed_txn);
for (const CTransactionRef& removedTx : removed_txn) {
AddToCompactExtraTransactions(removedTx);
}
@@ -3789,7 +3843,7 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter
// Just take one message
msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
- pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
+ pfrom->fPauseRecv = pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize();
fMoreWork = !pfrom->vProcessMsg.empty();
}
CNetMessage& msg(msgs.front());
@@ -3823,7 +3877,7 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter
}
try {
- ProcessMessage(*pfrom, msg_type, vRecv, msg.m_time, chainparams, m_chainman, m_mempool, *connman, m_banman, interruptMsgProc);
+ ProcessMessage(*pfrom, msg_type, vRecv, msg.m_time, chainparams, interruptMsgProc);
if (interruptMsgProc)
return false;
if (!pfrom->vRecvGetData.empty())
@@ -3834,9 +3888,6 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter
LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
}
- LOCK(cs_main);
- MaybeDiscourageAndDisconnect(*pfrom);
-
return fMoreWork;
}
@@ -3847,7 +3898,7 @@ void PeerLogicValidation::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
CNodeState &state = *State(pto.GetId());
const CNetMsgMaker msgMaker(pto.GetSendVersion());
- if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
+ if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
// This is an outbound peer subject to disconnection if they don't
// announce a block with as much work as the current tip within
// CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
@@ -3879,7 +3930,7 @@ void PeerLogicValidation::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
} else {
assert(state.m_chain_sync.m_work_header);
LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
- connman->PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
+ m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
state.m_chain_sync.m_sent_getheaders = true;
constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
// Bump the timeout to allow a response, which could clear the timeout
@@ -3896,7 +3947,7 @@ void PeerLogicValidation::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
{
// Check whether we have too many outbound peers
- int extra_peers = connman->GetExtraOutboundCount();
+ int extra_peers = m_connman.GetExtraOutboundCount();
if (extra_peers > 0) {
// If we have more outbound peers than we target, disconnect one.
// Pick the outbound peer that least recently announced
@@ -3905,11 +3956,11 @@ void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
NodeId worst_peer = -1;
int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
- connman->ForEachNode([&](CNode* pnode) {
+ m_connman.ForEachNode([&](CNode* pnode) {
AssertLockHeld(cs_main);
// Ignore non-outbound peers, or nodes marked for disconnect already
- if (!IsOutboundDisconnectionCandidate(*pnode) || pnode->fDisconnect) return;
+ if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return;
CNodeState *state = State(pnode->GetId());
if (state == nullptr) return; // shouldn't be possible, but just in case
// Don't evict our protected peers
@@ -3922,7 +3973,7 @@ void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
}
});
if (worst_peer != -1) {
- bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
+ bool disconnected = m_connman.ForNode(worst_peer, [&](CNode *pnode) {
AssertLockHeld(cs_main);
// Only disconnect a peer that has been connected to us for
@@ -3946,7 +3997,7 @@ void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
// detected a stale tip. Don't try any more extra peers until
// we next detect a stale tip, to limit the load we put on the
// network from these extra connections.
- connman->SetTryNewOutboundPeer(false);
+ m_connman.SetTryNewOutboundPeer(false);
}
}
}
@@ -3956,8 +4007,6 @@ void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params
{
LOCK(cs_main);
- if (connman == nullptr) return;
-
int64_t time_in_seconds = GetTime();
EvictExtraOutboundPeers(time_in_seconds);
@@ -3965,11 +4014,11 @@ void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params
if (time_in_seconds > m_stale_tip_check_time) {
// Check whether our tip is stale, and if so, allow using an extra
// outbound peer
- if (!fImporting && !fReindex && connman->GetNetworkActive() && connman->GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
+ if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
- connman->SetTryNewOutboundPeer(true);
- } else if (connman->GetTryNewOutboundPeer()) {
- connman->SetTryNewOutboundPeer(false);
+ m_connman.SetTryNewOutboundPeer(true);
+ } else if (m_connman.GetTryNewOutboundPeer()) {
+ m_connman.SetTryNewOutboundPeer(false);
}
m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
}
@@ -3999,48 +4048,49 @@ public:
bool PeerLogicValidation::SendMessages(CNode* pto)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
- {
- // Don't send anything until the version handshake is complete
- if (!pto->fSuccessfullyConnected || pto->fDisconnect)
- return true;
- // If we get here, the outgoing message serialization version is set and can't change.
- const CNetMsgMaker msgMaker(pto->GetSendVersion());
+ // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
+ // disconnect misbehaving peers even before the version handshake is complete.
+ if (MaybeDiscourageAndDisconnect(*pto)) return true;
- //
- // Message: ping
- //
- bool pingSend = false;
- if (pto->fPingQueued) {
- // RPC ping request by user
- pingSend = true;
- }
- if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) {
- // Ping automatically sent as a latency probe & keepalive.
- pingSend = true;
- }
- if (pingSend) {
- uint64_t nonce = 0;
- while (nonce == 0) {
- GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
- }
- pto->fPingQueued = false;
- pto->m_ping_start = GetTime<std::chrono::microseconds>();
- if (pto->nVersion > BIP0031_VERSION) {
- pto->nPingNonceSent = nonce;
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
- } else {
- // Peer is too old to support ping command with nonce, pong will never arrive.
- pto->nPingNonceSent = 0;
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
- }
- }
+ // Don't send anything until the version handshake is complete
+ if (!pto->fSuccessfullyConnected || pto->fDisconnect)
+ return true;
- TRY_LOCK(cs_main, lockMain);
- if (!lockMain)
- return true;
+ // If we get here, the outgoing message serialization version is set and can't change.
+ const CNetMsgMaker msgMaker(pto->GetSendVersion());
- if (MaybeDiscourageAndDisconnect(*pto)) return true;
+ //
+ // Message: ping
+ //
+ bool pingSend = false;
+ if (pto->fPingQueued) {
+ // RPC ping request by user
+ pingSend = true;
+ }
+ if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) {
+ // Ping automatically sent as a latency probe & keepalive.
+ pingSend = true;
+ }
+ if (pingSend) {
+ uint64_t nonce = 0;
+ while (nonce == 0) {
+ GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
+ }
+ pto->fPingQueued = false;
+ pto->m_ping_start = GetTime<std::chrono::microseconds>();
+ if (pto->nVersion > BIP0031_VERSION) {
+ pto->nPingNonceSent = nonce;
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
+ } else {
+ // Peer is too old to support ping command with nonce, pong will never arrive.
+ pto->nPingNonceSent = 0;
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
+ }
+ }
+
+ {
+ LOCK(cs_main);
CNodeState &state = *State(pto->GetId());
@@ -4067,17 +4117,17 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
{
pto->m_addr_known->insert(addr.GetKey());
vAddr.push_back(addr);
- // receiver rejects addr messages larger than 1000
- if (vAddr.size() >= 1000)
+ // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
+ if (vAddr.size() >= MAX_ADDR_TO_SEND)
{
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
// we only send the big addr message once
if (pto->vAddrToSend.capacity() > 40)
pto->vAddrToSend.shrink_to_fit();
@@ -4086,7 +4136,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// Start block sync
if (pindexBestHeader == nullptr)
pindexBestHeader = ::ChainActive().Tip();
- bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
+ bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
@@ -4104,7 +4154,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
}
}
@@ -4188,10 +4238,10 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
LOCK(cs_most_recent_block);
if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
- connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
+ m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
else {
CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
- connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
fGotBlockFromCache = true;
}
@@ -4201,7 +4251,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
assert(ret);
CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
- connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
state.pindexBestHeaderSent = pBestIndex;
} else if (state.fPreferHeaders) {
@@ -4214,7 +4264,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->GetId());
}
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
state.pindexBestHeaderSent = pBestIndex;
} else
fRevertToInv = true;
@@ -4259,7 +4309,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
for (const uint256& hash : pto->vInventoryBlockToSend) {
vInv.push_back(CInv(MSG_BLOCK, hash));
if (vInv.size() == MAX_INV_SZ) {
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
}
@@ -4271,8 +4321,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
bool fSendTrickle = pto->HasPermission(PF_NOBAN);
if (pto->m_tx_relay->nNextInvSend < current_time) {
fSendTrickle = true;
- if (pto->fInbound) {
- pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{connman->PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)};
+ if (pto->IsInboundConn()) {
+ pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)};
} else {
// Use half the delay for outbound peers, as there is less privacy concern for them.
pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
@@ -4312,7 +4362,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// Responses to MEMPOOL requests bypass the m_recently_announced_invs filter.
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
}
@@ -4346,6 +4396,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
std::set<uint256>::iterator it = vInvTx.back();
vInvTx.pop_back();
uint256 hash = *it;
+ CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
// Remove it from the to-be-sent set
pto->m_tx_relay->setInventoryTxToSend.erase(it);
// Check if not in the filter already
@@ -4353,7 +4404,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
continue;
}
// Not in the mempool anymore? don't bother sending it.
- auto txinfo = m_mempool.info(hash, state.m_wtxid_relay);
+ auto txinfo = m_mempool.info(ToGenTxid(inv));
if (!txinfo.tx) {
continue;
}
@@ -4366,7 +4417,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
// Send
State(pto->GetId())->m_recently_announced_invs.insert(hash);
- vInv.push_back(CInv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash));
+ vInv.push_back(inv);
nRelayedTransactions++;
{
// Expire old relay messages
@@ -4387,7 +4438,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
}
}
if (vInv.size() == MAX_INV_SZ) {
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
pto->m_tx_relay->filterInventoryKnown.insert(hash);
@@ -4404,7 +4455,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
}
}
if (!vInv.empty())
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
// Detect whether we're stalling
current_time = GetTime<std::chrono::microseconds>();
@@ -4519,24 +4570,24 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
auto& tx_process_time = state.m_tx_download.m_tx_process_time;
while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) {
- const uint256 txid = tx_process_time.begin()->second;
+ const GenTxid gtxid = tx_process_time.begin()->second;
// Erase this entry from tx_process_time (it may be added back for
// processing at a later time, see below)
tx_process_time.erase(tx_process_time.begin());
- CInv inv(state.m_wtxid_relay ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), txid);
+ CInv inv(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
if (!AlreadyHave(inv, m_mempool)) {
// If this transaction was last requested more than 1 minute ago,
// then request.
- const auto last_request_time = GetTxRequestTime(inv.hash);
+ const auto last_request_time = GetTxRequestTime(gtxid);
if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
vGetData.push_back(inv);
if (vGetData.size() >= MAX_GETDATA_SZ) {
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
vGetData.clear();
}
- UpdateTxRequestTime(inv.hash, current_time);
- state.m_tx_download.m_tx_in_flight.emplace(inv.hash, current_time);
+ UpdateTxRequestTime(gtxid, current_time);
+ state.m_tx_download.m_tx_in_flight.emplace(gtxid.GetHash(), current_time);
} else {
// This transaction is in flight from someone else; queue
// up processing to happen after the download times out
@@ -4550,19 +4601,19 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// would open us up to an attacker using inbound
// wtxid-relay to prevent us from requesting transactions
// from outbound txid-relay peers).
- const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload, false);
- tx_process_time.emplace(next_process_time, txid);
+ const auto next_process_time = CalculateTxGetDataTime(gtxid, current_time, !state.fPreferredDownload, false);
+ tx_process_time.emplace(next_process_time, gtxid);
}
} else {
// We have already seen this transaction, no need to download.
- state.m_tx_download.m_tx_announced.erase(inv.hash);
- state.m_tx_download.m_tx_in_flight.erase(inv.hash);
+ state.m_tx_download.m_tx_announced.erase(gtxid.GetHash());
+ state.m_tx_download.m_tx_in_flight.erase(gtxid.GetHash());
}
}
if (!vGetData.empty())
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
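The download loop above follows the policy described in its comments: a transaction is re-requested only when its last GETDATA is older than GETDATA_TX_INTERVAL, otherwise processing is queued to retry after the in-flight download times out. A minimal sketch of that re-request rule under assumed stand-in types (std::chrono and a plain map keyed by a numeric txid instead of the GenTxid machinery), not part of this change:

    #include <chrono>
    #include <cstdint>
    #include <map>

    using Clock = std::chrono::steady_clock;
    constexpr std::chrono::seconds kGetdataTxInterval{60};

    // Request again only if the txid was never requested, or if the last
    // request is more than one interval old.
    bool ShouldRequestAgain(const std::map<uint64_t, Clock::time_point>& last_request,
                            uint64_t txid, Clock::time_point now)
    {
        const auto it = last_request.find(txid);
        return it == last_request.end() || it->second <= now - kGetdataTxInterval;
    }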
//
// Message: feefilter
@@ -4590,7 +4641,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// We always have a fee filter of at least minRelayTxFee
filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
- connman->PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
pto->m_tx_relay->lastSentFeeFilter = filterToSend;
}
pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
@@ -4602,7 +4653,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
pto->m_tx_relay->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
}
}
- }
+ } // release cs_main
return true;
}
diff --git a/src/net_processing.h b/src/net_processing.h
index 0534828761..74d6603747 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -11,6 +11,7 @@
#include <sync.h>
#include <validationinterface.h>
+class CChainParams;
class CTxMemPool;
class ChainstateManager;
@@ -28,16 +29,16 @@ static const int DISCOURAGEMENT_THRESHOLD{100};
class PeerLogicValidation final : public CValidationInterface, public NetEventsInterface {
private:
- CConnman* const connman;
+ CConnman& m_connman;
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
BanMan* const m_banman;
ChainstateManager& m_chainman;
CTxMemPool& m_mempool;
- bool MaybeDiscourageAndDisconnect(CNode& pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ bool MaybeDiscourageAndDisconnect(CNode& pnode);
public:
- PeerLogicValidation(CConnman* connman, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool);
+ PeerLogicValidation(CConnman& connman, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool);
/**
* Overridden from CValidationInterface.
@@ -85,8 +86,14 @@ public:
/** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */
void ReattemptInitialBroadcast(CScheduler& scheduler) const;
+ /** Process a single message from a peer. Public for fuzz testing */
+ void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
+ const std::chrono::microseconds time_received, const CChainParams& chainparams,
+ const std::atomic<bool>& interruptMsgProc);
+
private:
int64_t m_stale_tip_check_time; //!< Next time to check for stale tip
+
};
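With the constructor now taking CConnman by reference instead of by pointer, the connection manager is mandatory and must outlive the PeerLogicValidation instance, while the banman remains an optional pointer. A hedged construction sketch (the helper and its callers are hypothetical; <memory> is assumed):

    std::unique_ptr<PeerLogicValidation> MakePeerLogic(CConnman& connman, BanMan* banman,
                                                       CScheduler& scheduler,
                                                       ChainstateManager& chainman,
                                                       CTxMemPool& pool)
    {
        // banman may be nullptr; connman is now required by reference.
        return std::make_unique<PeerLogicValidation>(connman, banman, scheduler, chainman, pool);
    }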
struct CNodeStateStats {
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 0aaba440b8..8adfe38dc9 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -3,95 +3,120 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <cstdint>
#include <netaddress.h>
+
#include <hash.h>
+#include <tinyformat.h>
#include <util/strencodings.h>
#include <util/asmap.h>
-#include <tinyformat.h>
-static const unsigned char pchIPv4[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
-static const unsigned char pchOnionCat[] = {0xFD,0x87,0xD8,0x7E,0xEB,0x43};
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include <iterator>
+#include <tuple>
-// 0xFD + sha256("bitcoin")[0:5]
-static const unsigned char g_internal_prefix[] = { 0xFD, 0x6B, 0x88, 0xC0, 0x87, 0x24 };
+constexpr size_t CNetAddr::V1_SERIALIZATION_SIZE;
/**
* Construct an unspecified IPv6 network address (::/128).
*
* @note This address is considered invalid by CNetAddr::IsValid()
*/
-CNetAddr::CNetAddr()
+CNetAddr::CNetAddr() {}
+
+void CNetAddr::SetIP(const CNetAddr& ipIn)
{
- memset(ip, 0, sizeof(ip));
+ // Size check.
+ switch (ipIn.m_net) {
+ case NET_IPV4:
+ assert(ipIn.m_addr.size() == ADDR_IPV4_SIZE);
+ break;
+ case NET_IPV6:
+ assert(ipIn.m_addr.size() == ADDR_IPV6_SIZE);
+ break;
+ case NET_ONION:
+ assert(ipIn.m_addr.size() == ADDR_TORV2_SIZE);
+ break;
+ case NET_INTERNAL:
+ assert(ipIn.m_addr.size() == ADDR_INTERNAL_SIZE);
+ break;
+ case NET_UNROUTABLE:
+ case NET_MAX:
+ assert(false);
+ } // no default case, so the compiler can warn about missing cases
+
+ m_net = ipIn.m_net;
+ m_addr = ipIn.m_addr;
}
-void CNetAddr::SetIP(const CNetAddr& ipIn)
+template <typename T1, size_t PREFIX_LEN>
+inline bool HasPrefix(const T1& obj, const std::array<uint8_t, PREFIX_LEN>& prefix)
{
- memcpy(ip, ipIn.ip, sizeof(ip));
+ return obj.size() >= PREFIX_LEN &&
+ std::equal(std::begin(prefix), std::end(prefix), std::begin(obj));
}
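A hedged usage sketch for the HasPrefix() helper introduced above (not part of this change; the prefix bytes are the OnionCat/TORv2 prefix used elsewhere in this file, and includes for <array>, <cassert> and <vector> are assumed):

    void HasPrefixExample()
    {
        const std::vector<uint8_t> addr{0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43, 0x01, 0x02};
        const std::array<uint8_t, 6> torv2_prefix{0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43};
        assert(HasPrefix(addr, torv2_prefix));                        // first six bytes match
        assert(!HasPrefix(addr, std::array<uint8_t, 2>{0x00, 0x64})); // prefix differs
    }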
-void CNetAddr::SetRaw(Network network, const uint8_t *ip_in)
+void CNetAddr::SetLegacyIPv6(Span<const uint8_t> ipv6)
{
- switch(network)
- {
- case NET_IPV4:
- memcpy(ip, pchIPv4, 12);
- memcpy(ip+12, ip_in, 4);
- break;
- case NET_IPV6:
- memcpy(ip, ip_in, 16);
- break;
- default:
- assert(!"invalid network");
+ assert(ipv6.size() == ADDR_IPV6_SIZE);
+
+ size_t skip{0};
+
+ if (HasPrefix(ipv6, IPV4_IN_IPV6_PREFIX)) {
+ // IPv4-in-IPv6
+ m_net = NET_IPV4;
+ skip = sizeof(IPV4_IN_IPV6_PREFIX);
+ } else if (HasPrefix(ipv6, TORV2_IN_IPV6_PREFIX)) {
+ // TORv2-in-IPv6
+ m_net = NET_ONION;
+ skip = sizeof(TORV2_IN_IPV6_PREFIX);
+ } else if (HasPrefix(ipv6, INTERNAL_IN_IPV6_PREFIX)) {
+ // Internal-in-IPv6
+ m_net = NET_INTERNAL;
+ skip = sizeof(INTERNAL_IN_IPV6_PREFIX);
+ } else {
+ // IPv6
+ m_net = NET_IPV6;
}
+
+ m_addr.assign(ipv6.begin() + skip, ipv6.end());
}
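A hedged usage sketch for SetLegacyIPv6() (not part of this change, and assuming the method is reachable from the calling code): a legacy 16-byte buffer carrying an IPv4-mapped address (::ffff:a.b.c.d) is classified as NET_IPV4 and only its last four octets are retained:

    void SetLegacyIPv6Example()
    {
        const uint8_t mapped[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 10, 0, 0, 1};
        CNetAddr addr;
        addr.SetLegacyIPv6(Span<const uint8_t>(mapped, sizeof(mapped)));
        assert(addr.IsIPv4()); // detected via the 12-byte ::ffff: prefix; the stored payload is {10, 0, 0, 1}
    }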
/**
- * Try to make this a dummy address that maps the specified name into IPv6 like
- * so: (0xFD + %sha256("bitcoin")[0:5]) + %sha256(name)[0:10]. Such dummy
- * addresses have a prefix of fd6b:88c0:8724::/48 and are guaranteed to not be
- * publicly routable as it falls under RFC4193's fc00::/7 subnet allocated to
- * unique-local addresses.
- *
- * CAddrMan uses these fake addresses to keep track of which DNS seeds were
- * used.
- *
+ * Create an "internal" address that represents a name or FQDN. CAddrMan uses
+ * these fake addresses to keep track of which DNS seeds were used.
* @returns Whether or not the operation was successful.
- *
- * @see CNetAddr::IsInternal(), CNetAddr::IsRFC4193()
+ * @see NET_INTERNAL, INTERNAL_IN_IPV6_PREFIX, CNetAddr::IsInternal(), CNetAddr::IsRFC4193()
*/
bool CNetAddr::SetInternal(const std::string &name)
{
if (name.empty()) {
return false;
}
+ m_net = NET_INTERNAL;
unsigned char hash[32] = {};
CSHA256().Write((const unsigned char*)name.data(), name.size()).Finalize(hash);
- memcpy(ip, g_internal_prefix, sizeof(g_internal_prefix));
- memcpy(ip + sizeof(g_internal_prefix), hash, sizeof(ip) - sizeof(g_internal_prefix));
+ m_addr.assign(hash, hash + ADDR_INTERNAL_SIZE);
return true;
}
/**
- * Try to make this a dummy address that maps the specified onion address into
- * IPv6 using OnionCat's range and encoding. Such dummy addresses have a prefix
- * of fd87:d87e:eb43::/48 and are guaranteed to not be publicly routable as they
- * fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses.
+ * Parse a TORv2 address and set this object to it.
*
* @returns Whether or not the operation was successful.
*
- * @see CNetAddr::IsTor(), CNetAddr::IsRFC4193()
+ * @see CNetAddr::IsTor()
*/
bool CNetAddr::SetSpecial(const std::string &strName)
{
if (strName.size()>6 && strName.substr(strName.size() - 6, 6) == ".onion") {
std::vector<unsigned char> vchAddr = DecodeBase32(strName.substr(0, strName.size() - 6).c_str());
- if (vchAddr.size() != 16-sizeof(pchOnionCat))
+ if (vchAddr.size() != ADDR_TORV2_SIZE) {
return false;
- memcpy(ip, pchOnionCat, sizeof(pchOnionCat));
- for (unsigned int i=0; i<16-sizeof(pchOnionCat); i++)
- ip[i + sizeof(pchOnionCat)] = vchAddr[i];
+ }
+ m_net = NET_ONION;
+ m_addr.assign(vchAddr.begin(), vchAddr.end());
return true;
}
return false;
@@ -99,121 +124,114 @@ bool CNetAddr::SetSpecial(const std::string &strName)
CNetAddr::CNetAddr(const struct in_addr& ipv4Addr)
{
- SetRaw(NET_IPV4, (const uint8_t*)&ipv4Addr);
+ m_net = NET_IPV4;
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&ipv4Addr);
+ m_addr.assign(ptr, ptr + ADDR_IPV4_SIZE);
}
CNetAddr::CNetAddr(const struct in6_addr& ipv6Addr, const uint32_t scope)
{
- SetRaw(NET_IPV6, (const uint8_t*)&ipv6Addr);
+ SetLegacyIPv6(Span<const uint8_t>(reinterpret_cast<const uint8_t*>(&ipv6Addr), sizeof(ipv6Addr)));
scopeId = scope;
}
-unsigned int CNetAddr::GetByte(int n) const
-{
- return ip[15-n];
-}
-
bool CNetAddr::IsBindAny() const
{
- const int cmplen = IsIPv4() ? 4 : 16;
- for (int i = 0; i < cmplen; ++i) {
- if (GetByte(i)) return false;
+ if (!IsIPv4() && !IsIPv6()) {
+ return false;
}
-
- return true;
+ return std::all_of(m_addr.begin(), m_addr.end(), [](uint8_t b) { return b == 0; });
}
-bool CNetAddr::IsIPv4() const
-{
- return (memcmp(ip, pchIPv4, sizeof(pchIPv4)) == 0);
-}
+bool CNetAddr::IsIPv4() const { return m_net == NET_IPV4; }
-bool CNetAddr::IsIPv6() const
-{
- return (!IsIPv4() && !IsTor() && !IsInternal());
-}
+bool CNetAddr::IsIPv6() const { return m_net == NET_IPV6; }
bool CNetAddr::IsRFC1918() const
{
return IsIPv4() && (
- GetByte(3) == 10 ||
- (GetByte(3) == 192 && GetByte(2) == 168) ||
- (GetByte(3) == 172 && (GetByte(2) >= 16 && GetByte(2) <= 31)));
+ m_addr[0] == 10 ||
+ (m_addr[0] == 192 && m_addr[1] == 168) ||
+ (m_addr[0] == 172 && m_addr[1] >= 16 && m_addr[1] <= 31));
}
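This and the following hunks translate GetByte() indices into direct m_addr indices: GetByte(n) read ip[15 - n] from the old 16-byte buffer, so for an IPv4-mapped address GetByte(3) was the first IPv4 octet, which is now simply m_addr[0]. A small self-checking sketch of that mapping (not part of this change; <cassert>, <cstdint> and <vector> assumed):

    void ByteIndexMappingExample()
    {
        // Old layout: 12-byte ::ffff: prefix followed by the four IPv4 octets.
        const uint8_t ip[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 1, 2};
        assert(ip[15 - 3] == 192); // GetByte(3) was the first IPv4 octet
        // New layout: m_addr holds only the four IPv4 octets.
        const std::vector<uint8_t> addr_v2{192, 168, 1, 2};
        assert(addr_v2[0] == 192); // the same octet is now m_addr[0]
    }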
bool CNetAddr::IsRFC2544() const
{
- return IsIPv4() && GetByte(3) == 198 && (GetByte(2) == 18 || GetByte(2) == 19);
+ return IsIPv4() && m_addr[0] == 198 && (m_addr[1] == 18 || m_addr[1] == 19);
}
bool CNetAddr::IsRFC3927() const
{
- return IsIPv4() && (GetByte(3) == 169 && GetByte(2) == 254);
+ return IsIPv4() && HasPrefix(m_addr, std::array<uint8_t, 2>{169, 254});
}
bool CNetAddr::IsRFC6598() const
{
- return IsIPv4() && GetByte(3) == 100 && GetByte(2) >= 64 && GetByte(2) <= 127;
+ return IsIPv4() && m_addr[0] == 100 && m_addr[1] >= 64 && m_addr[1] <= 127;
}
bool CNetAddr::IsRFC5737() const
{
- return IsIPv4() && ((GetByte(3) == 192 && GetByte(2) == 0 && GetByte(1) == 2) ||
- (GetByte(3) == 198 && GetByte(2) == 51 && GetByte(1) == 100) ||
- (GetByte(3) == 203 && GetByte(2) == 0 && GetByte(1) == 113));
+ return IsIPv4() && (HasPrefix(m_addr, std::array<uint8_t, 3>{192, 0, 2}) ||
+ HasPrefix(m_addr, std::array<uint8_t, 3>{198, 51, 100}) ||
+ HasPrefix(m_addr, std::array<uint8_t, 3>{203, 0, 113}));
}
bool CNetAddr::IsRFC3849() const
{
- return GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x0D && GetByte(12) == 0xB8;
+ return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 4>{0x20, 0x01, 0x0D, 0xB8});
}
bool CNetAddr::IsRFC3964() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x02);
+ return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 2>{0x20, 0x02});
}
bool CNetAddr::IsRFC6052() const
{
- static const unsigned char pchRFC6052[] = {0,0x64,0xFF,0x9B,0,0,0,0,0,0,0,0};
- return (memcmp(ip, pchRFC6052, sizeof(pchRFC6052)) == 0);
+ return IsIPv6() &&
+ HasPrefix(m_addr, std::array<uint8_t, 12>{0x00, 0x64, 0xFF, 0x9B, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00});
}
bool CNetAddr::IsRFC4380() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0 && GetByte(12) == 0);
+ return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 4>{0x20, 0x01, 0x00, 0x00});
}
bool CNetAddr::IsRFC4862() const
{
- static const unsigned char pchRFC4862[] = {0xFE,0x80,0,0,0,0,0,0};
- return (memcmp(ip, pchRFC4862, sizeof(pchRFC4862)) == 0);
+ return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 8>{0xFE, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00});
}
bool CNetAddr::IsRFC4193() const
{
- return ((GetByte(15) & 0xFE) == 0xFC);
+ return IsIPv6() && (m_addr[0] & 0xFE) == 0xFC;
}
bool CNetAddr::IsRFC6145() const
{
- static const unsigned char pchRFC6145[] = {0,0,0,0,0,0,0,0,0xFF,0xFF,0,0};
- return (memcmp(ip, pchRFC6145, sizeof(pchRFC6145)) == 0);
+ return IsIPv6() &&
+ HasPrefix(m_addr, std::array<uint8_t, 12>{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00});
}
bool CNetAddr::IsRFC4843() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x10);
+ return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 3>{0x20, 0x01, 0x00}) &&
+ (m_addr[3] & 0xF0) == 0x10;
}
bool CNetAddr::IsRFC7343() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20);
+ return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 3>{0x20, 0x01, 0x00}) &&
+ (m_addr[3] & 0xF0) == 0x20;
}
bool CNetAddr::IsHeNet() const
{
- return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70);
+ return IsIPv6() && HasPrefix(m_addr, std::array<uint8_t, 4>{0x20, 0x01, 0x04, 0x70});
}
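The RFC checks above lean on a HasPrefix() helper that is added elsewhere in this commit; a standalone sketch of the idea (names here are illustrative, only the standard library is used):

#include <array>
#include <cstdint>
#include <cstring>
#include <vector>

// True when `addr` starts with exactly the bytes of `prefix`.
template <size_t N>
bool HasPrefixExample(const std::vector<uint8_t>& addr, const std::array<uint8_t, N>& prefix)
{
    return addr.size() >= N && std::memcmp(addr.data(), prefix.data(), N) == 0;
}

bool IsDocumentationAddressExample(const std::vector<uint8_t>& ipv6)
{
    // RFC3849 reserves 2001:db8::/32 for documentation, matching IsRFC3849() above.
    return HasPrefixExample(ipv6, std::array<uint8_t, 4>{0x20, 0x01, 0x0D, 0xB8});
}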
/**
@@ -222,21 +240,20 @@ bool CNetAddr::IsHeNet() const
*
* @see CNetAddr::SetSpecial(const std::string &)
*/
-bool CNetAddr::IsTor() const
-{
- return (memcmp(ip, pchOnionCat, sizeof(pchOnionCat)) == 0);
-}
+bool CNetAddr::IsTor() const { return m_net == NET_ONION; }
bool CNetAddr::IsLocal() const
{
// IPv4 loopback (127.0.0.0/8 or 0.0.0.0/8)
- if (IsIPv4() && (GetByte(3) == 127 || GetByte(3) == 0))
+ if (IsIPv4() && (m_addr[0] == 127 || m_addr[0] == 0)) {
return true;
+ }
// IPv6 loopback (::1/128)
static const unsigned char pchLocal[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1};
- if (memcmp(ip, pchLocal, 16) == 0)
+ if (IsIPv6() && memcmp(m_addr.data(), pchLocal, sizeof(pchLocal)) == 0) {
return true;
+ }
return false;
}
@@ -259,13 +276,16 @@ bool CNetAddr::IsValid() const
// header20 vectorlen3 addr26 addr26 addr26 header20 vectorlen3 addr26 addr26 addr26...
// so if the first length field is garbled, it reads the second batch
// of addr misaligned by 3 bytes.
- if (memcmp(ip, pchIPv4+3, sizeof(pchIPv4)-3) == 0)
+ if (IsIPv6() && memcmp(m_addr.data(), IPV4_IN_IPV6_PREFIX.data() + 3,
+ sizeof(IPV4_IN_IPV6_PREFIX) - 3) == 0) {
return false;
+ }
// unspecified IPv6 address (::/128)
unsigned char ipNone6[16] = {};
- if (memcmp(ip, ipNone6, 16) == 0)
+ if (IsIPv6() && memcmp(m_addr.data(), ipNone6, sizeof(ipNone6)) == 0) {
return false;
+ }
// documentation IPv6 address
if (IsRFC3849())
@@ -274,17 +294,11 @@ bool CNetAddr::IsValid() const
if (IsInternal())
return false;
- if (IsIPv4())
- {
- // INADDR_NONE
- uint32_t ipNone = INADDR_NONE;
- if (memcmp(ip+12, &ipNone, 4) == 0)
- return false;
-
- // 0
- ipNone = 0;
- if (memcmp(ip+12, &ipNone, 4) == 0)
+ if (IsIPv4()) {
+ const uint32_t addr = ReadBE32(m_addr.data());
+ if (addr == INADDR_ANY || addr == INADDR_NONE) {
return false;
+ }
}
return true;
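The rewritten IPv4 branch above reads the four stored bytes as one big-endian word; a small sketch of what ReadBE32() (from crypto/common.h) produces for the two rejected values:

#include <crypto/common.h>
#include <cassert>

void DemoReadBE32()
{
    const uint8_t none[4]{0xFF, 0xFF, 0xFF, 0xFF}; // 255.255.255.255
    const uint8_t any[4]{0x00, 0x00, 0x00, 0x00};  // 0.0.0.0
    assert(ReadBE32(none) == 0xFFFFFFFF);          // INADDR_NONE
    assert(ReadBE32(any) == 0x00000000);           // INADDR_ANY
}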
@@ -305,13 +319,13 @@ bool CNetAddr::IsRoutable() const
}
/**
- * @returns Whether or not this is a dummy address that maps a name into IPv6.
+ * @returns Whether or not this is a dummy address that represents a name.
*
* @see CNetAddr::SetInternal(const std::string &)
*/
bool CNetAddr::IsInternal() const
{
- return memcmp(ip, g_internal_prefix, sizeof(g_internal_prefix)) == 0;
+ return m_net == NET_INTERNAL;
}
enum Network CNetAddr::GetNetwork() const
@@ -322,21 +336,15 @@ enum Network CNetAddr::GetNetwork() const
if (!IsRoutable())
return NET_UNROUTABLE;
- if (IsIPv4())
- return NET_IPV4;
-
- if (IsTor())
- return NET_ONION;
-
- return NET_IPV6;
+ return m_net;
}
std::string CNetAddr::ToStringIP() const
{
if (IsTor())
- return EncodeBase32(&ip[6], 10) + ".onion";
+ return EncodeBase32(m_addr) + ".onion";
if (IsInternal())
- return EncodeBase32(ip + sizeof(g_internal_prefix), sizeof(ip) - sizeof(g_internal_prefix)) + ".internal";
+ return EncodeBase32(m_addr) + ".internal";
CService serv(*this, 0);
struct sockaddr_storage sockaddr;
socklen_t socklen = sizeof(sockaddr);
@@ -346,13 +354,13 @@ std::string CNetAddr::ToStringIP() const
return std::string(name);
}
if (IsIPv4())
- return strprintf("%u.%u.%u.%u", GetByte(3), GetByte(2), GetByte(1), GetByte(0));
- else
- return strprintf("%x:%x:%x:%x:%x:%x:%x:%x",
- GetByte(15) << 8 | GetByte(14), GetByte(13) << 8 | GetByte(12),
- GetByte(11) << 8 | GetByte(10), GetByte(9) << 8 | GetByte(8),
- GetByte(7) << 8 | GetByte(6), GetByte(5) << 8 | GetByte(4),
- GetByte(3) << 8 | GetByte(2), GetByte(1) << 8 | GetByte(0));
+ return strprintf("%u.%u.%u.%u", m_addr[0], m_addr[1], m_addr[2], m_addr[3]);
+ assert(IsIPv6());
+ return strprintf("%x:%x:%x:%x:%x:%x:%x:%x",
+ m_addr[0] << 8 | m_addr[1], m_addr[2] << 8 | m_addr[3],
+ m_addr[4] << 8 | m_addr[5], m_addr[6] << 8 | m_addr[7],
+ m_addr[8] << 8 | m_addr[9], m_addr[10] << 8 | m_addr[11],
+ m_addr[12] << 8 | m_addr[13], m_addr[14] << 8 | m_addr[15]);
}
std::string CNetAddr::ToString() const
@@ -362,12 +370,12 @@ std::string CNetAddr::ToString() const
bool operator==(const CNetAddr& a, const CNetAddr& b)
{
- return (memcmp(a.ip, b.ip, 16) == 0);
+ return a.m_net == b.m_net && a.m_addr == b.m_addr;
}
bool operator<(const CNetAddr& a, const CNetAddr& b)
{
- return (memcmp(a.ip, b.ip, 16) < 0);
+ return std::tie(a.m_net, a.m_addr) < std::tie(b.m_net, b.m_addr);
}
/**
@@ -384,7 +392,8 @@ bool CNetAddr::GetInAddr(struct in_addr* pipv4Addr) const
{
if (!IsIPv4())
return false;
- memcpy(pipv4Addr, ip+12, 4);
+ assert(sizeof(*pipv4Addr) == m_addr.size());
+ memcpy(pipv4Addr, m_addr.data(), m_addr.size());
return true;
}
@@ -403,7 +412,8 @@ bool CNetAddr::GetIn6Addr(struct in6_addr* pipv6Addr) const
if (!IsIPv6()) {
return false;
}
- memcpy(pipv6Addr, ip, 16);
+ assert(sizeof(*pipv6Addr) == m_addr.size());
+ memcpy(pipv6Addr, m_addr.data(), m_addr.size());
return true;
}
@@ -414,15 +424,17 @@ bool CNetAddr::HasLinkedIPv4() const
uint32_t CNetAddr::GetLinkedIPv4() const
{
- if (IsIPv4() || IsRFC6145() || IsRFC6052()) {
- // IPv4, mapped IPv4, SIIT translated IPv4: the IPv4 address is the last 4 bytes of the address
- return ReadBE32(ip + 12);
+ if (IsIPv4()) {
+ return ReadBE32(m_addr.data());
+ } else if (IsRFC6052() || IsRFC6145()) {
+ // mapped IPv4, SIIT translated IPv4: the IPv4 address is the last 4 bytes of the address
+ return ReadBE32(MakeSpan(m_addr).last(ADDR_IPV4_SIZE).data());
} else if (IsRFC3964()) {
// 6to4 tunneled IPv4: the IPv4 address is in bytes 2-6
- return ReadBE32(ip + 2);
+ return ReadBE32(MakeSpan(m_addr).subspan(2, ADDR_IPV4_SIZE).data());
} else if (IsRFC4380()) {
// Teredo tunneled IPv4: the IPv4 address is in the last 4 bytes of the address, but bitflipped
- return ~ReadBE32(ip + 12);
+ return ~ReadBE32(MakeSpan(m_addr).last(ADDR_IPV4_SIZE).data());
}
assert(false);
}
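Of the cases above, the Teredo one is the least obvious: the embedded client IPv4 is stored bit-flipped in the last four bytes. A standalone sketch, assuming only ReadBE32() from crypto/common.h:

#include <crypto/common.h>
#include <cstdint>

// Extract the client IPv4 from a 16-byte RFC4380 (Teredo, 2001:0::/32) address.
uint32_t TeredoLinkedIPv4Example(const uint8_t (&ipv6)[16])
{
    return ~ReadBE32(ipv6 + 12); // last 4 bytes, bitwise complemented
}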
@@ -451,10 +463,10 @@ uint32_t CNetAddr::GetMappedAS(const std::vector<bool> &asmap) const {
}
std::vector<bool> ip_bits(128);
if (HasLinkedIPv4()) {
- // For lookup, treat as if it was just an IPv4 address (pchIPv4 prefix + IPv4 bits)
+ // For lookup, treat as if it was just an IPv4 address (IPV4_IN_IPV6_PREFIX + IPv4 bits)
for (int8_t byte_i = 0; byte_i < 12; ++byte_i) {
for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) {
- ip_bits[byte_i * 8 + bit_i] = (pchIPv4[byte_i] >> (7 - bit_i)) & 1;
+ ip_bits[byte_i * 8 + bit_i] = (IPV4_IN_IPV6_PREFIX[byte_i] >> (7 - bit_i)) & 1;
}
}
uint32_t ipv4 = GetLinkedIPv4();
@@ -463,8 +475,9 @@ uint32_t CNetAddr::GetMappedAS(const std::vector<bool> &asmap) const {
}
} else {
// Use all 128 bits of the IPv6 address otherwise
+ assert(IsIPv6());
for (int8_t byte_i = 0; byte_i < 16; ++byte_i) {
- uint8_t cur_byte = GetByte(15 - byte_i);
+ uint8_t cur_byte = m_addr[byte_i];
for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) {
ip_bits[byte_i * 8 + bit_i] = (cur_byte >> (7 - bit_i)) & 1;
}
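Both branches above expand address bytes into a vector of bits, most significant bit first, before walking the asmap. A standalone sketch of that expansion:

#include <cstdint>
#include <vector>

std::vector<bool> BytesToBitsExample(const std::vector<uint8_t>& bytes)
{
    std::vector<bool> bits;
    bits.reserve(bytes.size() * 8);
    for (const uint8_t byte : bytes) {
        for (int bit = 7; bit >= 0; --bit) {
            bits.push_back((byte >> bit) & 1); // MSB first, as in the loops above
        }
    }
    return bits;
}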
@@ -500,19 +513,15 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co
}
vchRet.push_back(net_class);
- int nStartByte = 0;
- int nBits = 16;
+ int nBits{0};
if (IsLocal()) {
// all local addresses belong to the same group
- nBits = 0;
} else if (IsInternal()) {
// all internal-usage addresses get their own group
- nStartByte = sizeof(g_internal_prefix);
- nBits = (sizeof(ip) - sizeof(g_internal_prefix)) * 8;
+ nBits = ADDR_INTERNAL_SIZE * 8;
} else if (!IsRoutable()) {
// all other unroutable addresses belong to the same group
- nBits = 0;
} else if (HasLinkedIPv4()) {
// IPv4 addresses (and mapped IPv4 addresses) use /16 groups
uint32_t ipv4 = GetLinkedIPv4();
@@ -520,7 +529,6 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co
vchRet.push_back((ipv4 >> 16) & 0xFF);
return vchRet;
} else if (IsTor()) {
- nStartByte = 6;
nBits = 4;
} else if (IsHeNet()) {
// for he.net, use /36 groups
@@ -530,23 +538,29 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co
nBits = 32;
}
- // push our ip onto vchRet byte by byte...
- while (nBits >= 8)
- {
- vchRet.push_back(GetByte(15 - nStartByte));
- nStartByte++;
- nBits -= 8;
- }
+ // Push our address onto vchRet.
+ const size_t num_bytes = nBits / 8;
+ vchRet.insert(vchRet.end(), m_addr.begin(), m_addr.begin() + num_bytes);
+ nBits %= 8;
// ...for the final, partial byte: push the remaining nBits of the address and fill the rest of the byte with 1's
- if (nBits > 0)
- vchRet.push_back(GetByte(15 - nStartByte) | ((1 << (8 - nBits)) - 1));
+ if (nBits > 0) {
+ assert(num_bytes < m_addr.size());
+ vchRet.push_back(m_addr[num_bytes] | ((1 << (8 - nBits)) - 1));
+ }
return vchRet;
}
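A usage sketch for GetGroup() with an empty asmap, assuming Bitcoin Core's headers; routable IPv4 addresses fall into /16 groups keyed by the network class byte:

#include <netaddress.h>
#include <cassert>
#include <vector>

void DemoGetGroup()
{
    struct in_addr ipv4;
    ipv4.s_addr = htonl(0x01020304); // 1.2.3.4
    const CNetAddr addr{ipv4};
    const std::vector<unsigned char> group = addr.GetGroup({}); // empty asmap
    // Class byte (NET_IPV4) followed by the first two octets of the /16.
    assert(group == (std::vector<unsigned char>{(unsigned char)NET_IPV4, 1, 2}));
}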
+std::vector<unsigned char> CNetAddr::GetAddrBytes() const
+{
+ uint8_t serialized[V1_SERIALIZATION_SIZE];
+ SerializeV1Array(serialized);
+ return {std::begin(serialized), std::end(serialized)};
+}
+
uint64_t CNetAddr::GetHash() const
{
- uint256 hash = Hash(&ip[0], &ip[16]);
+ uint256 hash = Hash(m_addr);
uint64_t nRet;
memcpy(&nRet, &hash, sizeof(nRet));
return nRet;
@@ -757,68 +771,25 @@ CSubNet::CSubNet():
memset(netmask, 0, sizeof(netmask));
}
-CSubNet::CSubNet(const CNetAddr &addr, int32_t mask)
-{
- valid = true;
- network = addr;
- // Default to /32 (IPv4) or /128 (IPv6), i.e. match single address
- memset(netmask, 255, sizeof(netmask));
-
- // IPv4 addresses start at offset 12, and first 12 bytes must match, so just offset n
- const int astartofs = network.IsIPv4() ? 12 : 0;
-
- int32_t n = mask;
- if(n >= 0 && n <= (128 - astartofs*8)) // Only valid if in range of bits of address
- {
- n += astartofs*8;
- // Clear bits [n..127]
- for (; n < 128; ++n)
- netmask[n>>3] &= ~(1<<(7-(n&7)));
- } else
- valid = false;
-
- // Normalize network according to netmask
- for(int x=0; x<16; ++x)
- network.ip[x] &= netmask[x];
-}
-
-CSubNet::CSubNet(const CNetAddr &addr, const CNetAddr &mask)
+CSubNet::CSubNet(const CNetAddr& addr, uint8_t mask) : CSubNet()
{
- valid = true;
- network = addr;
- // Default to /32 (IPv4) or /128 (IPv6), i.e. match single address
- memset(netmask, 255, sizeof(netmask));
-
- // IPv4 addresses start at offset 12, and first 12 bytes must match, so just offset n
- const int astartofs = network.IsIPv4() ? 12 : 0;
-
- for(int x=astartofs; x<16; ++x)
- netmask[x] = mask.ip[x];
+ valid = (addr.IsIPv4() && mask <= ADDR_IPV4_SIZE * 8) ||
+ (addr.IsIPv6() && mask <= ADDR_IPV6_SIZE * 8);
+ if (!valid) {
+ return;
+ }
- // Normalize network according to netmask
- for(int x=0; x<16; ++x)
- network.ip[x] &= netmask[x];
-}
+ assert(mask <= sizeof(netmask) * 8);
-CSubNet::CSubNet(const CNetAddr &addr):
- valid(addr.IsValid())
-{
- memset(netmask, 255, sizeof(netmask));
network = addr;
-}
-/**
- * @returns True if this subnet is valid, the specified address is valid, and
- * the specified address belongs in this subnet.
- */
-bool CSubNet::Match(const CNetAddr &addr) const
-{
- if (!valid || !addr.IsValid())
- return false;
- for(int x=0; x<16; ++x)
- if ((addr.ip[x] & netmask[x]) != network.ip[x])
- return false;
- return true;
+ uint8_t n = mask;
+ for (size_t i = 0; i < network.m_addr.size(); ++i) {
+ const uint8_t bits = n < 8 ? n : 8;
+ netmask[i] = (uint8_t)((uint8_t)0xFF << (8 - bits)); // Set first bits.
+ network.m_addr[i] &= netmask[i]; // Normalize network according to netmask.
+ n -= bits;
+ }
}
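The loop above builds the netmask one byte at a time from the CIDR length; a standalone sketch of the same byte construction (standard library only):

#include <cstdint>
#include <vector>

std::vector<uint8_t> MakeNetmaskExample(uint8_t cidr, size_t addr_bytes)
{
    std::vector<uint8_t> mask(addr_bytes, 0x00);
    for (size_t i = 0; i < addr_bytes; ++i) {
        const uint8_t bits = cidr < 8 ? cidr : 8;
        mask[i] = static_cast<uint8_t>(0xFF << (8 - bits)); // leading `bits` ones
        cidr -= bits;
    }
    return mask;
}
// MakeNetmaskExample(20, 4) yields {0xFF, 0xFF, 0xF0, 0x00}, i.e. 255.255.240.0.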
/**
@@ -841,42 +812,82 @@ static inline int NetmaskBits(uint8_t x)
}
}
+CSubNet::CSubNet(const CNetAddr& addr, const CNetAddr& mask) : CSubNet()
+{
+ valid = (addr.IsIPv4() || addr.IsIPv6()) && addr.m_net == mask.m_net;
+ if (!valid) {
+ return;
+ }
+ // Check if `mask` contains 1-bits after 0-bits (which is an invalid netmask).
+ bool zeros_found = false;
+ for (auto b : mask.m_addr) {
+ const int num_bits = NetmaskBits(b);
+ if (num_bits == -1 || (zeros_found && num_bits != 0)) {
+ valid = false;
+ return;
+ }
+ if (num_bits < 8) {
+ zeros_found = true;
+ }
+ }
+
+ assert(mask.m_addr.size() <= sizeof(netmask));
+
+ memcpy(netmask, mask.m_addr.data(), mask.m_addr.size());
+
+ network = addr;
+
+ // Normalize network according to netmask
+ for (size_t x = 0; x < network.m_addr.size(); ++x) {
+ network.m_addr[x] &= netmask[x];
+ }
+}
+
+CSubNet::CSubNet(const CNetAddr& addr) : CSubNet()
+{
+ valid = addr.IsIPv4() || addr.IsIPv6();
+ if (!valid) {
+ return;
+ }
+
+ assert(addr.m_addr.size() <= sizeof(netmask));
+
+ memset(netmask, 0xFF, addr.m_addr.size());
+
+ network = addr;
+}
+
+/**
+ * @returns True if this subnet is valid, the specified address is valid, and
+ * the specified address belongs in this subnet.
+ */
+bool CSubNet::Match(const CNetAddr &addr) const
+{
+ if (!valid || !addr.IsValid() || network.m_net != addr.m_net)
+ return false;
+ assert(network.m_addr.size() == addr.m_addr.size());
+ for (size_t x = 0; x < addr.m_addr.size(); ++x) {
+ if ((addr.m_addr[x] & netmask[x]) != network.m_addr[x]) {
+ return false;
+ }
+ }
+ return true;
+}
+
std::string CSubNet::ToString() const
{
- /* Parse binary 1{n}0{N-n} to see if mask can be represented as /n */
- int cidr = 0;
- bool valid_cidr = true;
- int n = network.IsIPv4() ? 12 : 0;
- for (; n < 16 && netmask[n] == 0xff; ++n)
- cidr += 8;
- if (n < 16) {
- int bits = NetmaskBits(netmask[n]);
- if (bits < 0)
- valid_cidr = false;
- else
- cidr += bits;
- ++n;
- }
- for (; n < 16 && valid_cidr; ++n)
- if (netmask[n] != 0x00)
- valid_cidr = false;
-
- /* Format output */
- std::string strNetmask;
- if (valid_cidr) {
- strNetmask = strprintf("%u", cidr);
- } else {
- if (network.IsIPv4())
- strNetmask = strprintf("%u.%u.%u.%u", netmask[12], netmask[13], netmask[14], netmask[15]);
- else
- strNetmask = strprintf("%x:%x:%x:%x:%x:%x:%x:%x",
- netmask[0] << 8 | netmask[1], netmask[2] << 8 | netmask[3],
- netmask[4] << 8 | netmask[5], netmask[6] << 8 | netmask[7],
- netmask[8] << 8 | netmask[9], netmask[10] << 8 | netmask[11],
- netmask[12] << 8 | netmask[13], netmask[14] << 8 | netmask[15]);
+ assert(network.m_addr.size() <= sizeof(netmask));
+
+ uint8_t cidr = 0;
+
+ for (size_t i = 0; i < network.m_addr.size(); ++i) {
+ if (netmask[i] == 0x00) {
+ break;
+ }
+ cidr += NetmaskBits(netmask[i]);
}
- return network.ToString() + "/" + strNetmask;
+ return network.ToString() + strprintf("/%u", cidr);
}
bool CSubNet::IsValid() const
diff --git a/src/netaddress.h b/src/netaddress.h
index f2daad7fb6..d00f5a6f55 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -9,29 +9,99 @@
#include <config/bitcoin-config.h>
#endif
+#include <attributes.h>
#include <compat.h>
+#include <prevector.h>
#include <serialize.h>
+#include <array>
#include <cstdint>
#include <string>
#include <vector>
+/**
+ * A network type.
+ * @note An address may belong to more than one network, for example `10.0.0.1`
+ * belongs to both `NET_UNROUTABLE` and `NET_IPV4`.
+ * Keep these sequential starting from 0 and `NET_MAX` as the last entry.
+ * We have loops like `for (int i = 0; i < NET_MAX; i++)` that expect to iterate
+ * over all enum values and also `GetExtNetwork()` "extends" this enum by
+ * introducing standalone constants starting from `NET_MAX`.
+ */
enum Network
{
+ /// Addresses from these networks are not publicly routable on the global Internet.
NET_UNROUTABLE = 0,
+
+ /// IPv4
NET_IPV4,
+
+ /// IPv6
NET_IPV6,
+
+ /// TORv2
NET_ONION,
+
+ /// A set of addresses that represent the hash of a string or FQDN. We use
+ /// them in CAddrMan to keep track of which DNS seeds were used.
NET_INTERNAL,
+ /// Dummy value to indicate the number of NET_* constants.
NET_MAX,
};
-/** IP address (IPv6, or IPv4 using mapped IPv6 range (::FFFF:0:0/96)) */
+/// Prefix of an IPv6 address when it contains an embedded IPv4 address.
+/// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155).
+static const std::array<uint8_t, 12> IPV4_IN_IPV6_PREFIX{
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF
+};
+
+/// Prefix of an IPv6 address when it contains an embedded TORv2 address.
+/// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155).
+/// Such dummy IPv6 addresses are guaranteed to not be publicly routable as they
+/// fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses.
+static const std::array<uint8_t, 6> TORV2_IN_IPV6_PREFIX{
+ 0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43
+};
+
+/// Prefix of an IPv6 address when it contains an embedded "internal" address.
+/// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155).
+/// The prefix comes from 0xFD + SHA256("bitcoin")[0:5].
+/// Such dummy IPv6 addresses are guaranteed to not be publicly routable as they
+/// fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses.
+static const std::array<uint8_t, 6> INTERNAL_IN_IPV6_PREFIX{
+ 0xFD, 0x6B, 0x88, 0xC0, 0x87, 0x24 // 0xFD + sha256("bitcoin")[0:5].
+};
+
+/// Size of IPv4 address (in bytes).
+static constexpr size_t ADDR_IPV4_SIZE = 4;
+
+/// Size of IPv6 address (in bytes).
+static constexpr size_t ADDR_IPV6_SIZE = 16;
+
+/// Size of TORv2 address (in bytes).
+static constexpr size_t ADDR_TORV2_SIZE = 10;
+
+/// Size of "internal" (NET_INTERNAL) address (in bytes).
+static constexpr size_t ADDR_INTERNAL_SIZE = 10;
+
+/**
+ * Network address.
+ */
class CNetAddr
{
protected:
- unsigned char ip[16]; // in network byte order
+ /**
+ * Raw representation of the network address.
+ * In network byte order (big endian) for IPv4 and IPv6.
+ */
+ prevector<ADDR_IPV6_SIZE, uint8_t> m_addr{ADDR_IPV6_SIZE, 0x0};
+
+ /**
+ * Network to which this address belongs.
+ */
+ Network m_net{NET_IPV6};
+
uint32_t scopeId{0}; // for scoped/link-local ipv6 addresses
public:
@@ -40,10 +110,12 @@ class CNetAddr
void SetIP(const CNetAddr& ip);
/**
- * Set raw IPv4 or IPv6 address (in network byte order)
- * @note Only NET_IPV4 and NET_IPV6 are allowed for network.
+ * Set from a legacy IPv6 address.
+ * A legacy IPv6 address may be a normal IPv6 address, or another address
+ * (e.g. IPv4) disguised as IPv6. This is the format used in the legacy
+ * `addr` encoding.
*/
- void SetRaw(Network network, const uint8_t *data);
+ void SetLegacyIPv6(Span<const uint8_t> ipv6);
bool SetInternal(const std::string& name);
@@ -74,7 +146,6 @@ class CNetAddr
enum Network GetNetwork() const;
std::string ToString() const;
std::string ToStringIP() const;
- unsigned int GetByte(int n) const;
uint64_t GetHash() const;
bool GetInAddr(struct in_addr* pipv4Addr) const;
uint32_t GetNetClass() const;
@@ -90,7 +161,7 @@ class CNetAddr
uint32_t GetMappedAS(const std::vector<bool> &asmap) const;
std::vector<unsigned char> GetGroup(const std::vector<bool> &asmap) const;
- std::vector<unsigned char> GetAddrBytes() const { return {std::begin(ip), std::end(ip)}; }
+ std::vector<unsigned char> GetAddrBytes() const;
int GetReachabilityFrom(const CNetAddr *paddrPartner = nullptr) const;
explicit CNetAddr(const struct in6_addr& pipv6Addr, const uint32_t scope = 0);
@@ -100,9 +171,107 @@ class CNetAddr
friend bool operator!=(const CNetAddr& a, const CNetAddr& b) { return !(a == b); }
friend bool operator<(const CNetAddr& a, const CNetAddr& b);
- SERIALIZE_METHODS(CNetAddr, obj) { READWRITE(obj.ip); }
+ /**
+ * Serialize to a stream.
+ */
+ template <typename Stream>
+ void Serialize(Stream& s) const
+ {
+ SerializeV1Stream(s);
+ }
+
+ /**
+ * Unserialize from a stream.
+ */
+ template <typename Stream>
+ void Unserialize(Stream& s)
+ {
+ UnserializeV1Stream(s);
+ }
friend class CSubNet;
+
+ private:
+ /**
+ * Size of CNetAddr when serialized as ADDRv1 (pre-BIP155) (in bytes).
+ */
+ static constexpr size_t V1_SERIALIZATION_SIZE = ADDR_IPV6_SIZE;
+
+ /**
+ * Serialize in pre-ADDRv2/BIP155 format to an array.
+ * Some addresses (e.g. TORv3) cannot be serialized in pre-BIP155 format.
+ */
+ void SerializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) const
+ {
+ size_t prefix_size;
+
+ switch (m_net) {
+ case NET_IPV6:
+ assert(m_addr.size() == sizeof(arr));
+ memcpy(arr, m_addr.data(), m_addr.size());
+ return;
+ case NET_IPV4:
+ prefix_size = sizeof(IPV4_IN_IPV6_PREFIX);
+ assert(prefix_size + m_addr.size() == sizeof(arr));
+ memcpy(arr, IPV4_IN_IPV6_PREFIX.data(), prefix_size);
+ memcpy(arr + prefix_size, m_addr.data(), m_addr.size());
+ return;
+ case NET_ONION:
+ prefix_size = sizeof(TORV2_IN_IPV6_PREFIX);
+ assert(prefix_size + m_addr.size() == sizeof(arr));
+ memcpy(arr, TORV2_IN_IPV6_PREFIX.data(), prefix_size);
+ memcpy(arr + prefix_size, m_addr.data(), m_addr.size());
+ return;
+ case NET_INTERNAL:
+ prefix_size = sizeof(INTERNAL_IN_IPV6_PREFIX);
+ assert(prefix_size + m_addr.size() == sizeof(arr));
+ memcpy(arr, INTERNAL_IN_IPV6_PREFIX.data(), prefix_size);
+ memcpy(arr + prefix_size, m_addr.data(), m_addr.size());
+ return;
+ case NET_UNROUTABLE:
+ case NET_MAX:
+ assert(false);
+ } // no default case, so the compiler can warn about missing cases
+
+ assert(false);
+ }
+
+ /**
+ * Serialize in pre-ADDRv2/BIP155 format to a stream.
+ * Some addresses (e.g. TORv3) cannot be serialized in pre-BIP155 format.
+ */
+ template <typename Stream>
+ void SerializeV1Stream(Stream& s) const
+ {
+ uint8_t serialized[V1_SERIALIZATION_SIZE];
+
+ SerializeV1Array(serialized);
+
+ s << serialized;
+ }
+
+ /**
+ * Unserialize from a pre-ADDRv2/BIP155 format from an array.
+ */
+ void UnserializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE])
+ {
+ // Use SetLegacyIPv6() so that m_net is set correctly. For example
+ // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4).
+ SetLegacyIPv6(arr);
+ }
+
+ /**
+ * Unserialize from a pre-ADDRv2/BIP155 format from a stream.
+ */
+ template <typename Stream>
+ void UnserializeV1Stream(Stream& s)
+ {
+ uint8_t serialized[V1_SERIALIZATION_SIZE];
+
+ s >> serialized;
+
+ UnserializeV1Array(serialized);
+ }
};
class CSubNet
@@ -117,11 +286,11 @@ class CSubNet
public:
CSubNet();
- CSubNet(const CNetAddr &addr, int32_t mask);
- CSubNet(const CNetAddr &addr, const CNetAddr &mask);
+ CSubNet(const CNetAddr& addr, uint8_t mask);
+ CSubNet(const CNetAddr& addr, const CNetAddr& mask);
//constructor for single ip subnet (<ipv4>/32 or <ipv6>/128)
- explicit CSubNet(const CNetAddr &addr);
+ explicit CSubNet(const CNetAddr& addr);
bool Match(const CNetAddr &addr) const;
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 3a3b5f3e66..0273839017 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -13,6 +13,7 @@
#include <atomic>
#include <cstdint>
+#include <limits>
#ifndef WIN32
#include <fcntl.h>
@@ -838,8 +839,8 @@ bool LookupSubNet(const std::string& strSubnet, CSubNet& ret)
if (slash != strSubnet.npos)
{
std::string strNetmask = strSubnet.substr(slash + 1);
- int32_t n;
- if (ParseInt32(strNetmask, &n)) {
+ uint8_t n;
+ if (ParseUInt8(strNetmask, &n)) {
// If valid number, assume CIDR variable-length subnet masking
ret = CSubNet(network, n);
return ret.IsValid();
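A usage sketch for the CIDR path above, assuming netbase.h: the "/20" suffix is now parsed with ParseUInt8 and handed to the uint8_t CSubNet constructor, and Match()/ToString() behave as before for in-range masks:

#include <netaddress.h>
#include <netbase.h>
#include <cassert>

void DemoLookupSubNet()
{
    CSubNet subnet;
    assert(LookupSubNet("192.168.0.0/20", subnet));
    assert(subnet.IsValid());
    assert(subnet.ToString() == "192.168.0.0/20");

    struct in_addr raw;
    raw.s_addr = htonl(0xC0A80F01);          // 192.168.15.1, inside the /20
    assert(subnet.Match(CNetAddr{raw}));
    raw.s_addr = htonl(0xC0A81001);          // 192.168.16.1, outside the /20
    assert(!subnet.Match(CNetAddr{raw}));
}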
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index c56abaf6c9..0e9820da1e 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -152,6 +152,8 @@ bool IsStandardTx(const CTransaction& tx, bool permit_bare_multisig, const CFeeR
* script can be anything; an attacker could use a very
* expensive-to-check-upon-redemption script like:
* DUP CHECKSIG DROP ... repeated 100 times... OP_1
+ *
+ * Note that only the non-witness portion of the transaction is checked here.
*/
bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
{
@@ -164,7 +166,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
std::vector<std::vector<unsigned char> > vSolutions;
TxoutType whichType = Solver(prev.scriptPubKey, vSolutions);
- if (whichType == TxoutType::NONSTANDARD) {
+ if (whichType == TxoutType::NONSTANDARD || whichType == TxoutType::WITNESS_UNKNOWN) {
+ // WITNESS_UNKNOWN failures are typically also caught with a policy
+ // flag in the script interpreter, but it can be helpful to catch
+ // this type of NONSTANDARD transaction earlier in transaction
+ // validation.
return false;
} else if (whichType == TxoutType::SCRIPTHASH) {
std::vector<std::vector<unsigned char> > stack;
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 4514db578a..544bab6d9b 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -12,6 +12,8 @@
#include <serialize.h>
#include <uint256.h>
+#include <tuple>
+
static const int SERIALIZE_TRANSACTION_NO_WITNESS = 0x40000000;
/** An outpoint - a combination of a transaction hash and an index n into its vout */
@@ -388,4 +390,17 @@ typedef std::shared_ptr<const CTransaction> CTransactionRef;
static inline CTransactionRef MakeTransactionRef() { return std::make_shared<const CTransaction>(); }
template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txIn) { return std::make_shared<const CTransaction>(std::forward<Tx>(txIn)); }
+/** A generic txid reference (txid or wtxid). */
+class GenTxid
+{
+ const bool m_is_wtxid;
+ const uint256 m_hash;
+public:
+ GenTxid(bool is_wtxid, const uint256& hash) : m_is_wtxid(is_wtxid), m_hash(hash) {}
+ bool IsWtxid() const { return m_is_wtxid; }
+ const uint256& GetHash() const { return m_hash; }
+ friend bool operator==(const GenTxid& a, const GenTxid& b) { return a.m_is_wtxid == b.m_is_wtxid && a.m_hash == b.m_hash; }
+ friend bool operator<(const GenTxid& a, const GenTxid& b) { return std::tie(a.m_is_wtxid, a.m_hash) < std::tie(b.m_is_wtxid, b.m_hash); }
+};
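A small semantics sketch for the class above: the wtxid flag takes part in both equality and ordering, so a txid-based and a wtxid-based reference to the same hash stay distinct keys (uint256S is only used here to fabricate a hash):

#include <primitives/transaction.h>
#include <uint256.h>
#include <cassert>

void DemoGenTxid()
{
    const uint256 hash{uint256S("01")};
    const GenTxid by_txid{false, hash};
    const GenTxid by_wtxid{true, hash};
    assert(!(by_txid == by_wtxid)); // same hash, different id namespaces
    assert(by_txid < by_wtxid);     // ordered by (m_is_wtxid, m_hash)
}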
+
#endif // BITCOIN_PRIMITIVES_TRANSACTION_H
diff --git a/src/protocol.cpp b/src/protocol.cpp
index ee77ca3b94..c989aa3902 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -217,6 +217,7 @@ static std::string serviceFlagToStr(size_t bit)
case NODE_GETUTXO: return "GETUTXO";
case NODE_BLOOM: return "BLOOM";
case NODE_WITNESS: return "WITNESS";
+ case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS";
case NODE_NETWORK_LIMITED: return "NETWORK_LIMITED";
// Not using default, so we get warned when a case is missing
}
@@ -241,3 +242,9 @@ std::vector<std::string> serviceFlagsToStr(uint64_t flags)
return str_flags;
}
+
+GenTxid ToGenTxid(const CInv& inv)
+{
+ assert(inv.IsGenTxMsg());
+ return {inv.IsMsgWtx(), inv.hash};
+}
diff --git a/src/protocol.h b/src/protocol.h
index d83da2034a..2e6c767cdd 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -11,6 +11,7 @@
#define BITCOIN_PROTOCOL_H
#include <netaddress.h>
+#include <primitives/transaction.h>
#include <serialize.h>
#include <uint256.h>
#include <version.h>
@@ -63,100 +64,84 @@ namespace NetMsgType {
/**
* The version message provides information about the transmitting node to the
* receiving node at the beginning of a connection.
- * @see https://bitcoin.org/en/developer-reference#version
*/
extern const char* VERSION;
/**
* The verack message acknowledges a previously-received version message,
* informing the connecting node that it can begin to send other messages.
- * @see https://bitcoin.org/en/developer-reference#verack
*/
extern const char* VERACK;
/**
* The addr (IP address) message relays connection information for peers on the
* network.
- * @see https://bitcoin.org/en/developer-reference#addr
*/
extern const char* ADDR;
/**
* The inv message (inventory message) transmits one or more inventories of
* objects known to the transmitting peer.
- * @see https://bitcoin.org/en/developer-reference#inv
*/
extern const char* INV;
/**
* The getdata message requests one or more data objects from another node.
- * @see https://bitcoin.org/en/developer-reference#getdata
*/
extern const char* GETDATA;
/**
* The merkleblock message is a reply to a getdata message which requested a
* block using the inventory type MSG_MERKLEBLOCK.
* @since protocol version 70001 as described by BIP37.
- * @see https://bitcoin.org/en/developer-reference#merkleblock
*/
extern const char* MERKLEBLOCK;
/**
* The getblocks message requests an inv message that provides block header
* hashes starting from a particular point in the block chain.
- * @see https://bitcoin.org/en/developer-reference#getblocks
*/
extern const char* GETBLOCKS;
/**
* The getheaders message requests a headers message that provides block
* headers starting from a particular point in the block chain.
* @since protocol version 31800.
- * @see https://bitcoin.org/en/developer-reference#getheaders
*/
extern const char* GETHEADERS;
/**
* The tx message transmits a single transaction.
- * @see https://bitcoin.org/en/developer-reference#tx
*/
extern const char* TX;
/**
* The headers message sends one or more block headers to a node which
* previously requested certain headers with a getheaders message.
* @since protocol version 31800.
- * @see https://bitcoin.org/en/developer-reference#headers
*/
extern const char* HEADERS;
/**
* The block message transmits a single serialized block.
- * @see https://bitcoin.org/en/developer-reference#block
*/
extern const char* BLOCK;
/**
* The getaddr message requests an addr message from the receiving node,
* preferably one with lots of IP addresses of other receiving nodes.
- * @see https://bitcoin.org/en/developer-reference#getaddr
*/
extern const char* GETADDR;
/**
* The mempool message requests the TXIDs of transactions that the receiving
* node has verified as valid but which have not yet appeared in a block.
* @since protocol version 60002.
- * @see https://bitcoin.org/en/developer-reference#mempool
*/
extern const char* MEMPOOL;
/**
* The ping message is sent periodically to help confirm that the receiving
* peer is still connected.
- * @see https://bitcoin.org/en/developer-reference#ping
*/
extern const char* PING;
/**
* The pong message replies to a ping message, proving to the pinging node that
* the ponging node is still alive.
* @since protocol version 60001 as described by BIP31.
- * @see https://bitcoin.org/en/developer-reference#pong
*/
extern const char* PONG;
/**
* The notfound message is a reply to a getdata message which requested an
* object the receiving node does not have available for relay.
* @since protocol version 70001.
- * @see https://bitcoin.org/en/developer-reference#notfound
*/
extern const char* NOTFOUND;
/**
@@ -165,7 +150,6 @@ extern const char* NOTFOUND;
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
- * @see https://bitcoin.org/en/developer-reference#filterload
*/
extern const char* FILTERLOAD;
/**
@@ -174,7 +158,6 @@ extern const char* FILTERLOAD;
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
- * @see https://bitcoin.org/en/developer-reference#filteradd
*/
extern const char* FILTERADD;
/**
@@ -183,14 +166,12 @@ extern const char* FILTERADD;
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
- * @see https://bitcoin.org/en/developer-reference#filterclear
*/
extern const char* FILTERCLEAR;
/**
* Indicates that a node prefers to receive new block announcements via a
* "headers" message rather than an "inv".
* @since protocol version 70012 as described by BIP130.
- * @see https://bitcoin.org/en/developer-reference#sendheaders
*/
extern const char* SENDHEADERS;
/**
@@ -291,6 +272,9 @@ enum ServiceFlags : uint64_t {
// NODE_WITNESS indicates that a node can be asked for blocks and transactions including
// witness data.
NODE_WITNESS = (1 << 3),
+ // NODE_COMPACT_FILTERS means the node will service basic block filter requests.
+ // See BIP157 and BIP158 for details on how this is implemented.
+ NODE_COMPACT_FILTERS = (1 << 6),
// NODE_NETWORK_LIMITED means the same as NODE_NETWORK with the limitation of only
// serving the last 288 (2 day) blocks
// See BIP159 for details on how this is implemented.
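Since ServiceFlags is a plain bitmask, testing whether a peer advertises BIP157/158 support is a single AND against its nServices value; a minimal sketch:

#include <protocol.h>

bool SupportsCompactFiltersExample(ServiceFlags services)
{
    return (services & NODE_COMPACT_FILTERS) != 0;
}
// serviceFlagsToStr(NODE_WITNESS | NODE_COMPACT_FILTERS) now lists "COMPACT_FILTERS" as well.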
@@ -390,9 +374,10 @@ public:
READWRITEAS(CService, obj);
}
- ServiceFlags nServices{NODE_NONE};
// disk and network only
uint32_t nTime{TIME_INIT};
+
+ ServiceFlags nServices{NODE_NONE};
};
/** getdata message type flags */
@@ -413,7 +398,9 @@ enum GetDataMsg : uint32_t {
MSG_CMPCT_BLOCK = 4, //!< Defined in BIP152
MSG_WITNESS_BLOCK = MSG_BLOCK | MSG_WITNESS_FLAG, //!< Defined in BIP144
MSG_WITNESS_TX = MSG_TX | MSG_WITNESS_FLAG, //!< Defined in BIP144
- MSG_FILTERED_WITNESS_BLOCK = MSG_FILTERED_BLOCK | MSG_WITNESS_FLAG,
+ // MSG_FILTERED_WITNESS_BLOCK is defined in BIP144 as reserved for future
+ // use and remains unused.
+ // MSG_FILTERED_WITNESS_BLOCK = MSG_FILTERED_BLOCK | MSG_WITNESS_FLAG,
};
/** inv message data */
@@ -430,8 +417,19 @@ public:
std::string GetCommand() const;
std::string ToString() const;
+ // Single-message helper methods
+ bool IsMsgTx() const { return type == MSG_TX; }
+ bool IsMsgWtx() const { return type == MSG_WTX; }
+ bool IsMsgWitnessTx() const { return type == MSG_WITNESS_TX; }
+
+ // Combined-message helper methods
+ bool IsGenTxMsg() const { return type == MSG_TX || type == MSG_WTX || type == MSG_WITNESS_TX; }
+
int type;
uint256 hash;
};
+/** Convert a TX/WITNESS_TX/WTX CInv to a GenTxid. */
+GenTxid ToGenTxid(const CInv& inv);
+
#endif // BITCOIN_PROTOCOL_H
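A usage sketch tying the new CInv helpers to ToGenTxid(), assuming the usual CInv(type, hash) constructor: MSG_WTX announcements carry a wtxid, while MSG_TX and MSG_WITNESS_TX carry a txid:

#include <protocol.h>
#include <uint256.h>
#include <cassert>

void DemoToGenTxid()
{
    const uint256 hash{uint256S("01")};

    const CInv wtx_inv(MSG_WTX, hash);
    assert(wtx_inv.IsGenTxMsg() && wtx_inv.IsMsgWtx());
    assert(ToGenTxid(wtx_inv).IsWtxid());

    const CInv tx_inv(MSG_TX, hash);
    assert(tx_inv.IsGenTxMsg() && tx_inv.IsMsgTx());
    assert(!ToGenTxid(tx_inv).IsWtxid());
}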
diff --git a/src/pubkey.h b/src/pubkey.h
index 4c28af4a4d..fcbc7e8416 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -157,13 +157,13 @@ public:
//! Get the KeyID of this public key (hash of its serialization)
CKeyID GetID() const
{
- return CKeyID(Hash160(vch, vch + size()));
+ return CKeyID(Hash160(MakeSpan(vch).first(size())));
}
//! Get the 256-bit hash of this public key.
uint256 GetHash() const
{
- return Hash(vch, vch + size());
+ return Hash(MakeSpan(vch).first(size()));
}
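The Span-based overloads used above remove the begin/end pointer arithmetic from call sites; a minimal sketch with an illustrative buffer type, assuming hash.h and span.h:

#include <hash.h>
#include <span.h>
#include <array>
#include <cstddef>

uint256 HashPrefixExample(const std::array<unsigned char, 65>& buf, size_t used)
{
    // Hash only the first `used` bytes, mirroring CPubKey::GetHash() above.
    return Hash(MakeSpan(buf).first(used));
}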
/*
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index ecb753a306..e63ffdfb36 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -27,8 +27,11 @@
#include <qt/walletmodel.h>
#endif // ENABLE_WALLET
+#include <init.h>
#include <interfaces/handler.h>
#include <interfaces/node.h>
+#include <node/context.h>
+#include <node/ui_interface.h>
#include <noui.h>
#include <uint256.h>
#include <util/system.h>
@@ -36,6 +39,7 @@
#include <util/translation.h>
#include <validation.h>
+#include <boost/signals2/connection.hpp>
#include <memory>
#include <QApplication>
@@ -80,6 +84,7 @@ static void RegisterMetaTypes()
qRegisterMetaType<std::function<void()>>("std::function<void()>");
qRegisterMetaType<QMessageBox::Icon>("QMessageBox::Icon");
+ qRegisterMetaType<interfaces::BlockAndHeaderTipInfo>("interfaces::BlockAndHeaderTipInfo");
}
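The new qRegisterMetaType call is needed because initializeResult() now carries a struct across the core/GUI thread boundary; any custom type used as a queued signal argument must be registered first. A generic sketch with an illustrative stand-in type:

#include <QMetaType>

struct TipInfoExample { int block_height; }; // stand-in for interfaces::BlockAndHeaderTipInfo

Q_DECLARE_METATYPE(TipInfoExample)

void RegisterTipInfoExample()
{
    // Must run before the first queued emission that carries a TipInfoExample.
    qRegisterMetaType<TipInfoExample>("TipInfoExample");
}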
static QString GetLangTerritory()
@@ -163,8 +168,9 @@ void BitcoinCore::initialize()
{
qDebug() << __func__ << ": Running initialization in thread";
util::ThreadRename("qt-init");
- bool rv = m_node.appInitMain();
- Q_EMIT initializeResult(rv);
+ interfaces::BlockAndHeaderTipInfo tip_info;
+ bool rv = m_node.appInitMain(&tip_info);
+ Q_EMIT initializeResult(rv, tip_info);
} catch (const std::exception& e) {
handleRunawayException(&e);
} catch (...) {
@@ -190,10 +196,9 @@ void BitcoinCore::shutdown()
static int qt_argc = 1;
static const char* qt_argv = "bitcoin-qt";
-BitcoinApplication::BitcoinApplication(interfaces::Node& node):
+BitcoinApplication::BitcoinApplication():
QApplication(qt_argc, const_cast<char **>(&qt_argv)),
coreThread(nullptr),
- m_node(node),
optionsModel(nullptr),
clientModel(nullptr),
window(nullptr),
@@ -243,12 +248,12 @@ void BitcoinApplication::createPaymentServer()
void BitcoinApplication::createOptionsModel(bool resetSettings)
{
- optionsModel = new OptionsModel(m_node, this, resetSettings);
+ optionsModel = new OptionsModel(this, resetSettings);
}
void BitcoinApplication::createWindow(const NetworkStyle *networkStyle)
{
- window = new BitcoinGUI(m_node, platformStyle, networkStyle, nullptr);
+ window = new BitcoinGUI(node(), platformStyle, networkStyle, nullptr);
pollShutdownTimer = new QTimer(window);
connect(pollShutdownTimer, &QTimer::timeout, window, &BitcoinGUI::detectShutdown);
@@ -256,17 +261,26 @@ void BitcoinApplication::createWindow(const NetworkStyle *networkStyle)
void BitcoinApplication::createSplashScreen(const NetworkStyle *networkStyle)
{
- SplashScreen *splash = new SplashScreen(m_node, nullptr, networkStyle);
+ assert(!m_splash);
+ m_splash = new SplashScreen(nullptr, networkStyle);
// We don't hold a direct pointer to the splash screen after creation, but the splash
// screen will take care of deleting itself when finish() happens.
- splash->show();
- connect(this, &BitcoinApplication::splashFinished, splash, &SplashScreen::finish);
- connect(this, &BitcoinApplication::requestedShutdown, splash, &QWidget::close);
+ m_splash->show();
+ connect(this, &BitcoinApplication::splashFinished, m_splash, &SplashScreen::finish);
+ connect(this, &BitcoinApplication::requestedShutdown, m_splash, &QWidget::close);
+}
+
+void BitcoinApplication::setNode(interfaces::Node& node)
+{
+ assert(!m_node);
+ m_node = &node;
+ if (optionsModel) optionsModel->setNode(*m_node);
+ if (m_splash) m_splash->setNode(*m_node);
}
bool BitcoinApplication::baseInitialize()
{
- return m_node.baseInitialize();
+ return node().baseInitialize();
}
void BitcoinApplication::startThread()
@@ -274,7 +288,7 @@ void BitcoinApplication::startThread()
if(coreThread)
return;
coreThread = new QThread(this);
- BitcoinCore *executor = new BitcoinCore(m_node);
+ BitcoinCore *executor = new BitcoinCore(node());
executor->moveToThread(coreThread);
/* communication to and from thread */
@@ -295,8 +309,8 @@ void BitcoinApplication::parameterSetup()
// print to the console unnecessarily.
gArgs.SoftSetBoolArg("-printtoconsole", false);
- m_node.initLogging();
- m_node.initParameterInteraction();
+ InitLogging(gArgs);
+ InitParameterInteraction(gArgs);
}
void BitcoinApplication::InitializePruneSetting(bool prune)
@@ -328,7 +342,7 @@ void BitcoinApplication::requestShutdown()
window->unsubscribeFromCoreSignals();
// Request node shutdown, which can interrupt long operations, like
// rescanning a wallet.
- m_node.startShutdown();
+ node().startShutdown();
// Unsetting the client model can cause the current thread to wait for node
// to complete an operation, like wait for a RPC execution to complete.
window->setClientModel(nullptr);
@@ -341,7 +355,7 @@ void BitcoinApplication::requestShutdown()
Q_EMIT requestedShutdown();
}
-void BitcoinApplication::initializeResult(bool success)
+void BitcoinApplication::initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info)
{
qDebug() << __func__ << ": Initialization result: " << success;
// Set exit result.
@@ -350,8 +364,8 @@ void BitcoinApplication::initializeResult(bool success)
{
// Log this only after AppInitMain finishes, as then logging setup is guaranteed complete
qInfo() << "Platform customization:" << platformStyle->getName();
- clientModel = new ClientModel(m_node, optionsModel);
- window->setClientModel(clientModel);
+ clientModel = new ClientModel(node(), optionsModel);
+ window->setClientModel(clientModel, &tip_info);
#ifdef ENABLE_WALLET
if (WalletModel::isWalletEnabled()) {
m_wallet_controller = new WalletController(*clientModel, platformStyle, this);
@@ -411,14 +425,14 @@ WId BitcoinApplication::getMainWinId() const
return window->winId();
}
-static void SetupUIArgs()
+static void SetupUIArgs(ArgsManager& argsman)
{
- gArgs.AddArg("-choosedatadir", strprintf("Choose data directory on startup (default: %u)", DEFAULT_CHOOSE_DATADIR), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-lang=<lang>", "Set language, for example \"de_DE\" (default: system locale)", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-min", "Start minimized", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-resetguisettings", "Reset all settings changed in the GUI", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-splash", strprintf("Show splash screen on startup (default: %u)", DEFAULT_SPLASHSCREEN), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
- gArgs.AddArg("-uiplatform", strprintf("Select platform to customize UI for (one of windows, macosx, other; default: %s)", BitcoinGUI::DEFAULT_UIPLATFORM), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::GUI);
+ argsman.AddArg("-choosedatadir", strprintf("Choose data directory on startup (default: %u)", DEFAULT_CHOOSE_DATADIR), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-lang=<lang>", "Set language, for example \"de_DE\" (default: system locale)", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-min", "Start minimized", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-resetguisettings", "Reset all settings changed in the GUI", ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-splash", strprintf("Show splash screen on startup (default: %u)", DEFAULT_SPLASHSCREEN), ArgsManager::ALLOW_ANY, OptionsCategory::GUI);
+ argsman.AddArg("-uiplatform", strprintf("Select platform to customize UI for (one of windows, macosx, other; default: %s)", BitcoinGUI::DEFAULT_UIPLATFORM), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::GUI);
}
int GuiMain(int argc, char* argv[])
@@ -430,12 +444,13 @@ int GuiMain(int argc, char* argv[])
SetupEnvironment();
util::ThreadSetInternalName("main");
- std::unique_ptr<interfaces::Node> node = interfaces::MakeNode();
+ NodeContext node_context;
+ std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context);
// Subscribe to global signals from core
- std::unique_ptr<interfaces::Handler> handler_message_box = node->handleMessageBox(noui_ThreadSafeMessageBox);
- std::unique_ptr<interfaces::Handler> handler_question = node->handleQuestion(noui_ThreadSafeQuestion);
- std::unique_ptr<interfaces::Handler> handler_init_message = node->handleInitMessage(noui_InitMessage);
+ boost::signals2::scoped_connection handler_message_box = ::uiInterface.ThreadSafeMessageBox_connect(noui_ThreadSafeMessageBox);
+ boost::signals2::scoped_connection handler_question = ::uiInterface.ThreadSafeQuestion_connect(noui_ThreadSafeQuestion);
+ boost::signals2::scoped_connection handler_init_message = ::uiInterface.InitMessage_connect(noui_InitMessage);
// Do not refer to data directory yet, this can be overridden by Intro::pickDataDirectory
@@ -449,15 +464,15 @@ int GuiMain(int argc, char* argv[])
QCoreApplication::setAttribute(Qt::AA_EnableHighDpiScaling);
#endif
- BitcoinApplication app(*node);
+ BitcoinApplication app;
/// 2. Parse command-line options. We do this after qt in order to show an error if there are problems parsing these
// Command-line options take precedence:
- node->setupServerArgs();
- SetupUIArgs();
+ SetupServerArgs(node_context);
+ SetupUIArgs(gArgs);
std::string error;
- if (!node->parseParameters(argc, argv, error)) {
- node->initError(strprintf(Untranslated("Error parsing command line arguments: %s\n"), error));
+ if (!gArgs.ParseParameters(argc, argv, error)) {
+ InitError(strprintf(Untranslated("Error parsing command line arguments: %s\n"), error));
// Create a message box, because the gui has neither been created nor has subscribed to core signals
QMessageBox::critical(nullptr, PACKAGE_NAME,
// message can not be translated because translations have not been initialized
@@ -483,7 +498,7 @@ int GuiMain(int argc, char* argv[])
// Show help message immediately after parsing command-line options (for "-lang") and setting locale,
// but before showing splash screen.
if (HelpRequested(gArgs) || gArgs.IsArgSet("-version")) {
- HelpMessageDialog help(*node, nullptr, gArgs.IsArgSet("-version"));
+ HelpMessageDialog help(nullptr, gArgs.IsArgSet("-version"));
help.showOrPrint();
return EXIT_SUCCESS;
}
@@ -493,18 +508,18 @@ int GuiMain(int argc, char* argv[])
bool did_show_intro = false;
bool prune = false; // Intro dialog prune check box
// Gracefully exit if the user cancels
- if (!Intro::showIfNeeded(*node, did_show_intro, prune)) return EXIT_SUCCESS;
+ if (!Intro::showIfNeeded(did_show_intro, prune)) return EXIT_SUCCESS;
/// 6. Determine availability of data directory and parse bitcoin.conf
/// - Do not call GetDataDir(true) before this step finishes
if (!CheckDataDirOption()) {
- node->initError(strprintf(Untranslated("Specified data directory \"%s\" does not exist.\n"), gArgs.GetArg("-datadir", "")));
+ InitError(strprintf(Untranslated("Specified data directory \"%s\" does not exist.\n"), gArgs.GetArg("-datadir", "")));
QMessageBox::critical(nullptr, PACKAGE_NAME,
QObject::tr("Error: Specified data directory \"%1\" does not exist.").arg(QString::fromStdString(gArgs.GetArg("-datadir", ""))));
return EXIT_FAILURE;
}
- if (!node->readConfigFiles(error)) {
- node->initError(strprintf(Untranslated("Error reading configuration file: %s\n"), error));
+ if (!gArgs.ReadConfigFiles(error, true)) {
+ InitError(strprintf(Untranslated("Error reading configuration file: %s\n"), error));
QMessageBox::critical(nullptr, PACKAGE_NAME,
QObject::tr("Error: Cannot parse configuration file: %1.").arg(QString::fromStdString(error)));
return EXIT_FAILURE;
@@ -518,18 +533,18 @@ int GuiMain(int argc, char* argv[])
// Check for -chain, -testnet or -regtest parameter (Params() calls are only valid after this clause)
try {
- node->selectParams(gArgs.GetChainName());
+ SelectParams(gArgs.GetChainName());
} catch(std::exception &e) {
- node->initError(Untranslated(strprintf("%s\n", e.what())));
+ InitError(Untranslated(strprintf("%s\n", e.what())));
QMessageBox::critical(nullptr, PACKAGE_NAME, QObject::tr("Error: %1").arg(e.what()));
return EXIT_FAILURE;
}
#ifdef ENABLE_WALLET
// Parse URIs on command line -- this can affect Params()
- PaymentServer::ipcParseCommandLine(*node, argc, argv);
+ PaymentServer::ipcParseCommandLine(argc, argv);
#endif
- if (!node->initSettings(error)) {
- node->initError(Untranslated(error));
+ if (!gArgs.InitSettings(error)) {
+ InitError(Untranslated(error));
QMessageBox::critical(nullptr, PACKAGE_NAME, QObject::tr("Error initializing settings: %1").arg(QString::fromStdString(error)));
return EXIT_FAILURE;
}
@@ -583,6 +598,8 @@ int GuiMain(int argc, char* argv[])
if (gArgs.GetBoolArg("-splash", DEFAULT_SPLASHSCREEN) && !gArgs.GetBoolArg("-min", false))
app.createSplashScreen(networkStyle.data());
+ app.setNode(*node);
+
int rv = EXIT_SUCCESS;
try
{
@@ -605,10 +622,10 @@ int GuiMain(int argc, char* argv[])
}
} catch (const std::exception& e) {
PrintExceptionContinue(&e, "Runaway exception");
- app.handleRunawayException(QString::fromStdString(node->getWarnings().translated));
+ app.handleRunawayException(QString::fromStdString(app.node().getWarnings().translated));
} catch (...) {
PrintExceptionContinue(nullptr, "Runaway exception");
- app.handleRunawayException(QString::fromStdString(node->getWarnings().translated));
+ app.handleRunawayException(QString::fromStdString(app.node().getWarnings().translated));
}
return rv;
}
diff --git a/src/qt/bitcoin.h b/src/qt/bitcoin.h
index 077a37fde5..69e0a5921e 100644
--- a/src/qt/bitcoin.h
+++ b/src/qt/bitcoin.h
@@ -10,21 +10,21 @@
#endif
#include <QApplication>
+#include <assert.h>
#include <memory>
+#include <interfaces/node.h>
+
class BitcoinGUI;
class ClientModel;
class NetworkStyle;
class OptionsModel;
class PaymentServer;
class PlatformStyle;
+class SplashScreen;
class WalletController;
class WalletModel;
-namespace interfaces {
-class Handler;
-class Node;
-} // namespace interfaces
/** Class encapsulating Bitcoin Core startup and shutdown.
* Allows running startup and shutdown in a different thread from the UI thread.
@@ -40,7 +40,7 @@ public Q_SLOTS:
void shutdown();
Q_SIGNALS:
- void initializeResult(bool success);
+ void initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info);
void shutdownResult();
void runawayException(const QString &message);
@@ -56,7 +56,7 @@ class BitcoinApplication: public QApplication
{
Q_OBJECT
public:
- explicit BitcoinApplication(interfaces::Node& node);
+ explicit BitcoinApplication();
~BitcoinApplication();
#ifdef ENABLE_WALLET
@@ -90,8 +90,11 @@ public:
/// Setup platform style
void setupPlatformStyle();
+ interfaces::Node& node() const { assert(m_node); return *m_node; }
+ void setNode(interfaces::Node& node);
+
public Q_SLOTS:
- void initializeResult(bool success);
+ void initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info);
void shutdownResult();
/// Handle runaway exceptions. Shows a message box with the problem and quits the program.
void handleRunawayException(const QString &message);
@@ -104,7 +107,6 @@ Q_SIGNALS:
private:
QThread *coreThread;
- interfaces::Node& m_node;
OptionsModel *optionsModel;
ClientModel *clientModel;
BitcoinGUI *window;
@@ -116,6 +118,8 @@ private:
int returnValue;
const PlatformStyle *platformStyle;
std::unique_ptr<QWidget> shutdownWindow;
+ SplashScreen* m_splash = nullptr;
+ interfaces::Node* m_node = nullptr;
void startThread();
};
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index ebcc04a5eb..8935ff19bf 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -95,7 +95,7 @@ BitcoinGUI::BitcoinGUI(interfaces::Node& node, const PlatformStyle *_platformSty
updateWindowTitle();
rpcConsole = new RPCConsole(node, _platformStyle, nullptr);
- helpMessageDialog = new HelpMessageDialog(node, this, false);
+ helpMessageDialog = new HelpMessageDialog(this, false);
#ifdef ENABLE_WALLET
if(enableWallet)
{
@@ -574,7 +574,7 @@ void BitcoinGUI::createToolBars()
}
}
-void BitcoinGUI::setClientModel(ClientModel *_clientModel)
+void BitcoinGUI::setClientModel(ClientModel *_clientModel, interfaces::BlockAndHeaderTipInfo* tip_info)
{
this->clientModel = _clientModel;
if(_clientModel)
@@ -588,8 +588,8 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel)
connect(_clientModel, &ClientModel::numConnectionsChanged, this, &BitcoinGUI::setNumConnections);
connect(_clientModel, &ClientModel::networkActiveChanged, this, &BitcoinGUI::setNetworkActive);
- modalOverlay->setKnownBestHeight(_clientModel->getHeaderTipHeight(), QDateTime::fromTime_t(_clientModel->getHeaderTipTime()));
- setNumBlocks(m_node.getNumBlocks(), QDateTime::fromTime_t(m_node.getLastBlockTime()), m_node.getVerificationProgress(), false, SynchronizationState::INIT_DOWNLOAD);
+ modalOverlay->setKnownBestHeight(tip_info->header_height, QDateTime::fromTime_t(tip_info->header_time));
+ setNumBlocks(tip_info->block_height, QDateTime::fromTime_t(tip_info->block_time), tip_info->verification_progress, false, SynchronizationState::INIT_DOWNLOAD);
connect(_clientModel, &ClientModel::numBlocksChanged, this, &BitcoinGUI::setNumBlocks);
// Receive and report messages from client model
@@ -600,7 +600,7 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel)
// Show progress dialog
connect(_clientModel, &ClientModel::showProgress, this, &BitcoinGUI::showProgress);
- rpcConsole->setClientModel(_clientModel);
+ rpcConsole->setClientModel(_clientModel, tip_info->block_height, tip_info->block_time, tip_info->verification_progress);
updateProxyIcon();
@@ -821,7 +821,7 @@ void BitcoinGUI::aboutClicked()
if(!clientModel)
return;
- HelpMessageDialog dlg(m_node, this, true);
+ HelpMessageDialog dlg(this, true);
dlg.exec();
}
diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h
index 697e83e772..4c55f28693 100644
--- a/src/qt/bitcoingui.h
+++ b/src/qt/bitcoingui.h
@@ -43,6 +43,7 @@ enum class SynchronizationState;
namespace interfaces {
class Handler;
class Node;
+struct BlockAndHeaderTipInfo;
}
QT_BEGIN_NAMESPACE
@@ -75,7 +76,7 @@ public:
/** Set the client model.
The client model represents the part of the core that communicates with the P2P network, and is wallet-agnostic.
*/
- void setClientModel(ClientModel *clientModel);
+ void setClientModel(ClientModel *clientModel = nullptr, interfaces::BlockAndHeaderTipInfo* tip_info = nullptr);
#ifdef ENABLE_WALLET
void setWalletController(WalletController* wallet_controller);
#endif
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index fea759dee0..0016fb9739 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -459,10 +459,10 @@
<item>
<widget class="QCheckBox" name="connectSocksTor">
<property name="toolTip">
- <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor hidden services.</string>
+ <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor onion services.</string>
</property>
<property name="text">
- <string>Use separate SOCKS&amp;5 proxy to reach peers via Tor hidden services:</string>
+ <string>Use separate SOCKS&amp;5 proxy to reach peers via Tor onion services:</string>
</property>
</widget>
</item>
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 7f439fa45e..bab17562a6 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -21,11 +21,6 @@
#include <util/system.h>
#ifdef WIN32
-#ifdef _WIN32_IE
-#undef _WIN32_IE
-#endif
-#define _WIN32_IE 0x0501
-#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
@@ -94,7 +89,7 @@ static std::string DummyAddress(const CChainParams &params)
std::vector<unsigned char> sourcedata = params.Base58Prefix(CChainParams::PUBKEY_ADDRESS);
sourcedata.insert(sourcedata.end(), dummydata, dummydata + sizeof(dummydata));
for(int i=0; i<256; ++i) { // Try every trailing byte
- std::string s = EncodeBase58(sourcedata.data(), sourcedata.data() + sourcedata.size());
+ std::string s = EncodeBase58(sourcedata);
if (!IsValidDestinationString(s)) {
return s;
}
diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp
index ad21dfc3ef..235722d091 100644
--- a/src/qt/intro.cpp
+++ b/src/qt/intro.cpp
@@ -6,6 +6,7 @@
#include <config/bitcoin-config.h>
#endif
+#include <chainparams.h>
#include <fs.h>
#include <qt/intro.h>
#include <qt/forms/ui_intro.h>
@@ -181,7 +182,7 @@ void Intro::setDataDirectory(const QString &dataDir)
}
}
-bool Intro::showIfNeeded(interfaces::Node& node, bool& did_show_intro, bool& prune)
+bool Intro::showIfNeeded(bool& did_show_intro, bool& prune)
{
did_show_intro = false;
@@ -199,13 +200,13 @@ bool Intro::showIfNeeded(interfaces::Node& node, bool& did_show_intro, bool& pru
{
/* Use selectParams here to guarantee Params() can be used by node interface */
try {
- node.selectParams(gArgs.GetChainName());
+ SelectParams(gArgs.GetChainName());
} catch (const std::exception&) {
return false;
}
/* If current default data directory does not exist, let the user choose one */
- Intro intro(0, node.getAssumedBlockchainSize(), node.getAssumedChainStateSize());
+ Intro intro(0, Params().AssumedBlockchainSize(), Params().AssumedChainStateSize());
intro.setDataDirectory(dataDir);
intro.setWindowIcon(QIcon(":icons/bitcoin"));
did_show_intro = true;
@@ -242,7 +243,7 @@ bool Intro::showIfNeeded(interfaces::Node& node, bool& did_show_intro, bool& pru
* (to be consistent with bitcoind behavior)
*/
if(dataDir != GUIUtil::getDefaultDataDirectory()) {
- node.softSetArg("-datadir", GUIUtil::qstringToBoostPath(dataDir).string()); // use OS locale for path setting
+ gArgs.SoftSetArg("-datadir", GUIUtil::qstringToBoostPath(dataDir).string()); // use OS locale for path setting
}
return true;
}
diff --git a/src/qt/intro.h b/src/qt/intro.h
index 732393246e..51f42de7ac 100644
--- a/src/qt/intro.h
+++ b/src/qt/intro.h
@@ -47,7 +47,7 @@ public:
* @note do NOT call global GetDataDir() before calling this function, this
* will cause the wrong path to be cached.
*/
- static bool showIfNeeded(interfaces::Node& node, bool& did_show_intro, bool& prune);
+ static bool showIfNeeded(bool& did_show_intro, bool& prune);
Q_SIGNALS:
void requestCheck();
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index 58a7591c95..7e089b4f95 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -27,8 +27,8 @@ const char *DEFAULT_GUI_PROXY_HOST = "127.0.0.1";
static const QString GetDefaultProxyAddress();
-OptionsModel::OptionsModel(interfaces::Node& node, QObject *parent, bool resetSettings) :
- QAbstractListModel(parent), m_node(node)
+OptionsModel::OptionsModel(QObject *parent, bool resetSettings) :
+ QAbstractListModel(parent)
{
Init(resetSettings);
}
@@ -97,12 +97,12 @@ void OptionsModel::Init(bool resetSettings)
if (!settings.contains("nDatabaseCache"))
settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache);
- if (!m_node.softSetArg("-dbcache", settings.value("nDatabaseCache").toString().toStdString()))
+ if (!gArgs.SoftSetArg("-dbcache", settings.value("nDatabaseCache").toString().toStdString()))
addOverriddenOption("-dbcache");
if (!settings.contains("nThreadsScriptVerif"))
settings.setValue("nThreadsScriptVerif", DEFAULT_SCRIPTCHECK_THREADS);
- if (!m_node.softSetArg("-par", settings.value("nThreadsScriptVerif").toString().toStdString()))
+ if (!gArgs.SoftSetArg("-par", settings.value("nThreadsScriptVerif").toString().toStdString()))
addOverriddenOption("-par");
if (!settings.contains("strDataDir"))
@@ -112,19 +112,19 @@ void OptionsModel::Init(bool resetSettings)
#ifdef ENABLE_WALLET
if (!settings.contains("bSpendZeroConfChange"))
settings.setValue("bSpendZeroConfChange", true);
- if (!m_node.softSetBoolArg("-spendzeroconfchange", settings.value("bSpendZeroConfChange").toBool()))
+ if (!gArgs.SoftSetBoolArg("-spendzeroconfchange", settings.value("bSpendZeroConfChange").toBool()))
addOverriddenOption("-spendzeroconfchange");
#endif
// Network
if (!settings.contains("fUseUPnP"))
settings.setValue("fUseUPnP", DEFAULT_UPNP);
- if (!m_node.softSetBoolArg("-upnp", settings.value("fUseUPnP").toBool()))
+ if (!gArgs.SoftSetBoolArg("-upnp", settings.value("fUseUPnP").toBool()))
addOverriddenOption("-upnp");
if (!settings.contains("fListen"))
settings.setValue("fListen", DEFAULT_LISTEN);
- if (!m_node.softSetBoolArg("-listen", settings.value("fListen").toBool()))
+ if (!gArgs.SoftSetBoolArg("-listen", settings.value("fListen").toBool()))
addOverriddenOption("-listen");
if (!settings.contains("fUseProxy"))
@@ -132,7 +132,7 @@ void OptionsModel::Init(bool resetSettings)
if (!settings.contains("addrProxy"))
settings.setValue("addrProxy", GetDefaultProxyAddress());
// Only try to set -proxy, if user has enabled fUseProxy
- if (settings.value("fUseProxy").toBool() && !m_node.softSetArg("-proxy", settings.value("addrProxy").toString().toStdString()))
+ if ((settings.value("fUseProxy").toBool() && !gArgs.SoftSetArg("-proxy", settings.value("addrProxy").toString().toStdString())))
addOverriddenOption("-proxy");
else if(!settings.value("fUseProxy").toBool() && !gArgs.GetArg("-proxy", "").empty())
addOverriddenOption("-proxy");
@@ -142,7 +142,7 @@ void OptionsModel::Init(bool resetSettings)
if (!settings.contains("addrSeparateProxyTor"))
settings.setValue("addrSeparateProxyTor", GetDefaultProxyAddress());
// Only try to set -onion, if user has enabled fUseSeparateProxyTor
- if (settings.value("fUseSeparateProxyTor").toBool() && !m_node.softSetArg("-onion", settings.value("addrSeparateProxyTor").toString().toStdString()))
+ if ((settings.value("fUseSeparateProxyTor").toBool() && !gArgs.SoftSetArg("-onion", settings.value("addrSeparateProxyTor").toString().toStdString())))
addOverriddenOption("-onion");
else if(!settings.value("fUseSeparateProxyTor").toBool() && !gArgs.GetArg("-onion", "").empty())
addOverriddenOption("-onion");
@@ -150,7 +150,7 @@ void OptionsModel::Init(bool resetSettings)
// Display
if (!settings.contains("language"))
settings.setValue("language", "");
- if (!m_node.softSetArg("-lang", settings.value("language").toString().toStdString()))
+ if (!gArgs.SoftSetArg("-lang", settings.value("language").toString().toStdString()))
addOverriddenOption("-lang");
language = settings.value("language").toString();
@@ -244,10 +244,10 @@ void OptionsModel::SetPruneEnabled(bool prune, bool force)
const int64_t prune_target_mib = PruneGBtoMiB(settings.value("nPruneSize").toInt());
std::string prune_val = prune ? ToString(prune_target_mib) : "0";
if (force) {
- m_node.forceSetArg("-prune", prune_val);
+ gArgs.ForceSetArg("-prune", prune_val);
return;
}
- if (!m_node.softSetArg("-prune", prune_val)) {
+ if (!gArgs.SoftSetArg("-prune", prune_val)) {
addOverriddenOption("-prune");
}
}
@@ -353,7 +353,7 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in
break;
case MapPortUPnP: // core option - can be changed on-the-fly
settings.setValue("fUseUPnP", value.toBool());
- m_node.mapPort(value.toBool());
+ node().mapPort(value.toBool());
break;
case MinimizeOnClose:
fMinimizeOnClose = value.toBool();
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index 14fdf9046e..3d9e7bbb80 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -11,6 +11,8 @@
#include <QAbstractListModel>
+#include <assert.h>
+
namespace interfaces {
class Node;
}
@@ -39,7 +41,7 @@ class OptionsModel : public QAbstractListModel
Q_OBJECT
public:
- explicit OptionsModel(interfaces::Node& node, QObject *parent = nullptr, bool resetSettings = false);
+ explicit OptionsModel(QObject *parent = nullptr, bool resetSettings = false);
enum OptionID {
StartAtStartup, // bool
@@ -92,10 +94,11 @@ public:
void setRestartRequired(bool fRequired);
bool isRestartRequired() const;
- interfaces::Node& node() const { return m_node; }
+ interfaces::Node& node() const { assert(m_node); return *m_node; }
+ void setNode(interfaces::Node& node) { assert(!m_node); m_node = &node; }
private:
- interfaces::Node& m_node;
+ interfaces::Node* m_node = nullptr;
/* Qt-only settings */
bool fHideTrayIcon;
bool fMinimizeToTray;
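The OptionsModel hunks above replace the interfaces::Node reference member with a pointer that starts out null and is attached later through setNode(). The point is that the model can be constructed before the node object exists. A minimal standalone sketch of that idiom, using hypothetical Node and Model types rather than the real Bitcoin Core classes:

    #include <cassert>
    #include <iostream>

    // Hypothetical stand-ins for interfaces::Node and OptionsModel.
    struct Node {
        void mapPort(bool enable) { std::cout << "mapPort(" << enable << ")\n"; }
    };

    class Model {
    public:
        // Construction no longer needs a Node; it can happen before the node exists.
        Model() = default;

        // Attach the node exactly once, after it has been created.
        void setNode(Node& node) { assert(!m_node); m_node = &node; }

        // Accessing the node before it is attached is a programming error.
        Node& node() const { assert(m_node); return *m_node; }

    private:
        Node* m_node = nullptr;
    };

    int main()
    {
        Model model;          // created early, no Node yet
        Node node;
        model.setNode(node);  // node attached once it exists
        model.node().mapPort(true);
    }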
diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp
index a1da85bda7..8679ced685 100644
--- a/src/qt/paymentserver.cpp
+++ b/src/qt/paymentserver.cpp
@@ -74,7 +74,7 @@ static QSet<QString> savedPaymentRequests;
// Warning: ipcSendCommandLine() is called early in init,
// so don't use "Q_EMIT message()", but "QMessageBox::"!
//
-void PaymentServer::ipcParseCommandLine(interfaces::Node& node, int argc, char* argv[])
+void PaymentServer::ipcParseCommandLine(int argc, char* argv[])
{
for (int i = 1; i < argc; i++)
{
@@ -97,11 +97,11 @@ void PaymentServer::ipcParseCommandLine(interfaces::Node& node, int argc, char*
auto tempChainParams = CreateChainParams(CBaseChainParams::MAIN);
if (IsValidDestinationString(r.address.toStdString(), *tempChainParams)) {
- node.selectParams(CBaseChainParams::MAIN);
+ SelectParams(CBaseChainParams::MAIN);
} else {
tempChainParams = CreateChainParams(CBaseChainParams::TESTNET);
if (IsValidDestinationString(r.address.toStdString(), *tempChainParams)) {
- node.selectParams(CBaseChainParams::TESTNET);
+ SelectParams(CBaseChainParams::TESTNET);
}
}
}
diff --git a/src/qt/paymentserver.h b/src/qt/paymentserver.h
index 154f4a7ea6..eaf2bafe59 100644
--- a/src/qt/paymentserver.h
+++ b/src/qt/paymentserver.h
@@ -61,7 +61,7 @@ class PaymentServer : public QObject
public:
// Parse URIs on command line
// Returns false on error
- static void ipcParseCommandLine(interfaces::Node& node, int argc, char *argv[]);
+ static void ipcParseCommandLine(int argc, char *argv[]);
// Returns true if there were URIs on the command line
// which were successfully sent to an already-running
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 821a337a62..a14fae6460 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -556,7 +556,7 @@ bool RPCConsole::eventFilter(QObject* obj, QEvent *event)
return QWidget::eventFilter(obj, event);
}
-void RPCConsole::setClientModel(ClientModel *model)
+void RPCConsole::setClientModel(ClientModel *model, int bestblock_height, int64_t bestblock_date, double verification_progress)
{
clientModel = model;
@@ -576,13 +576,13 @@ void RPCConsole::setClientModel(ClientModel *model)
setNumConnections(model->getNumConnections());
connect(model, &ClientModel::numConnectionsChanged, this, &RPCConsole::setNumConnections);
- interfaces::Node& node = clientModel->node();
- setNumBlocks(node.getNumBlocks(), QDateTime::fromTime_t(node.getLastBlockTime()), node.getVerificationProgress(), false);
+ setNumBlocks(bestblock_height, QDateTime::fromTime_t(bestblock_date), verification_progress, false);
connect(model, &ClientModel::numBlocksChanged, this, &RPCConsole::setNumBlocks);
updateNetworkState();
connect(model, &ClientModel::networkActiveChanged, this, &RPCConsole::setNetworkActive);
+ interfaces::Node& node = clientModel->node();
updateTrafficStats(node.getTotalBytesRecv(), node.getTotalBytesSent());
connect(model, &ClientModel::bytesChanged, this, &RPCConsole::updateTrafficStats);
diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h
index de8e37cca2..280c5bd71a 100644
--- a/src/qt/rpcconsole.h
+++ b/src/qt/rpcconsole.h
@@ -46,7 +46,7 @@ public:
return RPCParseCommandLine(&node, strResult, strCommand, true, pstrFilteredOut, wallet_model);
}
- void setClientModel(ClientModel *model);
+ void setClientModel(ClientModel *model = nullptr, int bestblock_height = 0, int64_t bestblock_date = 0, double verification_progress = 0.0);
void addWallet(WalletModel * const walletModel);
void removeWallet(WalletModel* const walletModel);
diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp
index 6e6b2b8466..bd63d6e7fb 100644
--- a/src/qt/splashscreen.cpp
+++ b/src/qt/splashscreen.cpp
@@ -24,8 +24,8 @@
#include <QScreen>
-SplashScreen::SplashScreen(interfaces::Node& node, Qt::WindowFlags f, const NetworkStyle *networkStyle) :
- QWidget(nullptr, f), curAlignment(0), m_node(node)
+SplashScreen::SplashScreen(Qt::WindowFlags f, const NetworkStyle *networkStyle) :
+ QWidget(nullptr, f), curAlignment(0)
{
// set reference point, paddings
int paddingRight = 50;
@@ -124,7 +124,6 @@ SplashScreen::SplashScreen(interfaces::Node& node, Qt::WindowFlags f, const Netw
setFixedSize(r.size());
move(QGuiApplication::primaryScreen()->geometry().center() - r.center());
- subscribeToCoreSignals();
installEventFilter(this);
GUIUtil::handleCloseWindowShortcut(this);
@@ -132,14 +131,28 @@ SplashScreen::SplashScreen(interfaces::Node& node, Qt::WindowFlags f, const Netw
SplashScreen::~SplashScreen()
{
- unsubscribeFromCoreSignals();
+ if (m_node) unsubscribeFromCoreSignals();
+}
+
+void SplashScreen::setNode(interfaces::Node& node)
+{
+ assert(!m_node);
+ m_node = &node;
+ subscribeToCoreSignals();
+ if (m_shutdown) m_node->startShutdown();
+}
+
+void SplashScreen::shutdown()
+{
+ m_shutdown = true;
+ if (m_node) m_node->startShutdown();
}
bool SplashScreen::eventFilter(QObject * obj, QEvent * ev) {
if (ev->type() == QEvent::KeyPress) {
QKeyEvent *keyEvent = static_cast<QKeyEvent *>(ev);
if (keyEvent->key() == Qt::Key_Q) {
- m_node.startShutdown();
+ shutdown();
}
}
return QObject::eventFilter(obj, ev);
@@ -183,10 +196,10 @@ void SplashScreen::ConnectWallet(std::unique_ptr<interfaces::Wallet> wallet)
void SplashScreen::subscribeToCoreSignals()
{
// Connect signals to client
- m_handler_init_message = m_node.handleInitMessage(std::bind(InitMessage, this, std::placeholders::_1));
- m_handler_show_progress = m_node.handleShowProgress(std::bind(ShowProgress, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
+ m_handler_init_message = m_node->handleInitMessage(std::bind(InitMessage, this, std::placeholders::_1));
+ m_handler_show_progress = m_node->handleShowProgress(std::bind(ShowProgress, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
#ifdef ENABLE_WALLET
- m_handler_load_wallet = m_node.handleLoadWallet([this](std::unique_ptr<interfaces::Wallet> wallet) { ConnectWallet(std::move(wallet)); });
+ m_handler_load_wallet = m_node->handleLoadWallet([this](std::unique_ptr<interfaces::Wallet> wallet) { ConnectWallet(std::move(wallet)); });
#endif
}
@@ -221,6 +234,6 @@ void SplashScreen::paintEvent(QPaintEvent *event)
void SplashScreen::closeEvent(QCloseEvent *event)
{
- m_node.startShutdown(); // allows an "emergency" shutdown during startup
+ shutdown(); // allows an "emergency" shutdown during startup
event->ignore();
}
diff --git a/src/qt/splashscreen.h b/src/qt/splashscreen.h
index 3158524117..2213b02c55 100644
--- a/src/qt/splashscreen.h
+++ b/src/qt/splashscreen.h
@@ -28,8 +28,9 @@ class SplashScreen : public QWidget
Q_OBJECT
public:
- explicit SplashScreen(interfaces::Node& node, Qt::WindowFlags f, const NetworkStyle *networkStyle);
+ explicit SplashScreen(Qt::WindowFlags f, const NetworkStyle *networkStyle);
~SplashScreen();
+ void setNode(interfaces::Node& node);
protected:
void paintEvent(QPaintEvent *event) override;
@@ -50,6 +51,8 @@ private:
void subscribeToCoreSignals();
/** Disconnect core signals to splash screen */
void unsubscribeFromCoreSignals();
+ /** Initiate shutdown */
+ void shutdown();
/** Connect wallet signals to splash screen */
void ConnectWallet(std::unique_ptr<interfaces::Wallet> wallet);
@@ -58,7 +61,8 @@ private:
QColor curColor;
int curAlignment;
- interfaces::Node& m_node;
+ interfaces::Node* m_node = nullptr;
+ bool m_shutdown = false;
std::unique_ptr<interfaces::Handler> m_handler_init_message;
std::unique_ptr<interfaces::Handler> m_handler_show_progress;
std::unique_ptr<interfaces::Handler> m_handler_load_wallet;
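The splash screen hunks go one step further: a shutdown request (closing the window or pressing Q) may arrive before the node has been attached, so the diff records it in an m_shutdown flag and replays it inside setNode(). A self-contained sketch of that ordering, with simplified types that are not the real SplashScreen API:

    #include <cassert>
    #include <iostream>

    struct Node {
        void startShutdown() { std::cout << "node shutdown requested\n"; }
    };

    class Splash {
    public:
        void setNode(Node& node)
        {
            assert(!m_node);
            m_node = &node;
            // If the user already asked to quit, forward the request now.
            if (m_shutdown) m_node->startShutdown();
        }

        void shutdown()
        {
            m_shutdown = true;                    // remember the request
            if (m_node) m_node->startShutdown();  // or act immediately if the node is attached
        }

    private:
        Node* m_node = nullptr;
        bool m_shutdown = false;
    };

    int main()
    {
        Splash splash;
        splash.shutdown();    // user closes the splash before the node exists
        Node node;
        splash.setNode(node); // the pending shutdown is delivered here
    }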
diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp
index 9347ff9e42..84f981dff3 100644
--- a/src/qt/test/addressbooktests.cpp
+++ b/src/qt/test/addressbooktests.cpp
@@ -18,6 +18,7 @@
#include <key.h>
#include <key_io.h>
#include <wallet/wallet.h>
+#include <walletinitinterface.h>
#include <QApplication>
#include <QTimer>
@@ -59,6 +60,7 @@ void EditAddressAndSubmit(
void TestAddAddressesToSendBook(interfaces::Node& node)
{
TestChain100Setup test;
+ node.setContext(&test.m_node);
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
bool firstRun;
@@ -106,7 +108,7 @@ void TestAddAddressesToSendBook(interfaces::Node& node)
// Initialize relevant QT models.
std::unique_ptr<const PlatformStyle> platformStyle(PlatformStyle::instantiate("other"));
- OptionsModel optionsModel(node);
+ OptionsModel optionsModel;
ClientModel clientModel(node, &optionsModel);
AddWallet(wallet);
WalletModel walletModel(interfaces::MakeWallet(wallet), clientModel, platformStyle.get());
diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp
index 443e2d047d..0b5c341548 100644
--- a/src/qt/test/apptests.cpp
+++ b/src/qt/test/apptests.cpp
@@ -67,6 +67,7 @@ void AppTests::appTests()
return GetDataDir() / "blocks";
}());
+ qRegisterMetaType<interfaces::BlockAndHeaderTipInfo>("interfaces::BlockAndHeaderTipInfo");
m_app.parameterSetup();
m_app.createOptionsModel(true /* reset settings */);
QScopedPointer<const NetworkStyle> style(NetworkStyle::instantiate(Params().NetworkIDString()));
@@ -83,7 +84,7 @@ void AppTests::appTests()
// Reset global state to avoid interfering with later tests.
LogInstance().DisconnectTestLogger();
AbortShutdown();
- UnloadBlockIndex();
+ UnloadBlockIndex(/* mempool */ nullptr);
WITH_LOCK(::cs_main, g_chainman.Reset());
}
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index 12efca2503..86356b43c8 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -52,7 +52,8 @@ int main(int argc, char* argv[])
BasicTestingSetup dummy{CBaseChainParams::REGTEST};
}
- std::unique_ptr<interfaces::Node> node = interfaces::MakeNode();
+ NodeContext node_context;
+ std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context);
bool fInvalid = false;
@@ -67,11 +68,11 @@ int main(int argc, char* argv[])
// Don't remove this, it's needed to access
// QApplication:: and QCoreApplication:: in the tests
- BitcoinApplication app(*node);
+ BitcoinApplication app;
+ app.setNode(*node);
app.setApplicationName("Bitcoin-Qt-test");
- node->setupServerArgs(); // Make gArgs available in the NodeContext
- node->context()->args->ClearArgs(); // Clear added args again
+ app.node().context()->args = &gArgs; // Make gArgs available in the NodeContext
AppTests app_tests(app);
if (QTest::qExec(&app_tests) != 0) {
fInvalid = true;
@@ -80,7 +81,7 @@ int main(int argc, char* argv[])
if (QTest::qExec(&test1) != 0) {
fInvalid = true;
}
- RPCNestedTests test3(*node);
+ RPCNestedTests test3(app.node());
if (QTest::qExec(&test3) != 0) {
fInvalid = true;
}
@@ -89,11 +90,11 @@ int main(int argc, char* argv[])
fInvalid = true;
}
#ifdef ENABLE_WALLET
- WalletTests test5(*node);
+ WalletTests test5(app.node());
if (QTest::qExec(&test5) != 0) {
fInvalid = true;
}
- AddressBookTests test6(*node);
+ AddressBookTests test6(app.node());
if (QTest::qExec(&test6) != 0) {
fInvalid = true;
}
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index 6648029bae..adcfe0d25c 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -138,8 +138,7 @@ void TestGUI(interfaces::Node& node)
for (int i = 0; i < 5; ++i) {
test.CreateAndProcessBlock({}, GetScriptForRawPubKey(test.coinbaseKey.GetPubKey()));
}
- node.context()->connman = std::move(test.m_node.connman);
- node.context()->mempool = std::move(test.m_node.mempool);
+ node.setContext(&test.m_node);
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase());
bool firstRun;
wallet->LoadWallet(firstRun);
@@ -164,7 +163,7 @@ void TestGUI(interfaces::Node& node)
std::unique_ptr<const PlatformStyle> platformStyle(PlatformStyle::instantiate("other"));
SendCoinsDialog sendCoinsDialog(platformStyle.get());
TransactionView transactionView(platformStyle.get());
- OptionsModel optionsModel(node);
+ OptionsModel optionsModel;
ClientModel clientModel(node, &optionsModel);
AddWallet(wallet);
WalletModel walletModel(interfaces::MakeWallet(wallet), clientModel, platformStyle.get());
diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp
index 01922cf996..b7f85446f4 100644
--- a/src/qt/utilitydialog.cpp
+++ b/src/qt/utilitydialog.cpp
@@ -28,7 +28,7 @@
#include <QVBoxLayout>
/** "Help message" or "About" dialog box */
-HelpMessageDialog::HelpMessageDialog(interfaces::Node& node, QWidget *parent, bool about) :
+HelpMessageDialog::HelpMessageDialog(QWidget *parent, bool about) :
QDialog(parent),
ui(new Ui::HelpMessageDialog)
{
diff --git a/src/qt/utilitydialog.h b/src/qt/utilitydialog.h
index 425b468f40..d2a5d5f67f 100644
--- a/src/qt/utilitydialog.h
+++ b/src/qt/utilitydialog.h
@@ -12,10 +12,6 @@ QT_BEGIN_NAMESPACE
class QMainWindow;
QT_END_NAMESPACE
-namespace interfaces {
- class Node;
-}
-
namespace Ui {
class HelpMessageDialog;
}
@@ -26,7 +22,7 @@ class HelpMessageDialog : public QDialog
Q_OBJECT
public:
- explicit HelpMessageDialog(interfaces::Node& node, QWidget *parent, bool about);
+ explicit HelpMessageDialog(QWidget *parent, bool about);
~HelpMessageDialog();
void printToConsole();
diff --git a/src/random.cpp b/src/random.cpp
index 9c9a35709a..af9504e0ce 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -315,12 +315,16 @@ void GetOSRand(unsigned char *ent32)
if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
RandFailure();
}
+ // Silence a compiler warning about unused function.
+ (void)GetDevURandom;
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
/* getentropy() is available on macOS 10.12 and later.
*/
if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
RandFailure();
}
+ // Silence a compiler warning about unused function.
+ (void)GetDevURandom;
#elif defined(HAVE_SYSCTL_ARND)
/* FreeBSD, NetBSD and similar. It is possible for the call to return less
* bytes than requested, so need to read in a loop.
@@ -334,6 +338,8 @@ void GetOSRand(unsigned char *ent32)
}
have += len;
} while (have < NUM_OS_RANDOM_BYTES);
+ // Silence a compiler warning about unused function.
+ (void)GetDevURandom;
#else
/* Fall back to /dev/urandom if there is no specific method implemented to
* get system entropy for this OS.
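The added (void)GetDevURandom; statements exist only to keep the compiler's unused-function warning quiet on platforms whose preprocessor branch never calls the /dev/urandom fallback. A tiny standalone example of the same trick (hypothetical function names, compile with -Wall):

    // Without the (void) cast, SlowFallback would trigger an unused-function
    // warning in builds where FAST_PATH is defined and the call is compiled out.
    static int SlowFallback() { return 42; }

    int GetValue()
    {
    #if defined(FAST_PATH)
        (void)SlowFallback; // referenced, so the compiler does not warn about it
        return 1;
    #else
        return SlowFallback();
    #endif
    }

    int main() { return GetValue() == 0; }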
diff --git a/src/rest.cpp b/src/rest.cpp
index 8cb594a03b..7130625d5c 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -68,13 +68,32 @@ static bool RESTERR(HTTPRequest* req, enum HTTPStatusCode status, std::string me
}
/**
- * Get the node context mempool.
+ * Get the node context.
*
- * Set the HTTP error and return nullptr if node context
- * mempool is not found.
+ * @param[in] req The HTTP request, whose status code will be set if node
+ * context is not found.
+ * @returns Pointer to the node context or nullptr if not found.
+ */
+static NodeContext* GetNodeContext(const util::Ref& context, HTTPRequest* req)
+{
+ NodeContext* node = context.Has<NodeContext>() ? &context.Get<NodeContext>() : nullptr;
+ if (!node) {
+ RESTERR(req, HTTP_INTERNAL_SERVER_ERROR,
+ strprintf("%s:%d (%s)\n"
+ "Internal bug detected: Node context not found!\n"
+ "You may report this issue here: %s\n",
+ __FILE__, __LINE__, __func__, PACKAGE_BUGREPORT));
+ return nullptr;
+ }
+ return node;
+}
+
+/**
+ * Get the node context mempool.
*
- * @param[in] req the HTTP request
- * return pointer to the mempool or nullptr if no mempool found
+ * @param[in] req The HTTP request, whose status code will be set if node
+ * context mempool is not found.
+ * @returns Pointer to the mempool or nullptr if no mempool found.
*/
static CTxMemPool* GetMemPool(const util::Ref& context, HTTPRequest* req)
{
@@ -371,10 +390,13 @@ static bool rest_tx(const util::Ref& context, HTTPRequest* req, const std::strin
g_txindex->BlockUntilSyncedToCurrentChain();
}
- CTransactionRef tx;
+ const NodeContext* const node = GetNodeContext(context, req);
+ if (!node) return false;
uint256 hashBlock = uint256();
- if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock))
+ const CTransactionRef tx = GetTransaction(/* block_index */ nullptr, node->mempool, hash, Params().GetConsensus(), hashBlock);
+ if (!tx) {
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
+ }
switch (rf) {
case RetFormat::BINARY: {
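The new GetNodeContext() helper in rest.cpp centralizes the "look up the node context, report an internal error if it is missing" step that several REST handlers now share. A standalone sketch of that shape, with hypothetical Context and Request types standing in for util::Ref and HTTPRequest:

    #include <cstdio>

    struct NodeContext { int dummy = 0; };  // stand-in for the real NodeContext
    struct Request { int status = 200; };   // stand-in for HTTPRequest

    struct Context {                        // stand-in for util::Ref
        NodeContext* node = nullptr;
        bool Has() const { return node != nullptr; }
        NodeContext& Get() const { return *node; }
    };

    // Returns the node context, or nullptr after flagging an internal error on the request.
    static NodeContext* GetNodeContext(const Context& context, Request* req)
    {
        if (!context.Has()) {
            req->status = 500;
            std::fprintf(stderr, "Internal bug detected: Node context not found!\n");
            return nullptr;
        }
        return &context.Get();
    }

    int main()
    {
        Request req;
        Context empty;
        if (!GetNodeContext(empty, &req)) { /* a handler would return false here */ }

        NodeContext node;
        Context full{&node};
        return GetNodeContext(full, &req) ? 0 : 1;
    }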
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 2afc9a3d4a..033e00daf5 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -525,9 +525,9 @@ static UniValue getrawmempool(const JSONRPCRequest& request)
{RPCResult::Type::STR_HEX, "", "The transaction id"},
}},
RPCResult{"for verbose = true",
- RPCResult::Type::OBJ, "", "",
+ RPCResult::Type::OBJ_DYN, "", "",
{
- {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()},
}},
},
RPCExamples{
@@ -556,7 +556,10 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request)
RPCResult::Type::ARR, "", "",
{{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool ancestor transaction"}}},
RPCResult{"for verbose = true",
- RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ RPCResult::Type::OBJ_DYN, "", "",
+ {
+ {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()},
+ }},
},
RPCExamples{
HelpExampleCli("getmempoolancestors", "\"mytxid\"")
@@ -588,7 +591,6 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request)
for (CTxMemPool::txiter ancestorIt : setAncestors) {
o.push_back(ancestorIt->GetTx().GetHash().ToString());
}
-
return o;
} else {
UniValue o(UniValue::VOBJ);
@@ -616,9 +618,9 @@ static UniValue getmempooldescendants(const JSONRPCRequest& request)
RPCResult::Type::ARR, "", "",
{{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool descendant transaction"}}},
RPCResult{"for verbose = true",
- RPCResult::Type::OBJ, "", "",
+ RPCResult::Type::OBJ_DYN, "", "",
{
- {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()},
}},
},
RPCExamples{
@@ -674,7 +676,7 @@ static UniValue getmempoolentry(const JSONRPCRequest& request)
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id (must be in mempool)"},
},
RPCResult{
- RPCResult::Type::OBJ_DYN, "", "", MempoolEntryDescription()},
+ RPCResult::Type::OBJ, "", "", MempoolEntryDescription()},
RPCExamples{
HelpExampleCli("getmempoolentry", "\"mytxid\"")
+ HelpExampleRpc("getmempoolentry", "\"mytxid\"")
@@ -1739,7 +1741,7 @@ static UniValue getblockstats(const JSONRPCRequest& request)
{RPCResult::Type::NUM, "total_size", "Total size of all non-coinbase transactions"},
{RPCResult::Type::NUM, "total_weight", "Total weight of all non-coinbase transactions divided by segwit scale factor (4)"},
{RPCResult::Type::NUM, "totalfee", "The fee total"},
- {RPCResult::Type::NUM, "txs", "The number of transactions (excluding coinbase)"},
+ {RPCResult::Type::NUM, "txs", "The number of transactions (including coinbase)"},
{RPCResult::Type::NUM, "utxo_increase", "The increase/decrease in the number of unspent outputs"},
{RPCResult::Type::NUM, "utxo_size_inc", "The increase/decrease in size for the utxo index (not discounting op_return and similar)"},
}},
@@ -2407,7 +2409,7 @@ static const CRPCCommand commands[] =
{ "hidden", "dumptxoutset", &dumptxoutset, {"path"} },
};
// clang-format on
-
- for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++)
- t.appendCommand(commands[vcidx].name, &commands[vcidx]);
+ for (const auto& c : commands) {
+ t.appendCommand(c.name, &c);
+ }
}
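Several RPC files in this diff replace the ARRAYLEN index loop that registers the command table with a range-for over it. The two forms register the same entries; a minimal standalone comparison with hypothetical Command and Table types:

    #include <map>
    #include <string>
    #include <vector>

    struct Command { std::string name; };

    struct Table {
        std::map<std::string, std::vector<const Command*>> map;
        void appendCommand(const std::string& name, const Command* cmd) { map[name].push_back(cmd); }
    };

    static const Command commands[] = {
        {"getblockcount"},
        {"getbestblockhash"},
    };

    int main()
    {
        Table t_old, t_new;

        // Old style: index into the array using its element count.
        for (unsigned int vcidx = 0; vcidx < sizeof(commands) / sizeof(commands[0]); vcidx++)
            t_old.appendCommand(commands[vcidx].name, &commands[vcidx]);

        // New style: range-for, no manual bookkeeping.
        for (const auto& c : commands) {
            t_new.appendCommand(c.name, &c);
        }

        return (t_old.map == t_new.map) ? 0 : 1; // both loops register the same commands
    }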
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 66ace7263a..4d08671bd2 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -151,6 +151,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "getmempoolancestors", 1, "verbose" },
{ "getmempooldescendants", 1, "verbose" },
{ "bumpfee", 1, "options" },
+ { "psbtbumpfee", 1, "options" },
{ "logging", 0, "include" },
{ "logging", 1, "exclude" },
{ "disconnectnode", 1, "nodeid" },
@@ -172,7 +173,11 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "createwallet", 2, "blank"},
{ "createwallet", 4, "avoid_reuse"},
{ "createwallet", 5, "descriptors"},
+ { "createwallet", 6, "load_on_startup"},
+ { "loadwallet", 1, "load_on_startup"},
+ { "unloadwallet", 1, "load_on_startup"},
{ "getnodeaddresses", 0, "count"},
+ { "addpeeraddress", 1, "port"},
{ "stop", 0, "wait" },
};
// clang-format on
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index fee6a893eb..76aa9dbfc1 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -236,6 +236,17 @@ static UniValue generatetodescriptor(const JSONRPCRequest& request)
return generateBlocks(chainman, mempool, coinbase_script, num_blocks, max_tries);
}
+static UniValue generate(const JSONRPCRequest& request)
+{
+ const std::string help_str{"generate ( nblocks maxtries ) has been replaced by the -generate cli option. Refer to -help for more information."};
+
+ if (request.fHelp) {
+ throw std::runtime_error(help_str);
+ } else {
+ throw JSONRPCError(RPC_METHOD_NOT_FOUND, help_str);
+ }
+}
+
static UniValue generatetoaddress(const JSONRPCRequest& request)
{
RPCHelpMan{"generatetoaddress",
@@ -1019,7 +1030,7 @@ static UniValue estimatesmartfee(const JSONRPCRequest& request)
RPCResult::Type::OBJ, "", "",
{
{RPCResult::Type::NUM, "feerate", /* optional */ true, "estimate fee rate in " + CURRENCY_UNIT + "/kB (only present if no errors were encountered)"},
- {RPCResult::Type::ARR, "errors", "Errors encountered during processing",
+ {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing (if there are any)",
{
{RPCResult::Type::STR, "", "error"},
}},
@@ -1098,7 +1109,7 @@ static UniValue estimaterawfee(const JSONRPCRequest& request)
{
{RPCResult::Type::ELISION, "", ""},
}},
- {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing",
+ {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing (if there are any)",
{
{RPCResult::Type::STR, "error", ""},
}},
@@ -1198,9 +1209,10 @@ static const CRPCCommand commands[] =
{ "util", "estimatesmartfee", &estimatesmartfee, {"conf_target", "estimate_mode"} },
{ "hidden", "estimaterawfee", &estimaterawfee, {"conf_target", "threshold"} },
+ { "hidden", "generate", &generate, {} },
};
// clang-format on
-
- for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++)
- t.appendCommand(commands[vcidx].name, &commands[vcidx]);
+ for (const auto& c : commands) {
+ t.appendCommand(c.name, &c);
+ }
}
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index 53d38f4e11..0c982317f5 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -4,6 +4,8 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <httpserver.h>
+#include <index/blockfilterindex.h>
+#include <index/txindex.h>
#include <interfaces/chain.h>
#include <key_io.h>
#include <node/context.h>
@@ -27,9 +29,9 @@
#include <univalue.h>
-static UniValue validateaddress(const JSONRPCRequest& request)
+static RPCHelpMan validateaddress()
{
- RPCHelpMan{"validateaddress",
+ return RPCHelpMan{"validateaddress",
"\nReturn information about the given bitcoin address.\n",
{
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to validate"},
@@ -50,8 +52,8 @@ static UniValue validateaddress(const JSONRPCRequest& request)
HelpExampleCli("validateaddress", "\"" + EXAMPLE_ADDRESS[0] + "\"") +
HelpExampleRpc("validateaddress", "\"" + EXAMPLE_ADDRESS[0] + "\"")
},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
CTxDestination dest = DecodeDestination(request.params[0].get_str());
bool isValid = IsValidDestination(dest);
@@ -69,11 +71,13 @@ static UniValue validateaddress(const JSONRPCRequest& request)
ret.pushKVs(detail);
}
return ret;
+},
+ };
}
-static UniValue createmultisig(const JSONRPCRequest& request)
+static RPCHelpMan createmultisig()
{
- RPCHelpMan{"createmultisig",
+ return RPCHelpMan{"createmultisig",
"\nCreates a multi-signature address with n signature of m keys required.\n"
"It returns a json object with the address and redeemScript.\n",
{
@@ -98,8 +102,8 @@ static UniValue createmultisig(const JSONRPCRequest& request)
"\nAs a JSON-RPC call\n"
+ HelpExampleRpc("createmultisig", "2, \"[\\\"03789ed0bb717d88f7d321a368d905e7430207ebbd82bd342cf11ae157a7ace5fd\\\",\\\"03dbc6764b8884a92e871274b87583e6d5c2a58819473e17e107ef3f6aa5a61626\\\"]\"")
},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
int required = request.params[0].get_int();
// Get the public keys
@@ -135,11 +139,13 @@ static UniValue createmultisig(const JSONRPCRequest& request)
result.pushKV("descriptor", descriptor->ToString());
return result;
+},
+ };
}
-UniValue getdescriptorinfo(const JSONRPCRequest& request)
+static RPCHelpMan getdescriptorinfo()
{
- RPCHelpMan{"getdescriptorinfo",
+ return RPCHelpMan{"getdescriptorinfo",
{"\nAnalyses a descriptor.\n"},
{
{"descriptor", RPCArg::Type::STR, RPCArg::Optional::NO, "The descriptor."},
@@ -157,8 +163,9 @@ UniValue getdescriptorinfo(const JSONRPCRequest& request)
RPCExamples{
"Analyse a descriptor\n" +
HelpExampleCli("getdescriptorinfo", "\"wpkh([d34db33f/84h/0h/0h]0279be667ef9dcbbac55a06295Ce870b07029Bfcdb2dce28d959f2815b16f81798)\"")
- }}.Check(request);
-
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
RPCTypeCheck(request.params, {UniValue::VSTR});
FlatSigningProvider provider;
@@ -175,11 +182,13 @@ UniValue getdescriptorinfo(const JSONRPCRequest& request)
result.pushKV("issolvable", desc->IsSolvable());
result.pushKV("hasprivatekeys", provider.keys.size() > 0);
return result;
+},
+ };
}
-UniValue deriveaddresses(const JSONRPCRequest& request)
+static RPCHelpMan deriveaddresses()
{
- RPCHelpMan{"deriveaddresses",
+ return RPCHelpMan{"deriveaddresses",
{"\nDerives one or more addresses corresponding to an output descriptor.\n"
"Examples of output descriptors are:\n"
" pkh(<pubkey>) P2PKH outputs for the given pubkey\n"
@@ -202,8 +211,9 @@ UniValue deriveaddresses(const JSONRPCRequest& request)
RPCExamples{
"First three native segwit receive addresses\n" +
HelpExampleCli("deriveaddresses", "\"wpkh([d34db33f/84h/0h/0h]xpub6DJ2dNUysrn5Vt36jH2KLBT2i1auw1tTSSomg8PhqNiUtx8QX2SvC9nrHu81fT41fvDUnhMjEzQgXnQjKEu3oaqMSzhSrHMxyyoEAmUHQbY/0/*)#cjjspncu\" \"[0,2]\"")
- }}.Check(request);
-
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
RPCTypeCheck(request.params, {UniValue::VSTR, UniValueType()}); // Range argument is checked later
const std::string desc_str = request.params[0].get_str();
@@ -254,11 +264,13 @@ UniValue deriveaddresses(const JSONRPCRequest& request)
}
return addresses;
+},
+ };
}
-static UniValue verifymessage(const JSONRPCRequest& request)
+static RPCHelpMan verifymessage()
{
- RPCHelpMan{"verifymessage",
+ return RPCHelpMan{"verifymessage",
"\nVerify a signed message\n",
{
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to use for the signature."},
@@ -278,8 +290,8 @@ static UniValue verifymessage(const JSONRPCRequest& request)
"\nAs a JSON-RPC call\n"
+ HelpExampleRpc("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", \"signature\", \"my message\"")
},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
LOCK(cs_main);
std::string strAddress = request.params[0].get_str();
@@ -301,11 +313,13 @@ static UniValue verifymessage(const JSONRPCRequest& request)
}
return false;
+},
+ };
}
-static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
+static RPCHelpMan signmessagewithprivkey()
{
- RPCHelpMan{"signmessagewithprivkey",
+ return RPCHelpMan{"signmessagewithprivkey",
"\nSign a message with the private key of an address\n",
{
{"privkey", RPCArg::Type::STR, RPCArg::Optional::NO, "The private key to sign the message with."},
@@ -322,8 +336,8 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
"\nAs a JSON-RPC call\n"
+ HelpExampleRpc("signmessagewithprivkey", "\"privkey\", \"my message\"")
},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
std::string strPrivkey = request.params[0].get_str();
std::string strMessage = request.params[1].get_str();
@@ -339,11 +353,13 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
}
return signature;
+},
+ };
}
-static UniValue setmocktime(const JSONRPCRequest& request)
+static RPCHelpMan setmocktime()
{
- RPCHelpMan{"setmocktime",
+ return RPCHelpMan{"setmocktime",
"\nSet the local time to given timestamp (-regtest only)\n",
{
{"timestamp", RPCArg::Type::NUM, RPCArg::Optional::NO, UNIX_EPOCH_TIME + "\n"
@@ -351,8 +367,8 @@ static UniValue setmocktime(const JSONRPCRequest& request)
},
RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{""},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
if (!Params().IsMockableChain()) {
throw std::runtime_error("setmocktime is for regression testing (-regtest mode) only");
}
@@ -374,19 +390,21 @@ static UniValue setmocktime(const JSONRPCRequest& request)
}
return NullUniValue;
+},
+ };
}
-static UniValue mockscheduler(const JSONRPCRequest& request)
+static RPCHelpMan mockscheduler()
{
- RPCHelpMan{"mockscheduler",
+ return RPCHelpMan{"mockscheduler",
"\nBump the scheduler into the future (-regtest only)\n",
{
{"delta_time", RPCArg::Type::NUM, RPCArg::Optional::NO, "Number of seconds to forward the scheduler into the future." },
},
RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{""},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
if (!Params().IsMockableChain()) {
throw std::runtime_error("mockscheduler is for regression testing (-regtest mode) only");
}
@@ -405,6 +423,8 @@ static UniValue mockscheduler(const JSONRPCRequest& request)
node.scheduler->MockForward(std::chrono::seconds(delta_seconds));
return NullUniValue;
+},
+ };
}
static UniValue RPCLockedMemoryInfo()
@@ -439,12 +459,12 @@ static std::string RPCMallocInfo()
}
#endif
-static UniValue getmemoryinfo(const JSONRPCRequest& request)
+static RPCHelpMan getmemoryinfo()
{
/* Please, avoid using the word "pool" here in the RPC interface or help,
* as users will undoubtedly confuse it with the other "memory pool"
*/
- RPCHelpMan{"getmemoryinfo",
+ return RPCHelpMan{"getmemoryinfo",
"Returns an object containing information about memory usage.\n",
{
{"mode", RPCArg::Type::STR, /* default */ "\"stats\"", "determines what kind of information is returned.\n"
@@ -474,8 +494,8 @@ static UniValue getmemoryinfo(const JSONRPCRequest& request)
HelpExampleCli("getmemoryinfo", "")
+ HelpExampleRpc("getmemoryinfo", "")
},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
std::string mode = request.params[0].isNull() ? "stats" : request.params[0].get_str();
if (mode == "stats") {
UniValue obj(UniValue::VOBJ);
@@ -490,6 +510,8 @@ static UniValue getmemoryinfo(const JSONRPCRequest& request)
} else {
throw JSONRPCError(RPC_INVALID_PARAMETER, "unknown mode " + mode);
}
+},
+ };
}
static void EnableOrDisableLogCategories(UniValue cats, bool enable) {
@@ -510,9 +532,9 @@ static void EnableOrDisableLogCategories(UniValue cats, bool enable) {
}
}
-UniValue logging(const JSONRPCRequest& request)
+static RPCHelpMan logging()
{
- RPCHelpMan{"logging",
+ return RPCHelpMan{"logging",
"Gets and sets the logging configuration.\n"
"When called without an argument, returns the list of categories with status that are currently being debug logged or not.\n"
"When called with arguments, adds or removes categories from debug logging and return the lists above.\n"
@@ -543,8 +565,8 @@ UniValue logging(const JSONRPCRequest& request)
HelpExampleCli("logging", "\"[\\\"all\\\"]\" \"[\\\"http\\\"]\"")
+ HelpExampleRpc("logging", "[\"all\"], [\"libevent\"]")
},
- }.Check(request);
-
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
uint32_t original_log_categories = LogInstance().GetCategoryMask();
if (request.params[0].isArray()) {
EnableOrDisableLogCategories(request.params[0], true);
@@ -575,26 +597,99 @@ UniValue logging(const JSONRPCRequest& request)
}
return result;
+},
+ };
}
-static UniValue echo(const JSONRPCRequest& request)
+static RPCHelpMan echo(const std::string& name)
{
- if (request.fHelp)
- throw std::runtime_error(
- RPCHelpMan{"echo|echojson ...",
+ return RPCHelpMan{name,
"\nSimply echo back the input arguments. This command is for testing.\n"
- "\nIt will return an internal bug report when exactly 100 arguments are passed.\n"
+ "\nIt will return an internal bug report when arg9='trigger_internal_bug' is passed.\n"
"\nThe difference between echo and echojson is that echojson has argument conversion enabled in the client-side table in "
"bitcoin-cli and the GUI. There is no server-side difference.",
- {},
+ {
+ {"arg0", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg1", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg2", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg3", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg4", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg5", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg6", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg7", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg8", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ {"arg9", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""},
+ },
RPCResult{RPCResult::Type::NONE, "", "Returns whatever was passed in"},
RPCExamples{""},
- }.ToString()
- );
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
+ if (request.fHelp) throw std::runtime_error(self.ToString());
- CHECK_NONFATAL(request.params.size() != 100);
+ if (request.params[9].isStr()) {
+ CHECK_NONFATAL(request.params[9].get_str() != "trigger_internal_bug");
+ }
return request.params;
+},
+ };
+}
+
+static RPCHelpMan echo() { return echo("echo"); }
+static RPCHelpMan echojson() { return echo("echojson"); }
+
+static UniValue SummaryToJSON(const IndexSummary&& summary, std::string index_name)
+{
+ UniValue ret_summary(UniValue::VOBJ);
+ if (!index_name.empty() && index_name != summary.name) return ret_summary;
+
+ UniValue entry(UniValue::VOBJ);
+ entry.pushKV("synced", summary.synced);
+ entry.pushKV("best_block_height", summary.best_block_height);
+ ret_summary.pushKV(summary.name, entry);
+ return ret_summary;
+}
+
+static RPCHelpMan getindexinfo()
+{
+ return RPCHelpMan{"getindexinfo",
+ "\nReturns the status of one or all available indices currently running in the node.\n",
+ {
+ {"index_name", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Filter results for an index with a specific name."},
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "", {
+ {
+ RPCResult::Type::OBJ, "name", "The name of the index",
+ {
+ {RPCResult::Type::BOOL, "synced", "Whether the index is synced or not"},
+ {RPCResult::Type::NUM, "best_block_height", "The block height to which the index is synced"},
+ }
+ },
+ },
+ },
+ RPCExamples{
+ HelpExampleCli("getindexinfo", "")
+ + HelpExampleRpc("getindexinfo", "")
+ + HelpExampleCli("getindexinfo", "txindex")
+ + HelpExampleRpc("getindexinfo", "txindex")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
+ UniValue result(UniValue::VOBJ);
+ const std::string index_name = request.params[0].isNull() ? "" : request.params[0].get_str();
+
+ if (g_txindex) {
+ result.pushKVs(SummaryToJSON(g_txindex->GetSummary(), index_name));
+ }
+
+ ForEachBlockFilterIndex([&result, &index_name](const BlockFilterIndex& index) {
+ result.pushKVs(SummaryToJSON(index.GetSummary(), index_name));
+ });
+
+ return result;
+},
+ };
}
void RegisterMiscRPCCommands(CRPCTable &t)
@@ -611,15 +706,16 @@ static const CRPCCommand commands[] =
{ "util", "getdescriptorinfo", &getdescriptorinfo, {"descriptor"} },
{ "util", "verifymessage", &verifymessage, {"address","signature","message"} },
{ "util", "signmessagewithprivkey", &signmessagewithprivkey, {"privkey","message"} },
+ { "util", "getindexinfo", &getindexinfo, {"index_name"} },
/* Not shown in help */
{ "hidden", "setmocktime", &setmocktime, {"timestamp"}},
{ "hidden", "mockscheduler", &mockscheduler, {"delta_time"}},
{ "hidden", "echo", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}},
- { "hidden", "echojson", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}},
+ { "hidden", "echojson", &echojson, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}},
};
// clang-format on
-
- for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++)
- t.appendCommand(commands[vcidx].name, &commands[vcidx]);
+ for (const auto& c : commands) {
+ t.appendCommand(c.name, &c);
+ }
}
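The rpc/misc.cpp conversion follows one repeated pattern: instead of a static UniValue handler that starts with RPCHelpMan{...}.Check(request), each RPC becomes a factory returning an RPCHelpMan that bundles the help text with the handler as a lambda. A much-simplified, self-contained sketch of that shape; HelpMan and Request here are hypothetical stand-ins, not the real RPCHelpMan/JSONRPCRequest API:

    #include <functional>
    #include <iostream>
    #include <string>

    struct Request { std::string param; };

    struct HelpMan {
        std::string name;
        std::string description;
        std::function<std::string(const HelpMan&, const Request&)> handler;

        std::string HandleRequest(const Request& req) const { return handler(*this, req); }
        std::string ToString() const { return name + " -- " + description; }
    };

    // The new pattern: a factory that bundles the help text and the handler lambda.
    static HelpMan echo()
    {
        return HelpMan{
            "echo",
            "Simply echo back the input argument. This command is for testing.",
            [](const HelpMan& self, const Request& req) -> std::string {
                (void)self; // the real handlers can use self.ToString() for help output
                return req.param;
            },
        };
    }

    int main()
    {
        const HelpMan cmd = echo();
        std::cout << cmd.ToString() << "\n";
        std::cout << cmd.HandleRequest({"hello"}) << "\n";
    }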
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 9981ea35df..e9343b3348 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -100,6 +100,8 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
{RPCResult::Type::BOOL, "relaytxes", "Whether peer has asked us to relay transactions to it"},
{RPCResult::Type::NUM_TIME, "lastsend", "The " + UNIX_EPOCH_TIME + " of the last send"},
{RPCResult::Type::NUM_TIME, "lastrecv", "The " + UNIX_EPOCH_TIME + " of the last receive"},
+ {RPCResult::Type::NUM_TIME, "last_transaction", "The " + UNIX_EPOCH_TIME + " of the last valid transaction received from this peer"},
+ {RPCResult::Type::NUM_TIME, "last_block", "The " + UNIX_EPOCH_TIME + " of the last block received from this peer"},
{RPCResult::Type::NUM, "bytessent", "The total bytes sent"},
{RPCResult::Type::NUM, "bytesrecv", "The total bytes received"},
{RPCResult::Type::NUM_TIME, "conntime", "The " + UNIX_EPOCH_TIME + " of the connection"},
@@ -169,6 +171,8 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
obj.pushKV("relaytxes", stats.fRelayTxes);
obj.pushKV("lastsend", stats.nLastSend);
obj.pushKV("lastrecv", stats.nLastRecv);
+ obj.pushKV("last_transaction", stats.nLastTXTime);
+ obj.pushKV("last_block", stats.nLastBlockTime);
obj.pushKV("bytessent", stats.nSendBytes);
obj.pushKV("bytesrecv", stats.nRecvBytes);
obj.pushKV("conntime", stats.nTimeConnected);
@@ -264,7 +268,7 @@ static UniValue addnode(const JSONRPCRequest& request)
if (strCommand == "onetry")
{
CAddress addr;
- node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true);
+ node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), ConnectionType::MANUAL);
return NullUniValue;
}
@@ -276,7 +280,7 @@ static UniValue addnode(const JSONRPCRequest& request)
else if(strCommand == "remove")
{
if(!node.connman->RemoveAddedNode(strNode))
- throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added.");
+ throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node could not be removed. It has not been added previously.");
}
return NullUniValue;
@@ -727,7 +731,7 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request)
RPCHelpMan{"getnodeaddresses",
"\nReturn known addresses which can potentially be used to find new nodes in the network\n",
{
- {"count", RPCArg::Type::NUM, /* default */ "1", "How many addresses to return. Limited to the smaller of " + ToString(ADDRMAN_GETADDR_MAX) + " or " + ToString(ADDRMAN_GETADDR_MAX_PCT) + "% of all known addresses."},
+ {"count", RPCArg::Type::NUM, /* default */ "1", "The maximum number of addresses to return. Specify 0 to return all known addresses."},
},
RPCResult{
RPCResult::Type::ARR, "", "",
@@ -754,18 +758,16 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request)
int count = 1;
if (!request.params[0].isNull()) {
count = request.params[0].get_int();
- if (count <= 0) {
+ if (count < 0) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Address count out of range");
}
}
// returns a shuffled list of CAddress
- std::vector<CAddress> vAddr = node.connman->GetAddresses();
+ std::vector<CAddress> vAddr = node.connman->GetAddresses(count, /* max_pct */ 0);
UniValue ret(UniValue::VARR);
- int address_return_count = std::min<int>(count, vAddr.size());
- for (int i = 0; i < address_return_count; ++i) {
+ for (const CAddress& addr : vAddr) {
UniValue obj(UniValue::VOBJ);
- const CAddress& addr = vAddr[i];
obj.pushKV("time", (int)addr.nTime);
obj.pushKV("services", (uint64_t)addr.nServices);
obj.pushKV("address", addr.ToStringIP());
@@ -775,6 +777,54 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request)
return ret;
}
+static UniValue addpeeraddress(const JSONRPCRequest& request)
+{
+ RPCHelpMan{"addpeeraddress",
+ "\nAdd the address of a potential peer to the address manager. This RPC is for testing only.\n",
+ {
+ {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP address of the peer"},
+ {"port", RPCArg::Type::NUM, RPCArg::Optional::NO, "The port of the peer"},
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "success", "whether the peer address was successfully added to the address manager"},
+ },
+ },
+ RPCExamples{
+ HelpExampleCli("addpeeraddress", "\"1.2.3.4\" 8333")
+ + HelpExampleRpc("addpeeraddress", "\"1.2.3.4\", 8333")
+ },
+ }.Check(request);
+
+ NodeContext& node = EnsureNodeContext(request.context);
+ if (!node.connman) {
+ throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
+ }
+
+ UniValue obj(UniValue::VOBJ);
+
+ std::string addr_string = request.params[0].get_str();
+ uint16_t port = request.params[1].get_int();
+
+ CNetAddr net_addr;
+ if (!LookupHost(addr_string, net_addr, false)) {
+ obj.pushKV("success", false);
+ return obj;
+ }
+ CAddress address = CAddress({net_addr, port}, ServiceFlags(NODE_NETWORK|NODE_WITNESS));
+ address.nTime = GetAdjustedTime();
+ // The source address is set equal to the address. This is equivalent to the peer
+ // announcing itself.
+ if (!node.connman->AddNewAddresses({address}, address)) {
+ obj.pushKV("success", false);
+ return obj;
+ }
+
+ obj.pushKV("success", true);
+ return obj;
+}
+
void RegisterNetRPCCommands(CRPCTable &t)
{
// clang-format off
@@ -794,9 +844,10 @@ static const CRPCCommand commands[] =
{ "network", "clearbanned", &clearbanned, {} },
{ "network", "setnetworkactive", &setnetworkactive, {"state"} },
{ "network", "getnodeaddresses", &getnodeaddresses, {"count"} },
+ { "hidden", "addpeeraddress", &addpeeraddress, {"address", "port"} },
};
// clang-format on
-
- for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++)
- t.appendCommand(commands[vcidx].name, &commands[vcidx]);
+ for (const auto& c : commands) {
+ t.appendCommand(c.name, &c);
+ }
}
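getnodeaddresses now treats count == 0 as "return all known addresses" and pushes the truncation into GetAddresses() itself instead of slicing the returned vector afterwards. A small standalone sketch of that contract, using a plain std::vector<int> in place of CAddress:

    #include <algorithm>
    #include <cassert>
    #include <random>
    #include <vector>

    // Returns up to max_addresses shuffled entries; 0 means no limit.
    std::vector<int> GetAddresses(std::vector<int> known, size_t max_addresses)
    {
        std::shuffle(known.begin(), known.end(), std::mt19937{std::random_device{}()});
        if (max_addresses != 0 && known.size() > max_addresses) {
            known.resize(max_addresses);
        }
        return known;
    }

    int main()
    {
        const std::vector<int> known{1, 2, 3, 4, 5};
        assert(GetAddresses(known, 2).size() == 2); // truncated to the requested count
        assert(GetAddresses(known, 0).size() == 5); // 0 = return all known addresses
    }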
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index d5e902cadd..d6988ee3ac 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -157,6 +157,8 @@ static UniValue getrawtransaction(const JSONRPCRequest& request)
},
}.Check(request);
+ const NodeContext& node = EnsureNodeContext(request.context);
+
bool in_active_chain = true;
uint256 hash = ParseHashV(request.params[0], "parameter 1");
CBlockIndex* blockindex = nullptr;
@@ -188,9 +190,9 @@ static UniValue getrawtransaction(const JSONRPCRequest& request)
f_txindex_ready = g_txindex->BlockUntilSyncedToCurrentChain();
}
- CTransactionRef tx;
uint256 hash_block;
- if (!GetTransaction(hash, tx, Params().GetConsensus(), hash_block, blockindex)) {
+ const CTransactionRef tx = GetTransaction(blockindex, node.mempool, hash, Params().GetConsensus(), hash_block);
+ if (!tx) {
std::string errmsg;
if (blockindex) {
if (!(blockindex->nStatus & BLOCK_HAVE_DATA)) {
@@ -245,10 +247,11 @@ static UniValue gettxoutproof(const JSONRPCRequest& request)
for (unsigned int idx = 0; idx < txids.size(); idx++) {
const UniValue& txid = txids[idx];
uint256 hash(ParseHashV(txid, "txid"));
- if (setTxids.count(hash))
- throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid parameter, duplicated txid: ")+txid.get_str());
- setTxids.insert(hash);
- oneTxid = hash;
+ if (setTxids.count(hash)) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid parameter, duplicated txid: ") + txid.get_str());
+ }
+ setTxids.insert(hash);
+ oneTxid = hash;
}
CBlockIndex* pblockindex = nullptr;
@@ -281,11 +284,11 @@ static UniValue gettxoutproof(const JSONRPCRequest& request)
LOCK(cs_main);
- if (pblockindex == nullptr)
- {
- CTransactionRef tx;
- if (!GetTransaction(oneTxid, tx, Params().GetConsensus(), hashBlock) || hashBlock.IsNull())
+ if (pblockindex == nullptr) {
+ const CTransactionRef tx = GetTransaction(/* block_index */ nullptr, /* mempool */ nullptr, oneTxid, Params().GetConsensus(), hashBlock);
+ if (!tx || hashBlock.IsNull()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not yet in block");
+ }
pblockindex = LookupBlockIndex(hashBlock);
if (!pblockindex) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Transaction index corrupt");
@@ -293,15 +296,19 @@ static UniValue gettxoutproof(const JSONRPCRequest& request)
}
CBlock block;
- if(!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus()))
+ if (!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus())) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk");
+ }
unsigned int ntxFound = 0;
- for (const auto& tx : block.vtx)
- if (setTxids.count(tx->GetHash()))
+ for (const auto& tx : block.vtx) {
+ if (setTxids.count(tx->GetHash())) {
ntxFound++;
- if (ntxFound != setTxids.size())
+ }
+ }
+ if (ntxFound != setTxids.size()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Not all transactions found in specified or retrieved block");
+ }
CDataStream ssMB(SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS);
CMerkleBlock mb(block, setTxids);
@@ -594,7 +601,7 @@ static UniValue decodescript(const JSONRPCRequest& request)
UniValue sr(UniValue::VOBJ);
CScript segwitScr;
if (which_type == TxoutType::PUBKEY) {
- segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0].begin(), solutions_data[0].end())));
+ segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0])));
} else if (which_type == TxoutType::PUBKEYHASH) {
segwitScr = GetScriptForDestination(WitnessV0KeyHash(uint160{solutions_data[0]}));
} else {
@@ -737,7 +744,7 @@ static UniValue signrawtransactionwithkey(const JSONRPCRequest& request)
{
{RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"},
{RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
- {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)",
+ {RPCResult::Type::ARR, "errors", /* optional */ true, "Script verification errors (if there are any)",
{
{RPCResult::Type::OBJ, "", "",
{
@@ -1293,7 +1300,7 @@ UniValue combinepsbt(const JSONRPCRequest& request)
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << merged_psbt;
- return EncodeBase64((unsigned char*)ssTx.data(), ssTx.size());
+ return EncodeBase64(MakeUCharSpan(ssTx));
}
UniValue finalizepsbt(const JSONRPCRequest& request)
@@ -1341,7 +1348,7 @@ UniValue finalizepsbt(const JSONRPCRequest& request)
if (complete && extract) {
ssTx << mtx;
- result_str = HexStr(ssTx.str());
+ result_str = HexStr(ssTx);
result.pushKV("hex", result_str);
} else {
ssTx << psbtx;
@@ -1428,7 +1435,7 @@ UniValue createpsbt(const JSONRPCRequest& request)
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << psbtx;
- return EncodeBase64((unsigned char*)ssTx.data(), ssTx.size());
+ return EncodeBase64(MakeUCharSpan(ssTx));
}
UniValue converttopsbt(const JSONRPCRequest& request)
@@ -1495,7 +1502,7 @@ UniValue converttopsbt(const JSONRPCRequest& request)
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << psbtx;
- return EncodeBase64((unsigned char*)ssTx.data(), ssTx.size());
+ return EncodeBase64(MakeUCharSpan(ssTx));
}
UniValue utxoupdatepsbt(const JSONRPCRequest& request)
@@ -1583,7 +1590,7 @@ UniValue utxoupdatepsbt(const JSONRPCRequest& request)
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << psbtx;
- return EncodeBase64((unsigned char*)ssTx.data(), ssTx.size());
+ return EncodeBase64(MakeUCharSpan(ssTx));
}
UniValue joinpsbts(const JSONRPCRequest& request)
@@ -1676,7 +1683,7 @@ UniValue joinpsbts(const JSONRPCRequest& request)
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << shuffled_psbt;
- return EncodeBase64((unsigned char*)ssTx.data(), ssTx.size());
+ return EncodeBase64(MakeUCharSpan(ssTx));
}
UniValue analyzepsbt(const JSONRPCRequest& request)
@@ -1715,7 +1722,7 @@ UniValue analyzepsbt(const JSONRPCRequest& request)
{RPCResult::Type::STR_AMOUNT, "estimated_feerate", /* optional */ true, "Estimated feerate of the final signed transaction in " + CURRENCY_UNIT + "/kB. Shown only if all UTXO slots in the PSBT have been filled"},
{RPCResult::Type::STR_AMOUNT, "fee", /* optional */ true, "The transaction fee paid. Shown only if all UTXO slots in the PSBT have been filled"},
{RPCResult::Type::STR, "next", "Role of the next person that this psbt needs to go to"},
- {RPCResult::Type::STR, "error", "Error message if there is one"},
+ {RPCResult::Type::STR, "error", /* optional */ true, "Error message (if there is one)"},
}
},
RPCExamples {
@@ -1814,7 +1821,7 @@ static const CRPCCommand commands[] =
{ "blockchain", "verifytxoutproof", &verifytxoutproof, {"proof"} },
};
// clang-format on
-
- for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++)
- t.appendCommand(commands[vcidx].name, &commands[vcidx]);
+ for (const auto& c : commands) {
+ t.appendCommand(c.name, &c);
+ }
}
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index 7fef45f50e..d9ad70fa37 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -78,7 +78,7 @@ bool GenerateAuthCookie(std::string *cookie_out)
const size_t COOKIE_SIZE = 32;
unsigned char rand_pwd[COOKIE_SIZE];
GetRandBytes(rand_pwd, COOKIE_SIZE);
- std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd, rand_pwd+COOKIE_SIZE);
+ std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd);
/** the umask determines what permissions are used to create this file -
* these are set to 077 in init.cpp unless overridden with -sysperms.
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index e5f6b1b9f1..9c8e7fe04a 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -256,13 +256,8 @@ static const CRPCCommand vRPCCommands[] =
CRPCTable::CRPCTable()
{
- unsigned int vcidx;
- for (vcidx = 0; vcidx < (sizeof(vRPCCommands) / sizeof(vRPCCommands[0])); vcidx++)
- {
- const CRPCCommand *pcmd;
-
- pcmd = &vRPCCommands[vcidx];
- mapCommands[pcmd->name].push_back(pcmd);
+ for (const auto& c : vRPCCommands) {
+ appendCommand(c.name, &c);
}
}
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 9f4c7bee9c..40dfdb587e 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -260,7 +260,7 @@ public:
UniValue obj(UniValue::VOBJ);
obj.pushKV("iswitness", true);
obj.pushKV("witness_version", (int)id.version);
- obj.pushKV("witness_program", HexStr(id.program, id.program + id.length));
+ obj.pushKV("witness_program", HexStr(Span<const unsigned char>(id.program, id.length)));
return obj;
}
};
@@ -504,7 +504,7 @@ std::string RPCHelpMan::ToString() const
ret += m_name;
bool was_optional{false};
for (const auto& arg : m_args) {
- if (arg.m_hidden) continue;
+ if (arg.m_hidden) break; // Any arg that follows is also hidden
const bool optional = arg.IsOptional();
ret += " ";
if (optional) {
@@ -526,7 +526,7 @@ std::string RPCHelpMan::ToString() const
Sections sections;
for (size_t i{0}; i < m_args.size(); ++i) {
const auto& arg = m_args.at(i);
- if (arg.m_hidden) continue;
+ if (arg.m_hidden) break; // Any arg that follows is also hidden
if (i == 0) ret += "\nArguments:\n";
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 5fa128d62d..6c0a98cca2 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -190,7 +190,7 @@ class OriginPubkeyProvider final : public PubkeyProvider
std::string OriginString() const
{
- return HexStr(std::begin(m_origin.fingerprint), std::end(m_origin.fingerprint)) + FormatHDKeypath(m_origin.path);
+ return HexStr(m_origin.fingerprint) + FormatHDKeypath(m_origin.path);
}
public:
@@ -825,8 +825,9 @@ std::unique_ptr<PubkeyProvider> ParsePubkey(uint32_t key_exp_index, const Span<c
return nullptr;
}
if (origin_split.size() == 1) return ParsePubkeyInner(key_exp_index, origin_split[0], permit_uncompressed, out, error);
- if (origin_split[0].size() < 1 || origin_split[0][0] != '[') {
- error = strprintf("Key origin start '[ character expected but not found, got '%c' instead", origin_split[0][0]);
+ if (origin_split[0].empty() || origin_split[0][0] != '[') {
+ error = strprintf("Key origin start '[ character expected but not found, got '%c' instead",
+ origin_split[0].empty() ? /** empty, implies split char */ ']' : origin_split[0][0]);
return nullptr;
}
auto slash_split = Split(origin_split[0].subspan(1), '/');
@@ -896,7 +897,7 @@ std::unique_ptr<DescriptorImpl> ParseScript(uint32_t key_exp_index, Span<const c
providers.emplace_back(std::move(pk));
key_exp_index++;
}
- if (providers.size() < 1 || providers.size() > 16) {
+ if (providers.empty() || providers.size() > 16) {
error = strprintf("Cannot have %u keys in multisig; must have between 1 and 16 keys, inclusive", providers.size());
return nullptr;
} else if (thres < 1) {
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 9415bba585..7b2457a5e3 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -986,9 +986,9 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript&
else if (opcode == OP_SHA256)
CSHA256().Write(vch.data(), vch.size()).Finalize(vchHash.data());
else if (opcode == OP_HASH160)
- CHash160().Write(vch.data(), vch.size()).Finalize(vchHash.data());
+ CHash160().Write(vch).Finalize(vchHash);
else if (opcode == OP_HASH256)
- CHash256().Write(vch.data(), vch.size()).Finalize(vchHash.data());
+ CHash256().Write(vch).Finalize(vchHash);
popstack(stack);
stack.push_back(vchHash);
}
@@ -1258,34 +1258,37 @@ public:
}
};
+/** Compute the (single) SHA256 of the concatenation of all prevouts of a tx. */
template <class T>
-uint256 GetPrevoutHash(const T& txTo)
+uint256 GetPrevoutsSHA256(const T& txTo)
{
CHashWriter ss(SER_GETHASH, 0);
for (const auto& txin : txTo.vin) {
ss << txin.prevout;
}
- return ss.GetHash();
+ return ss.GetSHA256();
}
+/** Compute the (single) SHA256 of the concatenation of all nSequences of a tx. */
template <class T>
-uint256 GetSequenceHash(const T& txTo)
+uint256 GetSequencesSHA256(const T& txTo)
{
CHashWriter ss(SER_GETHASH, 0);
for (const auto& txin : txTo.vin) {
ss << txin.nSequence;
}
- return ss.GetHash();
+ return ss.GetSHA256();
}
+/** Compute the (single) SHA256 of the concatenation of all txouts of a tx. */
template <class T>
-uint256 GetOutputsHash(const T& txTo)
+uint256 GetOutputsSHA256(const T& txTo)
{
CHashWriter ss(SER_GETHASH, 0);
for (const auto& txout : txTo.vout) {
ss << txout;
}
- return ss.GetHash();
+ return ss.GetSHA256();
}
} // namespace
@@ -1297,9 +1300,9 @@ void PrecomputedTransactionData::Init(const T& txTo)
// Cache is calculated only for transactions with witness
if (txTo.HasWitness()) {
- hashPrevouts = GetPrevoutHash(txTo);
- hashSequence = GetSequenceHash(txTo);
- hashOutputs = GetOutputsHash(txTo);
+ hashPrevouts = SHA256Uint256(GetPrevoutsSHA256(txTo));
+ hashSequence = SHA256Uint256(GetSequencesSHA256(txTo));
+ hashOutputs = SHA256Uint256(GetOutputsSHA256(txTo));
}
m_ready = true;
@@ -1329,16 +1332,16 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn
const bool cacheready = cache && cache->m_ready;
if (!(nHashType & SIGHASH_ANYONECANPAY)) {
- hashPrevouts = cacheready ? cache->hashPrevouts : GetPrevoutHash(txTo);
+ hashPrevouts = cacheready ? cache->hashPrevouts : SHA256Uint256(GetPrevoutsSHA256(txTo));
}
if (!(nHashType & SIGHASH_ANYONECANPAY) && (nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) {
- hashSequence = cacheready ? cache->hashSequence : GetSequenceHash(txTo);
+ hashSequence = cacheready ? cache->hashSequence : SHA256Uint256(GetSequencesSHA256(txTo));
}
if ((nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) {
- hashOutputs = cacheready ? cache->hashOutputs : GetOutputsHash(txTo);
+ hashOutputs = cacheready ? cache->hashOutputs : SHA256Uint256(GetOutputsSHA256(txTo));
} else if ((nHashType & 0x1f) == SIGHASH_SINGLE && nIn < txTo.vout.size()) {
CHashWriter ss(SER_GETHASH, 0);
ss << txTo.vout[nIn];
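
Note on the renames above: the old Get{Prevout,Sequence,Outputs}Hash helpers returned the double-SHA256 directly, while the new Get*SHA256 helpers return the single SHA256 and their callers wrap it in SHA256Uint256, so the BIP143 digests are unchanged and the single-SHA256 midstates become reusable. A minimal sketch of that equivalence (illustrative only; the helpers sit in an anonymous namespace in interpreter.cpp, and `tx` is an assumed CTransaction with witness data):

CHashWriter ss(SER_GETHASH, 0);
for (const auto& txin : tx.vin) ss << txin.prevout;
// GetHash() finalizes with double-SHA256, matching what the old GetPrevoutHash(tx) returned.
assert(ss.GetHash() == SHA256Uint256(GetPrevoutsSHA256(tx)));
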
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index f425215549..9b3f94f14d 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -186,6 +186,8 @@ static CScript PushAll(const std::vector<valtype>& values)
result << OP_0;
} else if (v.size() == 1 && v[0] >= 1 && v[0] <= 16) {
result << CScript::EncodeOP_N(v[0]);
+ } else if (v.size() == 1 && v[0] == 0x81) {
+ result << OP_1NEGATE;
} else {
result << v;
}
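
The added branch covers the one element a plain data push would encode non-minimally: a single 0x81 byte is the CScriptNum encoding of -1, so it is re-emitted as OP_1NEGATE, mirroring how 0x01..0x10 become OP_1..OP_16. A small illustrative check (not part of the patch):

const std::vector<unsigned char> v{0x81};
assert(CScriptNum(v, /* fRequireMinimal */ false).getint() == -1); // 0x81 decodes to -1
CScript s;
s << OP_1NEGATE; // what PushAll() now emits for such an element
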
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 1c4990791c..96a3d311a6 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -16,10 +16,10 @@ typedef std::vector<unsigned char> valtype;
bool fAcceptDatacarrier = DEFAULT_ACCEPT_DATACARRIER;
unsigned nMaxDatacarrierBytes = MAX_OP_RETURN_RELAY;
-CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {}
+CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in)) {}
CScriptID::CScriptID(const ScriptHash& in) : BaseHash(static_cast<uint160>(in)) {}
-ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {}
+ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in)) {}
ScriptHash::ScriptHash(const CScriptID& in) : BaseHash(static_cast<uint160>(in)) {}
PKHash::PKHash(const CPubKey& pubkey) : BaseHash(pubkey.GetID()) {}
@@ -313,18 +313,6 @@ CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys)
return script;
}
-CScript GetScriptForWitness(const CScript& redeemscript)
-{
- std::vector<std::vector<unsigned char> > vSolutions;
- TxoutType typ = Solver(redeemscript, vSolutions);
- if (typ == TxoutType::PUBKEY) {
- return GetScriptForDestination(WitnessV0KeyHash(Hash160(vSolutions[0].begin(), vSolutions[0].end())));
- } else if (typ == TxoutType::PUBKEYHASH) {
- return GetScriptForDestination(WitnessV0KeyHash(uint160{vSolutions[0]}));
- }
- return GetScriptForDestination(WitnessV0ScriptHash(redeemscript));
-}
-
bool IsValidDestination(const CTxDestination& dest) {
return dest.which() != 0;
}
diff --git a/src/script/standard.h b/src/script/standard.h
index fd29353886..6dbcd04968 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -79,6 +79,9 @@ public:
{
return m_hash.size();
}
+
+ unsigned char* data() { return m_hash.data(); }
+ const unsigned char* data() const { return m_hash.data(); }
};
/** A reference to a CScript: the Hash160 of its serialization (see script.h) */
@@ -260,14 +263,4 @@ CScript GetScriptForRawPubKey(const CPubKey& pubkey);
/** Generate a multisig script. */
CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys);
-/**
- * Generate a pay-to-witness script for the given redeem script. If the redeem
- * script is P2PK or P2PKH, this returns a P2WPKH script, otherwise it returns a
- * P2WSH script.
- *
- * TODO: replace calls to GetScriptForWitness with GetScriptForDestination using
- * the various witness-specific CTxDestination subtypes.
- */
-CScript GetScriptForWitness(const CScript& redeemscript);
-
#endif // BITCOIN_SCRIPT_STANDARD_H
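
With GetScriptForWitness() removed, callers pick the witness destination type explicitly, as the updated tests later in this diff do. Migration sketch (illustrative; `pubkey` is an assumed CPubKey and `witness_script` an assumed CScript):

// P2PK / P2PKH redeem scripts previously mapped to P2WPKH:
CScript p2wpkh = GetScriptForDestination(WitnessV0KeyHash(pubkey));
// Any other redeem script previously mapped to P2WSH:
CScript p2wsh = GetScriptForDestination(WitnessV0ScriptHash(witness_script));
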
diff --git a/src/span.h b/src/span.h
index 841f1eadf7..4afb383a59 100644
--- a/src/span.h
+++ b/src/span.h
@@ -151,6 +151,7 @@ public:
return m_data[m_size - 1];
}
constexpr std::size_t size() const noexcept { return m_size; }
+ constexpr bool empty() const noexcept { return size() == 0; }
CONSTEXPR_IF_NOT_DEBUG C& operator[](std::size_t pos) const noexcept
{
ASSERT_IF_DEBUG(size() > pos);
@@ -206,4 +207,16 @@ T& SpanPopBack(Span<T>& span)
return back;
}
+// Helper functions to safely cast to unsigned char pointers.
+inline unsigned char* UCharCast(char* c) { return (unsigned char*)c; }
+inline unsigned char* UCharCast(unsigned char* c) { return c; }
+inline const unsigned char* UCharCast(const char* c) { return (unsigned char*)c; }
+inline const unsigned char* UCharCast(const unsigned char* c) { return c; }
+
+// Helper function to safely convert a Span to a Span<[const] unsigned char>.
+template <typename T> constexpr auto UCharSpanCast(Span<T> s) -> Span<typename std::remove_pointer<decltype(UCharCast(s.data()))>::type> { return {UCharCast(s.data()), s.size()}; }
+
+/** Like MakeSpan, but for (const) unsigned char member types only. Only works for (un)signed char containers. */
+template <typename V> constexpr auto MakeUCharSpan(V&& v) -> decltype(UCharSpanCast(MakeSpan(std::forward<V>(v)))) { return UCharSpanCast(MakeSpan(std::forward<V>(v))); }
+
#endif
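
These helpers are what let the EncodeBase64/HexStr call sites earlier in this diff drop their manual casts and pointer arithmetic. Usage sketch (illustrative; `psbtx` is an assumed serializable object):

CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << psbtx;
const std::string b64 = EncodeBase64(MakeUCharSpan(ssTx)); // was: (unsigned char*)ssTx.data(), ssTx.size()
const std::string hex = HexStr(ssTx);                      // HexStr now accepts span-convertible arguments
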
diff --git a/src/streams.h b/src/streams.h
index e1d1b0eab2..6ce8065da8 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -814,18 +814,6 @@ public:
return true;
}
- bool Seek(uint64_t nPos) {
- long nLongPos = nPos;
- if (nPos != (uint64_t)nLongPos)
- return false;
- if (fseek(src, nLongPos, SEEK_SET))
- return false;
- nLongPos = ftell(src);
- nSrcPos = nLongPos;
- nReadPos = nLongPos;
- return true;
- }
-
//! prevent reading beyond a certain position
//! no argument removes the limit
bool SetLimit(uint64_t nPos = std::numeric_limits<uint64_t>::max()) {
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index b4f392116c..26de780f29 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -10,7 +10,6 @@
#endif
#ifdef WIN32
-#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
diff --git a/src/sync.cpp b/src/sync.cpp
index 10f0483189..4be13a3c48 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -149,12 +149,17 @@ static void push_lock(void* c, const CLockLocation& locklocation)
const LockPair p1 = std::make_pair(i.first, c);
if (lockdata.lockorders.count(p1))
continue;
- lockdata.lockorders.emplace(p1, lock_stack);
const LockPair p2 = std::make_pair(c, i.first);
+ if (lockdata.lockorders.count(p2)) {
+ auto lock_stack_copy = lock_stack;
+ lock_stack.pop_back();
+ potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
+ // potential_deadlock_detected() does not return.
+ }
+
+ lockdata.lockorders.emplace(p1, lock_stack);
lockdata.invlockorders.insert(p2);
- if (lockdata.lockorders.count(p2))
- potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
}
}
@@ -259,6 +264,17 @@ void DeleteLock(void* cs)
}
}
+bool LockStackEmpty()
+{
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+ const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
+ if (it == lockdata.m_lock_stacks.end()) {
+ return true;
+ }
+ return it->second.empty();
+}
+
bool g_debug_lockorder_abort = true;
#endif /* DEBUG_LOCKORDER */
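
The reordering in push_lock reports a potential deadlock before the offending lock pair is recorded, so the order-tracking maps stay consistent for later checks, and the new LockStackEmpty() exposes that state to the test suite. Sketch of the assertion the updated sync_tests rely on (illustrative; `mutex1` and `mutex2` are assumed test-local Mutex objects):

{
    LOCK2(mutex1, mutex2);
}
// Nothing should remain on this thread's lock stack, even if an earlier
// LOCK2 taken in the wrong order threw a potential-deadlock error.
assert(LockStackEmpty());
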
diff --git a/src/sync.h b/src/sync.h
index 77327d8bfe..05ff2ee8a9 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -56,6 +56,7 @@ template <typename MutexType>
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void DeleteLock(void* cs);
+bool LockStackEmpty();
/**
* Call abort() if a potential lock order deadlock bug is detected, instead of
@@ -64,13 +65,14 @@ void DeleteLock(void* cs);
*/
extern bool g_debug_lockorder_abort;
#else
-void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
-void static inline LeaveCritical() {}
-void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
+inline void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
+inline void LeaveCritical() {}
+inline void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
template <typename MutexType>
-void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
-void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
-void static inline DeleteLock(void* cs) {}
+inline void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
+inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+inline void DeleteLock(void* cs) {}
+inline bool LockStackEmpty() { return true; }
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index bc6b38c682..25fdd64568 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -392,7 +392,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
// Test: Sanity check, GetAddr should never return anything if addrman
// is empty.
BOOST_CHECK_EQUAL(addrman.size(), 0U);
- std::vector<CAddress> vAddr1 = addrman.GetAddr();
+ std::vector<CAddress> vAddr1 = addrman.GetAddr(/* max_addresses */ 0, /* max_pct */0);
BOOST_CHECK_EQUAL(vAddr1.size(), 0U);
CAddress addr1 = CAddress(ResolveService("250.250.2.1", 8333), NODE_NONE);
@@ -415,13 +415,15 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
BOOST_CHECK(addrman.Add(addr4, source2));
BOOST_CHECK(addrman.Add(addr5, source1));
- // GetAddr returns 23% of addresses, 23% of 5 is 1 rounded down.
- BOOST_CHECK_EQUAL(addrman.GetAddr().size(), 1U);
+ BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 0, /* max_pct */ 0).size(), 5U);
+ // Net processing asks for 23% of addresses. 23% of 5 is 1 rounded down.
+ BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23).size(), 1U);
// Test: Ensure GetAddr works with new and tried addresses.
addrman.Good(CAddress(addr1, NODE_NONE));
addrman.Good(CAddress(addr2, NODE_NONE));
- BOOST_CHECK_EQUAL(addrman.GetAddr().size(), 1U);
+ BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 0, /* max_pct */ 0).size(), 5U);
+ BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23).size(), 1U);
// Test: Ensure GetAddr still returns 23% when addrman has many addrs.
for (unsigned int i = 1; i < (8 * 256); i++) {
@@ -436,7 +438,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
if (i % 8 == 0)
addrman.Good(addr);
}
- std::vector<CAddress> vAddr = addrman.GetAddr();
+ std::vector<CAddress> vAddr = addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23);
size_t percent23 = (addrman.size() * 23) / 100;
BOOST_CHECK_EQUAL(vAddr.size(), percent23);
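
The updated calls reflect GetAddr() now taking explicit caps; judging from these assertions, passing 0 for a parameter disables that cap (an assumption drawn from the tests, not from the header). Illustrative:

std::vector<CAddress> all    = addrman.GetAddr(/* max_addresses */ 0,    /* max_pct */ 0);  // no caps: all known addresses
std::vector<CAddress> capped = addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23); // at most 23%, never more than 2500
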
diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp
index 57559fa687..6a636f2574 100644
--- a/src/test/base58_tests.cpp
+++ b/src/test/base58_tests.cpp
@@ -33,7 +33,7 @@ BOOST_AUTO_TEST_CASE(base58_EncodeBase58)
std::vector<unsigned char> sourcedata = ParseHex(test[0].get_str());
std::string base58string = test[1].get_str();
BOOST_CHECK_MESSAGE(
- EncodeBase58(sourcedata.data(), sourcedata.data() + sourcedata.size()) == base58string,
+ EncodeBase58(sourcedata) == base58string,
strTest);
}
}
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index f64251fe32..b3cc8cefd9 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -183,7 +183,7 @@ static void TestHKDF_SHA256_32(const std::string &ikm_hex, const std::string &sa
CHKDF_HMAC_SHA256_L32 hkdf32(initial_key_material.data(), initial_key_material.size(), salt_stringified);
unsigned char out[32];
hkdf32.Expand32(info_stringified, out);
- BOOST_CHECK(HexStr(out, out + 32) == okm_check_hex);
+ BOOST_CHECK(HexStr(out) == okm_check_hex);
}
static std::string LongTestString()
@@ -743,7 +743,7 @@ BOOST_AUTO_TEST_CASE(sha256d64)
in[j] = InsecureRandBits(8);
}
for (int j = 0; j < i; ++j) {
- CHash256().Write(in + 64 * j, 64).Finalize(out1 + 32 * j);
+ CHash256().Write({in + 64 * j, 64}).Finalize({out1 + 32 * j, 32});
}
SHA256D64(out2, in, i);
BOOST_CHECK(memcmp(out1, out2, 32 * i) == 0);
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index b1a635d9da..c0a2fca9ca 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -80,11 +80,11 @@ BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
{
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(*connman, nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", /*fInboundIn=*/ false);
+ CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::OUTBOUND);
dummyNode1.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode1);
@@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidation &peerLogic, CConnmanTest* connman)
{
CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE);
- vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", /*fInboundIn=*/ false));
+ vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", ConnectionType::OUTBOUND));
CNode &node = *vNodes.back();
node.SetSendVersion(PROTOCOL_VERSION);
@@ -150,7 +150,7 @@ static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidat
BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
{
auto connman = MakeUnique<CConnmanTest>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(*connman, nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
const Consensus::Params& consensusParams = Params().GetConsensus();
constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS;
@@ -223,11 +223,11 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(*connman, banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
banman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", true);
+ CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND);
dummyNode1.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode1);
dummyNode1.nVersion = 1;
@@ -244,7 +244,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001|0x0000ff00))); // Different IP, not discouraged
CAddress addr2(ip(0xa0b0c002), NODE_NONE);
- CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", true);
+ CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND);
dummyNode2.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode2);
dummyNode2.nVersion = 1;
@@ -279,14 +279,14 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(*connman, banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
banman->ClearBanned();
int64_t nStartTime = GetTime();
SetMockTime(nStartTime); // Overrides future calls to GetTime()
CAddress addr(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", true);
+ CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND);
dummyNode.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode);
dummyNode.nVersion = 1;
diff --git a/src/test/fuzz/asmap.cpp b/src/test/fuzz/asmap.cpp
index 40ca01bd9f..e3aefa18a3 100644
--- a/src/test/fuzz/asmap.cpp
+++ b/src/test/fuzz/asmap.cpp
@@ -33,7 +33,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
if (buffer.size() < 1 + 3 + 4) return;
int asmap_size = 3 + (buffer[0] & 127);
bool ipv6 = buffer[0] & 128;
- int addr_size = ipv6 ? 16 : 4;
+ const size_t addr_size = ipv6 ? ADDR_IPV6_SIZE : ADDR_IPV4_SIZE;
if (buffer.size() < size_t(1 + asmap_size + addr_size)) return;
std::vector<bool> asmap = ipv6 ? IPV6_PREFIX_ASMAP : IPV4_PREFIX_ASMAP;
asmap.reserve(asmap.size() + 8 * asmap_size);
@@ -43,7 +43,17 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
}
if (!SanityCheckASMap(asmap)) return;
+
+ const uint8_t* addr_data = buffer.data() + 1 + asmap_size;
CNetAddr net_addr;
- net_addr.SetRaw(ipv6 ? NET_IPV6 : NET_IPV4, buffer.data() + 1 + asmap_size);
+ if (ipv6) {
+ assert(addr_size == ADDR_IPV6_SIZE);
+ net_addr.SetLegacyIPv6(Span<const uint8_t>(addr_data, addr_size));
+ } else {
+ assert(addr_size == ADDR_IPV4_SIZE);
+ in_addr ipv4;
+ memcpy(&ipv4, addr_data, addr_size);
+ net_addr.SetIP(CNetAddr{ipv4});
+ }
(void)net_addr.GetMappedAS(asmap);
}
diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp
index 6bbd13eb5c..e575640be5 100644
--- a/src/test/fuzz/buffered_file.cpp
+++ b/src/test/fuzz/buffered_file.cpp
@@ -31,7 +31,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
if (opt_buffered_file && fuzzed_file != nullptr) {
bool setpos_fail = false;
while (fuzzed_data_provider.ConsumeBool()) {
- switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 5)) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 4)) {
case 0: {
std::array<uint8_t, 4096> arr{};
try {
@@ -41,20 +41,16 @@ void test_one_input(const std::vector<uint8_t>& buffer)
break;
}
case 1: {
- opt_buffered_file->Seek(fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096));
- break;
- }
- case 2: {
opt_buffered_file->SetLimit(fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096));
break;
}
- case 3: {
+ case 2: {
if (!opt_buffered_file->SetPos(fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(0, 4096))) {
setpos_fail = true;
}
break;
}
- case 4: {
+ case 3: {
if (setpos_fail) {
// Calling FindByte(...) after a failed SetPos(...) call may result in an infinite loop.
break;
@@ -65,7 +61,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
break;
}
- case 5: {
+ case 4: {
ReadFromStream(fuzzed_data_provider, *opt_buffered_file);
break;
}
diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp
index 595cdf9abb..3edcf96495 100644
--- a/src/test/fuzz/crypto.cpp
+++ b/src/test/fuzz/crypto.cpp
@@ -44,8 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
}
- (void)hash160.Write(data.data(), data.size());
- (void)hash256.Write(data.data(), data.size());
+ (void)hash160.Write(data);
+ (void)hash256.Write(data);
(void)hmac_sha256.Write(data.data(), data.size());
(void)hmac_sha512.Write(data.data(), data.size());
(void)ripemd160.Write(data.data(), data.size());
@@ -54,9 +54,8 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)sha512.Write(data.data(), data.size());
(void)sip_hasher.Write(data.data(), data.size());
- (void)Hash(data.begin(), data.end());
+ (void)Hash(data);
(void)Hash160(data);
- (void)Hash160(data.begin(), data.end());
(void)sha512.Size();
break;
}
@@ -73,12 +72,12 @@ void test_one_input(const std::vector<uint8_t>& buffer)
switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 8)) {
case 0: {
data.resize(CHash160::OUTPUT_SIZE);
- hash160.Finalize(data.data());
+ hash160.Finalize(data);
break;
}
case 1: {
data.resize(CHash256::OUTPUT_SIZE);
- hash256.Finalize(data.data());
+ hash256.Finalize(data);
break;
}
case 2: {
diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp
index c746374c61..955b954700 100644
--- a/src/test/fuzz/key.cpp
+++ b/src/test/fuzz/key.cpp
@@ -85,7 +85,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
assert(negated_key == key);
}
- const uint256 random_uint256 = Hash(buffer.begin(), buffer.end());
+ const uint256 random_uint256 = Hash(buffer);
{
CKey child_key;
diff --git a/src/test/fuzz/locale.cpp b/src/test/fuzz/locale.cpp
index 3597f51e51..2b181c6da1 100644
--- a/src/test/fuzz/locale.cpp
+++ b/src/test/fuzz/locale.cpp
@@ -52,7 +52,6 @@ void test_one_input(const std::vector<uint8_t>& buffer)
const bool parseint64_without_locale = ParseInt64(random_string, &parseint64_out_without_locale);
const int64_t atoi64_without_locale = atoi64(random_string);
const int atoi_without_locale = atoi(random_string);
- const int64_t atoi64c_without_locale = atoi64(random_string.c_str());
const int64_t random_int64 = fuzzed_data_provider.ConsumeIntegral<int64_t>();
const std::string tostring_without_locale = ToString(random_int64);
// The variable `random_int32` is no longer used, but the harness still needs to consume the fuzz input that would produce it.
@@ -80,8 +79,6 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
const int64_t atoi64_with_locale = atoi64(random_string);
assert(atoi64_without_locale == atoi64_with_locale);
- const int64_t atoi64c_with_locale = atoi64(random_string.c_str());
- assert(atoi64c_without_locale == atoi64c_with_locale);
const int atoi_with_locale = atoi(random_string);
assert(atoi_without_locale == atoi_with_locale);
const std::string tostring_with_locale = ToString(random_int64);
diff --git a/src/test/fuzz/net_permissions.cpp b/src/test/fuzz/net_permissions.cpp
index ae531f4462..8a674ac1e9 100644
--- a/src/test/fuzz/net_permissions.cpp
+++ b/src/test/fuzz/net_permissions.cpp
@@ -24,6 +24,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
NetPermissionFlags::PF_FORCERELAY,
NetPermissionFlags::PF_NOBAN,
NetPermissionFlags::PF_MEMPOOL,
+ NetPermissionFlags::PF_ADDR,
NetPermissionFlags::PF_ISIMPLICIT,
NetPermissionFlags::PF_ALL,
}) :
diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp
index 2901c704f6..8252f38726 100644
--- a/src/test/fuzz/netaddress.cpp
+++ b/src/test/fuzz/netaddress.cpp
@@ -17,9 +17,6 @@ void test_one_input(const std::vector<uint8_t>& buffer)
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
const CNetAddr net_addr = ConsumeNetAddr(fuzzed_data_provider);
- for (int i = 0; i < 15; ++i) {
- (void)net_addr.GetByte(i);
- }
(void)net_addr.GetHash();
(void)net_addr.GetNetClass();
if (net_addr.GetNetwork() == Network::NET_IPV4) {
@@ -78,7 +75,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)net_addr.ToString();
(void)net_addr.ToStringIP();
- const CSubNet sub_net{net_addr, fuzzed_data_provider.ConsumeIntegral<int32_t>()};
+ const CSubNet sub_net{net_addr, fuzzed_data_provider.ConsumeIntegral<uint8_t>()};
(void)sub_net.IsValid();
(void)sub_net.ToString();
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
index 9e40d5cd55..ec09acc6c6 100644
--- a/src/test/fuzz/process_message.cpp
+++ b/src/test/fuzz/process_message.cpp
@@ -30,18 +30,6 @@
#include <string>
#include <vector>
-void ProcessMessage(
- CNode& pfrom,
- const std::string& msg_type,
- CDataStream& vRecv,
- const std::chrono::microseconds time_received,
- const CChainParams& chainparams,
- ChainstateManager& chainman,
- CTxMemPool& mempool,
- CConnman& connman,
- BanMan* banman,
- const std::atomic<bool>& interruptMsgProc);
-
namespace {
#ifdef MESSAGE_TYPE
@@ -80,17 +68,16 @@ void test_one_input(const std::vector<uint8_t>& buffer)
return;
}
CDataStream random_bytes_data_stream{fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION};
- CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, false).release();
+ CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND).release();
p2p_node.fSuccessfullyConnected = true;
p2p_node.nVersion = PROTOCOL_VERSION;
p2p_node.SetSendVersion(PROTOCOL_VERSION);
connman.AddTestNode(p2p_node);
g_setup->m_node.peer_logic->InitializeNode(&p2p_node);
try {
- ProcessMessage(p2p_node, random_message_type, random_bytes_data_stream, GetTime<std::chrono::microseconds>(),
- Params(), *g_setup->m_node.chainman, *g_setup->m_node.mempool,
- *g_setup->m_node.connman, g_setup->m_node.banman.get(),
- std::atomic<bool>{false});
+ g_setup->m_node.peer_logic->ProcessMessage(p2p_node, random_message_type, random_bytes_data_stream,
+ GetTime<std::chrono::microseconds>(), Params(),
+ std::atomic<bool>{false});
} catch (const std::ios_base::failure&) {
}
SyncWithValidationInterfaceQueue();
diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp
index 91ebf9fb1b..ef427442e9 100644
--- a/src/test/fuzz/process_messages.cpp
+++ b/src/test/fuzz/process_messages.cpp
@@ -44,9 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer)
const auto num_peers_to_add = fuzzed_data_provider.ConsumeIntegralInRange(1, 3);
for (int i = 0; i < num_peers_to_add; ++i) {
const ServiceFlags service_flags = ServiceFlags(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
- const bool inbound{fuzzed_data_provider.ConsumeBool()};
- const bool block_relay_only{fuzzed_data_provider.ConsumeBool()};
- peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, inbound, block_relay_only).release());
+ const ConnectionType conn_type = fuzzed_data_provider.PickValueInArray({ConnectionType::INBOUND, ConnectionType::OUTBOUND, ConnectionType::MANUAL, ConnectionType::FEELER, ConnectionType::BLOCK_RELAY, ConnectionType::ADDR_FETCH});
+ peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, conn_type).release());
CNode& p2p_node = *peers.back();
p2p_node.fSuccessfullyConnected = true;
diff --git a/src/test/fuzz/script.cpp b/src/test/fuzz/script.cpp
index 85aac6ac7a..4274fa4351 100644
--- a/src/test/fuzz/script.cpp
+++ b/src/test/fuzz/script.cpp
@@ -63,8 +63,6 @@ void test_one_input(const std::vector<uint8_t>& buffer)
int required_ret;
(void)ExtractDestinations(script, type_ret, addresses, required_ret);
- (void)GetScriptForWitness(script);
-
const FlatSigningProvider signing_provider;
(void)InferDescriptor(script, signing_provider);
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index 9f9552edb9..ed6093a8a8 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -257,7 +257,7 @@ CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept
CSubNet ConsumeSubNet(FuzzedDataProvider& fuzzed_data_provider) noexcept
{
- return {ConsumeNetAddr(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int32_t>()};
+ return {ConsumeNetAddr(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<uint8_t>()};
}
void InitializeFuzzingContext(const std::string& chain_name = CBaseChainParams::REGTEST)
diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp
index fd35537c77..4e4c44266a 100644
--- a/src/test/key_tests.cpp
+++ b/src/test/key_tests.cpp
@@ -77,7 +77,7 @@ BOOST_AUTO_TEST_CASE(key_test1)
for (int n=0; n<16; n++)
{
std::string strMsg = strprintf("Very secret message %i: 11", n);
- uint256 hashMsg = Hash(strMsg.begin(), strMsg.end());
+ uint256 hashMsg = Hash(strMsg);
// normal signatures
@@ -134,7 +134,7 @@ BOOST_AUTO_TEST_CASE(key_test1)
std::vector<unsigned char> detsig, detsigc;
std::string strMsg = "Very deterministic message";
- uint256 hashMsg = Hash(strMsg.begin(), strMsg.end());
+ uint256 hashMsg = Hash(strMsg);
BOOST_CHECK(key1.Sign(hashMsg, detsig));
BOOST_CHECK(key1C.Sign(hashMsg, detsigc));
BOOST_CHECK(detsig == detsigc);
@@ -158,7 +158,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests)
// When entropy is specified, we should see at least one high R signature within 20 signatures
CKey key = DecodeSecret(strSecret1);
std::string msg = "A message to be signed";
- uint256 msg_hash = Hash(msg.begin(), msg.end());
+ uint256 msg_hash = Hash(msg);
std::vector<unsigned char> sig;
bool found = false;
@@ -179,7 +179,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests)
for (int i = 0; i < 256; ++i) {
sig.clear();
std::string msg = "A message to be signed" + ToString(i);
- msg_hash = Hash(msg.begin(), msg.end());
+ msg_hash = Hash(msg);
BOOST_CHECK(key.Sign(msg_hash, sig));
found = sig[3] == 0x20;
BOOST_CHECK(sig.size() <= 70);
@@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation)
std::string str = "Bitcoin key verification\n";
GetRandBytes(rnd, sizeof(rnd));
uint256 hash;
- CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin());
+ CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash);
// import the static test key
CKey key = DecodeSecret(strSecret1C);
diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp
index 03dce552fc..9bc7cc5dab 100644
--- a/src/test/merkle_tests.cpp
+++ b/src/test/merkle_tests.cpp
@@ -13,9 +13,9 @@ static uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vecto
uint256 hash = leaf;
for (std::vector<uint256>::const_iterator it = vMerkleBranch.begin(); it != vMerkleBranch.end(); ++it) {
if (nIndex & 1) {
- hash = Hash(it->begin(), it->end(), hash.begin(), hash.end());
+ hash = Hash(*it, hash);
} else {
- hash = Hash(hash.begin(), hash.end(), it->begin(), it->end());
+ hash = Hash(hash, *it);
}
nIndex >>= 1;
}
@@ -60,7 +60,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
}
}
mutated |= (inner[level] == h);
- CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin());
+ CHash256().Write(inner[level]).Write(h).Finalize(h);
}
// Store the resulting hash at inner position level.
inner[level] = h;
@@ -86,7 +86,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
if (pbranch && matchh) {
pbranch->push_back(h);
}
- CHash256().Write(h.begin(), 32).Write(h.begin(), 32).Finalize(h.begin());
+ CHash256().Write(h).Write(h).Finalize(h);
// Increment count to the value it would have if two entries at this
// level had existed.
count += (((uint32_t)1) << level);
@@ -101,7 +101,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot
matchh = true;
}
}
- CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin());
+ CHash256().Write(inner[level]).Write(h).Finalize(h);
level++;
}
}
@@ -144,8 +144,7 @@ static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::ve
// Two identical hashes at the end of the list at a particular level.
mutated = true;
}
- vMerkleTree.push_back(Hash(vMerkleTree[j+i].begin(), vMerkleTree[j+i].end(),
- vMerkleTree[j+i2].begin(), vMerkleTree[j+i2].end()));
+ vMerkleTree.push_back(Hash(vMerkleTree[j+i], vMerkleTree[j+i2]));
}
j += nSize;
}
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index ab42be21bd..917ae571f5 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -13,8 +13,10 @@
#include <streams.h>
#include <test/util/setup_common.h>
#include <util/memory.h>
+#include <util/strencodings.h>
#include <util/string.h>
#include <util/system.h>
+#include <version.h>
#include <boost/test/unit_test.hpp>
@@ -180,17 +182,84 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test)
CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK);
std::string pszDest;
- bool fInboundIn = false;
- // Test that fFeeler is false by default.
- std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, fInboundIn);
- BOOST_CHECK(pnode1->fInbound == false);
- BOOST_CHECK(pnode1->fFeeler == false);
+ std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, ConnectionType::OUTBOUND);
+ BOOST_CHECK(pnode1->IsInboundConn() == false);
- fInboundIn = true;
- std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, fInboundIn);
- BOOST_CHECK(pnode2->fInbound == true);
- BOOST_CHECK(pnode2->fFeeler == false);
+ std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, ConnectionType::INBOUND);
+ BOOST_CHECK(pnode2->IsInboundConn() == true);
+}
+
+BOOST_AUTO_TEST_CASE(cnetaddr_basic)
+{
+ CNetAddr addr;
+
+ // IPv4, INADDR_ANY
+ BOOST_REQUIRE(LookupHost("0.0.0.0", addr, false));
+ BOOST_REQUIRE(!addr.IsValid());
+ BOOST_REQUIRE(addr.IsIPv4());
+
+ BOOST_CHECK(addr.IsBindAny());
+ BOOST_CHECK_EQUAL(addr.ToString(), "0.0.0.0");
+
+ // IPv4, INADDR_NONE
+ BOOST_REQUIRE(LookupHost("255.255.255.255", addr, false));
+ BOOST_REQUIRE(!addr.IsValid());
+ BOOST_REQUIRE(addr.IsIPv4());
+
+ BOOST_CHECK(!addr.IsBindAny());
+ BOOST_CHECK_EQUAL(addr.ToString(), "255.255.255.255");
+
+ // IPv4, casual
+ BOOST_REQUIRE(LookupHost("12.34.56.78", addr, false));
+ BOOST_REQUIRE(addr.IsValid());
+ BOOST_REQUIRE(addr.IsIPv4());
+
+ BOOST_CHECK(!addr.IsBindAny());
+ BOOST_CHECK_EQUAL(addr.ToString(), "12.34.56.78");
+
+ // IPv6, in6addr_any
+ BOOST_REQUIRE(LookupHost("::", addr, false));
+ BOOST_REQUIRE(!addr.IsValid());
+ BOOST_REQUIRE(addr.IsIPv6());
+
+ BOOST_CHECK(addr.IsBindAny());
+ BOOST_CHECK_EQUAL(addr.ToString(), "::");
+
+ // IPv6, casual
+ BOOST_REQUIRE(LookupHost("1122:3344:5566:7788:9900:aabb:ccdd:eeff", addr, false));
+ BOOST_REQUIRE(addr.IsValid());
+ BOOST_REQUIRE(addr.IsIPv6());
+
+ BOOST_CHECK(!addr.IsBindAny());
+ BOOST_CHECK_EQUAL(addr.ToString(), "1122:3344:5566:7788:9900:aabb:ccdd:eeff");
+
+ // TORv2
+ addr.SetSpecial("6hzph5hv6337r6p2.onion");
+ BOOST_REQUIRE(addr.IsValid());
+ BOOST_REQUIRE(addr.IsTor());
+
+ BOOST_CHECK(!addr.IsBindAny());
+ BOOST_CHECK_EQUAL(addr.ToString(), "6hzph5hv6337r6p2.onion");
+
+ // Internal
+ addr.SetInternal("esffpp");
+ BOOST_REQUIRE(!addr.IsValid()); // "internal" is considered invalid
+ BOOST_REQUIRE(addr.IsInternal());
+
+ BOOST_CHECK(!addr.IsBindAny());
+ BOOST_CHECK_EQUAL(addr.ToString(), "esffpvrt3wpeaygy.internal");
+}
+
+BOOST_AUTO_TEST_CASE(cnetaddr_serialize)
+{
+ CNetAddr addr;
+ CDataStream s(SER_NETWORK, PROTOCOL_VERSION);
+
+ addr.SetInternal("a");
+ s << addr;
+ BOOST_CHECK_EQUAL(HexStr(s), "fd6b88c08724ca978112ca1bbdcafac2");
+ s.clear();
}
// prior to PR #14728, this test triggers an undefined behavior
@@ -214,7 +283,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test)
in_addr ipv4AddrPeer;
ipv4AddrPeer.s_addr = 0xa0b0c001;
CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK);
- std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, false);
+ std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND);
pnode->fSuccessfullyConnected.store(true);
// the peer claims to be reaching us via IPv6
diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp
index ea3e633cc2..6681c92bb5 100644
--- a/src/test/netbase_tests.cpp
+++ b/src/test/netbase_tests.cpp
@@ -138,6 +138,14 @@ BOOST_AUTO_TEST_CASE(onioncat_test)
}
+BOOST_AUTO_TEST_CASE(embedded_test)
+{
+ CNetAddr addr1(ResolveIP("1.2.3.4"));
+ CNetAddr addr2(ResolveIP("::FFFF:0102:0304"));
+ BOOST_CHECK(addr2.IsIPv4());
+ BOOST_CHECK_EQUAL(addr1.ToString(), addr2.ToString());
+}
+
BOOST_AUTO_TEST_CASE(subnet_test)
{
@@ -158,12 +166,13 @@ BOOST_AUTO_TEST_CASE(subnet_test)
BOOST_CHECK(ResolveSubNet("1.2.2.1/24").Match(ResolveIP("1.2.2.4")));
BOOST_CHECK(ResolveSubNet("1.2.2.110/31").Match(ResolveIP("1.2.2.111")));
BOOST_CHECK(ResolveSubNet("1.2.2.20/26").Match(ResolveIP("1.2.2.63")));
- // All-Matching IPv6 Matches arbitrary IPv4 and IPv6
+ // All-Matching IPv6 Matches arbitrary IPv6
BOOST_CHECK(ResolveSubNet("::/0").Match(ResolveIP("1:2:3:4:5:6:7:1234")));
// But not `::` or `0.0.0.0` because they are considered invalid addresses
BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("::")));
BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("0.0.0.0")));
- BOOST_CHECK(ResolveSubNet("::/0").Match(ResolveIP("1.2.3.4")));
+ // Addresses from one network (IPv4) don't belong to subnets of another network (IPv6)
+ BOOST_CHECK(!ResolveSubNet("::/0").Match(ResolveIP("1.2.3.4")));
// All-Matching IPv4 does not Match IPv6
BOOST_CHECK(!ResolveSubNet("0.0.0.0/0").Match(ResolveIP("1:2:3:4:5:6:7:1234")));
// Invalid subnets Match nothing (not even invalid addresses)
@@ -176,6 +185,7 @@ BOOST_AUTO_TEST_CASE(subnet_test)
BOOST_CHECK(!ResolveSubNet("1.2.3.0/-1").IsValid());
BOOST_CHECK(ResolveSubNet("1.2.3.0/32").IsValid());
BOOST_CHECK(!ResolveSubNet("1.2.3.0/33").IsValid());
+ BOOST_CHECK(!ResolveSubNet("1.2.3.0/300").IsValid());
BOOST_CHECK(ResolveSubNet("1:2:3:4:5:6:7:8/0").IsValid());
BOOST_CHECK(ResolveSubNet("1:2:3:4:5:6:7:8/33").IsValid());
BOOST_CHECK(!ResolveSubNet("1:2:3:4:5:6:7:8/-1").IsValid());
@@ -207,6 +217,11 @@ BOOST_AUTO_TEST_CASE(subnet_test)
BOOST_CHECK(CSubNet(ResolveIP("1:2:3:4:5:6:7:8")).Match(ResolveIP("1:2:3:4:5:6:7:8")));
BOOST_CHECK(!CSubNet(ResolveIP("1:2:3:4:5:6:7:8")).Match(ResolveIP("1:2:3:4:5:6:7:9")));
BOOST_CHECK(CSubNet(ResolveIP("1:2:3:4:5:6:7:8")).ToString() == "1:2:3:4:5:6:7:8/128");
+ // IPv4 address with IPv6 netmask or the other way around.
+ BOOST_CHECK(!CSubNet(ResolveIP("1.1.1.1"), ResolveIP("ffff::")).IsValid());
+ BOOST_CHECK(!CSubNet(ResolveIP("::1"), ResolveIP("255.0.0.0")).IsValid());
+ // Can't subnet TOR (or any other non-IPv4 and non-IPv6 network).
+ BOOST_CHECK(!CSubNet(ResolveIP("5wyqrzbvrdsumnok.onion"), ResolveIP("255.0.0.0")).IsValid());
subnet = ResolveSubNet("1.2.3.4/255.255.255.255");
BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.4/32");
@@ -281,11 +296,13 @@ BOOST_AUTO_TEST_CASE(subnet_test)
BOOST_CHECK_EQUAL(subnet.ToString(), "1::/16");
subnet = ResolveSubNet("1:2:3:4:5:6:7:8/0000:0000:0000:0000:0000:0000:0000:0000");
BOOST_CHECK_EQUAL(subnet.ToString(), "::/0");
+ // Invalid netmasks (with 1-bits after 0-bits)
subnet = ResolveSubNet("1.2.3.4/255.255.232.0");
- BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/255.255.232.0");
+ BOOST_CHECK(!subnet.IsValid());
+ subnet = ResolveSubNet("1.2.3.4/255.0.255.255");
+ BOOST_CHECK(!subnet.IsValid());
subnet = ResolveSubNet("1:2:3:4:5:6:7:8/ffff:ffff:ffff:fffe:ffff:ffff:ffff:ff0f");
- BOOST_CHECK_EQUAL(subnet.ToString(), "1:2:3:4:5:6:7:8/ffff:ffff:ffff:fffe:ffff:ffff:ffff:ff0f");
-
+ BOOST_CHECK(!subnet.IsValid());
}
BOOST_AUTO_TEST_CASE(netbase_getgroup)
@@ -397,13 +414,14 @@ BOOST_AUTO_TEST_CASE(netpermissions_test)
BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay,mempool@1.2.3.4/32", whitelistPermissions, error));
const auto strings = NetPermissions::ToStrings(PF_ALL);
- BOOST_CHECK_EQUAL(strings.size(), 6U);
+ BOOST_CHECK_EQUAL(strings.size(), 7U);
BOOST_CHECK(std::find(strings.begin(), strings.end(), "bloomfilter") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "forcerelay") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "relay") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "noban") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "mempool") != strings.end());
BOOST_CHECK(std::find(strings.begin(), strings.end(), "download") != strings.end());
+ BOOST_CHECK(std::find(strings.begin(), strings.end(), "addr") != strings.end());
}
BOOST_AUTO_TEST_CASE(netbase_dont_resolve_strings_with_embedded_nul_characters)
@@ -418,7 +436,8 @@ BOOST_AUTO_TEST_CASE(netbase_dont_resolve_strings_with_embedded_nul_characters)
BOOST_CHECK(!LookupSubNet(std::string("1.2.3.0/24\0", 11), ret));
BOOST_CHECK(!LookupSubNet(std::string("1.2.3.0/24\0example.com", 22), ret));
BOOST_CHECK(!LookupSubNet(std::string("1.2.3.0/24\0example.com\0", 23), ret));
- BOOST_CHECK(LookupSubNet(std::string("5wyqrzbvrdsumnok.onion", 22), ret));
+ // We only do subnetting for IPv4 and IPv6
+ BOOST_CHECK(!LookupSubNet(std::string("5wyqrzbvrdsumnok.onion", 22), ret));
BOOST_CHECK(!LookupSubNet(std::string("5wyqrzbvrdsumnok.onion\0", 23), ret));
BOOST_CHECK(!LookupSubNet(std::string("5wyqrzbvrdsumnok.onion\0example.com", 34), ret));
BOOST_CHECK(!LookupSubNet(std::string("5wyqrzbvrdsumnok.onion\0example.com\0", 35), ret));
diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp
index 77d748241b..1d6bcadf69 100644
--- a/src/test/script_standard_tests.cpp
+++ b/src/test/script_standard_tests.cpp
@@ -216,7 +216,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination)
s << OP_0 << ToByteVector(pubkey.GetID());
BOOST_CHECK(ExtractDestination(s, address));
WitnessV0KeyHash keyhash;
- CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(keyhash.begin());
+ CHash160().Write(pubkey).Finalize(keyhash);
BOOST_CHECK(boost::get<WitnessV0KeyHash>(&address) && *boost::get<WitnessV0KeyHash>(&address) == keyhash);
// TxoutType::WITNESS_V0_SCRIPTHASH
@@ -349,21 +349,16 @@ BOOST_AUTO_TEST_CASE(script_standard_GetScriptFor_)
result = GetScriptForMultisig(2, std::vector<CPubKey>(pubkeys, pubkeys + 3));
BOOST_CHECK(result == expected);
- // GetScriptForWitness
- CScript witnessScript;
-
- witnessScript << ToByteVector(pubkeys[0]) << OP_CHECKSIG;
+ // WitnessV0KeyHash
expected.clear();
expected << OP_0 << ToByteVector(pubkeys[0].GetID());
- result = GetScriptForWitness(witnessScript);
+ result = GetScriptForDestination(WitnessV0KeyHash(Hash160(ToByteVector(pubkeys[0]))));
BOOST_CHECK(result == expected);
-
- witnessScript.clear();
- witnessScript << OP_DUP << OP_HASH160 << ToByteVector(pubkeys[0].GetID()) << OP_EQUALVERIFY << OP_CHECKSIG;
- result = GetScriptForWitness(witnessScript);
+ result = GetScriptForDestination(WitnessV0KeyHash(pubkeys[0].GetID()));
BOOST_CHECK(result == expected);
- witnessScript.clear();
+ // WitnessV0ScriptHash (multisig)
+ CScript witnessScript;
witnessScript << OP_1 << ToByteVector(pubkeys[0]) << OP_1 << OP_CHECKMULTISIG;
uint256 scriptHash;
@@ -372,7 +367,7 @@ BOOST_AUTO_TEST_CASE(script_standard_GetScriptFor_)
expected.clear();
expected << OP_0 << ToByteVector(scriptHash);
- result = GetScriptForWitness(witnessScript);
+ result = GetScriptForDestination(WitnessV0ScriptHash(witnessScript));
BOOST_CHECK(result == expected);
}
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index cb3ae290d1..0830743d61 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -282,7 +282,7 @@ public:
CScript scriptPubKey = script;
if (wm == WitnessMode::PKH) {
uint160 hash;
- CHash160().Write(&script[1], script.size() - 1).Finalize(hash.begin());
+ CHash160().Write(MakeSpan(script).subspan(1)).Finalize(hash);
script = CScript() << OP_DUP << OP_HASH160 << ToByteVector(hash) << OP_EQUALVERIFY << OP_CHECKSIG;
scriptPubKey = CScript() << witnessversion << ToByteVector(hash);
} else if (wm == WitnessMode::SH) {
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index c2328f931c..f625b67c2a 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -145,7 +145,7 @@ BOOST_AUTO_TEST_CASE(floats)
for (int i = 0; i < 1000; i++) {
ss << float(i);
}
- BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c"));
+ BOOST_CHECK(Hash(ss) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c"));
// decode
for (int i = 0; i < 1000; i++) {
@@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(doubles)
for (int i = 0; i < 1000; i++) {
ss << double(i);
}
- BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96"));
+ BOOST_CHECK(Hash(ss) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96"));
// decode
for (int i = 0; i < 1000; i++) {
diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp
index 1a2d775f49..548fd020a6 100644
--- a/src/test/settings_tests.cpp
+++ b/src/test/settings_tests.cpp
@@ -228,7 +228,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup)
if (OnlyHasDefaultSectionSetting(settings, network, name)) desc += " ignored";
desc += "\n";
- out_sha.Write((const unsigned char*)desc.data(), desc.size());
+ out_sha.Write(MakeUCharSpan(desc));
if (out_file) {
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
}
@@ -241,7 +241,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup)
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
out_sha.Finalize(out_sha_bytes);
- std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes));
+ std::string out_sha_hex = HexStr(out_sha_bytes);
// If check below fails, should manually dump the results with:
//
diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp
index 6e36bce7a1..7e5274450d 100644
--- a/src/test/sigopcount_tests.cpp
+++ b/src/test/sigopcount_tests.cpp
@@ -154,8 +154,7 @@ BOOST_AUTO_TEST_CASE(GetTxSigOpCost)
// P2WPKH witness program
{
- CScript p2pk = CScript() << ToByteVector(pubkey) << OP_CHECKSIG;
- CScript scriptPubKey = GetScriptForWitness(p2pk);
+ CScript scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(pubkey));
CScript scriptSig = CScript();
CScriptWitness scriptWitness;
scriptWitness.stack.push_back(std::vector<unsigned char>(0));
@@ -183,8 +182,7 @@ BOOST_AUTO_TEST_CASE(GetTxSigOpCost)
// P2WPKH nested in P2SH
{
- CScript p2pk = CScript() << ToByteVector(pubkey) << OP_CHECKSIG;
- CScript scriptSig = GetScriptForWitness(p2pk);
+ CScript scriptSig = GetScriptForDestination(WitnessV0KeyHash(pubkey));
CScript scriptPubKey = GetScriptForDestination(ScriptHash(scriptSig));
scriptSig = CScript() << ToByteVector(scriptSig);
CScriptWitness scriptWitness;
@@ -199,7 +197,7 @@ BOOST_AUTO_TEST_CASE(GetTxSigOpCost)
// P2WSH witness program
{
CScript witnessScript = CScript() << 1 << ToByteVector(pubkey) << ToByteVector(pubkey) << 2 << OP_CHECKMULTISIGVERIFY;
- CScript scriptPubKey = GetScriptForWitness(witnessScript);
+ CScript scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(witnessScript));
CScript scriptSig = CScript();
CScriptWitness scriptWitness;
scriptWitness.stack.push_back(std::vector<unsigned char>(0));
@@ -215,7 +213,7 @@ BOOST_AUTO_TEST_CASE(GetTxSigOpCost)
// P2WSH nested in P2SH
{
CScript witnessScript = CScript() << 1 << ToByteVector(pubkey) << ToByteVector(pubkey) << 2 << OP_CHECKMULTISIGVERIFY;
- CScript redeemScript = GetScriptForWitness(witnessScript);
+ CScript redeemScript = GetScriptForDestination(WitnessV0ScriptHash(witnessScript));
CScript scriptPubKey = GetScriptForDestination(ScriptHash(redeemScript));
CScript scriptSig = CScript() << ToByteVector(redeemScript);
CScriptWitness scriptWitness;
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 3ea8714f3a..19029ebd3c 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -14,6 +14,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
{
LOCK2(mutex1, mutex2);
}
+ BOOST_CHECK(LockStackEmpty());
bool error_thrown = false;
try {
LOCK2(mutex2, mutex1);
@@ -21,6 +22,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected: mutex1 -> mutex2 -> mutex1");
error_thrown = true;
}
+ BOOST_CHECK(LockStackEmpty());
#ifdef DEBUG_LOCKORDER
BOOST_CHECK(error_thrown);
#else
@@ -40,9 +42,13 @@ BOOST_AUTO_TEST_CASE(potential_deadlock_detected)
RecursiveMutex rmutex1, rmutex2;
TestPotentialDeadLockDetected(rmutex1, rmutex2);
+ // The second test ensures that lock tracking data have not been broken by exception.
+ TestPotentialDeadLockDetected(rmutex1, rmutex2);
Mutex mutex1, mutex2;
TestPotentialDeadLockDetected(mutex1, mutex2);
+ // The second test ensures that lock tracking data have not been broken by exception.
+ TestPotentialDeadLockDetected(mutex1, mutex2);
#ifdef DEBUG_LOCKORDER
g_debug_lockorder_abort = prev;
diff --git a/src/test/system_tests.cpp b/src/test/system_tests.cpp
new file mode 100644
index 0000000000..a55145c738
--- /dev/null
+++ b/src/test/system_tests.cpp
@@ -0,0 +1,95 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include <test/util/setup_common.h>
+#include <util/system.h>
+#include <univalue.h>
+
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(system_tests, BasicTestingSetup)
+
+// At least one test is required (in case HAVE_BOOST_PROCESS is not defined).
+// Workaround for https://github.com/bitcoin/bitcoin/issues/19128
+BOOST_AUTO_TEST_CASE(dummy)
+{
+ BOOST_CHECK(true);
+}
+
+#ifdef HAVE_BOOST_PROCESS
+
+bool checkMessage(const std::runtime_error& ex)
+{
+ // On Linux & Mac: "No such file or directory"
+ // On Windows: "The system cannot find the file specified."
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("file") != std::string::npos);
+ return true;
+}
+
+bool checkMessageFalse(const std::runtime_error& ex)
+{
+ BOOST_CHECK_EQUAL(ex.what(), std::string("RunCommandParseJSON error: process(false) returned 1: \n"));
+ return true;
+}
+
+bool checkMessageStdErr(const std::runtime_error& ex)
+{
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("RunCommandParseJSON error:") != std::string::npos);
+ return checkMessage(ex);
+}
+
+BOOST_AUTO_TEST_CASE(run_command)
+{
+ {
+ const UniValue result = RunCommandParseJSON("");
+ BOOST_CHECK(result.isNull());
+ }
+ {
+#ifdef WIN32
+ // Windows requires single quotes to prevent escaping double quotes from the JSON...
+ const UniValue result = RunCommandParseJSON("echo '{\"success\": true}'");
+#else
+ // ... but Linux and macOS echo a single quote if it's used
+ const UniValue result = RunCommandParseJSON("echo \"{\"success\": true}\"");
+#endif
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+ {
+ // An invalid command is handled by Boost
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("invalid_command"), boost::process::process_error, checkMessage); // Command failed
+ }
+ {
+ // Return non-zero exit code, no output to stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("false"), std::runtime_error, checkMessageFalse);
+ }
+ {
+ // Return non-zero exit code, with error message for stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("ls nosuchfile"), std::runtime_error, checkMessageStdErr);
+ }
+ {
+ BOOST_REQUIRE_THROW(RunCommandParseJSON("echo \"{\""), std::runtime_error); // Unable to parse JSON
+ }
+ // Test passing input via stdin (not supported on Windows)
+#ifndef WIN32
+ {
+ const UniValue result = RunCommandParseJSON("cat", "{\"success\": true}");
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+#endif
+}
+#endif // HAVE_BOOST_PROCESS
+
+BOOST_AUTO_TEST_SUITE_END()
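From a caller's point of view the new helper behaves roughly as follows (quoting follows the non-Windows branch of the test above; the command itself is only illustrative):

    #ifdef HAVE_BOOST_PROCESS
        try {
            const UniValue reply = RunCommandParseJSON("echo \"{\"success\": true}\"");
            const bool ok = find_value(reply, "success").getBool();
        } catch (const std::runtime_error& e) {
            // Non-zero exit codes and unparsable output both land here.
        }
    #endif // HAVE_BOOST_PROCESS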
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 4bf6e734ce..94b5dba913 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -361,6 +361,8 @@ static CScript PushAll(const std::vector<valtype>& values)
result << OP_0;
} else if (v.size() == 1 && v[0] >= 1 && v[0] <= 16) {
result << CScript::EncodeOP_N(v[0]);
+ } else if (v.size() == 1 && v[0] == 0x81) {
+ result << OP_1NEGATE;
} else {
result << v;
}
@@ -499,13 +501,19 @@ BOOST_AUTO_TEST_CASE(test_witness)
BOOST_CHECK(keystore.AddCScript(scriptPubkey1L));
BOOST_CHECK(keystore.AddCScript(scriptPubkey2L));
BOOST_CHECK(keystore.AddCScript(scriptMulti));
- BOOST_CHECK(keystore.AddCScript(GetScriptForWitness(scriptPubkey1)));
- BOOST_CHECK(keystore.AddCScript(GetScriptForWitness(scriptPubkey2)));
- BOOST_CHECK(keystore.AddCScript(GetScriptForWitness(scriptPubkey1L)));
- BOOST_CHECK(keystore.AddCScript(GetScriptForWitness(scriptPubkey2L)));
- BOOST_CHECK(keystore.AddCScript(GetScriptForWitness(scriptMulti)));
+ CScript destination_script_1, destination_script_2, destination_script_1L, destination_script_2L, destination_script_multi;
+ destination_script_1 = GetScriptForDestination(WitnessV0KeyHash(pubkey1));
+ destination_script_2 = GetScriptForDestination(WitnessV0KeyHash(pubkey2));
+ destination_script_1L = GetScriptForDestination(WitnessV0KeyHash(pubkey1L));
+ destination_script_2L = GetScriptForDestination(WitnessV0KeyHash(pubkey2L));
+ destination_script_multi = GetScriptForDestination(WitnessV0ScriptHash(scriptMulti));
+ BOOST_CHECK(keystore.AddCScript(destination_script_1));
+ BOOST_CHECK(keystore.AddCScript(destination_script_2));
+ BOOST_CHECK(keystore.AddCScript(destination_script_1L));
+ BOOST_CHECK(keystore.AddCScript(destination_script_2L));
+ BOOST_CHECK(keystore.AddCScript(destination_script_multi));
BOOST_CHECK(keystore2.AddCScript(scriptMulti));
- BOOST_CHECK(keystore2.AddCScript(GetScriptForWitness(scriptMulti)));
+ BOOST_CHECK(keystore2.AddCScript(destination_script_multi));
BOOST_CHECK(keystore2.AddKeyPubKey(key3, pubkey3));
CTransactionRef output1, output2;
@@ -537,8 +545,8 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
// Witness pay-to-compressed-pubkey (v0).
- CreateCreditAndSpend(keystore, GetScriptForWitness(scriptPubkey1), output1, input1);
- CreateCreditAndSpend(keystore, GetScriptForWitness(scriptPubkey2), output2, input2);
+ CreateCreditAndSpend(keystore, destination_script_1, output1, input1);
+ CreateCreditAndSpend(keystore, destination_script_2, output2, input2);
CheckWithFlag(output1, input1, 0, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
@@ -549,9 +557,9 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
// P2SH witness pay-to-compressed-pubkey (v0).
- CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(GetScriptForWitness(scriptPubkey1))), output1, input1);
- CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(GetScriptForWitness(scriptPubkey2))), output2, input2);
- ReplaceRedeemScript(input2.vin[0].scriptSig, GetScriptForWitness(scriptPubkey1));
+ CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(destination_script_1)), output1, input1);
+ CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(destination_script_2)), output2, input2);
+ ReplaceRedeemScript(input2.vin[0].scriptSig, destination_script_1);
CheckWithFlag(output1, input1, 0, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
@@ -587,12 +595,12 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
// Signing disabled for witness pay-to-uncompressed-pubkey (v1).
- CreateCreditAndSpend(keystore, GetScriptForWitness(scriptPubkey1L), output1, input1, false);
- CreateCreditAndSpend(keystore, GetScriptForWitness(scriptPubkey2L), output2, input2, false);
+ CreateCreditAndSpend(keystore, destination_script_1L, output1, input1, false);
+ CreateCreditAndSpend(keystore, destination_script_2L, output2, input2, false);
// Signing disabled for P2SH witness pay-to-uncompressed-pubkey (v1).
- CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(GetScriptForWitness(scriptPubkey1L))), output1, input1, false);
- CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(GetScriptForWitness(scriptPubkey2L))), output2, input2, false);
+ CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(destination_script_1L)), output1, input1, false);
+ CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(destination_script_2L)), output2, input2, false);
// Normal 2-of-2 multisig
CreateCreditAndSpend(keystore, scriptMulti, output1, input1, false);
@@ -616,10 +624,10 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
// Witness 2-of-2 multisig
- CreateCreditAndSpend(keystore, GetScriptForWitness(scriptMulti), output1, input1, false);
+ CreateCreditAndSpend(keystore, destination_script_multi, output1, input1, false);
CheckWithFlag(output1, input1, 0, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
- CreateCreditAndSpend(keystore2, GetScriptForWitness(scriptMulti), output2, input2, false);
+ CreateCreditAndSpend(keystore2, destination_script_multi, output2, input2, false);
CheckWithFlag(output2, input2, 0, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
BOOST_CHECK(*output1 == *output2);
@@ -628,10 +636,10 @@ BOOST_AUTO_TEST_CASE(test_witness)
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
// P2SH witness 2-of-2 multisig
- CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(GetScriptForWitness(scriptMulti))), output1, input1, false);
+ CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(destination_script_multi)), output1, input1, false);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
- CreateCreditAndSpend(keystore2, GetScriptForDestination(ScriptHash(GetScriptForWitness(scriptMulti))), output2, input2, false);
+ CreateCreditAndSpend(keystore2, GetScriptForDestination(ScriptHash(destination_script_multi)), output2, input2, false);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
BOOST_CHECK(*output1 == *output2);
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index cdef7dcc3c..034577aa2c 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -157,7 +157,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
CScript p2pk_scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
CScript p2sh_scriptPubKey = GetScriptForDestination(ScriptHash(p2pk_scriptPubKey));
CScript p2pkh_scriptPubKey = GetScriptForDestination(PKHash(coinbaseKey.GetPubKey()));
- CScript p2wpkh_scriptPubKey = GetScriptForWitness(p2pkh_scriptPubKey);
+ CScript p2wpkh_scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(coinbaseKey.GetPubKey()));
FillableSigningProvider keystore;
BOOST_CHECK(keystore.AddKey(coinbaseKey));
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 24c0d6382b..d9a00c2205 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -11,6 +11,7 @@
#include <consensus/validation.h>
#include <crypto/sha256.h>
#include <init.h>
+#include <interfaces/chain.h>
#include <miner.h>
#include <net.h>
#include <net_processing.h>
@@ -32,6 +33,7 @@
#include <util/vector.h>
#include <validation.h>
#include <validationinterface.h>
+#include <walletinitinterface.h>
#include <functional>
@@ -95,8 +97,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
SelectParams(chainName);
SeedInsecureRand();
if (G_TEST_LOG_FUN) LogInstance().PushBackCallback(G_TEST_LOG_FUN);
- InitLogging();
- AppInitParameterInteraction();
+ InitLogging(*m_node.args);
+ AppInitParameterInteraction(*m_node.args);
LogInstance().StartLogging();
SHA256AutoDetect();
ECC_Start();
@@ -104,6 +106,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
SetupNetworking();
InitSignatureCache();
InitScriptExecutionCache();
+ m_node.chain = interfaces::MakeChain(m_node);
+ g_wallet_init_interface.Construct(m_node);
fCheckBlockIndex = true;
static bool noui_connected = false;
if (!noui_connected) {
@@ -142,7 +146,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
::ChainstateActive().InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
assert(!::ChainstateActive().CanFlushToDisk());
- ::ChainstateActive().InitCoinsCache();
+ ::ChainstateActive().InitCoinsCache(1 << 23);
assert(::ChainstateActive().CanFlushToDisk());
if (!LoadGenesisBlock(chainparams)) {
throw std::runtime_error("LoadGenesisBlock failed.");
@@ -164,7 +168,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
m_node.mempool->setSanityCheck(1.0);
m_node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = MakeUnique<CConnman>(0x1337, 0x1337); // Deterministic randomness for tests.
- m_node.peer_logic = MakeUnique<PeerLogicValidation>(m_node.connman.get(), m_node.banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ m_node.peer_logic = MakeUnique<PeerLogicValidation>(*m_node.connman, m_node.banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
{
CConnman::Options options;
options.m_msgproc = m_node.peer_logic.get();
@@ -182,9 +186,9 @@ TestingSetup::~TestingSetup()
m_node.connman.reset();
m_node.banman.reset();
m_node.args = nullptr;
+ UnloadBlockIndex(m_node.mempool);
m_node.mempool = nullptr;
m_node.scheduler.reset();
- UnloadBlockIndex();
m_node.chainman->Reset();
m_node.chainman = nullptr;
pblocktree.reset();
@@ -192,49 +196,34 @@ TestingSetup::~TestingSetup()
TestChain100Setup::TestChain100Setup()
{
- // CreateAndProcessBlock() does not support building SegWit blocks, so don't activate in these tests.
- // TODO: fix the code to support SegWit blocks.
- gArgs.ForceSetArg("-segwitheight", "432");
- // Need to recreate chainparams
- SelectParams(CBaseChainParams::REGTEST);
-
// Generate a 100-block chain:
coinbaseKey.MakeNewKey(true);
- CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
- for (int i = 0; i < COINBASE_MATURITY; i++)
- {
+ CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
+ for (int i = 0; i < COINBASE_MATURITY; i++) {
std::vector<CMutableTransaction> noTxns;
CBlock b = CreateAndProcessBlock(noTxns, scriptPubKey);
m_coinbase_txns.push_back(b.vtx[0]);
}
}
-// Create a new block with just given transactions, coinbase paying to
-// scriptPubKey, and try to add it to the current chain.
CBlock TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>& txns, const CScript& scriptPubKey)
{
const CChainParams& chainparams = Params();
- std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler(*m_node.mempool, chainparams).CreateNewBlock(scriptPubKey);
- CBlock& block = pblocktemplate->block;
+ CTxMemPool empty_pool;
+ CBlock block = BlockAssembler(empty_pool, chainparams).CreateNewBlock(scriptPubKey)->block;
- // Replace mempool-selected txns with just coinbase plus passed-in txns:
- block.vtx.resize(1);
- for (const CMutableTransaction& tx : txns)
+ Assert(block.vtx.size() == 1);
+ for (const CMutableTransaction& tx : txns) {
block.vtx.push_back(MakeTransactionRef(tx));
- // IncrementExtraNonce creates a valid coinbase and merkleRoot
- {
- LOCK(cs_main);
- unsigned int extraNonce = 0;
- IncrementExtraNonce(&block, ::ChainActive().Tip(), extraNonce);
}
+ RegenerateCommitments(block);
while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce;
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block);
Assert(m_node.chainman)->ProcessNewBlock(chainparams, shared_pblock, true, nullptr);
- CBlock result = block;
- return result;
+ return block;
}
TestChain100Setup::~TestChain100Setup()
@@ -242,8 +231,8 @@ TestChain100Setup::~TestChain100Setup()
gArgs.ForceSetArg("-segwitheight", "0");
}
-
-CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction &tx) {
+CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction& tx)
+{
return FromTx(MakeTransactionRef(tx));
}
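A minimal usage sketch of the refactored fixture (the constructor already mines COINBASE_MATURITY blocks; names as in the hunks above):

    TestChain100Setup setup;
    const CScript dest = CScript() << ToByteVector(setup.coinbaseKey.GetPubKey()) << OP_CHECKSIG;
    // The assembler now starts from an empty mempool, Assert()s the template is
    // coinbase-only, appends the passed-in transactions, and calls
    // RegenerateCommitments() to rebuild the witness commitment and merkle root
    // before grinding the nonce.
    CBlock block = setup.CreateAndProcessBlock({}, dest);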
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index 78b279e42a..22f5d6d936 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -102,15 +102,16 @@ class CBlock;
struct CMutableTransaction;
class CScript;
-//
-// Testing fixture that pre-creates a
-// 100-block REGTEST-mode block chain
-//
+/**
+ * Testing fixture that pre-creates a 100-block REGTEST-mode block chain
+ */
struct TestChain100Setup : public RegTestingSetup {
TestChain100Setup();
- // Create a new block with just given transactions, coinbase paying to
- // scriptPubKey, and try to add it to the current chain.
+ /**
+ * Create a new block with just given transactions, coinbase paying to
+ * scriptPubKey, and try to add it to the current chain.
+ */
CBlock CreateAndProcessBlock(const std::vector<CMutableTransaction>& txns,
const CScript& scriptPubKey);
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index a30e366028..bf7c6c3e3e 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -105,47 +105,24 @@ BOOST_AUTO_TEST_CASE(util_ParseHex)
BOOST_AUTO_TEST_CASE(util_HexStr)
{
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_expected, ParseHex_expected + sizeof(ParseHex_expected)),
+ HexStr(ParseHex_expected),
"04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f");
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_expected + sizeof(ParseHex_expected),
- ParseHex_expected + sizeof(ParseHex_expected)),
+ HexStr(Span<const unsigned char>(
+ ParseHex_expected + sizeof(ParseHex_expected),
+ ParseHex_expected + sizeof(ParseHex_expected))),
"");
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_expected, ParseHex_expected),
+ HexStr(Span<const unsigned char>(ParseHex_expected, ParseHex_expected)),
"");
std::vector<unsigned char> ParseHex_vec(ParseHex_expected, ParseHex_expected + 5);
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_vec.rbegin(), ParseHex_vec.rend()),
- "b0fd8a6704"
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- ""
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 1),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- "04"
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 5),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- "b0fd8a6704"
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 65),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- "5f1df16b2b704c8a578d0bbaf74d385cde12c11ee50455f3c438ef4c3fbcf649b6de611feae06279a60939e028a8d65c10b73071a6f16719274855feb0fd8a6704"
+ HexStr(ParseHex_vec),
+ "04678afdb0"
);
}
@@ -573,57 +550,52 @@ BOOST_AUTO_TEST_CASE(util_ReadConfigStream)
BOOST_CHECK(test_args.m_settings.ro_config["sec1"].size() == 3);
BOOST_CHECK(test_args.m_settings.ro_config["sec2"].size() == 2);
- BOOST_CHECK(test_args.m_settings.ro_config[""].count("a")
- && test_args.m_settings.ro_config[""].count("b")
- && test_args.m_settings.ro_config[""].count("ccc")
- && test_args.m_settings.ro_config[""].count("d")
- && test_args.m_settings.ro_config[""].count("fff")
- && test_args.m_settings.ro_config[""].count("ggg")
- && test_args.m_settings.ro_config[""].count("h")
- && test_args.m_settings.ro_config[""].count("i")
- );
- BOOST_CHECK(test_args.m_settings.ro_config["sec1"].count("ccc")
- && test_args.m_settings.ro_config["sec1"].count("h")
- && test_args.m_settings.ro_config["sec2"].count("ccc")
- && test_args.m_settings.ro_config["sec2"].count("iii")
- );
-
- BOOST_CHECK(test_args.IsArgSet("-a")
- && test_args.IsArgSet("-b")
- && test_args.IsArgSet("-ccc")
- && test_args.IsArgSet("-d")
- && test_args.IsArgSet("-fff")
- && test_args.IsArgSet("-ggg")
- && test_args.IsArgSet("-h")
- && test_args.IsArgSet("-i")
- && !test_args.IsArgSet("-zzz")
- && !test_args.IsArgSet("-iii")
- );
-
- BOOST_CHECK(test_args.GetArg("-a", "xxx") == ""
- && test_args.GetArg("-b", "xxx") == "1"
- && test_args.GetArg("-ccc", "xxx") == "argument"
- && test_args.GetArg("-d", "xxx") == "e"
- && test_args.GetArg("-fff", "xxx") == "0"
- && test_args.GetArg("-ggg", "xxx") == "1"
- && test_args.GetArg("-h", "xxx") == "0"
- && test_args.GetArg("-i", "xxx") == "1"
- && test_args.GetArg("-zzz", "xxx") == "xxx"
- && test_args.GetArg("-iii", "xxx") == "xxx"
- );
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("a"));
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("b"));
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("ccc"));
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("d"));
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("fff"));
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("ggg"));
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("h"));
+ BOOST_CHECK(test_args.m_settings.ro_config[""].count("i"));
+ BOOST_CHECK(test_args.m_settings.ro_config["sec1"].count("ccc"));
+ BOOST_CHECK(test_args.m_settings.ro_config["sec1"].count("h"));
+ BOOST_CHECK(test_args.m_settings.ro_config["sec2"].count("ccc"));
+ BOOST_CHECK(test_args.m_settings.ro_config["sec2"].count("iii"));
+
+ BOOST_CHECK(test_args.IsArgSet("-a"));
+ BOOST_CHECK(test_args.IsArgSet("-b"));
+ BOOST_CHECK(test_args.IsArgSet("-ccc"));
+ BOOST_CHECK(test_args.IsArgSet("-d"));
+ BOOST_CHECK(test_args.IsArgSet("-fff"));
+ BOOST_CHECK(test_args.IsArgSet("-ggg"));
+ BOOST_CHECK(test_args.IsArgSet("-h"));
+ BOOST_CHECK(test_args.IsArgSet("-i"));
+ BOOST_CHECK(!test_args.IsArgSet("-zzz"));
+ BOOST_CHECK(!test_args.IsArgSet("-iii"));
+
+ BOOST_CHECK_EQUAL(test_args.GetArg("-a", "xxx"), "");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-b", "xxx"), "1");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-ccc", "xxx"), "argument");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-d", "xxx"), "e");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-fff", "xxx"), "0");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-ggg", "xxx"), "1");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-h", "xxx"), "0");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-i", "xxx"), "1");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-zzz", "xxx"), "xxx");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-iii", "xxx"), "xxx");
for (const bool def : {false, true}) {
- BOOST_CHECK(test_args.GetBoolArg("-a", def)
- && test_args.GetBoolArg("-b", def)
- && !test_args.GetBoolArg("-ccc", def)
- && !test_args.GetBoolArg("-d", def)
- && !test_args.GetBoolArg("-fff", def)
- && test_args.GetBoolArg("-ggg", def)
- && !test_args.GetBoolArg("-h", def)
- && test_args.GetBoolArg("-i", def)
- && test_args.GetBoolArg("-zzz", def) == def
- && test_args.GetBoolArg("-iii", def) == def
- );
+ BOOST_CHECK(test_args.GetBoolArg("-a", def));
+ BOOST_CHECK(test_args.GetBoolArg("-b", def));
+ BOOST_CHECK(!test_args.GetBoolArg("-ccc", def));
+ BOOST_CHECK(!test_args.GetBoolArg("-d", def));
+ BOOST_CHECK(!test_args.GetBoolArg("-fff", def));
+ BOOST_CHECK(test_args.GetBoolArg("-ggg", def));
+ BOOST_CHECK(!test_args.GetBoolArg("-h", def));
+ BOOST_CHECK(test_args.GetBoolArg("-i", def));
+ BOOST_CHECK(test_args.GetBoolArg("-zzz", def) == def);
+ BOOST_CHECK(test_args.GetBoolArg("-iii", def) == def);
}
BOOST_CHECK(test_args.GetArgs("-a").size() == 1
@@ -659,13 +631,12 @@ BOOST_AUTO_TEST_CASE(util_ReadConfigStream)
test_args.SelectConfigNetwork("sec1");
// same as original
- BOOST_CHECK(test_args.GetArg("-a", "xxx") == ""
- && test_args.GetArg("-b", "xxx") == "1"
- && test_args.GetArg("-fff", "xxx") == "0"
- && test_args.GetArg("-ggg", "xxx") == "1"
- && test_args.GetArg("-zzz", "xxx") == "xxx"
- && test_args.GetArg("-iii", "xxx") == "xxx"
- );
+ BOOST_CHECK_EQUAL(test_args.GetArg("-a", "xxx"), "");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-b", "xxx"), "1");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-fff", "xxx"), "0");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-ggg", "xxx"), "1");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-zzz", "xxx"), "xxx");
+ BOOST_CHECK_EQUAL(test_args.GetArg("-iii", "xxx"), "xxx");
// d is overridden
BOOST_CHECK(test_args.GetArg("-d", "xxx") == "eee");
// section-specific setting
@@ -680,14 +651,13 @@ BOOST_AUTO_TEST_CASE(util_ReadConfigStream)
test_args.SelectConfigNetwork("sec2");
// same as original
- BOOST_CHECK(test_args.GetArg("-a", "xxx") == ""
- && test_args.GetArg("-b", "xxx") == "1"
- && test_args.GetArg("-d", "xxx") == "e"
- && test_args.GetArg("-fff", "xxx") == "0"
- && test_args.GetArg("-ggg", "xxx") == "1"
- && test_args.GetArg("-zzz", "xxx") == "xxx"
- && test_args.GetArg("-h", "xxx") == "0"
- );
+ BOOST_CHECK(test_args.GetArg("-a", "xxx") == "");
+ BOOST_CHECK(test_args.GetArg("-b", "xxx") == "1");
+ BOOST_CHECK(test_args.GetArg("-d", "xxx") == "e");
+ BOOST_CHECK(test_args.GetArg("-fff", "xxx") == "0");
+ BOOST_CHECK(test_args.GetArg("-ggg", "xxx") == "1");
+ BOOST_CHECK(test_args.GetArg("-zzz", "xxx") == "xxx");
+ BOOST_CHECK(test_args.GetArg("-h", "xxx") == "0");
// section-specific setting
BOOST_CHECK(test_args.GetArg("-iii", "xxx") == "2");
// section takes priority for multiple values
@@ -1009,7 +979,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup)
desc += "\n";
- out_sha.Write((const unsigned char*)desc.data(), desc.size());
+ out_sha.Write(MakeUCharSpan(desc));
if (out_file) {
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
}
@@ -1022,7 +992,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup)
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
out_sha.Finalize(out_sha_bytes);
- std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes));
+ std::string out_sha_hex = HexStr(out_sha_bytes);
// If check below fails, should manually dump the results with:
//
@@ -1112,7 +1082,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup)
}
desc += "\n";
- out_sha.Write((const unsigned char*)desc.data(), desc.size());
+ out_sha.Write(MakeUCharSpan(desc));
if (out_file) {
BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size());
}
@@ -1125,7 +1095,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup)
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
out_sha.Finalize(out_sha_bytes);
- std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes));
+ std::string out_sha_hex = HexStr(out_sha_bytes);
// If check below fails, should manually dump the results with:
//
@@ -2186,8 +2156,8 @@ BOOST_AUTO_TEST_CASE(message_hash)
std::string(1, (char)unsigned_tx.length()) +
unsigned_tx;
- const uint256 signature_hash = Hash(unsigned_tx.begin(), unsigned_tx.end());
- const uint256 message_hash1 = Hash(prefixed_message.begin(), prefixed_message.end());
+ const uint256 signature_hash = Hash(unsigned_tx);
+ const uint256 message_hash1 = Hash(prefixed_message);
const uint256 message_hash2 = MessageHash(unsigned_tx);
BOOST_CHECK_EQUAL(message_hash1, message_hash2);
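The reverse-iterator cases were dropped because the Span-based HexStr() only walks forward; the replacement pattern (also visible in the uint256::GetHex() hunk further down) is to copy into a reversed buffer first. A short sketch reusing ParseHex_vec and the expected string from the removed checks:

    std::vector<unsigned char> rev(ParseHex_vec.rbegin(), ParseHex_vec.rend());
    BOOST_CHECK_EQUAL(HexStr(rev), "b0fd8a6704");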
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
new file mode 100644
index 0000000000..2076a1096a
--- /dev/null
+++ b/src/test/validation_chainstate_tests.cpp
@@ -0,0 +1,74 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include <random.h>
+#include <uint256.h>
+#include <consensus/validation.h>
+#include <sync.h>
+#include <test/util/setup_common.h>
+#include <validation.h>
+
+#include <vector>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(validation_chainstate_tests, TestingSetup)
+
+//! Test resizing coins-related CChainState caches during runtime.
+//!
+BOOST_AUTO_TEST_CASE(validation_chainstate_resize_caches)
+{
+ ChainstateManager manager;
+
+ //! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view.
+ auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint {
+ Coin newcoin;
+ uint256 txid = InsecureRand256();
+ COutPoint outp{txid, 0};
+ newcoin.nHeight = 1;
+ newcoin.out.nValue = InsecureRand32();
+ newcoin.out.scriptPubKey.assign((uint32_t)56, 1);
+ coins_view.AddCoin(outp, std::move(newcoin), false);
+
+ return outp;
+ };
+
+ CChainState& c1 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate());
+ c1.InitCoinsDB(
+ /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
+ WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23));
+
+ // Add a coin to the in-memory cache, upsize once, then downsize.
+ {
+ LOCK(::cs_main);
+ auto outpoint = add_coin(c1.CoinsTip());
+
+ // Set a meaningless bestblock value in the coinsview cache - otherwise we won't
+ // flush during ResizeCoinsCaches() and will subsequently hit an assertion.
+ c1.CoinsTip().SetBestBlock(InsecureRand256());
+
+ BOOST_CHECK(c1.CoinsTip().HaveCoinInCache(outpoint));
+
+ c1.ResizeCoinsCaches(
+ 1 << 24, // upsizing the coinsview cache
+ 1 << 22 // downsizing the coinsdb cache
+ );
+
+ // View should still have the coin cached, since we haven't destructed the cache on upsize.
+ BOOST_CHECK(c1.CoinsTip().HaveCoinInCache(outpoint));
+
+ c1.ResizeCoinsCaches(
+ 1 << 22, // downsizing the coinsview cache
+ 1 << 23 // upsizing the coinsdb cache
+ );
+
+ // The view cache should be empty since we had to destruct to downsize.
+ BOOST_CHECK(!c1.CoinsTip().HaveCoinInCache(outpoint));
+ }
+
+ // Avoid triggering the address sanitizer.
+ WITH_LOCK(::cs_main, manager.Unload());
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 0d149285ad..887a48124f 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -28,13 +28,11 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
// Create a legacy (IBD) chainstate.
//
- ENTER_CRITICAL_SECTION(cs_main);
- CChainState& c1 = manager.InitializeChainstate();
- LEAVE_CRITICAL_SECTION(cs_main);
+ CChainState& c1 = *WITH_LOCK(::cs_main, return &manager.InitializeChainstate());
chainstates.push_back(&c1);
c1.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
- WITH_LOCK(::cs_main, c1.InitCoinsCache());
+ WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23));
BOOST_CHECK(!manager.IsSnapshotActive());
BOOST_CHECK(!manager.IsSnapshotValidated());
@@ -56,13 +54,11 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
// Create a snapshot-based chainstate.
//
- ENTER_CRITICAL_SECTION(cs_main);
- CChainState& c2 = manager.InitializeChainstate(GetRandHash());
- LEAVE_CRITICAL_SECTION(cs_main);
+ CChainState& c2 = *WITH_LOCK(::cs_main, return &manager.InitializeChainstate(GetRandHash()));
chainstates.push_back(&c2);
c2.InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
- WITH_LOCK(::cs_main, c2.InitCoinsCache());
+ WITH_LOCK(::cs_main, c2.InitCoinsCache(1 << 23));
// Unlike c1, which doesn't have any blocks. Gets us different tip, height.
c2.LoadGenesisBlock(chainparams);
BlockValidationState _;
@@ -104,4 +100,54 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
WITH_LOCK(::cs_main, manager.Unload());
}
+//! Test rebalancing the caches associated with each chainstate.
+BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches)
+{
+ ChainstateManager manager;
+ size_t max_cache = 10000;
+ manager.m_total_coinsdb_cache = max_cache;
+ manager.m_total_coinstip_cache = max_cache;
+
+ std::vector<CChainState*> chainstates;
+
+ // Create a legacy (IBD) chainstate.
+ //
+ CChainState& c1 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate());
+ chainstates.push_back(&c1);
+ c1.InitCoinsDB(
+ /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
+
+ {
+ LOCK(::cs_main);
+ c1.InitCoinsCache(1 << 23);
+ c1.CoinsTip().SetBestBlock(InsecureRand256());
+ manager.MaybeRebalanceCaches();
+ }
+
+ BOOST_CHECK_EQUAL(c1.m_coinstip_cache_size_bytes, max_cache);
+ BOOST_CHECK_EQUAL(c1.m_coinsdb_cache_size_bytes, max_cache);
+
+ // Create a snapshot-based chainstate.
+ //
+ CChainState& c2 = *WITH_LOCK(cs_main, return &manager.InitializeChainstate(GetRandHash()));
+ chainstates.push_back(&c2);
+ c2.InitCoinsDB(
+ /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
+
+ {
+ LOCK(::cs_main);
+ c2.InitCoinsCache(1 << 23);
+ c2.CoinsTip().SetBestBlock(InsecureRand256());
+ manager.MaybeRebalanceCaches();
+ }
+
+ // Since both chainstates are considered to be in initial block download,
+ // the snapshot chainstate should take priority.
+ BOOST_CHECK_CLOSE(c1.m_coinstip_cache_size_bytes, max_cache * 0.05, 1);
+ BOOST_CHECK_CLOSE(c1.m_coinsdb_cache_size_bytes, max_cache * 0.05, 1);
+ BOOST_CHECK_CLOSE(c2.m_coinstip_cache_size_bytes, max_cache * 0.95, 1);
+ BOOST_CHECK_CLOSE(c2.m_coinsdb_cache_size_bytes, max_cache * 0.95, 1);
+
+}
+
BOOST_AUTO_TEST_SUITE_END()
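The ENTER_/LEAVE_CRITICAL_SECTION pairs are folded into WITH_LOCK, which runs its body under the lock and yields the body's value; a minimal sketch of that pattern (the guarded variable here is hypothetical):

    RecursiveMutex g_mutex;
    int g_value GUARDED_BY(g_mutex) = 0;
    int ReadValue() { return WITH_LOCK(g_mutex, return g_value); }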
diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp
index a863e3a4d5..8bac914f05 100644
--- a/src/test/validation_flush_tests.cpp
+++ b/src/test/validation_flush_tests.cpp
@@ -21,7 +21,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
BlockManager blockman{};
CChainState chainstate{blockman};
chainstate.InitCoinsDB(/*cache_size_bytes*/ 1 << 10, /*in_memory*/ true, /*should_wipe*/ false);
- WITH_LOCK(::cs_main, chainstate.InitCoinsCache());
+ WITH_LOCK(::cs_main, chainstate.InitCoinsCache(1 << 10));
CTxMemPool tx_pool{};
constexpr bool is_64_bit = sizeof(void*) == 8;
@@ -56,7 +56,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// Without any coins in the cache, we shouldn't need to flush.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::OK);
// If the initial memory allocations of cacheCoins don't match these common
@@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
}
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::CRITICAL);
BOOST_TEST_MESSAGE("Exiting cache flush tests early due to unsupported arch");
@@ -92,7 +92,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::OK);
}
@@ -100,26 +100,26 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
for (int i{0}; i < 4; ++i) {
add_coin(view);
print_view_mem_usage(view);
- if (chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
+ if (chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
CoinsCacheSizeState::CRITICAL) {
break;
}
}
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
CoinsCacheSizeState::CRITICAL);
// Passing non-zero max mempool usage should allow us more headroom.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
CoinsCacheSizeState::OK);
for (int i{0}; i < 3; ++i) {
add_coin(view);
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
CoinsCacheSizeState::OK);
}
@@ -135,7 +135,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
BOOST_CHECK(usage_percentage >= 0.9);
BOOST_CHECK(usage_percentage < 1);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10),
CoinsCacheSizeState::LARGE);
}
@@ -143,7 +143,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
for (int i{0}; i < 1000; ++i) {
add_coin(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool),
+ chainstate.GetCoinsCacheSizeState(&tx_pool),
CoinsCacheSizeState::OK);
}
@@ -151,7 +151,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// preallocated memory that doesn't get reclaimed even after flush.
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0),
CoinsCacheSizeState::CRITICAL);
view.SetBestBlock(InsecureRand256());
@@ -159,7 +159,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
print_view_mem_usage(view);
BOOST_CHECK_EQUAL(
- chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ chainstate.GetCoinsCacheSizeState(&tx_pool, MAX_COINS_CACHE_BYTES, 0),
CoinsCacheSizeState::CRITICAL);
}
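The signature change above passes the mempool by pointer rather than by reference; presumably this lets a chainstate with no mempool attached pass nullptr, though that is an assumption not shown in these hunks:

    // Hypothetical caller without a mempool (assumption: nullptr means
    // "count no mempool usage").
    const CoinsCacheSizeState state =
        chainstate.GetCoinsCacheSizeState(/*tx_pool*/ nullptr, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0);
    if (state == CoinsCacheSizeState::CRITICAL) {
        // flush the coins cache to disk
    }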
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index 84118b36ef..5d56d1ff89 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -405,7 +405,7 @@ static bool WriteBinaryFile(const fs::path &filename, const std::string &data)
/****** Bitcoin specific TorController implementation ********/
/** Controller that connects to Tor control socket, authenticate, then create
- * and maintain an ephemeral hidden service.
+ * and maintain an ephemeral onion service.
*/
class TorController
{
@@ -534,7 +534,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply&
// Finally - now create the service
if (private_key.empty()) // No private key, generate one
private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214
- // Request hidden service, redirect port.
+ // Request onion service, redirect port.
// Note that the 'virtual' port is always the default port to avoid decloaking nodes using other ports.
_conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, Params().GetDefaultPort(), GetListenPort()),
std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2));
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 047560f45d..72460e7c69 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -10,6 +10,7 @@
#include <random.h>
#include <shutdown.h>
#include <uint256.h>
+#include <util/memory.h>
#include <util/system.h>
#include <util/translation.h>
#include <util/vector.h>
@@ -39,35 +40,45 @@ struct CoinEntry {
}
-CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe) : db(ldb_path, nCacheSize, fMemory, fWipe, true)
+CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, bool fWipe) :
+ m_db(MakeUnique<CDBWrapper>(ldb_path, nCacheSize, fMemory, fWipe, true)),
+ m_ldb_path(ldb_path),
+ m_is_memory(fMemory) { }
+
+void CCoinsViewDB::ResizeCache(size_t new_cache_size)
{
+ // Have to do a reset first to get the original `m_db` state to release its
+ // filesystem lock.
+ m_db.reset();
+ m_db = MakeUnique<CDBWrapper>(
+ m_ldb_path, new_cache_size, m_is_memory, /*fWipe*/ false, /*obfuscate*/ true);
}
bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const {
- return db.Read(CoinEntry(&outpoint), coin);
+ return m_db->Read(CoinEntry(&outpoint), coin);
}
bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const {
- return db.Exists(CoinEntry(&outpoint));
+ return m_db->Exists(CoinEntry(&outpoint));
}
uint256 CCoinsViewDB::GetBestBlock() const {
uint256 hashBestChain;
- if (!db.Read(DB_BEST_BLOCK, hashBestChain))
+ if (!m_db->Read(DB_BEST_BLOCK, hashBestChain))
return uint256();
return hashBestChain;
}
std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
std::vector<uint256> vhashHeadBlocks;
- if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
+ if (!m_db->Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
return std::vector<uint256>();
}
return vhashHeadBlocks;
}
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
- CDBBatch batch(db);
+ CDBBatch batch(*m_db);
size_t count = 0;
size_t changed = 0;
size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize);
@@ -105,7 +116,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
mapCoins.erase(itOld);
if (batch.SizeEstimate() > batch_size) {
LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
- db.WriteBatch(batch);
+ m_db->WriteBatch(batch);
batch.Clear();
if (crash_simulate) {
static FastRandomContext rng;
@@ -122,14 +133,14 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
batch.Write(DB_BEST_BLOCK, hashBlock);
LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
- bool ret = db.WriteBatch(batch);
+ bool ret = m_db->WriteBatch(batch);
LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
return ret;
}
size_t CCoinsViewDB::EstimateSize() const
{
- return db.EstimateSize(DB_COIN, (char)(DB_COIN+1));
+ return m_db->EstimateSize(DB_COIN, (char)(DB_COIN+1));
}
CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {
@@ -156,7 +167,7 @@ bool CBlockTreeDB::ReadLastBlockFile(int &nFile) {
CCoinsViewCursor *CCoinsViewDB::Cursor() const
{
- CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(db).NewIterator(), GetBestBlock());
+ CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(*m_db).NewIterator(), GetBestBlock());
/* It seems that there are no "const iterators" for LevelDB. Since we
only need read operations on it, use a const-cast to get around
that restriction. */
@@ -335,7 +346,7 @@ public:
* Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout.
*/
bool CCoinsViewDB::Upgrade() {
- std::unique_ptr<CDBIterator> pcursor(db.NewIterator());
+ std::unique_ptr<CDBIterator> pcursor(m_db->NewIterator());
pcursor->Seek(std::make_pair(DB_COINS, uint256()));
if (!pcursor->Valid()) {
return true;
@@ -346,7 +357,7 @@ bool CCoinsViewDB::Upgrade() {
LogPrintf("[0%%]..."); /* Continued */
uiInterface.ShowProgress(_("Upgrading UTXO database").translated, 0, true);
size_t batch_size = 1 << 24;
- CDBBatch batch(db);
+ CDBBatch batch(*m_db);
int reportDone = 0;
std::pair<unsigned char, uint256> key;
std::pair<unsigned char, uint256> prev_key = {DB_COINS, uint256()};
@@ -380,9 +391,9 @@ bool CCoinsViewDB::Upgrade() {
}
batch.Erase(key);
if (batch.SizeEstimate() > batch_size) {
- db.WriteBatch(batch);
+ m_db->WriteBatch(batch);
batch.Clear();
- db.CompactRange(prev_key, key);
+ m_db->CompactRange(prev_key, key);
prev_key = key;
}
pcursor->Next();
@@ -390,8 +401,8 @@ bool CCoinsViewDB::Upgrade() {
break;
}
}
- db.WriteBatch(batch);
- db.CompactRange({DB_COINS, uint256()}, key);
+ m_db->WriteBatch(batch);
+ m_db->CompactRange({DB_COINS, uint256()}, key);
uiInterface.ShowProgress("", 100, false);
LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
return !ShutdownRequested();
diff --git a/src/txdb.h b/src/txdb.h
index 488c24f935..0cf7e2f1b8 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -39,11 +39,16 @@ static const int64_t max_filter_index_cache = 1024;
//! Max memory allocated to coin DB specific cache (MiB)
static const int64_t nMaxCoinsDBCache = 8;
+// Actually declared in validation.cpp; can't include because of circular dependency.
+extern RecursiveMutex cs_main;
+
/** CCoinsView backed by the coin database (chainstate/) */
class CCoinsViewDB final : public CCoinsView
{
protected:
- CDBWrapper db;
+ std::unique_ptr<CDBWrapper> m_db;
+ fs::path m_ldb_path;
+ bool m_is_memory;
public:
/**
* @param[in] ldb_path Location in the filesystem where leveldb data will be stored.
@@ -60,6 +65,9 @@ public:
//! Attempt to update from an older database format. Returns whether an error occurred.
bool Upgrade();
size_t EstimateSize() const override;
+
+ //! Dynamically alter the underlying leveldb cache size.
+ void ResizeCache(size_t new_cache_size) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
};
/** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */
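A short usage sketch of the new ResizeCache(); the EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation means the caller must already hold cs_main, and the wrapper is torn down and rebuilt, so no cursor over the old database may survive the call:

    static void ResizeCoinsDB(CCoinsViewDB& coins_db, size_t new_cache_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
    {
        AssertLockHeld(::cs_main);
        coins_db.ResizeCache(new_cache_bytes); // recreates the underlying CDBWrapper (see txdb.cpp above)
    }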
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 1d9f6a4a46..de1a3ec68f 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -811,15 +811,17 @@ CTransactionRef CTxMemPool::get(const uint256& hash) const
return i->GetSharedTx();
}
-TxMempoolInfo CTxMemPool::info(const uint256& hash, bool wtxid) const
+TxMempoolInfo CTxMemPool::info(const GenTxid& gtxid) const
{
LOCK(cs);
- indexed_transaction_set::const_iterator i = (wtxid ? get_iter_from_wtxid(hash) : mapTx.find(hash));
+ indexed_transaction_set::const_iterator i = (gtxid.IsWtxid() ? get_iter_from_wtxid(gtxid.GetHash()) : mapTx.find(gtxid.GetHash()));
if (i == mapTx.end())
return TxMempoolInfo();
return GetInfo(i);
}
+TxMempoolInfo CTxMemPool::info(const uint256& txid) const { return info(GenTxid{false, txid}); }
+
void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeDelta)
{
{
diff --git a/src/txmempool.h b/src/txmempool.h
index d4e9845942..4743e1b63a 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -716,14 +716,15 @@ public:
return totalTxSize;
}
- bool exists(const uint256& hash, bool wtxid=false) const
+ bool exists(const GenTxid& gtxid) const
{
LOCK(cs);
- if (wtxid) {
- return (mapTx.get<index_by_wtxid>().count(hash) != 0);
+ if (gtxid.IsWtxid()) {
+ return (mapTx.get<index_by_wtxid>().count(gtxid.GetHash()) != 0);
}
- return (mapTx.count(hash) != 0);
+ return (mapTx.count(gtxid.GetHash()) != 0);
}
+ bool exists(const uint256& txid) const { return exists(GenTxid{false, txid}); }
CTransactionRef get(const uint256& hash) const;
txiter get_iter_from_wtxid(const uint256& wtxid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
@@ -731,7 +732,8 @@ public:
AssertLockHeld(cs);
return mapTx.project<0>(mapTx.get<index_by_wtxid>().find(wtxid));
}
- TxMempoolInfo info(const uint256& hash, bool wtxid=false) const;
+ TxMempoolInfo info(const uint256& hash) const;
+ TxMempoolInfo info(const GenTxid& gtxid) const;
std::vector<TxMempoolInfo> infoAll() const;
size_t DynamicMemoryUsage() const;
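A minimal sketch of the GenTxid-based lookups introduced above (the hashes are placeholders):

    CTxMemPool pool;
    const uint256 txid{};
    const uint256 wtxid{};
    bool have_tx  = pool.exists(txid);                                // convenience overload, txid index
    bool have_wtx = pool.exists(GenTxid{/* is_wtxid */ true, wtxid}); // wtxid index
    TxMempoolInfo entry = pool.info(GenTxid{false, txid});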
diff --git a/src/uint256.cpp b/src/uint256.cpp
index a943e71062..ee1b34eadd 100644
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -12,20 +12,24 @@
template <unsigned int BITS>
base_blob<BITS>::base_blob(const std::vector<unsigned char>& vch)
{
- assert(vch.size() == sizeof(data));
- memcpy(data, vch.data(), sizeof(data));
+ assert(vch.size() == sizeof(m_data));
+ memcpy(m_data, vch.data(), sizeof(m_data));
}
template <unsigned int BITS>
std::string base_blob<BITS>::GetHex() const
{
- return HexStr(std::reverse_iterator<const uint8_t*>(data + sizeof(data)), std::reverse_iterator<const uint8_t*>(data));
+ uint8_t m_data_rev[WIDTH];
+ for (int i = 0; i < WIDTH; ++i) {
+ m_data_rev[i] = m_data[WIDTH - 1 - i];
+ }
+ return HexStr(m_data_rev);
}
template <unsigned int BITS>
void base_blob<BITS>::SetHex(const char* psz)
{
- memset(data, 0, sizeof(data));
+ memset(m_data, 0, sizeof(m_data));
// skip leading spaces
while (IsSpace(*psz))
@@ -39,7 +43,7 @@ void base_blob<BITS>::SetHex(const char* psz)
size_t digits = 0;
while (::HexDigit(psz[digits]) != -1)
digits++;
- unsigned char* p1 = (unsigned char*)data;
+ unsigned char* p1 = (unsigned char*)m_data;
unsigned char* pend = p1 + WIDTH;
while (digits > 0 && p1 < pend) {
*p1 = ::HexDigit(psz[--digits]);
diff --git a/src/uint256.h b/src/uint256.h
index b36598f572..8ab747ef49 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -18,11 +18,11 @@ class base_blob
{
protected:
static constexpr int WIDTH = BITS / 8;
- uint8_t data[WIDTH];
+ uint8_t m_data[WIDTH];
public:
base_blob()
{
- memset(data, 0, sizeof(data));
+ memset(m_data, 0, sizeof(m_data));
}
explicit base_blob(const std::vector<unsigned char>& vch);
@@ -30,17 +30,17 @@ public:
bool IsNull() const
{
for (int i = 0; i < WIDTH; i++)
- if (data[i] != 0)
+ if (m_data[i] != 0)
return false;
return true;
}
void SetNull()
{
- memset(data, 0, sizeof(data));
+ memset(m_data, 0, sizeof(m_data));
}
- inline int Compare(const base_blob& other) const { return memcmp(data, other.data, sizeof(data)); }
+ inline int Compare(const base_blob& other) const { return memcmp(m_data, other.m_data, sizeof(m_data)); }
friend inline bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; }
friend inline bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; }
@@ -51,34 +51,37 @@ public:
void SetHex(const std::string& str);
std::string ToString() const;
+ const unsigned char* data() const { return m_data; }
+ unsigned char* data() { return m_data; }
+
unsigned char* begin()
{
- return &data[0];
+ return &m_data[0];
}
unsigned char* end()
{
- return &data[WIDTH];
+ return &m_data[WIDTH];
}
const unsigned char* begin() const
{
- return &data[0];
+ return &m_data[0];
}
const unsigned char* end() const
{
- return &data[WIDTH];
+ return &m_data[WIDTH];
}
unsigned int size() const
{
- return sizeof(data);
+ return sizeof(m_data);
}
uint64_t GetUint64(int pos) const
{
- const uint8_t* ptr = data + pos * 8;
+ const uint8_t* ptr = m_data + pos * 8;
return ((uint64_t)ptr[0]) | \
((uint64_t)ptr[1]) << 8 | \
((uint64_t)ptr[2]) << 16 | \
@@ -92,13 +95,13 @@ public:
template<typename Stream>
void Serialize(Stream& s) const
{
- s.write((char*)data, sizeof(data));
+ s.write((char*)m_data, sizeof(m_data));
}
template<typename Stream>
void Unserialize(Stream& s)
{
- s.read((char*)data, sizeof(data));
+ s.read((char*)m_data, sizeof(m_data));
}
};
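The member rename and the new data() accessors mostly matter for callers that previously poked at the raw array; a brief sketch of what they enable, e.g. building a Span without begin()/end():

    uint256 h;
    unsigned char* raw = h.data();                 // mutable accessor
    Span<unsigned char> bytes{h.data(), h.size()}; // plays nicely with the Span-based helpers
    std::string hex = h.GetHex();                  // now HexStr() over a byte-reversed copy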
diff --git a/src/util/message.cpp b/src/util/message.cpp
index 1e7128d225..e1d5cff48c 100644
--- a/src/util/message.cpp
+++ b/src/util/message.cpp
@@ -64,7 +64,7 @@ bool MessageSign(
return false;
}
- signature = EncodeBase64(signature_bytes.data(), signature_bytes.size());
+ signature = EncodeBase64(signature_bytes);
return true;
}
diff --git a/src/util/strencodings.cpp b/src/util/strencodings.cpp
index 3a903b6897..079a4529a3 100644
--- a/src/util/strencodings.cpp
+++ b/src/util/strencodings.cpp
@@ -126,20 +126,20 @@ void SplitHostPort(std::string in, int &portOut, std::string &hostOut) {
hostOut = in;
}
-std::string EncodeBase64(const unsigned char* pch, size_t len)
+std::string EncodeBase64(Span<const unsigned char> input)
{
static const char *pbase64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
std::string str;
- str.reserve(((len + 2) / 3) * 4);
- ConvertBits<8, 6, true>([&](int v) { str += pbase64[v]; }, pch, pch + len);
+ str.reserve(((input.size() + 2) / 3) * 4);
+ ConvertBits<8, 6, true>([&](int v) { str += pbase64[v]; }, input.begin(), input.end());
while (str.size() % 4) str += '=';
return str;
}
std::string EncodeBase64(const std::string& str)
{
- return EncodeBase64((const unsigned char*)str.data(), str.size());
+ return EncodeBase64(MakeUCharSpan(str));
}
std::vector<unsigned char> DecodeBase64(const char* p, bool* pf_invalid)
@@ -201,20 +201,20 @@ std::string DecodeBase64(const std::string& str, bool* pf_invalid)
return std::string((const char*)vchRet.data(), vchRet.size());
}
-std::string EncodeBase32(const unsigned char* pch, size_t len)
+std::string EncodeBase32(Span<const unsigned char> input)
{
static const char *pbase32 = "abcdefghijklmnopqrstuvwxyz234567";
std::string str;
- str.reserve(((len + 4) / 5) * 8);
- ConvertBits<8, 5, true>([&](int v) { str += pbase32[v]; }, pch, pch + len);
+ str.reserve(((input.size() + 4) / 5) * 8);
+ ConvertBits<8, 5, true>([&](int v) { str += pbase32[v]; }, input.begin(), input.end());
while (str.size() % 8) str += '=';
return str;
}
std::string EncodeBase32(const std::string& str)
{
- return EncodeBase32((const unsigned char*)str.data(), str.size());
+ return EncodeBase32(MakeUCharSpan(str));
}
std::vector<unsigned char> DecodeBase32(const char* p, bool* pf_invalid)
@@ -318,6 +318,18 @@ bool ParseInt64(const std::string& str, int64_t *out)
n <= std::numeric_limits<int64_t>::max();
}
+bool ParseUInt8(const std::string& str, uint8_t *out)
+{
+ uint32_t u32;
+ if (!ParseUInt32(str, &u32) || u32 > std::numeric_limits<uint8_t>::max()) {
+ return false;
+ }
+ if (out != nullptr) {
+ *out = static_cast<uint8_t>(u32);
+ }
+ return true;
+}
+
bool ParseUInt32(const std::string& str, uint32_t *out)
{
if (!ParsePrechecks(str))
@@ -407,15 +419,6 @@ std::string FormatParagraph(const std::string& in, size_t width, size_t indent)
return out.str();
}
-int64_t atoi64(const char* psz)
-{
-#ifdef _MSC_VER
- return _atoi64(psz);
-#else
- return strtoll(psz, nullptr, 10);
-#endif
-}
-
int64_t atoi64(const std::string& str)
{
#ifdef _MSC_VER
@@ -569,3 +572,16 @@ std::string Capitalize(std::string str)
str[0] = ToUpper(str.front());
return str;
}
+
+std::string HexStr(const Span<const uint8_t> s)
+{
+ std::string rv;
+ static constexpr char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+ rv.reserve(s.size() * 2);
+ for (uint8_t v: s) {
+ rv.push_back(hexmap[v >> 4]);
+ rv.push_back(hexmap[v & 15]);
+ }
+ return rv;
+}
diff --git a/src/util/strencodings.h b/src/util/strencodings.h
index bd988f1410..1519214140 100644
--- a/src/util/strencodings.h
+++ b/src/util/strencodings.h
@@ -10,6 +10,7 @@
#define BITCOIN_UTIL_STRENCODINGS_H
#include <attributes.h>
+#include <span.h>
#include <cstdint>
#include <iterator>
@@ -47,15 +48,14 @@ bool IsHex(const std::string& str);
bool IsHexNumber(const std::string& str);
std::vector<unsigned char> DecodeBase64(const char* p, bool* pf_invalid = nullptr);
std::string DecodeBase64(const std::string& str, bool* pf_invalid = nullptr);
-std::string EncodeBase64(const unsigned char* pch, size_t len);
+std::string EncodeBase64(Span<const unsigned char> input);
std::string EncodeBase64(const std::string& str);
std::vector<unsigned char> DecodeBase32(const char* p, bool* pf_invalid = nullptr);
std::string DecodeBase32(const std::string& str, bool* pf_invalid = nullptr);
-std::string EncodeBase32(const unsigned char* pch, size_t len);
+std::string EncodeBase32(Span<const unsigned char> input);
std::string EncodeBase32(const std::string& str);
void SplitHostPort(std::string in, int& portOut, std::string& hostOut);
-int64_t atoi64(const char* psz);
int64_t atoi64(const std::string& str);
int atoi(const std::string& str);
@@ -99,6 +99,13 @@ NODISCARD bool ParseInt32(const std::string& str, int32_t *out);
NODISCARD bool ParseInt64(const std::string& str, int64_t *out);
/**
+ * Convert decimal string to unsigned 8-bit integer with strict parse error feedback.
+ * @returns true if the entire string could be parsed as valid integer,
+ * false if not the entire string could be parsed or when overflow or underflow occurred.
+ */
+NODISCARD bool ParseUInt8(const std::string& str, uint8_t *out);
+
+/**
* Convert decimal string to unsigned 32-bit integer with strict parse error feedback.
* @returns true if the entire string could be parsed as valid integer,
* false if not the entire string could be parsed or when overflow or underflow occurred.
@@ -119,27 +126,11 @@ NODISCARD bool ParseUInt64(const std::string& str, uint64_t *out);
*/
NODISCARD bool ParseDouble(const std::string& str, double *out);
-template<typename T>
-std::string HexStr(const T itbegin, const T itend)
-{
- std::string rv;
- static const char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
- '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
- rv.reserve(std::distance(itbegin, itend) * 2);
- for(T it = itbegin; it < itend; ++it)
- {
- unsigned char val = (unsigned char)(*it);
- rv.push_back(hexmap[val>>4]);
- rv.push_back(hexmap[val&15]);
- }
- return rv;
-}
-
-template<typename T>
-inline std::string HexStr(const T& vch)
-{
- return HexStr(vch.begin(), vch.end());
-}
+/**
+ * Convert a span of bytes to a lower-case hexadecimal string.
+ */
+std::string HexStr(const Span<const uint8_t> s);
+inline std::string HexStr(const Span<const char> s) { return HexStr(MakeUCharSpan(s)); }
/**
* Format a paragraph of text to a fixed width, adding spaces for
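A short usage sketch of the Span-based helpers and the new ParseUInt8() declared above (containers convert to Span implicitly, as the HexStr(ParseHex_vec) call in util_tests shows):

    const std::vector<unsigned char> bytes{0x04, 0x67, 0x8a};
    const std::string hex = HexStr(bytes);        // "04678a"
    const std::string b64 = EncodeBase64(bytes);
    const std::string b32 = EncodeBase32(bytes);

    uint8_t small;
    bool ok  = ParseUInt8("255", &small);         // true, small == 255
    bool bad = ParseUInt8("256", &small);         // false: does not fit in uint8_t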
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 8164e884b1..00aa53df70 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -6,6 +6,10 @@
#include <sync.h>
#include <util/system.h>
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
#include <chainparamsbase.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -44,12 +48,6 @@
#pragma warning(disable:4717)
#endif
-#ifdef _WIN32_IE
-#undef _WIN32_IE
-#endif
-#define _WIN32_IE 0x0501
-
-#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
@@ -1021,7 +1019,7 @@ bool FileCommit(FILE *file)
return false;
}
#else
- #if defined(__linux__) || defined(__NetBSD__)
+ #if defined(HAVE_FDATASYNC)
if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync
LogPrintf("%s: fdatasync failed: %d\n", __func__, errno);
return false;
@@ -1161,6 +1159,43 @@ void runCommand(const std::string& strCommand)
}
#endif
+#ifdef HAVE_BOOST_PROCESS
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in)
+{
+ namespace bp = boost::process;
+
+ UniValue result_json;
+ bp::opstream stdin_stream;
+ bp::ipstream stdout_stream;
+ bp::ipstream stderr_stream;
+
+ if (str_command.empty()) return UniValue::VNULL;
+
+ bp::child c(
+ str_command,
+ bp::std_out > stdout_stream,
+ bp::std_err > stderr_stream,
+ bp::std_in < stdin_stream
+ );
+ if (!str_std_in.empty()) {
+ stdin_stream << str_std_in << std::endl;
+ }
+ stdin_stream.pipe().close();
+
+ std::string result;
+ std::string error;
+ std::getline(stdout_stream, result);
+ std::getline(stderr_stream, error);
+
+ c.wait();
+ const int n_error = c.exit_code();
+ if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error));
+ if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result);
+
+ return result_json;
+}
+#endif // HAVE_BOOST_PROCESS
+
void SetupEnvironment()
{
#ifdef HAVE_MALLOPT_ARENA_MAX
diff --git a/src/util/system.h b/src/util/system.h
index 0bd14cc9ea..1df194ca84 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -37,6 +37,8 @@
#include <boost/thread/condition_variable.hpp> // for boost::thread_interrupted
+class UniValue;
+
// Application startup time (used for uptime calculation)
int64_t GetStartupTime();
@@ -96,6 +98,16 @@ std::string ShellEscape(const std::string& arg);
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand);
#endif
+#ifdef HAVE_BOOST_PROCESS
+/**
+ * Execute a command which returns JSON, and parse the result.
+ *
+ * @param str_command The command to execute, including any arguments
+ * @param str_std_in string to pass to stdin
+ * @return parsed JSON
+ */
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in="");
+#endif // HAVE_BOOST_PROCESS
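A minimal caller sketch for RunCommandParseJSON (the helper command name is hypothetical; only compiled when HAVE_BOOST_PROCESS is defined):

    #ifdef HAVE_BOOST_PROCESS
    // Run an external helper that prints a single line of JSON on stdout.
    const UniValue reply = RunCommandParseJSON("myhelper --status"); // hypothetical command
    if (reply.exists("success") && reply["success"].get_bool()) {
        // Helper reported success; a non-zero exit code would have thrown instead.
    }
    #endif // HAVE_BOOST_PROCESS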
/**
* Most paths passed as configuration arguments are treated as relative to
diff --git a/src/validation.cpp b/src/validation.cpp
index 5aa3d315d5..cf2f9dde62 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -139,7 +139,6 @@ bool fPruneMode = false;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
-size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
@@ -689,8 +688,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
}
// Check for non-standard pay-to-script-hash in inputs
- if (fRequireStandard && !AreInputsStandard(tx, m_view))
- return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-nonstandard-inputs");
+ if (fRequireStandard && !AreInputsStandard(tx, m_view)) {
+ return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
+ }
// Check for non-standard witness in P2WSH
if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
@@ -1089,45 +1089,33 @@ bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTrans
return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee, test_accept);
}
-/**
- * Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock.
- * If blockIndex is provided, the transaction is fetched from the corresponding block.
- */
-bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, const CBlockIndex* const block_index)
+CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
{
LOCK(cs_main);
- if (!block_index) {
- CTransactionRef ptx = mempool.get(hash);
- if (ptx) {
- txOut = ptx;
- return true;
- }
-
- if (g_txindex) {
- return g_txindex->FindTx(hash, hashBlock, txOut);
- }
- } else {
+ if (block_index) {
CBlock block;
if (ReadBlockFromDisk(block, block_index, consensusParams)) {
for (const auto& tx : block.vtx) {
if (tx->GetHash() == hash) {
- txOut = tx;
hashBlock = block_index->GetBlockHash();
- return true;
+ return tx;
}
}
}
+ return nullptr;
}
-
- return false;
+ if (mempool) {
+ CTransactionRef ptx = mempool->get(hash);
+ if (ptx) return ptx;
+ }
+ if (g_txindex) {
+ CTransactionRef tx;
+ if (g_txindex->FindTx(hash, hashBlock, tx)) return tx;
+ }
+ return nullptr;
}
-
-
-
-
-
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
@@ -1211,8 +1199,8 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
- HexStr(blk_start, blk_start + CMessageHeader::MESSAGE_START_SIZE),
- HexStr(message_start, message_start + CMessageHeader::MESSAGE_START_SIZE));
+ HexStr(blk_start),
+ HexStr(message_start));
}
if (blk_size > MAX_SIZE) {
@@ -1284,9 +1272,10 @@ void CChainState::InitCoinsDB(
leveldb_name, cache_size_bytes, in_memory, should_wipe);
}
-void CChainState::InitCoinsCache()
+void CChainState::InitCoinsCache(size_t cache_size_bytes)
{
assert(m_coins_views != nullptr);
+ m_coinstip_cache_size_bytes = cache_size_bytes;
m_coins_views->InitCache();
}
@@ -2239,20 +2228,20 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
return true;
}
-CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool& tx_pool)
+CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
{
return this->GetCoinsCacheSizeState(
tx_pool,
- nCoinCacheUsage,
+ m_coinstip_cache_size_bytes,
gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
}
CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
- const CTxMemPool& tx_pool,
+ const CTxMemPool* tx_pool,
size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes)
{
- int64_t nMempoolUsage = tx_pool.DynamicMemoryUsage();
+ const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0;
int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
int64_t nTotalSpace =
max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
@@ -2291,7 +2280,7 @@ bool CChainState::FlushStateToDisk(
{
bool fFlushForPrune = false;
bool fDoFullFlush = false;
- CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(::mempool);
+ CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&::mempool);
LOCK(cs_LastBlockFile);
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
if (nManualPruneHeight > 0) {
@@ -3446,7 +3435,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
if (commitpos == -1) {
uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
- CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin());
+ CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
CTxOut out;
out.nValue = 0;
out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
@@ -3591,7 +3580,7 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat
if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
}
- CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
+ CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
}
@@ -4318,7 +4307,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
}
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
- if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= nCoinCacheUsage) {
+ if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= ::ChainstateActive().m_coinstip_cache_size_bytes) {
assert(coins.GetBestBlock() == pindex->GetBlockHash());
DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins);
if (res == DISCONNECT_FAILED) {
@@ -4599,13 +4588,13 @@ void CChainState::UnloadBlockIndex() {
// May NOT be used after any connections are up as much
// of the peer-processing logic assumes a consistent
// block index state
-void UnloadBlockIndex()
+void UnloadBlockIndex(CTxMemPool* mempool)
{
LOCK(cs_main);
g_chainman.Unload();
pindexBestInvalid = nullptr;
pindexBestHeader = nullptr;
- mempool.clear();
+ if (mempool) mempool->clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
setDirtyBlockIndex.clear();
@@ -4981,6 +4970,39 @@ std::string CChainState::ToString()
tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
}
+bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
+{
+ if (coinstip_size == m_coinstip_cache_size_bytes &&
+ coinsdb_size == m_coinsdb_cache_size_bytes) {
+ // Cache sizes are unchanged, no need to continue.
+ return true;
+ }
+ size_t old_coinstip_size = m_coinstip_cache_size_bytes;
+ m_coinstip_cache_size_bytes = coinstip_size;
+ m_coinsdb_cache_size_bytes = coinsdb_size;
+ CoinsDB().ResizeCache(coinsdb_size);
+
+ LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
+ this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
+ LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
+ this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
+
+ BlockValidationState state;
+ const CChainParams& chainparams = Params();
+
+ bool ret;
+
+ if (coinstip_size > old_coinstip_size) {
+ // Likely no need to flush if cache sizes have grown.
+ ret = FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED);
+ } else {
+ // Otherwise, flush state to disk and deallocate the in-memory coins map.
+ ret = FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS);
+ CoinsTip().ReallocateCache();
+ }
+ return ret;
+}
+
std::string CBlockFileInfo::ToString() const
{
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
@@ -5289,3 +5311,33 @@ void ChainstateManager::Reset()
m_active_chainstate = nullptr;
m_snapshot_validated = false;
}
+
+void ChainstateManager::MaybeRebalanceCaches()
+{
+ if (m_ibd_chainstate && !m_snapshot_chainstate) {
+ LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
+ // Allocate everything to the IBD chainstate.
+ m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
+ }
+ else if (m_snapshot_chainstate && !m_ibd_chainstate) {
+ LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
+ // Allocate everything to the snapshot chainstate.
+ m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
+ }
+ else if (m_ibd_chainstate && m_snapshot_chainstate) {
+ // If both chainstates exist, determine who needs more cache based on IBD status.
+ //
+ // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
+ if (m_snapshot_chainstate->IsInitialBlockDownload()) {
+ m_ibd_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
+ m_snapshot_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
+ } else {
+ m_snapshot_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
+ m_ibd_chainstate->ResizeCoinsCaches(
+ m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
+ }
+ }
+}
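To make the split concrete (hypothetical numbers, not from the patch): with both chainstates present and m_total_coinstip_cache at 400 MiB, whichever chainstate still needs syncing (the snapshot chainstate while it is in initial block download, otherwise the background IBD chainstate) is resized to roughly 380 MiB (95%) and the other to roughly 20 MiB (5%); the shrinking chainstate is resized first so combined usage never exceeds the configured total.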
diff --git a/src/validation.h b/src/validation.h
index acadf151c5..534162d64a 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -127,7 +127,6 @@ extern bool g_parallel_script_checks;
extern bool fRequireStandard;
extern bool fCheckBlockIndex;
extern bool fCheckpointsEnabled;
-extern size_t nCoinCacheUsage;
/** A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) */
extern CFeeRate minRelayTxFee;
/** If the tip is older than this (in seconds), the node is considered to be in initial block download. */
@@ -161,11 +160,22 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi
/** Ensures we have a genesis block in the block tree, possibly writing one to disk. */
bool LoadGenesisBlock(const CChainParams& chainparams);
/** Unload database information */
-void UnloadBlockIndex();
+void UnloadBlockIndex(CTxMemPool* mempool);
/** Run an instance of the script checking thread */
void ThreadScriptCheck(int worker_num);
-/** Retrieve a transaction (from memory pool, or from disk, if possible) */
-bool GetTransaction(const uint256& hash, CTransactionRef& tx, const Consensus::Params& params, uint256& hashBlock, const CBlockIndex* const blockIndex = nullptr);
+/**
+ * Return transaction from the block at block_index.
+ * If block_index is not provided, fall back to mempool.
+ * If mempool is not provided or the tx couldn't be found in mempool, fall back to g_txindex.
+ *
+ * @param[in] block_index The block to read from disk, or nullptr
+ * @param[in] mempool The mempool to look in when block_index is not provided; may be nullptr
+ * @param[in] hash The txid
+ * @param[in] consensusParams The consensus parameters
+ * @param[out] hashBlock The hash of block_index, if the tx was found via block_index
+ * @returns The tx if found, otherwise nullptr
+ */
+CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock);
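A hedged usage sketch for the reworked GetTransaction (hypothetical caller; txid is assumed to be a uint256 already in scope):

    uint256 hash_block;
    const CTransactionRef tx = GetTransaction(/* block_index */ nullptr, &::mempool,
                                              txid, Params().GetConsensus(), hash_block);
    if (tx) {
        // hash_block is only filled in when the tx was located via a block or the txindex.
    }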
/**
* Find the best known block, and make it the tip of the block chain
*
@@ -521,7 +531,7 @@ public:
//! Initialize the in-memory coins cache (to be done after the health of the on-disk database
//! is verified).
- void InitCoinsCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+ void InitCoinsCache(size_t cache_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
//! @returns whether or not the CoinsViews object has been fully initialized and we can
//! safely flush this object to disk.
@@ -570,6 +580,17 @@ public:
//! Destructs all objects related to accessing the UTXO set.
void ResetCoinsViews() { m_coins_views.reset(); }
+ //! The cache size of the on-disk coins view.
+ size_t m_coinsdb_cache_size_bytes{0};
+
+ //! The cache size of the in-memory coins view.
+ size_t m_coinstip_cache_size_bytes{0};
+
+ //! Resize the CoinsViews caches dynamically and flush state to disk.
+ //! @returns true unless an error occurred during the flush.
+ bool ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
/**
* Update the on-disk chain state.
* The caches and indexes are flushed depending on the mode we're called with
@@ -653,11 +674,11 @@ public:
//! Dictates whether we need to flush the cache to disk or not.
//!
//! @return the state of the size of the coins cache.
- CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool& tx_pool)
+ CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
CoinsCacheSizeState GetCoinsCacheSizeState(
- const CTxMemPool& tx_pool,
+ const CTxMemPool* tx_pool,
size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
@@ -786,6 +807,14 @@ public:
//! chainstate to avoid duplicating block metadata.
BlockManager m_blockman GUARDED_BY(::cs_main);
+ //! The total number of bytes available for us to use across all in-memory
+ //! coins caches. This will be split somehow across chainstates.
+ int64_t m_total_coinstip_cache{0};
+
+ //! The total number of bytes available for us to use across all leveldb
+ //! coins databases. This will be split somehow across chainstates.
+ int64_t m_total_coinsdb_cache{0};
+
//! Instantiate a new chainstate and assign it based upon whether it is
//! from a snapshot.
//!
@@ -874,6 +903,10 @@ public:
//! Clear (deconstruct) chainstate data.
void Reset();
+
+ //! Check to see if caches are out of balance and if so, call
+ //! ResizeCoinsCaches() as needed.
+ void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
};
/** DEPRECATED! Please use node.chainman instead. May only be used in validation.cpp internally */
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 1953be2d54..24eb2ee34c 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -32,13 +32,13 @@ void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filena
int ret = db.get_mpf()->get_fileid(fileid.value);
if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (get_fileid failed with %d)", filename, ret));
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (get_fileid failed with %d)", filename, ret));
}
for (const auto& item : env.m_fileids) {
if (fileid == item.second && &fileid != &item.second) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Can't open database %s (duplicates fileid %s from %s)", filename,
- HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first));
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (duplicates fileid %s from %s)", filename,
+ HexStr(item.second.value), item.first));
}
}
}
@@ -97,9 +97,8 @@ void BerkeleyEnvironment::Close()
fDbEnvInit = false;
for (auto& db : m_databases) {
- auto count = mapFileUseCount.find(db.first);
- assert(count == mapFileUseCount.end() || count->second == 0);
BerkeleyDatabase& database = db.second.get();
+ assert(database.m_refcount <= 0);
if (database.m_db) {
database.m_db->close(0);
database.m_db.reset();
@@ -232,16 +231,6 @@ BerkeleyEnvironment::BerkeleyEnvironment()
fMockDb = true;
}
-bool BerkeleyEnvironment::Verify(const std::string& strFile)
-{
- LOCK(cs_db);
- assert(mapFileUseCount.count(strFile) == 0);
-
- Db db(dbenv.get(), 0);
- int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
- return result == 0;
-}
-
BerkeleyBatch::SafeDbt::SafeDbt()
{
m_dbt.set_flags(DB_DBT_MALLOC);
@@ -295,7 +284,11 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
if (fs::exists(file_path))
{
- if (!env->Verify(strFile)) {
+ assert(m_refcount == 0);
+
+ Db db(env->dbenv.get(), 0);
+ int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
+ if (result != 0) {
errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), file_path);
return false;
}
@@ -316,6 +309,8 @@ BerkeleyDatabase::~BerkeleyDatabase()
{
if (env) {
LOCK(cs_db);
+ env->CloseDb(strFile);
+ assert(!m_db);
size_t erased = env->m_databases.erase(strFile);
assert(erased == 1);
env->m_fileids.erase(strFile);
@@ -324,14 +319,24 @@ BerkeleyDatabase::~BerkeleyDatabase()
BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database)
{
+ database.AddRef();
+ database.Open(pszMode);
fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w'));
fFlushOnClose = fFlushOnCloseIn;
env = database.env.get();
- if (database.IsDummy()) {
- return;
+ pdb = database.m_db.get();
+ strFile = database.strFile;
+ bool fCreate = strchr(pszMode, 'c') != nullptr;
+ if (fCreate && !Exists(std::string("version"))) {
+ bool fTmp = fReadOnly;
+ fReadOnly = false;
+ Write(std::string("version"), CLIENT_VERSION);
+ fReadOnly = fTmp;
}
- const std::string &strFilename = database.strFile;
+}
+void BerkeleyDatabase::Open(const char* pszMode)
+{
bool fCreate = strchr(pszMode, 'c') != nullptr;
unsigned int nFlags = DB_THREAD;
if (fCreate)
@@ -341,10 +346,9 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo
LOCK(cs_db);
bilingual_str open_err;
if (!env->Open(open_err))
- throw std::runtime_error("BerkeleyBatch: Failed to open database environment.");
+ throw std::runtime_error("BerkeleyDatabase: Failed to open database environment.");
- pdb = database.m_db.get();
- if (pdb == nullptr) {
+ if (m_db == nullptr) {
int ret;
std::unique_ptr<Db> pdb_temp = MakeUnique<Db>(env->dbenv.get(), 0);
@@ -353,60 +357,33 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo
DbMpoolFile* mpf = pdb_temp->get_mpf();
ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Failed to configure for no temp file backing for database %s", strFilename));
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Failed to configure for no temp file backing for database %s", strFile));
}
}
ret = pdb_temp->open(nullptr, // Txn pointer
- fMockDb ? nullptr : strFilename.c_str(), // Filename
- fMockDb ? strFilename.c_str() : "main", // Logical db name
+ fMockDb ? nullptr : strFile.c_str(), // Filename
+ fMockDb ? strFile.c_str() : "main", // Logical db name
DB_BTREE, // Database type
nFlags, // Flags
0);
if (ret != 0) {
- throw std::runtime_error(strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename));
+ throw std::runtime_error(strprintf("BerkeleyDatabase: Error %d, can't open database %s", ret, strFile));
}
+ m_file_path = (env->Directory() / strFile).string();
// Call CheckUniqueFileid on the containing BDB environment to
// avoid BDB data consistency bugs that happen when different data
// files in the same environment have the same fileid.
- //
- // Also call CheckUniqueFileid on all the other g_dbenvs to prevent
- // bitcoin from opening the same data file through another
- // environment when the file is referenced through equivalent but
- // not obviously identical symlinked or hard linked or bind mounted
- // paths. In the future a more relaxed check for equal inode and
- // device ids could be done instead, which would allow opening
- // different backup copies of a wallet at the same time. Maybe even
- // more ideally, an exclusive lock for accessing the database could
- // be implemented, so no equality checks are needed at all. (Newer
- // versions of BDB have an set_lk_exclusive method for this
- // purpose, but the older version we use does not.)
- for (const auto& env : g_dbenvs) {
- CheckUniqueFileid(*env.second.lock().get(), strFilename, *pdb_temp, this->env->m_fileids[strFilename]);
- }
+ CheckUniqueFileid(*env, strFile, *pdb_temp, this->env->m_fileids[strFile]);
- pdb = pdb_temp.release();
- database.m_db.reset(pdb);
+ m_db.reset(pdb_temp.release());
- if (fCreate && !Exists(std::string("version"))) {
- bool fTmp = fReadOnly;
- fReadOnly = false;
- Write(std::string("version"), CLIENT_VERSION);
- fReadOnly = fTmp;
- }
}
- database.AddRef();
- strFile = strFilename;
}
}
-void BerkeleyDatabase::Open(const char* mode)
-{
- throw std::logic_error("BerkeleyDatabase does not implement Open. This function should not be called.");
-}
-
void BerkeleyBatch::Flush()
{
if (activeTxn)
@@ -427,6 +404,12 @@ void BerkeleyDatabase::IncrementUpdateCounter()
++nUpdateCounter;
}
+BerkeleyBatch::~BerkeleyBatch()
+{
+ Close();
+ m_database.RemoveRef();
+}
+
void BerkeleyBatch::Close()
{
if (!pdb)
@@ -439,8 +422,6 @@ void BerkeleyBatch::Close()
if (fFlushOnClose)
Flush();
-
- m_database.RemoveRef();
}
void BerkeleyEnvironment::CloseDb(const std::string& strFile)
@@ -464,8 +445,8 @@ void BerkeleyEnvironment::ReloadDbEnv()
AssertLockNotHeld(cs_db);
std::unique_lock<RecursiveMutex> lock(cs_db);
m_db_in_use.wait(lock, [this](){
- for (auto& count : mapFileUseCount) {
- if (count.second > 0) return false;
+ for (auto& db : m_databases) {
+ if (db.second.get().m_refcount > 0) return false;
}
return true;
});
@@ -487,17 +468,14 @@ void BerkeleyEnvironment::ReloadDbEnv()
bool BerkeleyDatabase::Rewrite(const char* pszSkip)
{
- if (IsDummy()) {
- return true;
- }
while (true) {
{
LOCK(cs_db);
- if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) {
+ if (m_refcount <= 0) {
// Flush log data to the dat file
env->CloseDb(strFile);
env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(strFile);
+ m_refcount = -1;
bool fSuccess = true;
LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
@@ -581,10 +559,11 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
return;
{
LOCK(cs_db);
- std::map<std::string, int>::iterator mi = mapFileUseCount.begin();
- while (mi != mapFileUseCount.end()) {
- std::string strFile = (*mi).first;
- int nRefCount = (*mi).second;
+ bool no_dbs_accessed = true;
+ for (auto& db_it : m_databases) {
+ std::string strFile = db_it.first;
+ int nRefCount = db_it.second.get().m_refcount;
+ if (nRefCount < 0) continue;
LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
if (nRefCount == 0) {
// Move log data to the dat file
@@ -595,14 +574,15 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
if (!fMockDb)
dbenv->lsn_reset(strFile.c_str(), 0);
LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile);
- mapFileUseCount.erase(mi++);
- } else
- mi++;
+ nRefCount = -1;
+ } else {
+ no_dbs_accessed = false;
+ }
}
LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
if (fShutdown) {
char** listp;
- if (mapFileUseCount.empty()) {
+ if (no_dbs_accessed) {
dbenv->log_archive(&listp, DB_ARCH_REMOVE);
Close();
if (!fMockDb) {
@@ -615,21 +595,17 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
bool BerkeleyDatabase::PeriodicFlush()
{
- // There's nothing to do for dummy databases. Return true.
- if (IsDummy()) return true;
-
// Don't flush if we can't acquire the lock.
TRY_LOCK(cs_db, lockDb);
if (!lockDb) return false;
// Don't flush if any databases are in use
- for (const auto& use_count : env->mapFileUseCount) {
- if (use_count.second > 0) return false;
+ for (auto& it : env->m_databases) {
+ if (it.second.get().m_refcount > 0) return false;
}
// Don't flush if there haven't been any batch writes for this database.
- auto it = env->mapFileUseCount.find(strFile);
- if (it == env->mapFileUseCount.end()) return false;
+ if (m_refcount < 0) return false;
LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile);
int64_t nStart = GetTimeMillis();
@@ -637,7 +613,7 @@ bool BerkeleyDatabase::PeriodicFlush()
// Flush wallet file so it's self contained
env->CloseDb(strFile);
env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(it);
+ m_refcount = -1;
LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
@@ -646,19 +622,15 @@ bool BerkeleyDatabase::PeriodicFlush()
bool BerkeleyDatabase::Backup(const std::string& strDest) const
{
- if (IsDummy()) {
- return false;
- }
while (true)
{
{
LOCK(cs_db);
- if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0)
+ if (m_refcount <= 0)
{
// Flush log data to the dat file
env->CloseDb(strFile);
env->CheckpointLSN(strFile);
- env->mapFileUseCount.erase(strFile);
// Copy wallet file
fs::path pathSrc = env->Directory() / strFile;
@@ -687,23 +659,17 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const
void BerkeleyDatabase::Flush()
{
- if (!IsDummy()) {
- env->Flush(false);
- }
+ env->Flush(false);
}
void BerkeleyDatabase::Close()
{
- if (!IsDummy()) {
- env->Flush(true);
- }
+ env->Flush(true);
}
void BerkeleyDatabase::ReloadDbEnv()
{
- if (!IsDummy()) {
- env->ReloadDbEnv();
- }
+ env->ReloadDbEnv();
}
bool BerkeleyBatch::StartCursor()
@@ -801,7 +767,7 @@ bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
bool BerkeleyBatch::WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite)
{
if (!pdb)
- return true;
+ return false;
if (fReadOnly)
assert(!"Write called on database in read-only mode");
@@ -840,16 +806,18 @@ bool BerkeleyBatch::HasKey(CDataStream&& key)
void BerkeleyDatabase::AddRef()
{
LOCK(cs_db);
- ++env->mapFileUseCount[strFile];
+ if (m_refcount < 0) {
+ m_refcount = 1;
+ } else {
+ m_refcount++;
+ }
}
void BerkeleyDatabase::RemoveRef()
{
- {
- LOCK(cs_db);
- --env->mapFileUseCount[strFile];
- }
- env->m_db_in_use.notify_all();
+ LOCK(cs_db);
+ m_refcount--;
+ if (env) env->m_db_in_use.notify_all();
}
std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(const char* mode, bool flush_on_close)
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index ef3b81d4d6..75546924e8 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -52,7 +52,6 @@ private:
public:
std::unique_ptr<DbEnv> dbenv;
- std::map<std::string, int> mapFileUseCount;
std::map<std::string, std::reference_wrapper<BerkeleyDatabase>> m_databases;
std::unordered_map<std::string, WalletDatabaseFileId> m_fileids;
std::condition_variable_any m_db_in_use;
@@ -67,8 +66,6 @@ public:
bool IsDatabaseLoaded(const std::string& db_filename) const { return m_databases.find(db_filename) != m_databases.end(); }
fs::path Directory() const { return strPath; }
- bool Verify(const std::string& strFile);
-
bool Open(bilingual_str& error);
void Close();
void Flush(bool fShutdown);
@@ -100,12 +97,8 @@ class BerkeleyBatch;
**/
class BerkeleyDatabase : public WalletDatabase
{
- friend class BerkeleyBatch;
public:
- /** Create dummy DB handle */
- BerkeleyDatabase() : WalletDatabase(), env(nullptr)
- {
- }
+ BerkeleyDatabase() = delete;
/** Create DB handle to real database */
BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, std::string filename) :
@@ -166,17 +159,10 @@ public:
/** Database pointer. This is initialized lazily and reset during flushes, so it can be null. */
std::unique_ptr<Db> m_db;
- /** Make a BerkeleyBatch connected to this database */
- std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override;
-
-private:
std::string strFile;
- /** Return whether this database handle is a dummy for testing.
- * Only to be used at a low level, application should ideally not care
- * about this.
- */
- bool IsDummy() const { return env == nullptr; }
+ /** Make a BerkeleyBatch connected to this database */
+ std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override;
};
/** RAII class that provides access to a Berkeley database */
@@ -220,7 +206,7 @@ protected:
public:
explicit BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode = "r+", bool fFlushOnCloseIn=true);
- ~BerkeleyBatch() override { Close(); }
+ ~BerkeleyBatch() override;
BerkeleyBatch(const BerkeleyBatch&) = delete;
BerkeleyBatch& operator=(const BerkeleyBatch&) = delete;
diff --git a/src/wallet/coinselection.cpp b/src/wallet/coinselection.cpp
index 079a5d3d53..1a45a2b313 100644
--- a/src/wallet/coinselection.cpp
+++ b/src/wallet/coinselection.cpp
@@ -5,6 +5,7 @@
#include <wallet/coinselection.h>
#include <optional.h>
+#include <policy/feerate.h>
#include <util/system.h>
#include <util/moneystr.h>
@@ -302,7 +303,7 @@ bool KnapsackSolver(const CAmount& nTargetValue, std::vector<OutputGroup>& group
void OutputGroup::Insert(const CInputCoin& output, int depth, bool from_me, size_t ancestors, size_t descendants) {
m_outputs.push_back(output);
m_from_me &= from_me;
- m_value += output.effective_value;
+ m_value += output.txout.nValue;
m_depth = std::min(m_depth, depth);
// ancestors here express the number of ancestors the new coin will end up having, which is
// the sum, rather than the max; this will overestimate in the cases where multiple inputs
@@ -311,15 +312,19 @@ void OutputGroup::Insert(const CInputCoin& output, int depth, bool from_me, size
// descendants is the count as seen from the top ancestor, not the descendants as seen from the
// coin itself; thus, this value is counted as the max, not the sum
m_descendants = std::max(m_descendants, descendants);
- effective_value = m_value;
+ effective_value += output.effective_value;
+ fee += output.m_fee;
+ long_term_fee += output.m_long_term_fee;
}
std::vector<CInputCoin>::iterator OutputGroup::Discard(const CInputCoin& output) {
auto it = m_outputs.begin();
while (it != m_outputs.end() && it->outpoint != output.outpoint) ++it;
if (it == m_outputs.end()) return it;
- m_value -= output.effective_value;
+ m_value -= output.txout.nValue;
effective_value -= output.effective_value;
+ fee -= output.m_fee;
+ long_term_fee -= output.m_long_term_fee;
return m_outputs.erase(it);
}
@@ -329,3 +334,35 @@ bool OutputGroup::EligibleForSpending(const CoinEligibilityFilter& eligibility_f
&& m_ancestors <= eligibility_filter.max_ancestors
&& m_descendants <= eligibility_filter.max_descendants;
}
+
+void OutputGroup::SetFees(const CFeeRate effective_feerate, const CFeeRate long_term_feerate)
+{
+ fee = 0;
+ long_term_fee = 0;
+ effective_value = 0;
+ for (CInputCoin& coin : m_outputs) {
+ coin.m_fee = coin.m_input_bytes < 0 ? 0 : effective_feerate.GetFee(coin.m_input_bytes);
+ fee += coin.m_fee;
+
+ coin.m_long_term_fee = coin.m_input_bytes < 0 ? 0 : long_term_feerate.GetFee(coin.m_input_bytes);
+ long_term_fee += coin.m_long_term_fee;
+
+ coin.effective_value = coin.txout.nValue - coin.m_fee;
+ effective_value += coin.effective_value;
+ }
+}
+
+OutputGroup OutputGroup::GetPositiveOnlyGroup()
+{
+ OutputGroup group(*this);
+ for (auto it = group.m_outputs.begin(); it != group.m_outputs.end(); ) {
+ const CInputCoin& coin = *it;
+ // Only include outputs that have a positive effective value (i.e. not dust)
+ if (coin.effective_value <= 0) {
+ it = group.Discard(coin);
+ } else {
+ ++it;
+ }
+ }
+ return group;
+}
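A worked example of SetFees with hypothetical numbers: for a 10,000 sat coin whose m_input_bytes estimate is 68, an effective feerate of 10,000 sat/kvB gives coin.m_fee = 680 sat and coin.effective_value = 10,000 - 680 = 9,320 sat. A coin whose estimated fee exceeds its value ends up with a non-positive effective_value and is dropped by GetPositiveOnlyGroup().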
diff --git a/src/wallet/coinselection.h b/src/wallet/coinselection.h
index 5348401f45..49c1134ec6 100644
--- a/src/wallet/coinselection.h
+++ b/src/wallet/coinselection.h
@@ -9,6 +9,8 @@
#include <primitives/transaction.h>
#include <random.h>
+class CFeeRate;
+
//! target minimum change amount
static constexpr CAmount MIN_CHANGE{COIN / 100};
//! final minimum change amount after paying for fees
@@ -36,6 +38,8 @@ public:
COutPoint outpoint;
CTxOut txout;
CAmount effective_value;
+ CAmount m_fee{0};
+ CAmount m_long_term_fee{0};
/** Pre-computed estimated size of this output as a fully-signed input in a transaction. Can be -1 if it could not be calculated */
int m_input_bytes{-1};
@@ -91,6 +95,10 @@ struct OutputGroup
void Insert(const CInputCoin& output, int depth, bool from_me, size_t ancestors, size_t descendants);
std::vector<CInputCoin>::iterator Discard(const CInputCoin& output);
bool EligibleForSpending(const CoinEligibilityFilter& eligibility_filter) const;
+
+ //! Update the OutputGroup's fee, long_term_fee, and effective_value based on the given feerates
+ void SetFees(const CFeeRate effective_feerate, const CFeeRate long_term_feerate);
+ OutputGroup GetPositiveOnlyGroup();
};
bool SelectCoinsBnB(std::vector<OutputGroup>& utxo_pool, const CAmount& target_value, const CAmount& cost_of_change, std::set<CInputCoin>& out_set, CAmount& value_ret, CAmount not_input_fees);
diff --git a/src/wallet/db.h b/src/wallet/db.h
index 12dc1cc96b..0afaba5fd1 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -9,6 +9,7 @@
#include <clientversion.h>
#include <fs.h>
#include <streams.h>
+#include <util/memory.h>
#include <atomic>
#include <memory>
@@ -154,4 +155,44 @@ public:
virtual std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) = 0;
};
+/** RAII class that provides access to a DummyDatabase. Never fails. */
+class DummyBatch : public DatabaseBatch
+{
+private:
+ bool ReadKey(CDataStream&& key, CDataStream& value) override { return true; }
+ bool WriteKey(CDataStream&& key, CDataStream&& value, bool overwrite=true) override { return true; }
+ bool EraseKey(CDataStream&& key) override { return true; }
+ bool HasKey(CDataStream&& key) override { return true; }
+
+public:
+ void Flush() override {}
+ void Close() override {}
+
+ bool StartCursor() override { return true; }
+ bool ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete) override { return true; }
+ void CloseCursor() override {}
+ bool TxnBegin() override { return true; }
+ bool TxnCommit() override { return true; }
+ bool TxnAbort() override { return true; }
+};
+
+/** A dummy WalletDatabase that does nothing and never fails. Only used by unit tests.
+ **/
+class DummyDatabase : public WalletDatabase
+{
+public:
+ void Open(const char* mode) override {};
+ void AddRef() override {}
+ void RemoveRef() override {}
+ bool Rewrite(const char* pszSkip=nullptr) override { return true; }
+ bool Backup(const std::string& strDest) const override { return true; }
+ void Close() override {}
+ void Flush() override {}
+ bool PeriodicFlush() override { return true; }
+ void IncrementUpdateCounter() override { ++nUpdateCounter; }
+ void ReloadDbEnv() override {}
+ bool Verify(bilingual_str& errorStr) override { return true; }
+ std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override { return MakeUnique<DummyBatch>(); }
+};
+
#endif // BITCOIN_WALLET_DB_H
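A minimal sketch of how a unit test might exercise the new DummyDatabase (test-only usage assumed; not taken from the patch):

    #include <util/memory.h>
    #include <wallet/db.h>

    #include <cassert>
    #include <memory>

    void DummyDatabaseSmokeTest()
    {
        // Every operation on a DummyBatch reports success without touching disk.
        std::unique_ptr<WalletDatabase> db = MakeUnique<DummyDatabase>();
        std::unique_ptr<DatabaseBatch> batch = db->MakeBatch();
        assert(batch->TxnBegin());
        assert(batch->TxnCommit());
    }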
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 781920755c..bf05ef844a 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -9,6 +9,7 @@
#include <node/context.h>
#include <node/ui_interface.h>
#include <outputtype.h>
+#include <univalue.h>
#include <util/check.h>
#include <util/moneystr.h>
#include <util/system.h>
@@ -24,7 +25,7 @@ public:
bool HasWalletSupport() const override {return true;}
//! Return the wallets help message.
- void AddWalletOptions() const override;
+ void AddWalletOptions(ArgsManager& argsman) const override;
//! Wallets parameter interaction
bool ParameterInteraction() const override;
@@ -35,42 +36,43 @@ public:
const WalletInitInterface& g_wallet_init_interface = WalletInit();
-void WalletInit::AddWalletOptions() const
+void WalletInit::AddWalletOptions(ArgsManager& argsman) const
{
- gArgs.AddArg("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-avoidpartialspends", strprintf("Group outputs by address, selecting all or none, instead of selecting on a per-output basis. Privacy is improved as an address is only used once (unless someone sends to it after spending from it), but may result in slightly higher fees as suboptimal coin selection may result due to the added limitation (default: %u (always enabled for wallets with \"avoid_reuse\" enabled))", DEFAULT_AVOIDPARTIALSPENDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-changetype", "What type of change to use (\"legacy\", \"p2sh-segwit\", or \"bech32\"). Default is same as -addresstype, except when -addresstype=p2sh-segwit a native segwit output is used when sending to a native segwit address)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-disablewallet", "Do not load the wallet and disable wallet RPC calls", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-discardfee=<amt>", strprintf("The fee rate (in %s/kB) that indicates your tolerance for discarding change by adding it to the fee (default: %s). "
+ argsman.AddArg("-addresstype", strprintf("What type of addresses to use (\"legacy\", \"p2sh-segwit\", or \"bech32\", default: \"%s\")", FormatOutputType(DEFAULT_ADDRESS_TYPE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-avoidpartialspends", strprintf("Group outputs by address, selecting all or none, instead of selecting on a per-output basis. Privacy is improved as an address is only used once (unless someone sends to it after spending from it), but may result in slightly higher fees as suboptimal coin selection may result due to the added limitation (default: %u (always enabled for wallets with \"avoid_reuse\" enabled))", DEFAULT_AVOIDPARTIALSPENDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-changetype", "What type of change to use (\"legacy\", \"p2sh-segwit\", or \"bech32\"). Default is same as -addresstype, except when -addresstype=p2sh-segwit a native segwit output is used when sending to a native segwit address)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-disablewallet", "Do not load the wallet and disable wallet RPC calls", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-discardfee=<amt>", strprintf("The fee rate (in %s/kB) that indicates your tolerance for discarding change by adding it to the fee (default: %s). "
"Note: An output is discarded if it is dust at this rate, but we will always discard up to the dust relay fee and a discard fee above that is limited by the fee estimate for the longest target",
CURRENCY_UNIT, FormatMoney(DEFAULT_DISCARD_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-fallbackfee=<amt>", strprintf("A fee rate (in %s/kB) that will be used when fee estimation has insufficient data. 0 to entirely disable the fallbackfee feature. (default: %s)",
+ argsman.AddArg("-fallbackfee=<amt>", strprintf("A fee rate (in %s/kB) that will be used when fee estimation has insufficient data. 0 to entirely disable the fallbackfee feature. (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_FALLBACK_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u). Warning: Smaller sizes may increase the risk of losing funds when restoring from an old backup, if none of the addresses in the original keypool have been used.", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-maxtxfee=<amt>", strprintf("Maximum total fees (in %s) to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)",
+ argsman.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u). Warning: Smaller sizes may increase the risk of losing funds when restoring from an old backup, if none of the addresses in the original keypool have been used.", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-maxapsfee=<n>", strprintf("Spend up to this amount in additional (absolute) fees (in %s) if it allows the use of partial spend avoidance (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_MAX_AVOIDPARTIALSPEND_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-maxtxfee=<amt>", strprintf("Maximum total fees (in %s) to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
- gArgs.AddArg("-mintxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)",
+ argsman.AddArg("-mintxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MINFEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-paytxfee=<amt>", strprintf("Fee (in %s/kB) to add to transactions you send (default: %s)",
+ argsman.AddArg("-paytxfee=<amt>", strprintf("Fee (in %s/kB) to add to transactions you send (default: %s)",
CURRENCY_UNIT, FormatMoney(CFeeRate{DEFAULT_PAY_TX_FEE}.GetFeePerK())), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-rescan", "Rescan the block chain for missing wallet transactions on startup", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-spendzeroconfchange", strprintf("Spend unconfirmed change when sending transactions (default: %u)", DEFAULT_SPEND_ZEROCONF_CHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-txconfirmtarget=<n>", strprintf("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)", DEFAULT_TX_CONFIRM_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-wallet=<path>", "Specify wallet database path. Can be specified multiple times to load multiple wallets. Path is interpreted relative to <walletdir> if it is not absolute, and will be created if it does not exist (as a directory containing a wallet.dat file and log files). For backwards compatibility this will also accept names of existing data files in <walletdir>.)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
- gArgs.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
+ argsman.AddArg("-rescan", "Rescan the block chain for missing wallet transactions on startup", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-spendzeroconfchange", strprintf("Spend unconfirmed change when sending transactions (default: %u)", DEFAULT_SPEND_ZEROCONF_CHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-txconfirmtarget=<n>", strprintf("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)", DEFAULT_TX_CONFIRM_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-wallet=<path>", "Specify wallet database path. Can be specified multiple times to load multiple wallets. Path is interpreted relative to <walletdir> if it is not absolute, and will be created if it does not exist (as a directory containing a wallet.dat file and log files). For backwards compatibility this will also accept names of existing data files in <walletdir>.)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
+ argsman.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
#if HAVE_SYSTEM
- gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
#endif
- gArgs.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup"
+ argsman.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ argsman.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup"
" (1 = keep tx meta data e.g. payment request information, 2 = drop tx meta data)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DEFAULT_WALLET_DBLOGSIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
- gArgs.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
- gArgs.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", DEFAULT_WALLET_PRIVDB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
- gArgs.AddArg("-walletrejectlongchains", strprintf("Wallet will not create transactions that violate mempool chain limits (default: %u)", DEFAULT_WALLET_REJECT_LONG_CHAINS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DEFAULT_WALLET_DBLOGSIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", DEFAULT_WALLET_PRIVDB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
+ argsman.AddArg("-walletrejectlongchains", strprintf("Wallet will not create transactions that violate mempool chain limits (default: %u)", DEFAULT_WALLET_REJECT_LONG_CHAINS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
}
bool WalletInit::ParameterInteraction() const
@@ -118,6 +120,14 @@ void WalletInit::Construct(NodeContext& node) const
LogPrintf("Wallet disabled!\n");
return;
}
- args.SoftSetArg("-wallet", "");
+ // If there's no -wallet setting with a list of wallets to load, set it to
+ // load the default "" wallet.
+ if (!args.IsArgSet("wallet")) {
+ args.LockSettings([&](util::Settings& settings) {
+ util::SettingsValue wallets(util::SettingsValue::VARR);
+ wallets.push_back(""); // Default wallet name is ""
+ settings.rw_settings["wallet"] = wallets;
+ });
+ }
node.chain_clients.emplace_back(interfaces::MakeWalletClient(*node.chain, args, args.GetArgs("-wallet")));
}
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp
index 2a81d30133..ae14769edb 100644
--- a/src/wallet/load.cpp
+++ b/src/wallet/load.cpp
@@ -13,6 +13,8 @@
#include <wallet/wallet.h>
#include <wallet/walletdb.h>
+#include <univalue.h>
+
bool VerifyWallets(interfaces::Chain& chain, const std::vector<std::string>& wallet_files)
{
if (gArgs.IsArgSet("-walletdir")) {
@@ -120,3 +122,26 @@ void UnloadWallets()
UnloadWallet(std::move(wallet));
}
}
+
+bool AddWalletSetting(interfaces::Chain& chain, const std::string& wallet_name)
+{
+ util::SettingsValue setting_value = chain.getRwSetting("wallet");
+ if (!setting_value.isArray()) setting_value.setArray();
+ for (const util::SettingsValue& value : setting_value.getValues()) {
+ if (value.isStr() && value.get_str() == wallet_name) return true;
+ }
+ setting_value.push_back(wallet_name);
+ return chain.updateRwSetting("wallet", setting_value);
+}
+
+bool RemoveWalletSetting(interfaces::Chain& chain, const std::string& wallet_name)
+{
+ util::SettingsValue setting_value = chain.getRwSetting("wallet");
+ if (!setting_value.isArray()) return true;
+ util::SettingsValue new_value(util::SettingsValue::VARR);
+ for (const util::SettingsValue& value : setting_value.getValues()) {
+ if (!value.isStr() || value.get_str() != wallet_name) new_value.push_back(value);
+ }
+ if (new_value.size() == setting_value.size()) return true;
+ return chain.updateRwSetting("wallet", new_value);
+}
diff --git a/src/wallet/load.h b/src/wallet/load.h
index ff4f5b4b23..30f1a4c90d 100644
--- a/src/wallet/load.h
+++ b/src/wallet/load.h
@@ -34,4 +34,10 @@ void StopWallets();
//! Close all wallets.
void UnloadWallets();
+//! Add wallet name to persistent configuration so it will be loaded on startup.
+bool AddWalletSetting(interfaces::Chain& chain, const std::string& wallet_name);
+
+//! Remove wallet name from persistent configuration so it will not be loaded on startup.
+bool RemoveWalletSetting(interfaces::Chain& chain, const std::string& wallet_name);
+
#endif // BITCOIN_WALLET_LOAD_H
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 3b752ca936..e0c3a1287a 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -34,7 +34,7 @@ std::string static EncodeDumpString(const std::string &str) {
std::stringstream ret;
for (const unsigned char c : str) {
if (c <= 32 || c >= 128 || c == '%') {
- ret << '%' << HexStr(&c, &c + 1);
+ ret << '%' << HexStr(Span<const unsigned char>(&c, 1));
} else {
ret << c;
}
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 9d334063c4..17512265b5 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -30,6 +30,7 @@
#include <wallet/coincontrol.h>
#include <wallet/context.h>
#include <wallet/feebumper.h>
+#include <wallet/load.h>
#include <wallet/rpcwallet.h>
#include <wallet/wallet.h>
#include <wallet/walletdb.h>
@@ -229,6 +230,18 @@ static void SetFeeEstimateMode(const CWallet* pwallet, CCoinControl& cc, const U
}
}
+static void UpdateWalletSetting(interfaces::Chain& chain,
+ const std::string& wallet_name,
+ const UniValue& load_on_startup,
+ std::vector<bilingual_str>& warnings)
+{
+ if (load_on_startup.isTrue() && !AddWalletSetting(chain, wallet_name)) {
+ warnings.emplace_back(Untranslated("Wallet load on startup setting could not be updated, so the wallet may not be loaded on the next node startup."));
+ } else if (load_on_startup.isFalse() && !RemoveWalletSetting(chain, wallet_name)) {
+ warnings.emplace_back(Untranslated("Wallet load on startup setting could not be updated, so the wallet may still be loaded on the next node startup."));
+ }
+}
+
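For illustration (a hedged reading of the new parameter documented below): calling loadwallet with load_on_startup=true both loads the wallet and adds its name to the persistent startup list, while unloadwallet with load_on_startup=false unloads it and removes the name; passing null (the default) leaves the persisted setting untouched in both cases.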
static UniValue getnewaddress(const JSONRPCRequest& request)
{
RPCHelpMan{"getnewaddress",
@@ -1484,7 +1497,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
{RPCResult::Type::ARR, "removed", "<structure is the same as \"transactions\" above, only present if include_removed=true>\n"
"Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count."
, {{RPCResult::Type::ELISION, "", ""},}},
- {RPCResult::Type::STR_HEX, "lastblock", "The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones"},
+ {RPCResult::Type::STR_HEX, "lastblock", "The hash of the block (target_confirmations-1) from the best block on the main chain, or the genesis hash if the referenced block does not exist yet. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones"},
}
},
RPCExamples{
@@ -1567,6 +1580,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
}
uint256 lastblock;
+ target_confirms = std::min(target_confirms, wallet.GetLastBlockHeight() + 1);
CHECK_NONFATAL(wallet.chain().findAncestorByHeight(wallet.GetLastBlockHash(), wallet.GetLastBlockHeight() + 1 - target_confirms, FoundBlock().hash(lastblock)));
UniValue ret(UniValue::VOBJ);
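A concrete reading of the new clamp (hypothetical numbers): if the wallet's last block height is 10 and the caller asks for target_confirmations=100, target_confirms is reduced to 11, so findAncestorByHeight resolves height 0 and "lastblock" becomes the genesis hash, as described in the result help above, instead of failing the CHECK_NONFATAL.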
@@ -2338,7 +2352,7 @@ static UniValue getwalletinfo(const JSONRPCRequest& request)
{RPCResult::Type::NUM_TIME, "keypoololdest", "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool. Legacy wallets only."},
{RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"},
{RPCResult::Type::NUM, "keypoolsize_hd_internal", "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"},
- {RPCResult::Type::NUM_TIME, "unlocked_until", "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked"},
+ {RPCResult::Type::NUM_TIME, "unlocked_until", /* optional */ true, "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked (only present for passphrase-encrypted wallets)"},
{RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"},
{RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "the Hash160 of the HD seed (only present when HD is enabled)"},
{RPCResult::Type::BOOL, "private_keys_enabled", "false if privatekeys are disabled for this wallet (enforced watch-only wallet)"},
@@ -2483,6 +2497,7 @@ static UniValue loadwallet(const JSONRPCRequest& request)
"\napplied to the new wallet (eg -zapwallettxes, rescan, etc).\n",
{
{"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The wallet directory or .dat file."},
+ {"load_on_startup", RPCArg::Type::BOOL, /* default */ "null", "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -2515,6 +2530,8 @@ static UniValue loadwallet(const JSONRPCRequest& request)
std::shared_ptr<CWallet> const wallet = LoadWallet(*context.chain, location, error, warnings);
if (!wallet) throw JSONRPCError(RPC_WALLET_ERROR, error.original);
+ UpdateWalletSetting(*context.chain, location.GetName(), request.params[1], warnings);
+
UniValue obj(UniValue::VOBJ);
obj.pushKV("name", wallet->GetName());
obj.pushKV("warning", Join(warnings, Untranslated("\n")).original);
@@ -2599,6 +2616,7 @@ static UniValue createwallet(const JSONRPCRequest& request)
{"passphrase", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Encrypt the wallet with this passphrase."},
{"avoid_reuse", RPCArg::Type::BOOL, /* default */ "false", "Keep track of coin reuse, and treat dirty and clean coins differently with privacy considerations in mind."},
{"descriptors", RPCArg::Type::BOOL, /* default */ "false", "Create a native descriptor wallet. The wallet will use descriptors internally to handle address creation"},
+ {"load_on_startup", RPCArg::Type::BOOL, /* default */ "null", "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -2654,6 +2672,8 @@ static UniValue createwallet(const JSONRPCRequest& request)
// no default case, so the compiler can warn about missing cases
}
+ UpdateWalletSetting(*context.chain, request.params[0].get_str(), request.params[6], warnings);
+
UniValue obj(UniValue::VOBJ);
obj.pushKV("name", wallet->GetName());
obj.pushKV("warning", Join(warnings, Untranslated("\n")).original);
@@ -2668,8 +2688,11 @@ static UniValue unloadwallet(const JSONRPCRequest& request)
"Specifying the wallet name on a wallet endpoint is invalid.",
{
{"wallet_name", RPCArg::Type::STR, /* default */ "the wallet name from the RPC request", "The name of the wallet to unload."},
+ {"load_on_startup", RPCArg::Type::BOOL, /* default */ "null", "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
},
- RPCResult{RPCResult::Type::NONE, "", ""},
+ RPCResult{RPCResult::Type::OBJ, "", "", {
+ {RPCResult::Type::STR, "warning", "Warning message if wallet was not unloaded cleanly."},
+ }},
RPCExamples{
HelpExampleCli("unloadwallet", "wallet_name")
+ HelpExampleRpc("unloadwallet", "wallet_name")
@@ -2697,9 +2720,15 @@ static UniValue unloadwallet(const JSONRPCRequest& request)
throw JSONRPCError(RPC_MISC_ERROR, "Requested wallet already unloaded");
}
+ interfaces::Chain& chain = wallet->chain();
+ std::vector<bilingual_str> warnings;
+
UnloadWallet(std::move(wallet));
+ UpdateWalletSetting(chain, wallet_name, request.params[1], warnings);
- return NullUniValue;
+ UniValue result(UniValue::VOBJ);
+ result.pushKV("warning", Join(warnings, Untranslated("\n")).original);
+ return result;
}
static UniValue listunspent(const JSONRPCRequest& request)
@@ -2807,6 +2836,15 @@ static UniValue listunspent(const JSONRPCRequest& request)
if (!request.params[4].isNull()) {
const UniValue& options = request.params[4].get_obj();
+ RPCTypeCheckObj(options,
+ {
+ {"minimumAmount", UniValueType()},
+ {"maximumAmount", UniValueType()},
+ {"minimumSumAmount", UniValueType()},
+ {"maximumCount", UniValueType(UniValue::VNUM)},
+ },
+ true, true);
+
if (options.exists("minimumAmount"))
nMinimumAmount = AmountFromValue(options["minimumAmount"]);
@@ -3165,7 +3203,7 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
{
{RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"},
{RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
- {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)",
+ {RPCResult::Type::ARR, "errors", /* optional */ true, "Script verification errors (if there are any)",
{
{RPCResult::Type::OBJ, "", "",
{
@@ -3222,59 +3260,73 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
static UniValue bumpfee(const JSONRPCRequest& request)
{
- RPCHelpMan{"bumpfee",
- "\nBumps the fee of an opt-in-RBF transaction T, replacing it with a new transaction B.\n"
- "An opt-in RBF transaction with the given txid must be in the wallet.\n"
- "The command will pay the additional fee by reducing change outputs or adding inputs when necessary. It may add a new change output if one does not already exist.\n"
- "All inputs in the original transaction will be included in the replacement transaction.\n"
- "The command will fail if the wallet or mempool contains a transaction that spends one of T's outputs.\n"
- "By default, the new fee will be calculated automatically using estimatesmartfee.\n"
- "The user can specify a confirmation target for estimatesmartfee.\n"
- "Alternatively, the user can specify a fee_rate (" + CURRENCY_UNIT + " per kB) for the new transaction.\n"
- "At a minimum, the new fee rate must be high enough to pay an additional new relay fee (incrementalfee\n"
- "returned by getnetworkinfo) to enter the node's mempool.\n",
+ bool want_psbt = request.strMethod == "psbtbumpfee";
+
+ RPCHelpMan{request.strMethod,
+ "\nBumps the fee of an opt-in-RBF transaction T, replacing it with a new transaction B.\n"
+ + std::string(want_psbt ? "Returns a PSBT instead of creating and signing a new transaction.\n" : "") +
+ "An opt-in RBF transaction with the given txid must be in the wallet.\n"
+ "The command will pay the additional fee by reducing change outputs or adding inputs when necessary. It may add a new change output if one does not already exist.\n"
+ "All inputs in the original transaction will be included in the replacement transaction.\n"
+ "The command will fail if the wallet or mempool contains a transaction that spends one of T's outputs.\n"
+ "By default, the new fee will be calculated automatically using estimatesmartfee.\n"
+ "The user can specify a confirmation target for estimatesmartfee.\n"
+ "Alternatively, the user can specify a fee_rate (" + CURRENCY_UNIT + " per kB) for the new transaction.\n"
+ "At a minimum, the new fee rate must be high enough to pay an additional new relay fee (incrementalfee\n"
+ "returned by getnetworkinfo) to enter the node's mempool.\n",
+ {
+ {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The txid to be bumped"},
+ {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
{
- {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The txid to be bumped"},
- {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
- {
- {"conf_target", RPCArg::Type::NUM, /* default */ "wallet default", "Confirmation target (in blocks)"},
- {"fee_rate", RPCArg::Type::NUM, /* default */ "fall back to 'conf_target'", "fee rate (NOT total fee) to pay, in " + CURRENCY_UNIT + " per kB\n"
- " Specify a fee rate instead of relying on the built-in fee estimator.\n"
- "Must be at least 0.0001 " + CURRENCY_UNIT + " per kB higher than the current transaction fee rate.\n"},
- {"replaceable", RPCArg::Type::BOOL, /* default */ "true", "Whether the new transaction should still be\n"
- " marked bip-125 replaceable. If true, the sequence numbers in the transaction will\n"
- " be left unchanged from the original. If false, any input sequence numbers in the\n"
- " original transaction that were less than 0xfffffffe will be increased to 0xfffffffe\n"
- " so the new transaction will not be explicitly bip-125 replaceable (though it may\n"
- " still be replaceable in practice, for example if it has unconfirmed ancestors which\n"
- " are replaceable)."},
- {"estimate_mode", RPCArg::Type::STR, /* default */ "unset", std::string() + "The fee estimate mode, must be one of (case insensitive):\n"
- " \"" + FeeModes("\"\n\"") + "\""},
- },
- "options"},
- },
- RPCResult{
- RPCResult::Type::OBJ, "", "", {
- {RPCResult::Type::STR, "psbt", "The base64-encoded unsigned PSBT of the new transaction. Only returned when wallet private keys are disabled."},
- {RPCResult::Type::STR_HEX, "txid", "The id of the new transaction. Only returned when wallet private keys are enabled."},
- {RPCResult::Type::STR_AMOUNT, "origfee", "The fee of the replaced transaction."},
- {RPCResult::Type::STR_AMOUNT, "fee", "The fee of the new transaction."},
- {RPCResult::Type::ARR, "errors", "Errors encountered during processing (may be empty).",
- {
- {RPCResult::Type::STR, "", ""},
- }},
- }
- },
- RPCExamples{
- "\nBump the fee, get the new transaction\'s txid\n" +
- HelpExampleCli("bumpfee", "<txid>")
+ {"conf_target", RPCArg::Type::NUM, /* default */ "wallet default", "Confirmation target (in blocks)"},
+ {"fee_rate", RPCArg::Type::NUM, /* default */ "fall back to 'conf_target'", "fee rate (NOT total fee) to pay, in " + CURRENCY_UNIT + " per kB\n"
+ " Specify a fee rate instead of relying on the built-in fee estimator.\n"
+ "Must be at least 0.0001 " + CURRENCY_UNIT + " per kB higher than the current transaction fee rate.\n"},
+ {"replaceable", RPCArg::Type::BOOL, /* default */ "true", "Whether the new transaction should still be\n"
+ " marked bip-125 replaceable. If true, the sequence numbers in the transaction will\n"
+ " be left unchanged from the original. If false, any input sequence numbers in the\n"
+ " original transaction that were less than 0xfffffffe will be increased to 0xfffffffe\n"
+ " so the new transaction will not be explicitly bip-125 replaceable (though it may\n"
+ " still be replaceable in practice, for example if it has unconfirmed ancestors which\n"
+ " are replaceable)."},
+ {"estimate_mode", RPCArg::Type::STR, /* default */ "unset", std::string() + "The fee estimate mode, must be one of (case insensitive):\n"
+ " \"" + FeeModes("\"\n\"") + "\""},
},
- }.Check(request);
+ "options"},
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>(
+ {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded unsigned PSBT of the new transaction." + std::string(want_psbt ? "" : " Only returned when wallet private keys are disabled. (DEPRECATED)")},
+ },
+ want_psbt ? std::vector<RPCResult>{} : std::vector<RPCResult>{{RPCResult::Type::STR_HEX, "txid", "The id of the new transaction. Only returned when wallet private keys are enabled."}}
+ ),
+ {
+ {RPCResult::Type::STR_AMOUNT, "origfee", "The fee of the replaced transaction."},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The fee of the new transaction."},
+ {RPCResult::Type::ARR, "errors", "Errors encountered during processing (may be empty).",
+ {
+ {RPCResult::Type::STR, "", ""},
+ }},
+ })
+ },
+ RPCExamples{
+ "\nBump the fee, get the new transaction\'s" + std::string(want_psbt ? "psbt" : "txid") + "\n" +
+ HelpExampleCli(request.strMethod, "<txid>")
+ },
+ }.Check(request);
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
if (!wallet) return NullUniValue;
CWallet* const pwallet = wallet.get();
+ if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && !want_psbt) {
+ if (!pwallet->chain().rpcEnableDeprecated("bumpfee")) {
+ throw JSONRPCError(RPC_METHOD_DEPRECATED, "Using bumpfee with wallets that have private keys disabled is deprecated. Use psbtbumpfee instead or restart bitcoind with -deprecatedrpc=bumpfee. This functionality will be removed in 0.22");
+ }
+ want_psbt = true;
+ }
+
RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VOBJ});
uint256 hash(ParseHashV(request.params[0], "txid"));
@@ -3359,7 +3411,7 @@ static UniValue bumpfee(const JSONRPCRequest& request)
// If wallet private keys are enabled, return the new transaction id,
// otherwise return the base64-encoded unsigned PSBT of the new transaction.
- if (!pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
+ if (!want_psbt) {
if (!feebumper::SignTransaction(*pwallet, mtx)) {
throw JSONRPCError(RPC_WALLET_ERROR, "Can't sign transaction.");
}
@@ -3392,6 +3444,11 @@ static UniValue bumpfee(const JSONRPCRequest& request)
return result;
}
+static UniValue psbtbumpfee(const JSONRPCRequest& request)
+{
+ return bumpfee(request);
+}
+
UniValue rescanblockchain(const JSONRPCRequest& request)
{
RPCHelpMan{"rescanblockchain",
@@ -3688,7 +3745,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
if (meta->has_key_origin) {
ret.pushKV("hdkeypath", WriteHDKeypath(meta->key_origin.path));
ret.pushKV("hdseedid", meta->hd_seed_id.GetHex());
- ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint, meta->key_origin.fingerprint + 4));
+ ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint));
}
}
}
@@ -4137,7 +4194,8 @@ static const CRPCCommand commands[] =
{ "wallet", "addmultisigaddress", &addmultisigaddress, {"nrequired","keys","label","address_type"} },
{ "wallet", "backupwallet", &backupwallet, {"destination"} },
{ "wallet", "bumpfee", &bumpfee, {"txid", "options"} },
- { "wallet", "createwallet", &createwallet, {"wallet_name", "disable_private_keys", "blank", "passphrase", "avoid_reuse", "descriptors"} },
+ { "wallet", "psbtbumpfee", &psbtbumpfee, {"txid", "options"} },
+ { "wallet", "createwallet", &createwallet, {"wallet_name", "disable_private_keys", "blank", "passphrase", "avoid_reuse", "descriptors", "load_on_startup"} },
{ "wallet", "dumpprivkey", &dumpprivkey, {"address"} },
{ "wallet", "dumpwallet", &dumpwallet, {"filename"} },
{ "wallet", "encryptwallet", &encryptwallet, {"passphrase"} },
@@ -4170,7 +4228,7 @@ static const CRPCCommand commands[] =
{ "wallet", "listunspent", &listunspent, {"minconf","maxconf","addresses","include_unsafe","query_options"} },
{ "wallet", "listwalletdir", &listwalletdir, {} },
{ "wallet", "listwallets", &listwallets, {} },
- { "wallet", "loadwallet", &loadwallet, {"filename"} },
+ { "wallet", "loadwallet", &loadwallet, {"filename", "load_on_startup"} },
{ "wallet", "lockunspent", &lockunspent, {"unlock","transactions"} },
{ "wallet", "removeprunedfunds", &removeprunedfunds, {"txid"} },
{ "wallet", "rescanblockchain", &rescanblockchain, {"start_height", "stop_height"} },
@@ -4182,7 +4240,7 @@ static const CRPCCommand commands[] =
{ "wallet", "setwalletflag", &setwalletflag, {"flag","value"} },
{ "wallet", "signmessage", &signmessage, {"address","message"} },
{ "wallet", "signrawtransactionwithwallet", &signrawtransactionwithwallet, {"hexstring","prevtxs","sighashtype"} },
- { "wallet", "unloadwallet", &unloadwallet, {"wallet_name"} },
+ { "wallet", "unloadwallet", &unloadwallet, {"wallet_name", "load_on_startup"} },
{ "wallet", "upgradewallet", &upgradewallet, {"version"} },
{ "wallet", "walletcreatefundedpsbt", &walletcreatefundedpsbt, {"inputs","outputs","locktime","options","bip32derivs"} },
{ "wallet", "walletlock", &walletlock, {} },
diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp
index af57210f01..c0755db751 100644
--- a/src/wallet/salvage.cpp
+++ b/src/wallet/salvage.cpp
@@ -16,14 +16,12 @@ static const char *HEADER_END = "HEADER=END";
static const char *DATA_END = "DATA=END";
typedef std::pair<std::vector<unsigned char>, std::vector<unsigned char> > KeyValPair;
-bool RecoverDatabaseFile(const fs::path& file_path)
+bool RecoverDatabaseFile(const fs::path& file_path, bilingual_str& error, std::vector<bilingual_str>& warnings)
{
std::string filename;
std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename);
- bilingual_str open_err;
- if (!env->Open(open_err)) {
- tfm::format(std::cerr, "%s\n", open_err.original);
+ if (!env->Open(error)) {
return false;
}
@@ -39,11 +37,9 @@ bool RecoverDatabaseFile(const fs::path& file_path)
int result = env->dbenv->dbrename(nullptr, filename.c_str(), nullptr,
newFilename.c_str(), DB_AUTO_COMMIT);
- if (result == 0)
- LogPrintf("Renamed %s to %s\n", filename, newFilename);
- else
+ if (result != 0)
{
- LogPrintf("Failed to rename %s to %s\n", filename, newFilename);
+ error = strprintf(Untranslated("Failed to rename %s to %s"), filename, newFilename);
return false;
}
@@ -60,10 +56,10 @@ bool RecoverDatabaseFile(const fs::path& file_path)
Db db(env->dbenv.get(), 0);
result = db.verify(newFilename.c_str(), nullptr, &strDump, DB_SALVAGE | DB_AGGRESSIVE);
if (result == DB_VERIFY_BAD) {
- LogPrintf("Salvage: Database salvage found errors, all data may not be recoverable.\n");
+ warnings.push_back(Untranslated("Salvage: Database salvage found errors, all data may not be recoverable."));
}
if (result != 0 && result != DB_VERIFY_BAD) {
- LogPrintf("Salvage: Database salvage failed with result %d.\n", result);
+ error = strprintf(Untranslated("Salvage: Database salvage failed with result %d."), result);
return false;
}
@@ -87,7 +83,7 @@ bool RecoverDatabaseFile(const fs::path& file_path)
break;
getline(strDump, valueHex);
if (valueHex == DATA_END) {
- LogPrintf("Salvage: WARNING: Number of keys in data does not match number of values.\n");
+ warnings.push_back(Untranslated("Salvage: WARNING: Number of keys in data does not match number of values."));
break;
}
salvagedData.push_back(make_pair(ParseHex(keyHex), ParseHex(valueHex)));
@@ -96,7 +92,7 @@ bool RecoverDatabaseFile(const fs::path& file_path)
bool fSuccess;
if (keyHex != DATA_END) {
- LogPrintf("Salvage: WARNING: Unexpected end of file while reading salvage output.\n");
+ warnings.push_back(Untranslated("Salvage: WARNING: Unexpected end of file while reading salvage output."));
fSuccess = false;
} else {
fSuccess = (result == 0);
@@ -104,10 +100,9 @@ bool RecoverDatabaseFile(const fs::path& file_path)
if (salvagedData.empty())
{
- LogPrintf("Salvage(aggressive) found no records in %s.\n", newFilename);
+ error = strprintf(Untranslated("Salvage(aggressive) found no records in %s."), newFilename);
return false;
}
- LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size());
std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0);
int ret = pdbCopy->open(nullptr, // Txn pointer
@@ -117,7 +112,7 @@ bool RecoverDatabaseFile(const fs::path& file_path)
DB_CREATE, // Flags
0);
if (ret > 0) {
- LogPrintf("Cannot create database file %s\n", filename);
+ error = strprintf(Untranslated("Cannot create database file %s"), filename);
pdbCopy->close(0);
return false;
}
@@ -141,7 +136,7 @@ bool RecoverDatabaseFile(const fs::path& file_path)
}
if (!fReadOK)
{
- LogPrintf("WARNING: WalletBatch::Recover skipping %s: %s\n", strType, strErr);
+ warnings.push_back(strprintf(Untranslated("WARNING: WalletBatch::Recover skipping %s: %s"), strType, strErr));
continue;
}
Dbt datKey(&row.first[0], row.first.size());
diff --git a/src/wallet/salvage.h b/src/wallet/salvage.h
index e361930f5e..5a8538f942 100644
--- a/src/wallet/salvage.h
+++ b/src/wallet/salvage.h
@@ -9,6 +9,8 @@
#include <fs.h>
#include <streams.h>
-bool RecoverDatabaseFile(const fs::path& file_path);
+struct bilingual_str;
+
+bool RecoverDatabaseFile(const fs::path& file_path, bilingual_str& error, std::vector<bilingual_str>& warnings);
#endif // BITCOIN_WALLET_SALVAGE_H
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index d2770a46f7..7ef06663b5 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -630,13 +630,13 @@ static size_t CalculateNestedKeyhashInputSize(bool use_max_sig)
CPubKey pubkey = key.GetPubKey();
// Generate pubkey hash
- uint160 key_hash(Hash160(pubkey.begin(), pubkey.end()));
+ uint160 key_hash(Hash160(pubkey));
// Create inner-script to enter into keystore. Key hash can't be 0...
CScript inner_script = CScript() << OP_0 << std::vector<unsigned char>(key_hash.begin(), key_hash.end());
// Create outer P2SH script for the output
- uint160 script_id(Hash160(inner_script.begin(), inner_script.end()));
+ uint160 script_id(Hash160(inner_script));
CScript script_pubkey = CScript() << OP_HASH160 << std::vector<unsigned char>(script_id.begin(), script_id.end()) << OP_EQUAL;
// Add inner-script to key store and key to watchonly
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index cee2f2214c..fa00d12551 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -276,7 +276,7 @@ std::string COutput::ToString() const
const CWalletTx* CWallet::GetWalletTx(const uint256& hash) const
{
- LOCK(cs_wallet);
+ AssertLockHeld(cs_wallet);
std::map<uint256, CWalletTx>::const_iterator it = mapWallet.find(hash);
if (it == mapWallet.end())
return nullptr;
@@ -1210,15 +1210,13 @@ void CWallet::BlockUntilSyncedToCurrentChain() const {
isminetype CWallet::IsMine(const CTxIn &txin) const
{
+ AssertLockHeld(cs_wallet);
+ std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(txin.prevout.hash);
+ if (mi != mapWallet.end())
{
- LOCK(cs_wallet);
- std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(txin.prevout.hash);
- if (mi != mapWallet.end())
- {
- const CWalletTx& prev = (*mi).second;
- if (txin.prevout.n < prev.tx->vout.size())
- return IsMine(prev.tx->vout[txin.prevout.n]);
- }
+ const CWalletTx& prev = (*mi).second;
+ if (txin.prevout.n < prev.tx->vout.size())
+ return IsMine(prev.tx->vout[txin.prevout.n]);
}
return ISMINE_NO;
}
@@ -1243,16 +1241,19 @@ CAmount CWallet::GetDebit(const CTxIn &txin, const isminefilter& filter) const
isminetype CWallet::IsMine(const CTxOut& txout) const
{
+ AssertLockHeld(cs_wallet);
return IsMine(txout.scriptPubKey);
}
isminetype CWallet::IsMine(const CTxDestination& dest) const
{
+ AssertLockHeld(cs_wallet);
return IsMine(GetScriptForDestination(dest));
}
isminetype CWallet::IsMine(const CScript& script) const
{
+ AssertLockHeld(cs_wallet);
isminetype result = ISMINE_NO;
for (const auto& spk_man_pair : m_spk_managers) {
result = std::max(result, spk_man_pair.second->IsMine(script));
@@ -1264,6 +1265,7 @@ CAmount CWallet::GetCredit(const CTxOut& txout, const isminefilter& filter) cons
{
if (!MoneyRange(txout.nValue))
throw std::runtime_error(std::string(__func__) + ": value out of range");
+ LOCK(cs_wallet);
return ((IsMine(txout) & filter) ? txout.nValue : 0);
}
@@ -1281,13 +1283,12 @@ bool CWallet::IsChange(const CScript& script) const
// a better way of identifying which outputs are 'the send' and which are
// 'the change' will need to be implemented (maybe extend CWalletTx to remember
// which output, if any, was change).
+ AssertLockHeld(cs_wallet);
if (IsMine(script))
{
CTxDestination address;
if (!ExtractDestination(script, address))
return true;
-
- LOCK(cs_wallet);
if (!FindAddressBookEntry(address)) {
return true;
}
@@ -1297,6 +1298,7 @@ bool CWallet::IsChange(const CScript& script) const
CAmount CWallet::GetChange(const CTxOut& txout) const
{
+ AssertLockHeld(cs_wallet);
if (!MoneyRange(txout.nValue))
throw std::runtime_error(std::string(__func__) + ": value out of range");
return (IsChange(txout) ? txout.nValue : 0);
@@ -1304,6 +1306,7 @@ CAmount CWallet::GetChange(const CTxOut& txout) const
bool CWallet::IsMine(const CTransaction& tx) const
{
+ AssertLockHeld(cs_wallet);
for (const CTxOut& txout : tx.vout)
if (IsMine(txout))
return true;
@@ -1362,6 +1365,7 @@ CAmount CWallet::GetCredit(const CTransaction& tx, const isminefilter& filter) c
CAmount CWallet::GetChange(const CTransaction& tx) const
{
+ LOCK(cs_wallet);
CAmount nChange = 0;
for (const CTxOut& txout : tx.vout)
{
@@ -1597,6 +1601,7 @@ void CWalletTx::GetAmounts(std::list<COutputEntry>& listReceived,
nFee = nDebit - nValueOut;
}
+ LOCK(pwallet->cs_wallet);
// Sent/received.
for (unsigned int i = 0; i < tx->vout.size(); ++i)
{
@@ -1983,6 +1988,7 @@ bool CWalletTx::IsTrusted(std::set<uint256>& trusted_parents) const
if (!InMempool()) return false;
// Trusted if all inputs are from us and are in the mempool:
+ LOCK(pwallet->cs_wallet);
for (const CTxIn& txin : tx->vin)
{
// Transactions not sent by us: not trusted
@@ -2320,27 +2326,15 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibil
for (OutputGroup& group : groups) {
if (!group.EligibleForSpending(eligibility_filter)) continue;
- group.fee = 0;
- group.long_term_fee = 0;
- group.effective_value = 0;
- for (auto it = group.m_outputs.begin(); it != group.m_outputs.end(); ) {
- const CInputCoin& coin = *it;
- CAmount effective_value = coin.txout.nValue - (coin.m_input_bytes < 0 ? 0 : coin_selection_params.effective_fee.GetFee(coin.m_input_bytes));
- // Only include outputs that are positive effective value (i.e. not dust)
- if (effective_value > 0) {
- group.fee += coin.m_input_bytes < 0 ? 0 : coin_selection_params.effective_fee.GetFee(coin.m_input_bytes);
- group.long_term_fee += coin.m_input_bytes < 0 ? 0 : long_term_feerate.GetFee(coin.m_input_bytes);
- if (coin_selection_params.m_subtract_fee_outputs) {
- group.effective_value += coin.txout.nValue;
- } else {
- group.effective_value += effective_value;
- }
- ++it;
- } else {
- it = group.Discard(coin);
- }
+ if (coin_selection_params.m_subtract_fee_outputs) {
+ // Set the effective feerate to 0 as we don't want to use the effective value since the fees will be deducted from the output
+ group.SetFees(CFeeRate(0) /* effective_feerate */, long_term_feerate);
+ } else {
+ group.SetFees(coin_selection_params.effective_fee, long_term_feerate);
}
- if (group.effective_value > 0) utxo_pool.push_back(group);
+
+ OutputGroup pos_group = group.GetPositiveOnlyGroup();
+ if (pos_group.effective_value > 0) utxo_pool.push_back(pos_group);
}
// Calculate the fees for things that aren't inputs
CAmount not_input_fees = coin_selection_params.effective_fee.GetFee(coin_selection_params.tx_noinputs_size);
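Editor's note: the refactor above replaces the inline per-coin loop with OutputGroup::SetFees and GetPositiveOnlyGroup; the behaviour being preserved is that only coins whose value exceeds their own spending fee (positive effective value) reach coin selection, and that with subtract-fee-from-outputs the effective feerate is treated as zero. A simplified, self-contained version of that filter, using hypothetical CoinSketch/GroupSketch types with amounts in satoshis:

    #include <cassert>
    #include <vector>

    struct CoinSketch {
        long long value;       // satoshis
        long long input_fee;   // cost of spending this coin at the effective feerate
        long long effective_value() const { return value - input_fee; }
    };

    struct GroupSketch {
        std::vector<CoinSketch> coins;
        long long effective_value = 0;

        GroupSketch PositiveOnly() const
        {
            GroupSketch out;
            for (const CoinSketch& c : coins) {
                if (c.effective_value() > 0) {   // drop coins that are dust at this feerate
                    out.coins.push_back(c);
                    out.effective_value += c.effective_value();
                }
            }
            return out;
        }
    };

    int main()
    {
        GroupSketch g{{{10000, 1500}, {1000, 1500}}, 0};
        GroupSketch pos = g.PositiveOnly();
        assert(pos.coins.size() == 1 && pos.effective_value == 8500);
    }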
@@ -2486,23 +2480,6 @@ bool CWallet::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint,
}
// At this point, one input was not fully signed otherwise we would have exited already
- // Find that input and figure out what went wrong.
- for (unsigned int i = 0; i < tx.vin.size(); i++) {
- // Get the prevout
- CTxIn& txin = tx.vin[i];
- auto coin = coins.find(txin.prevout);
- if (coin == coins.end() || coin->second.IsSpent()) {
- input_errors[i] = "Input not found or already spent";
- continue;
- }
-
- // Check if this input is complete
- SignatureData sigdata = DataFromTransaction(tx, i, coin->second.out);
- if (!sigdata.complete) {
- input_errors[i] = "Unable to sign input, missing keys";
- continue;
- }
- }
return false;
}
@@ -2706,7 +2683,14 @@ OutputType CWallet::TransactionChangeType(const Optional<OutputType>& change_typ
return m_default_address_type;
}
-bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, const CCoinControl& coin_control, bool sign)
+bool CWallet::CreateTransactionInternal(
+ const std::vector<CRecipient>& vecSend,
+ CTransactionRef& tx,
+ CAmount& nFeeRet,
+ int& nChangePosInOut,
+ bilingual_str& error,
+ const CCoinControl& coin_control,
+ bool sign)
{
CAmount nValue = 0;
const OutputType change_type = TransactionChangeType(coin_control.m_change_type ? *coin_control.m_change_type : m_default_change_type, vecSend);
@@ -3061,6 +3045,39 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransac
return true;
}
+bool CWallet::CreateTransaction(
+ const std::vector<CRecipient>& vecSend,
+ CTransactionRef& tx,
+ CAmount& nFeeRet,
+ int& nChangePosInOut,
+ bilingual_str& error,
+ const CCoinControl& coin_control,
+ bool sign)
+{
+ int nChangePosIn = nChangePosInOut;
+ CTransactionRef tx2 = tx;
+ bool res = CreateTransactionInternal(vecSend, tx, nFeeRet, nChangePosInOut, error, coin_control, sign);
+ // try with avoidpartialspends unless it's enabled already
+ if (res && nFeeRet > 0 /* 0 means non-functional fee rate estimation */ && m_max_aps_fee > -1 && !coin_control.m_avoid_partial_spends) {
+ CCoinControl tmp_cc = coin_control;
+ tmp_cc.m_avoid_partial_spends = true;
+ CAmount nFeeRet2;
+ int nChangePosInOut2 = nChangePosIn;
+ bilingual_str error2; // fired and forgotten; if an error occurs, we discard the results
+ if (CreateTransactionInternal(vecSend, tx2, nFeeRet2, nChangePosInOut2, error2, tmp_cc, sign)) {
+ // if fee of this alternative one is within the range of the max fee, we use this one
+ const bool use_aps = nFeeRet2 <= nFeeRet + m_max_aps_fee;
+ WalletLogPrintf("Fee non-grouped = %lld, grouped = %lld, using %s\n", nFeeRet, nFeeRet2, use_aps ? "grouped" : "non-grouped");
+ if (use_aps) {
+ tx = tx2;
+ nFeeRet = nFeeRet2;
+ nChangePosInOut = nChangePosInOut2;
+ }
+ }
+ }
+ return res;
+}
+
void CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm)
{
LOCK(cs_wallet);
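Editor's note: the new CreateTransaction wrapper above builds the transaction a second time with avoid-partial-spends grouping and keeps that candidate when its fee is at most m_max_aps_fee above the regular fee (and the feature is not disabled with -1). The comparison in isolation, as a small hedged sketch:

    #include <cassert>

    // Returns true when the grouped (avoid-partial-spends) candidate should be used.
    bool UseGroupedCandidate(long long fee_regular, long long fee_grouped, long long max_aps_fee)
    {
        if (max_aps_fee < 0) return false;             // -1 disables the feature
        return fee_grouped <= fee_regular + max_aps_fee;
    }

    int main()
    {
        assert(UseGroupedCandidate(1000, 1000, 0));    // default 0: use it if the fee is unchanged
        assert(UseGroupedCandidate(1000, 1100, 200));  // within the allowed extra fee
        assert(!UseGroupedCandidate(1000, 1300, 200)); // too expensive, keep the regular tx
        assert(!UseGroupedCandidate(1000, 1000, -1));  // feature disabled
    }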
@@ -3183,6 +3200,7 @@ DBErrors CWallet::ZapWalletTx(std::list<CWalletTx>& vWtx)
bool CWallet::SetAddressBookWithDB(WalletBatch& batch, const CTxDestination& address, const std::string& strName, const std::string& strPurpose)
{
bool fUpdated = false;
+ bool is_mine;
{
LOCK(cs_wallet);
std::map<CTxDestination, CAddressBookData>::iterator mi = m_address_book.find(address);
@@ -3190,8 +3208,9 @@ bool CWallet::SetAddressBookWithDB(WalletBatch& batch, const CTxDestination& add
m_address_book[address].SetLabel(strName);
if (!strPurpose.empty()) /* update purpose only if requested */
m_address_book[address].purpose = strPurpose;
+ is_mine = IsMine(address) != ISMINE_NO;
}
- NotifyAddressBookChanged(this, address, strName, IsMine(address) != ISMINE_NO,
+ NotifyAddressBookChanged(this, address, strName, is_mine,
strPurpose, (fUpdated ? CT_UPDATED : CT_NEW) );
if (!strPurpose.empty() && !batch.WritePurpose(EncodeDestination(address), strPurpose))
return false;
@@ -3206,17 +3225,16 @@ bool CWallet::SetAddressBook(const CTxDestination& address, const std::string& s
bool CWallet::DelAddressBook(const CTxDestination& address)
{
- // If we want to delete receiving addresses, we need to take care that DestData "used" (and possibly newer DestData) gets preserved (and the "deleted" address transformed into a change entry instead of actually being deleted)
- // NOTE: This isn't a problem for sending addresses because they never have any DestData yet!
- // When adding new DestData, it should be considered here whether to retain or delete it (or move it?).
- if (IsMine(address)) {
- WalletLogPrintf("%s called with IsMine address, NOT SUPPORTED. Please report this bug! %s\n", __func__, PACKAGE_BUGREPORT);
- return false;
- }
-
+ bool is_mine;
{
LOCK(cs_wallet);
-
+ // If we want to delete receiving addresses, we need to take care that DestData "used" (and possibly newer DestData) gets preserved (and the "deleted" address transformed into a change entry instead of actually being deleted)
+ // NOTE: This isn't a problem for sending addresses because they never have any DestData yet!
+ // When adding new DestData, it should be considered here whether to retain or delete it (or move it?).
+ if (IsMine(address)) {
+ WalletLogPrintf("%s called with IsMine address, NOT SUPPORTED. Please report this bug! %s\n", __func__, PACKAGE_BUGREPORT);
+ return false;
+ }
// Delete destdata tuples associated with address
std::string strAddress = EncodeDestination(address);
for (const std::pair<const std::string, std::string> &item : m_address_book[address].destdata)
@@ -3224,9 +3242,10 @@ bool CWallet::DelAddressBook(const CTxDestination& address)
WalletBatch(*database).EraseDestData(strAddress, item.first);
}
m_address_book.erase(address);
+ is_mine = IsMine(address) != ISMINE_NO;
}
- NotifyAddressBookChanged(this, address, "", IsMine(address) != ISMINE_NO, "", CT_DELETED);
+ NotifyAddressBookChanged(this, address, "", is_mine, "", CT_DELETED);
WalletBatch(*database).ErasePurpose(EncodeDestination(address));
return WalletBatch(*database).EraseName(EncodeDestination(address));
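Editor's note: SetAddressBookWithDB and DelAddressBook above now evaluate IsMine inside the cs_wallet critical section and fire NotifyAddressBookChanged only after the lock is released, since IsMine now asserts the lock and callbacks should not run while it is held. A minimal sketch of that pattern with std::mutex and a plain callback (stand-in types only):

    #include <functional>
    #include <iostream>
    #include <mutex>

    struct AddressBookSketch {
        std::mutex cs;
        bool mine = true;
        std::function<void(bool /*is_mine*/)> notify_changed = [](bool) {};

        void Remove()
        {
            bool is_mine;
            {
                std::lock_guard<std::mutex> lock(cs);
                is_mine = mine;        // read state that requires the lock
                mine = false;          // mutate under the same lock
            }
            notify_changed(is_mine);   // call listeners without holding the lock
        }
    };

    int main()
    {
        AddressBookSketch book;
        book.notify_changed = [](bool is_mine) { std::cout << "deleted, is_mine=" << is_mine << "\n"; };
        book.Remove();
    }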
@@ -3878,6 +3897,22 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
walletInstance->m_min_fee = CFeeRate(n);
}
+ if (gArgs.IsArgSet("-maxapsfee")) {
+ const std::string max_aps_fee{gArgs.GetArg("-maxapsfee", "")};
+ CAmount n = 0;
+ if (max_aps_fee == "-1") {
+ n = -1;
+ } else if (!ParseMoney(max_aps_fee, n)) {
+ error = AmountErrMsg("maxapsfee", max_aps_fee);
+ return nullptr;
+ }
+ if (n > HIGH_APS_FEE) {
+ warnings.push_back(AmountHighWarn("-maxapsfee") + Untranslated(" ") +
+ _("This is the maximum transaction fee you pay (in addition to the normal fee) to prioritize partial spend avoidance over regular coin selection."));
+ }
+ walletInstance->m_max_aps_fee = n;
+ }
+
if (gArgs.IsArgSet("-fallbackfee")) {
CAmount nFeePerK = 0;
if (!ParseMoney(gArgs.GetArg("-fallbackfee", ""), nFeePerK)) {
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index a761caf38c..f421de0cf2 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -72,6 +72,16 @@ static const CAmount DEFAULT_FALLBACK_FEE = 0;
static const CAmount DEFAULT_DISCARD_FEE = 10000;
//! -mintxfee default
static const CAmount DEFAULT_TRANSACTION_MINFEE = 1000;
+/**
+ * Maximum additional fee the wallet is allowed to pay to enable partial spend avoidance, even on nodes where this feature is disabled by default.
+ *
+ * A value of -1 disables this feature completely.
+ * A value of 0 (the current default) means to attempt partial spend avoidance and use its result only if the fee remains *unchanged*.
+ * A value > 0 means to do partial spend avoidance if the fee difference against a regular coin selection instance is in the range [0..value].
+ */
+static const CAmount DEFAULT_MAX_AVOIDPARTIALSPEND_FEE = 0;
+//! discourage APS fee higher than this amount
+constexpr CAmount HIGH_APS_FEE{COIN / 10000};
//! minimum recommended increment for BIP 125 replacement txs
static const CAmount WALLET_INCREMENTAL_RELAY_FEE = 5000;
//! Default for -spendzeroconfchange
@@ -217,7 +227,7 @@ static inline void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue)
nOrderPos = -1; // TODO: calculate elsewhere
return;
}
- nOrderPos = atoi64(mapValue["n"].c_str());
+ nOrderPos = atoi64(mapValue["n"]);
}
@@ -719,6 +729,8 @@ private:
// ScriptPubKeyMan::GetID. In many cases it will be the hash of an internal structure
std::map<uint256, std::unique_ptr<ScriptPubKeyMan>> m_spk_managers;
+ bool CreateTransactionInternal(const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CAmount& nFeeRet, int& nChangePosInOut, bilingual_str& error, const CCoinControl& coin_control, bool sign);
+
public:
/*
* Main wallet lock.
@@ -793,7 +805,7 @@ public:
/** Interface for accessing chain state. */
interfaces::Chain& chain() const { assert(m_chain); return *m_chain; }
- const CWalletTx* GetWalletTx(const uint256& hash) const;
+ const CWalletTx* GetWalletTx(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
//! check whether we are allowed to upgrade (or already support) to the named feature
bool CanSupportFeature(enum WalletFeature wf) const override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { AssertLockHeld(cs_wallet); return nWalletMaxVersion >= wf; }
@@ -1008,6 +1020,7 @@ public:
*/
CFeeRate m_fallback_fee{DEFAULT_FALLBACK_FEE};
CFeeRate m_discard_rate{DEFAULT_DISCARD_FEE};
+ CAmount m_max_aps_fee{DEFAULT_MAX_AVOIDPARTIALSPEND_FEE}; //!< note: this is absolute fee, not fee rate
OutputType m_default_address_type{DEFAULT_ADDRESS_TYPE};
/**
* Default output type for change outputs. When unset, automatically choose type
@@ -1038,20 +1051,20 @@ public:
bool GetNewDestination(const OutputType type, const std::string label, CTxDestination& dest, std::string& error);
bool GetNewChangeDestination(const OutputType type, CTxDestination& dest, std::string& error);
- isminetype IsMine(const CTxDestination& dest) const;
- isminetype IsMine(const CScript& script) const;
- isminetype IsMine(const CTxIn& txin) const;
+ isminetype IsMine(const CTxDestination& dest) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ isminetype IsMine(const CScript& script) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ isminetype IsMine(const CTxIn& txin) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
/**
* Returns amount of debit if the input matches the
* filter, otherwise returns 0
*/
CAmount GetDebit(const CTxIn& txin, const isminefilter& filter) const;
- isminetype IsMine(const CTxOut& txout) const;
+ isminetype IsMine(const CTxOut& txout) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
CAmount GetCredit(const CTxOut& txout, const isminefilter& filter) const;
- bool IsChange(const CTxOut& txout) const;
- bool IsChange(const CScript& script) const;
- CAmount GetChange(const CTxOut& txout) const;
- bool IsMine(const CTransaction& tx) const;
+ bool IsChange(const CTxOut& txout) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool IsChange(const CScript& script) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ CAmount GetChange(const CTxOut& txout) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool IsMine(const CTransaction& tx) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
/** should probably be renamed to IsRelevantToMe */
bool IsFromMe(const CTransaction& tx) const;
CAmount GetDebit(const CTransaction& tx, const isminefilter& filter) const;
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 8c409b40cd..fa6814d0d3 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -103,7 +103,7 @@ bool WalletBatch::WriteKey(const CPubKey& vchPubKey, const CPrivKey& vchPrivKey,
vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end());
vchKey.insert(vchKey.end(), vchPrivKey.begin(), vchPrivKey.end());
- return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey.begin(), vchKey.end())), false);
+ return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey)), false);
}
bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey,
@@ -115,7 +115,7 @@ bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey,
}
// Compute a checksum of the encrypted key
- uint256 checksum = Hash(vchCryptedSecret.begin(), vchCryptedSecret.end());
+ uint256 checksum = Hash(vchCryptedSecret);
const auto key = std::make_pair(DBKeys::CRYPTED_KEY, vchPubKey);
if (!WriteIC(key, std::make_pair(vchCryptedSecret, checksum), false)) {
@@ -209,7 +209,7 @@ bool WalletBatch::WriteDescriptorKey(const uint256& desc_id, const CPubKey& pubk
key.insert(key.end(), pubkey.begin(), pubkey.end());
key.insert(key.end(), privkey.begin(), privkey.end());
- return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key.begin(), key.end())), false);
+ return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key)), false);
}
bool WalletBatch::WriteCryptedDescriptorKey(const uint256& desc_id, const CPubKey& pubkey, const std::vector<unsigned char>& secret)
@@ -365,7 +365,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end());
vchKey.insert(vchKey.end(), pkey.begin(), pkey.end());
- if (Hash(vchKey.begin(), vchKey.end()) != hash)
+ if (Hash(vchKey) != hash)
{
strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt";
return false;
@@ -414,7 +414,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
if (!ssValue.eof()) {
uint256 checksum;
ssValue >> checksum;
- if ((checksum_valid = Hash(vchPrivKey.begin(), vchPrivKey.end()) != checksum)) {
+ if ((checksum_valid = Hash(vchPrivKey) != checksum)) {
strErr = "Error reading wallet database: Crypted key corrupt";
return false;
}
@@ -621,7 +621,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
to_hash.insert(to_hash.end(), pubkey.begin(), pubkey.end());
to_hash.insert(to_hash.end(), pkey.begin(), pkey.end());
- if (Hash(to_hash.begin(), to_hash.end()) != hash)
+ if (Hash(to_hash) != hash)
{
strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt";
return false;
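Editor's note: the walletdb changes above replace Hash(x.begin(), x.end()) with Hash(x): the serialize-hash helpers gained an overload taking the contiguous byte container directly. A tiny illustration of that kind of convenience overload, with a trivial FNV-style checksum standing in for the double-SHA256 of the real Hash():

    #include <cstdint>
    #include <iostream>
    #include <vector>

    uint32_t HashRange(const unsigned char* begin, const unsigned char* end)
    {
        uint32_t h = 2166136261u;                    // FNV-1a, a stand-in only
        for (const unsigned char* p = begin; p != end; ++p) h = (h ^ *p) * 16777619u;
        return h;
    }

    // Single-argument overload: accepts any contiguous container of unsigned char.
    template <typename Bytes>
    uint32_t HashObject(const Bytes& bytes)
    {
        return HashRange(bytes.data(), bytes.data() + bytes.size());
    }

    int main()
    {
        const std::vector<unsigned char> key{0xde, 0xad, 0xbe, 0xef};
        std::cout << HashObject(key) << " == " << HashRange(key.data(), key.data() + key.size()) << "\n";
    }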
@@ -1021,7 +1021,7 @@ std::unique_ptr<WalletDatabase> CreateWalletDatabase(const fs::path& path)
/** Return object for accessing dummy database with no read/write capabilities. */
std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase()
{
- return MakeUnique<BerkeleyDatabase>();
+ return MakeUnique<DummyDatabase>();
}
/** Return object for accessing temporary in-memory database. */
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 7c5bf7652b..64d60b1f44 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -261,10 +261,6 @@ public:
DBErrors ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256>& vHashOut);
/* Function to determine if a certain KV/key-type is a key (cryptographical key) type */
static bool IsKeyType(const std::string& strType);
- /* verifies the database environment */
- static bool VerifyEnvironment(const fs::path& wallet_path, bilingual_str& errorStr);
- /* verifies the database file */
- static bool VerifyDatabaseFile(const fs::path& wallet_path, bilingual_str& errorStr);
//! write the hdchain model (external chain child index counter)
bool WriteHDChain(const CHDChain& chain);
diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp
index 9f25b1ae7d..9b51461843 100644
--- a/src/wallet/wallettool.cpp
+++ b/src/wallet/wallettool.cpp
@@ -104,27 +104,6 @@ static void WalletShowInfo(CWallet* wallet_instance)
tfm::format(std::cout, "Address Book: %zu\n", wallet_instance->m_address_book.size());
}
-static bool SalvageWallet(const fs::path& path)
-{
- // Create a Database handle to allow for the db to be initialized before recovery
- std::unique_ptr<WalletDatabase> database = CreateWalletDatabase(path);
-
- // Initialize the environment before recovery
- bilingual_str error_string;
- try {
- database->Verify(error_string);
- } catch (const fs::filesystem_error& e) {
- error_string = Untranslated(strprintf("Error loading wallet. %s", fsbridge::get_filesystem_error_message(e)));
- }
- if (!error_string.original.empty()) {
- tfm::format(std::cerr, "Failed to open wallet for salvage :%s\n", error_string.original);
- return false;
- }
-
- // Perform the recovery
- return RecoverDatabaseFile(path);
-}
-
bool ExecuteWalletToolFunc(const std::string& command, const std::string& name)
{
fs::path path = fs::absolute(name, GetWalletDir());
@@ -147,7 +126,18 @@ bool ExecuteWalletToolFunc(const std::string& command, const std::string& name)
WalletShowInfo(wallet_instance.get());
wallet_instance->Close();
} else if (command == "salvage") {
- return SalvageWallet(path);
+ bilingual_str error;
+ std::vector<bilingual_str> warnings;
+ bool ret = RecoverDatabaseFile(path, error, warnings);
+ if (!ret) {
+ for (const auto& warning : warnings) {
+ tfm::format(std::cerr, "%s\n", warning.original);
+ }
+ if (!error.empty()) {
+ tfm::format(std::cerr, "%s\n", error.original);
+ }
+ }
+ return ret;
}
} else {
tfm::format(std::cerr, "Invalid command: %s\n", command);
diff --git a/src/walletinitinterface.h b/src/walletinitinterface.h
index f4730273f1..a55e02f2dc 100644
--- a/src/walletinitinterface.h
+++ b/src/walletinitinterface.h
@@ -5,6 +5,8 @@
#ifndef BITCOIN_WALLETINITINTERFACE_H
#define BITCOIN_WALLETINITINTERFACE_H
+class ArgsManager;
+
struct NodeContext;
class WalletInitInterface {
@@ -12,7 +14,7 @@ public:
/** Is the wallet component enabled */
virtual bool HasWalletSupport() const = 0;
/** Get wallet help string */
- virtual void AddWalletOptions() const = 0;
+ virtual void AddWalletOptions(ArgsManager& argsman) const = 0;
/** Check wallet parameter interaction */
virtual bool ParameterInteraction() const = 0;
/** Add wallets that should be opened to list of chain clients. */
diff --git a/test/functional/README.md b/test/functional/README.md
index aff5f714f2..0d85a74074 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -127,8 +127,8 @@ Base class for functional tests.
#### [util.py](test_framework/util.py)
Generally useful functions.
-#### [mininode.py](test_framework/mininode.py)
-Basic code to support P2P connectivity to a bitcoind.
+#### [p2p.py](test_framework/p2p.py)
+Test objects for interacting with a bitcoind node over the p2p interface.
#### [script.py](test_framework/script.py)
Utilities for manipulating transaction scripts (originally from python-bitcoinlib)
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
index 5d782026dc..1832043989 100755
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -16,17 +16,16 @@ from collections import defaultdict
# Avoid wildcard * imports
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
-from test_framework.mininode import (
+from test_framework.p2p import (
P2PInterface,
- mininode_lock,
msg_block,
msg_getdata,
+ p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
- wait_until,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
@@ -166,8 +165,8 @@ class ExampleTest(BitcoinTestFramework):
height = self.nodes[0].getblockcount()
- for i in range(10):
- # Use the mininode and blocktools functionality to manually build a block
+ for _ in range(10):
+ # Use the blocktools functionality to manually build a block.
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(self.tip, create_coinbase(height+1), self.block_time)
@@ -203,13 +202,13 @@ class ExampleTest(BitcoinTestFramework):
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# P2PInterface objects.
- wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
+ self.nodes[2].p2p.wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
- with mininode_lock:
+ with p2p_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py
index 75267de80b..17fbf50cc8 100755
--- a/test/functional/feature_abortnode.py
+++ b/test/functional/feature_abortnode.py
@@ -11,7 +11,7 @@
"""
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import wait_until, get_datadir_path, connect_nodes
+from test_framework.util import get_datadir_path, connect_nodes
import os
@@ -41,7 +41,7 @@ class AbortNodeTest(BitcoinTestFramework):
# Check that node0 aborted
self.log.info("Waiting for crash")
- wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=200)
+ self.nodes[0].wait_until_stopped(timeout=200)
self.log.info("Node crashed - now verifying restart fails")
self.nodes[0].assert_start_raises_init_error()
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
index 79777f5582..603d7f5d3b 100755
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -42,7 +42,7 @@ from test_framework.messages import (
msg_block,
msg_headers,
)
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
@@ -123,7 +123,7 @@ class AssumeValidTest(BitcoinTestFramework):
height += 1
# Bury the block 100 deep so the coinbase output is spendable
- for i in range(100):
+ for _ in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
@@ -149,7 +149,7 @@ class AssumeValidTest(BitcoinTestFramework):
height += 1
# Bury the assumed valid block 2100 deep
- for i in range(2100):
+ for _ in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index 19cdc10935..1253c45418 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -141,7 +141,7 @@ class BIP68Test(BitcoinTestFramework):
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
- for i in range(400):
+ for _ in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
@@ -260,7 +260,7 @@ class BIP68Test(BitcoinTestFramework):
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
- for i in range(10):
+ for _ in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 6619d83dc4..efafcfaec3 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -26,7 +26,7 @@ from test_framework.messages import (
uint256_from_compact,
uint256_from_str,
)
-from test_framework.mininode import P2PDataStore
+from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
@@ -53,7 +53,7 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from data import invalid_txs
-# Use this class for tests that require behavior other than normal "mininode" behavior.
+# Use this class for tests that require behavior other than normal p2p behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
@@ -125,7 +125,7 @@ class FullBlockTest(BitcoinTestFramework):
# collect spendable outputs now to avoid cluttering the code later on
out = []
- for i in range(NUM_OUTPUTS_TO_COLLECT):
+ for _ in range(NUM_OUTPUTS_TO_COLLECT):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index fd0330924d..2919b0ea0b 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -10,7 +10,7 @@ Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import CTransaction, msg_block, ToHex
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index c6852ef017..38e95f00e9 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -44,7 +44,7 @@ import time
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import ToHex, CTransaction
-from test_framework.mininode import P2PDataStore
+from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKSEQUENCEVERIFY,
@@ -161,7 +161,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
def generate_blocks(self, number):
test_blocks = []
- for i in range(number):
+ for _ in range(number):
block = self.create_test_block([])
test_blocks.append(block)
self.last_block_time += 600
@@ -209,22 +209,22 @@ class BIP68_112_113Test(BitcoinTestFramework):
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
- for i in range(16):
+ for _ in range(16):
bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
- for j in range(2):
+ for _ in range(2):
inputs = []
- for i in range(16):
+ for _ in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
- for j in range(2):
+ for _ in range(2):
inputs = []
- for i in range(16):
+ for _ in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112diverseinputs.append(inputs)
diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
index 7b38e09bf9..7a2e35c095 100755
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -195,7 +195,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
- for i in range(2):
+ for _ in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount'] * COIN)
@@ -205,7 +205,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# Sanity check -- if we chose inputs that are too small, skip
continue
- for i in range(3):
+ for _ in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index 05fdacd451..f263c93c8a 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -9,7 +9,7 @@ Test that the DERSIG soft-fork activates at (regtest) height 1251.
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import msg_block
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 3cf0fb8f7b..702a1d9995 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -176,9 +176,9 @@ class EstimateFeeTest(BitcoinTestFramework):
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
- for i in range(numblocks):
+ for _ in range(numblocks):
random.shuffle(self.confutxo)
- for j in range(random.randrange(100 - 50, 100 + 50)):
+ for _ in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
@@ -243,7 +243,7 @@ class EstimateFeeTest(BitcoinTestFramework):
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
- for i in range(2):
+ for _ in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 5538d6d3b4..e5c62d1ea7 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -14,7 +14,7 @@ from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, mine_large_block
@@ -104,7 +104,7 @@ class MaxUploadTest(BitcoinTestFramework):
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
- for i in range(3):
+ for _ in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index dd4c318cee..3497b49a19 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -9,7 +9,6 @@ from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, keyhash_to_p2pkh
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- wait_until,
connect_nodes,
disconnect_nodes,
hex_str_to_bytes,
@@ -56,7 +55,7 @@ class NotificationsTest(BitcoinTestFramework):
blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)
# wait at most 10 seconds for expected number of files before reading the content
- wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)
+ self.wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)
# directory content should equal the generated blocks hashes
assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))
@@ -64,7 +63,7 @@ class NotificationsTest(BitcoinTestFramework):
if self.is_wallet_compiled():
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected number of files before reading the content
- wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
+ self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
@@ -78,7 +77,7 @@ class NotificationsTest(BitcoinTestFramework):
self.start_node(1)
connect_nodes(self.nodes[0], 1)
- wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
+ self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
@@ -140,7 +139,7 @@ class NotificationsTest(BitcoinTestFramework):
# TODO: add test for `-alertnotify` large fork notifications
def expect_wallet_notify(self, tx_ids):
- wait_until(lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), timeout=10)
+ self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), timeout=10)
assert_equal(sorted(notify_outputname(self.wallet, tx_id) for tx_id in tx_ids), sorted(os.listdir(self.walletnotify_dir)))
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index e46e5aacc8..db408ab67a 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -20,7 +20,6 @@ from test_framework.util import (
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
- wait_until,
)
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
@@ -136,7 +135,7 @@ class PruneTest(BitcoinTestFramework):
mine_large_blocks(self.nodes[0], 25)
# Wait for blk00000.dat to be pruned
- wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
+ self.wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
self.log.info("Success")
usage = calc_usage(self.prunedir)
@@ -147,7 +146,7 @@ class PruneTest(BitcoinTestFramework):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
- for j in range(12):
+ for _ in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
disconnect_nodes(self.nodes[0], 1)
@@ -250,7 +249,7 @@ class PruneTest(BitcoinTestFramework):
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
# Wait for Node 2 to reorg to proper height
- wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
+ self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
# Verify we can now have the data for a block previously pruned
assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py
index acf551ef69..1b531ad51d 100755
--- a/test/functional/feature_rbf.py
+++ b/test/functional/feature_rbf.py
@@ -376,7 +376,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
outputs = []
- for i in range(MAX_REPLACEMENT_LIMIT+1):
+ for _ in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 5195d20dcb..0842972779 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -126,11 +126,11 @@ class SegWitTest(BitcoinTestFramework):
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
- for v in range(2):
+ for _ in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
- for i in range(5):
+ for _ in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
diff --git a/test/functional/feature_shutdown.py b/test/functional/feature_shutdown.py
index d782d3b1d8..a76e0f1b50 100755
--- a/test/functional/feature_shutdown.py
+++ b/test/functional/feature_shutdown.py
@@ -5,7 +5,7 @@
"""Test bitcoind shutdown."""
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, get_rpc_proxy, wait_until
+from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
@@ -25,7 +25,7 @@ class ShutdownTest(BitcoinTestFramework):
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
- wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
+ self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure the event loop waits for current connections
# to close.
diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py
index 0713925141..e045adac32 100755
--- a/test/functional/feature_versionbits_warning.py
+++ b/test/functional/feature_versionbits_warning.py
@@ -12,9 +12,8 @@ import re
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import msg_block
-from test_framework.mininode import P2PInterface, mininode_lock
+from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import wait_until
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
@@ -91,14 +90,14 @@ class VersionBitsWarningTest(BitcoinTestFramework):
# Generating one block guarantees that we'll get out of IBD
node.generatetoaddress(1, node_deterministic_address)
- wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=mininode_lock)
+ self.wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=p2p_lock)
# Generating one more block will be enough to generate an error.
node.generatetoaddress(1, node_deterministic_address)
# Check that get*info() shows the versionbits unknown rules warning
assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
# Check that the alert file shows the versionbits unknown rules warning
- wait_until(lambda: self.versionbits_in_alert_file(), timeout=60)
+ self.wait_until(lambda: self.versionbits_in_alert_file())
if __name__ == '__main__':
VersionBitsWarningTest().main()
diff --git a/test/functional/mempool_package_onemore.py b/test/functional/mempool_package_onemore.py
index 0739d7e29b..e956fe07d2 100755
--- a/test/functional/mempool_package_onemore.py
+++ b/test/functional/mempool_package_onemore.py
@@ -31,7 +31,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
for (txid, vout) in zip(parent_txids, vouts):
inputs.append({'txid' : txid, 'vout' : vout})
outputs = {}
- for i in range(num_outputs):
+ for _ in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs, 0, True)
signedtx = node.signrawtransactionwithwallet(rawtx)
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 542d24f4be..e74ef8cf16 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -7,13 +7,12 @@
from decimal import Decimal
from test_framework.messages import COIN
-from test_framework.mininode import P2PTxInvStore
+from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
satoshi_round,
- wait_until,
)
# default limits
@@ -48,7 +47,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
- for i in range(num_outputs):
+ for _ in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransactionwithwallet(rawtx)
@@ -70,7 +69,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
witness_chain = []
- for i in range(MAX_ANCESTORS):
+ for _ in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
@@ -245,7 +244,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
chain = [] # save sent txs for the purpose of checking node1's mempool later (see below)
- for i in range(MAX_DESCENDANTS - 1):
+ for _ in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
chain.append(txid)
@@ -269,8 +268,8 @@ class MempoolPackagesTest(BitcoinTestFramework):
# - txs from previous ancestor test (-> custom ancestor limit)
# - parent tx for descendant test
# - txs chained off parent tx (-> custom descendant limit)
- wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
- MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
+ self.wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
+ MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert set(mempool1).issubset(set(mempool0))
@@ -312,7 +311,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
- for i in range(2):
+ for _ in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
@@ -326,7 +325,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Create tx2-7
vout = 1
txid = tx0_id
- for i in range(6):
+ for _ in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py
index 5d00648aed..f73f1a02a2 100755
--- a/test/functional/mempool_persist.py
+++ b/test/functional/mempool_persist.py
@@ -39,15 +39,14 @@ from decimal import Decimal
import os
import time
+from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.mininode import P2PTxInvStore
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
- wait_until,
)
@@ -62,7 +61,7 @@ class MempoolPersistTest(BitcoinTestFramework):
def run_test(self):
self.log.debug("Send 5 transactions from node2 (to its own address)")
tx_creation_time_lower = int(time.time())
- for i in range(5):
+ for _ in range(5):
last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
@@ -172,7 +171,7 @@ class MempoolPersistTest(BitcoinTestFramework):
# check that txn gets broadcast due to unbroadcast logic
conn = node0.add_p2p_connection(P2PTxInvStore())
node0.mockscheduler(16*60) # 15 min + 1 for buffer
- wait_until(lambda: len(conn.get_invs()) == 1)
+ self.wait_until(lambda: len(conn.get_invs()) == 1)
if __name__ == '__main__':
MempoolPersistTest().main()
diff --git a/test/functional/mempool_unbroadcast.py b/test/functional/mempool_unbroadcast.py
index 365d011157..abd5a03d95 100755
--- a/test/functional/mempool_unbroadcast.py
+++ b/test/functional/mempool_unbroadcast.py
@@ -7,7 +7,7 @@ to peers until a GETDATA is received."""
import time
-from test_framework.mininode import P2PTxInvStore
+from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
index 8a703ef009..8baf974a0a 100755
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -73,7 +73,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
n_outputs = size - tx_count
output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001'))
outputs = {}
- for n in range(0, n_outputs):
+ for _ in range(n_outputs):
outputs[self.nodes[0].getnewaddress()] = output_value
else:
output_value = (inputs_value - fee).quantize(Decimal('0.00000001'))
diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py
index 63d1ccfb36..b13740750f 100755
--- a/test/functional/mining_basic.py
+++ b/test/functional/mining_basic.py
@@ -20,7 +20,7 @@ from test_framework.messages import (
CBlockHeader,
BLOCK_HEADER_SIZE,
)
-from test_framework.mininode import P2PDataStore
+from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py
index 5c7e27a3a8..80f262d0d3 100755
--- a/test/functional/p2p_addr_relay.py
+++ b/test/functional/p2p_addr_relay.py
@@ -12,9 +12,7 @@ from test_framework.messages import (
NODE_WITNESS,
msg_addr,
)
-from test_framework.mininode import (
- P2PInterface,
-)
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py
index 6d947ac660..84178d0dd7 100755
--- a/test/functional/p2p_blockfilters.py
+++ b/test/functional/p2p_blockfilters.py
@@ -4,12 +4,13 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_COMPACT_FILTERS (BIP 157/158).
-Tests that a node configured with -blockfilterindex and -peerblockfilters can serve
-cfheaders and cfcheckpts.
+Tests that a node configured with -blockfilterindex and -peerblockfilters signals
+NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts.
"""
from test_framework.messages import (
FILTER_TYPE_BASIC,
+ NODE_COMPACT_FILTERS,
hash256,
msg_getcfcheckpt,
msg_getcfheaders,
@@ -17,13 +18,12 @@ from test_framework.messages import (
ser_uint256,
uint256_from_str,
)
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
- wait_until,
)
class CFiltersClient(P2PInterface):
@@ -64,11 +64,19 @@ class CompactFiltersTest(BitcoinTestFramework):
disconnect_nodes(self.nodes[0], 1)
self.nodes[0].generate(1)
- wait_until(lambda: self.nodes[0].getblockcount() == 1000)
+ self.wait_until(lambda: self.nodes[0].getblockcount() == 1000)
stale_block_hash = self.nodes[0].getblockhash(1000)
self.nodes[1].generate(1001)
- wait_until(lambda: self.nodes[1].getblockcount() == 2000)
+ self.wait_until(lambda: self.nodes[1].getblockcount() == 2000)
+
+ # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
+ assert node0.nServices & NODE_COMPACT_FILTERS != 0
+ assert node1.nServices & NODE_COMPACT_FILTERS == 0
+
+ # Check that the localservices field is as expected.
+ assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0
+ assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0
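+ # (only node 0 is configured with -blockfilterindex/-peerblockfilters here, so only it advertises the bit)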
self.log.info("get cfcheckpt on chain to be re-orged out.")
request = msg_getcfcheckpt(
diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py
index 27e6b669f6..65259f1869 100755
--- a/test/functional/p2p_blocksonly.py
+++ b/test/functional/p2p_blocksonly.py
@@ -5,7 +5,7 @@
"""Test p2p blocksonly"""
from test_framework.messages import msg_tx, CTransaction, FromHex
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index 0b3738b572..fdae7fb68b 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -11,10 +11,10 @@ import random
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_BLOCK, MSG_CMPCT_BLOCK, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
-from test_framework.mininode import mininode_lock, P2PInterface
+from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until, softfork_active
+from test_framework.util import assert_equal, softfork_active
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
@@ -48,12 +48,12 @@ class TestP2PConn(P2PInterface):
self.block_announced = True
self.announced_blockhashes.add(x.hash)
- # Requires caller to hold mininode_lock
+ # Requires caller to hold p2p_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
- with mininode_lock:
+ with p2p_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
@@ -73,7 +73,7 @@ class TestP2PConn(P2PInterface):
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
- wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
+ self.wait_until(self.received_block_announcement, timeout=30)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
@@ -81,7 +81,7 @@ class TestP2PConn(P2PInterface):
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
- wait_until(received_hash, timeout=timeout, lock=mininode_lock)
+ self.wait_until(received_hash, timeout=timeout)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
@@ -89,7 +89,7 @@ class TestP2PConn(P2PInterface):
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
- wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
+ self.wait_for_disconnect(timeout)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
@@ -125,7 +125,7 @@ class CompactBlocksTest(BitcoinTestFramework):
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
- for i in range(10):
+ for _ in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
@@ -154,8 +154,8 @@ class CompactBlocksTest(BitcoinTestFramework):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
- wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
- with mininode_lock:
+ test_node.wait_until(received_sendcmpct, timeout=30)
+ with p2p_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
@@ -170,7 +170,7 @@ class CompactBlocksTest(BitcoinTestFramework):
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
- with mininode_lock:
+ with p2p_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
@@ -266,7 +266,7 @@ class CompactBlocksTest(BitcoinTestFramework):
address = node.getnewaddress()
segwit_tx_generated = False
- for i in range(num_transactions):
+ for _ in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
@@ -294,12 +294,11 @@ class CompactBlocksTest(BitcoinTestFramework):
block.rehash()
# Wait until the block was announced (via compact blocks)
- wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+ test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
# Now fetch and check the compact block
header_and_shortids = None
- with mininode_lock:
- assert "cmpctblock" in test_node.last_message
+ with p2p_lock:
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
@@ -309,12 +308,11 @@ class CompactBlocksTest(BitcoinTestFramework):
inv = CInv(MSG_CMPCT_BLOCK, block_hash)
test_node.send_message(msg_getdata([inv]))
- wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+ test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
# Now fetch and check the compact block
header_and_shortids = None
- with mininode_lock:
- assert "cmpctblock" in test_node.last_message
+ with p2p_lock:
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
@@ -380,7 +378,7 @@ class CompactBlocksTest(BitcoinTestFramework):
if announce == "inv":
test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)]))
- wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
+ test_node.wait_until(lambda: "getheaders" in test_node.last_message, timeout=30)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
@@ -399,7 +397,7 @@ class CompactBlocksTest(BitcoinTestFramework):
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
- with mininode_lock:
+ with p2p_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
@@ -418,7 +416,7 @@ class CompactBlocksTest(BitcoinTestFramework):
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
- for i in range(num_transactions):
+ for _ in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
@@ -441,7 +439,7 @@ class CompactBlocksTest(BitcoinTestFramework):
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
- with mininode_lock:
+ with p2p_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
@@ -506,13 +504,13 @@ class CompactBlocksTest(BitcoinTestFramework):
assert tx.hash in mempool
# Clear out last request.
- with mininode_lock:
+ with p2p_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
- with mininode_lock:
+ with p2p_lock:
# Shouldn't have gotten a request for any transaction
assert "getblocktxn" not in test_node.last_message
@@ -539,7 +537,7 @@ class CompactBlocksTest(BitcoinTestFramework):
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
- with mininode_lock:
+ with p2p_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
@@ -590,10 +588,10 @@ class CompactBlocksTest(BitcoinTestFramework):
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
- wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
+ test_node.wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10)
[tx.calc_sha256() for tx in block.vtx]
- with mininode_lock:
+ with p2p_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
@@ -613,11 +611,11 @@ class CompactBlocksTest(BitcoinTestFramework):
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
- with mininode_lock:
+ with p2p_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
- with mininode_lock:
+ with p2p_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
@@ -627,24 +625,24 @@ class CompactBlocksTest(BitcoinTestFramework):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
- for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
+ for _ in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
- wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+ test_node.wait_until(test_node.received_block_announcement, timeout=30)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
- wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
+ test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
test_node.clear_block_announcement()
node.generate(1)
- wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+ test_node.wait_until(test_node.received_block_announcement, timeout=30)
test_node.clear_block_announcement()
- with mininode_lock:
+ with p2p_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
- wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
- with mininode_lock:
+ test_node.wait_until(lambda: "block" in test_node.last_message, timeout=30)
+ with p2p_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
@@ -672,10 +670,10 @@ class CompactBlocksTest(BitcoinTestFramework):
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
- with mininode_lock:
+ with p2p_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
- with mininode_lock:
+ with p2p_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, listeners):
@@ -691,10 +689,9 @@ class CompactBlocksTest(BitcoinTestFramework):
node.submitblock(ToHex(block))
for l in listeners:
- wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
- with mininode_lock:
+ l.wait_until(lambda: "cmpctblock" in l.last_message, timeout=30)
+ with p2p_lock:
for l in listeners:
- assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
@@ -750,7 +747,7 @@ class CompactBlocksTest(BitcoinTestFramework):
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
- with mininode_lock:
+ with p2p_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py
index 09b9ebeb2d..b7c2a306eb 100755
--- a/test/functional/p2p_disconnect_ban.py
+++ b/test/functional/p2p_disconnect_ban.py
@@ -10,7 +10,6 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
- wait_until,
)
class DisconnectBanTest(BitcoinTestFramework):
@@ -28,7 +27,7 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban(subnet="127.0.0.1", command="add")
- wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
+ self.wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
@@ -95,7 +94,7 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
- wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
+ self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
@@ -106,7 +105,7 @@ class DisconnectBanTest(BitcoinTestFramework):
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
- wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
+ self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
diff --git a/test/functional/p2p_dos_header_tree.py b/test/functional/p2p_dos_header_tree.py
index f8552cf53d..7dd8c3146b 100755
--- a/test/functional/p2p_dos_header_tree.py
+++ b/test/functional/p2p_dos_header_tree.py
@@ -8,7 +8,7 @@ from test_framework.messages import (
CBlockHeader,
FromHex,
)
-from test_framework.mininode import (
+from test_framework.p2p import (
P2PInterface,
msg_headers,
)
diff --git a/test/functional/p2p_eviction.py b/test/functional/p2p_eviction.py
index b2b3a89aab..72a255991c 100755
--- a/test/functional/p2p_eviction.py
+++ b/test/functional/p2p_eviction.py
@@ -15,11 +15,11 @@ Therefore, this test is limited to the remaining protection criteria.
import time
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.mininode import P2PInterface, P2PDataStore
-from test_framework.util import assert_equal, wait_until
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CTransaction, FromHex, msg_pong, msg_tx
+from test_framework.p2p import P2PDataStore, P2PInterface
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
class SlowP2PDataStore(P2PDataStore):
@@ -92,7 +92,7 @@ class P2PEvict(BitcoinTestFramework):
for _ in range(8):
fastpeer = node.add_p2p_connection(P2PInterface())
current_peer += 1
- wait_until(lambda: "ping" in fastpeer.last_message, timeout=10)
+ self.wait_until(lambda: "ping" in fastpeer.last_message, timeout=10)
# Make sure by asking the node what the actual min pings are
peerinfo = node.getpeerinfo()
diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py
index 73afe9adc4..0c07b56a69 100755
--- a/test/functional/p2p_feefilter.py
+++ b/test/functional/p2p_feefilter.py
@@ -5,10 +5,9 @@
"""Test processing of feefilter messages."""
from decimal import Decimal
-import time
from test_framework.messages import MSG_TX, MSG_WTX, msg_feefilter
-from test_framework.mininode import mininode_lock, P2PInterface
+from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
@@ -17,16 +16,6 @@ def hashToHex(hash):
return format(hash, '064x')
-# Wait up to 60 secs to see if the testnode has received all the expected invs
-def allInvsMatch(invsExpected, testnode):
- for x in range(60):
- with mininode_lock:
- if (sorted(invsExpected) == sorted(testnode.txinvs)):
- return True
- time.sleep(1)
- return False
-
-
class FeefilterConn(P2PInterface):
feefilter_received = False
@@ -34,7 +23,7 @@ class FeefilterConn(P2PInterface):
self.feefilter_received = True
def assert_feefilter_received(self, recv: bool):
- with mininode_lock:
+ with p2p_lock:
assert_equal(self.feefilter_received, recv)
@@ -48,8 +37,12 @@ class TestP2PConn(P2PInterface):
if (i.type == MSG_TX) or (i.type == MSG_WTX):
self.txinvs.append(hashToHex(i.hash))
+ def wait_for_invs_to_match(self, invs_expected):
+ invs_expected.sort()
+ self.wait_until(lambda: invs_expected == sorted(self.txinvs))
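+ # (P2PInterface.wait_until evaluates the predicate with p2p_lock held, so txinvs is read safely)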
+
def clear_invs(self):
- with mininode_lock:
+ with p2p_lock:
self.txinvs = []
@@ -61,7 +54,12 @@ class FeeFilterTest(BitcoinTestFramework):
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
- self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]] * self.num_nodes
+ # grant noban permission to all peers to speed up tx relay / mempool sync
+ self.extra_args = [[
+ "-minrelaytxfee=0.00000100",
+ "-mintxfee=0.00000100",
+ "-whitelist=noban@127.0.0.1",
+ ]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -88,26 +86,24 @@ class FeeFilterTest(BitcoinTestFramework):
conn = self.nodes[0].add_p2p_connection(TestP2PConn())
- # Test that invs are received by test connection for all txs at
- # feerate of .2 sat/byte
+ self.log.info("Test txs paying 0.2 sat/byte are received by test connection")
node1.settxfee(Decimal("0.00000200"))
- txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
- assert allInvsMatch(txids, conn)
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
+ conn.wait_for_invs_to_match(txids)
conn.clear_invs()
- # Set a filter of .15 sat/byte on test connection
+ # Set a fee filter of 0.15 sat/byte on test connection
conn.send_and_ping(msg_feefilter(150))
- # Test that txs are still being received by test connection (paying .15 sat/byte)
+ self.log.info("Test txs paying 0.15 sat/byte are received by test connection")
node1.settxfee(Decimal("0.00000150"))
- txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
- assert allInvsMatch(txids, conn)
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
+ conn.wait_for_invs_to_match(txids)
conn.clear_invs()
- # Change tx fee rate to .1 sat/byte and test they are no longer received
- # by the test connection
+ self.log.info("Test txs paying 0.1 sat/byte are no longer received by test connection")
node1.settxfee(Decimal("0.00000100"))
- [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
@@ -119,13 +115,13 @@ class FeeFilterTest(BitcoinTestFramework):
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
- assert allInvsMatch(txids, conn)
+ conn.wait_for_invs_to_match(txids)
conn.clear_invs()
- # Remove fee filter and check that txs are received again
+ self.log.info("Remove fee filter and check txs are received again")
conn.send_and_ping(msg_feefilter(0))
- txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
- assert allInvsMatch(txids, conn)
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
+ conn.wait_for_invs_to_match(txids)
conn.clear_invs()
diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py
index 741da3be31..613d96eaad 100755
--- a/test/functional/p2p_filter.py
+++ b/test/functional/p2p_filter.py
@@ -19,7 +19,7 @@ from test_framework.messages import (
msg_mempool,
msg_version,
)
-from test_framework.mininode import P2PInterface, mininode_lock
+from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE
from test_framework.test_framework import BitcoinTestFramework
@@ -60,22 +60,22 @@ class P2PBloomFilter(P2PInterface):
@property
def tx_received(self):
- with mininode_lock:
+ with p2p_lock:
return self._tx_received
@tx_received.setter
def tx_received(self, value):
- with mininode_lock:
+ with p2p_lock:
self._tx_received = value
@property
def merkleblock_received(self):
- with mininode_lock:
+ with p2p_lock:
return self._merkleblock_received
@merkleblock_received.setter
def merkleblock_received(self, value):
- with mininode_lock:
+ with p2p_lock:
self._merkleblock_received = value
@@ -218,7 +218,6 @@ class FilterTest(BitcoinTestFramework):
# Add peer but do not send version yet
filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False)
# Send version with fRelay=False
- filter_peer_without_nrelay.wait_until(lambda: filter_peer_without_nrelay.is_connected, timeout=10)
version_without_fRelay = msg_version()
version_without_fRelay.nRelay = 0
filter_peer_without_nrelay.send_message(version_without_fRelay)
diff --git a/test/functional/p2p_fingerprint.py b/test/functional/p2p_fingerprint.py
index d743abe681..aaf862e6c8 100755
--- a/test/functional/p2p_fingerprint.py
+++ b/test/functional/p2p_fingerprint.py
@@ -12,7 +12,7 @@ import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
-from test_framework.mininode import (
+from test_framework.p2p import (
P2PInterface,
msg_headers,
msg_block,
@@ -22,9 +22,9 @@ from test_framework.mininode import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- wait_until,
)
+
class P2PFingerprintTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
@@ -102,12 +102,12 @@ class P2PFingerprintTest(BitcoinTestFramework):
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
test_function = lambda: self.last_block_equals(stale_hash, node0)
- wait_until(test_function, timeout=3)
+ self.wait_until(test_function, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
test_function = lambda: self.last_header_equals(stale_hash, node0)
- wait_until(test_function, timeout=3)
+ self.wait_until(test_function, timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
@@ -138,11 +138,11 @@ class P2PFingerprintTest(BitcoinTestFramework):
self.send_block_request(block_hash, node0)
test_function = lambda: self.last_block_equals(block_hash, node0)
- wait_until(test_function, timeout=3)
+ self.wait_until(test_function, timeout=3)
self.send_header_request(block_hash, node0)
test_function = lambda: self.last_header_equals(block_hash, node0)
- wait_until(test_function, timeout=3)
+ self.wait_until(test_function, timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main()
diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py
new file mode 100755
index 0000000000..6622ea9ec2
--- /dev/null
+++ b/test/functional/p2p_getaddr_caching.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test addr response caching"""
+
+import time
+from test_framework.messages import (
+ CAddress,
+ NODE_NETWORK,
+ NODE_WITNESS,
+ msg_addr,
+ msg_getaddr,
+)
+from test_framework.p2p import (
+ P2PInterface,
+ p2p_lock
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+
+MAX_ADDR_TO_SEND = 1000
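+# 1000 is the maximum number of addresses carried in a single addr message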
+
+def gen_addrs(n):
+ addrs = []
+ for i in range(n):
+ addr = CAddress()
+ addr.time = int(time.time())
+ addr.nServices = NODE_NETWORK | NODE_WITNESS
+ # Use the first two octets to spread addresses across different AddrMan buckets
+ first_octet = i >> 8
+ second_octet = i % 256
+ addr.ip = "{}.{}.1.1".format(first_octet, second_octet)
+ addr.port = 8333
+ addrs.append(addr)
+ return addrs
+
+class AddrReceiver(P2PInterface):
+
+ def __init__(self):
+ super().__init__()
+ self.received_addrs = None
+
+ def get_received_addrs(self):
+ with p2p_lock:
+ return self.received_addrs
+
+ def on_addr(self, message):
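+ # message handlers such as on_addr are dispatched with p2p_lock held,
+ # which is why get_received_addrs above takes the same lock before reading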
+ self.received_addrs = []
+ for addr in message.addrs:
+ self.received_addrs.append(addr.ip)
+
+ def addr_received(self):
+ return self.received_addrs is not None
+
+
+class AddrTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def run_test(self):
+ self.log.info('Create connection that sends and requests addr messages')
+ addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
+
+ msg_send_addrs = msg_addr()
+ self.log.info('Fill peer AddrMan with a lot of records')
+ # Since these addrs are sent from the same source, not all of them will be stored,
+ # because we allocate a limited number of AddrMan buckets per addr source.
+ total_addrs = 10000
+ addrs = gen_addrs(total_addrs)
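+ # Send the addresses in MAX_ADDR_TO_SEND-sized batches; send_and_ping waits for the
+ # pong, so each batch is processed by the node before the next one is sent.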
+ for i in range(int(total_addrs/MAX_ADDR_TO_SEND)):
+ msg_send_addrs.addrs = addrs[i * MAX_ADDR_TO_SEND:(i + 1) * MAX_ADDR_TO_SEND]
+ addr_source.send_and_ping(msg_send_addrs)
+
+ responses = []
+ self.log.info('Send many addr requests within a short time to receive the same cached response')
+ N = 5
+ cur_mock_time = int(time.time())
+ for i in range(N):
+ addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
+ addr_receiver.send_and_ping(msg_getaddr())
+ # Trigger response
+ cur_mock_time += 5 * 60
+ self.nodes[0].setmocktime(cur_mock_time)
+ addr_receiver.wait_until(addr_receiver.addr_received)
+ responses.append(addr_receiver.get_received_addrs())
+ for response in responses[1:]:
+ assert_equal(response, responses[0])
+ assert(len(response) < MAX_ADDR_TO_SEND)
+
+ cur_mock_time += 3 * 24 * 60 * 60
+ self.nodes[0].setmocktime(cur_mock_time)
+
+ self.log.info('After enough time has passed, expect a new response to the addr request')
+ last_addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
+ last_addr_receiver.send_and_ping(msg_getaddr())
+ # Trigger response
+ cur_mock_time += 5 * 60
+ self.nodes[0].setmocktime(cur_mock_time)
+ last_addr_receiver.wait_until(last_addr_receiver.addr_received)
+ # The new response should be different
+ assert(set(responses[0]) != set(last_addr_receiver.get_received_addrs()))
+
+
+if __name__ == '__main__':
+ AddrTest().main()
diff --git a/test/functional/p2p_getdata.py b/test/functional/p2p_getdata.py
index d1b11c2c61..51921a8ab5 100755
--- a/test/functional/p2p_getdata.py
+++ b/test/functional/p2p_getdata.py
@@ -9,7 +9,7 @@ from test_framework.messages import (
CInv,
msg_getdata,
)
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py
index e280a62997..b2c3c5d45f 100755
--- a/test/functional/p2p_invalid_block.py
+++ b/test/functional/p2p_invalid_block.py
@@ -14,7 +14,7 @@ import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
-from test_framework.mininode import P2PDataStore
+from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
diff --git a/test/functional/p2p_invalid_locator.py b/test/functional/p2p_invalid_locator.py
index 0155eb21f0..24328c2919 100755
--- a/test/functional/p2p_invalid_locator.py
+++ b/test/functional/p2p_invalid_locator.py
@@ -6,7 +6,7 @@
"""
from test_framework.messages import msg_getheaders, msg_getblocks, MAX_LOCATOR_SZ
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index d9a9ae5188..fe57057a83 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -17,14 +17,13 @@ from test_framework.messages import (
MSG_TX,
ser_string,
)
-from test_framework.mininode import (
+from test_framework.p2p import (
P2PDataStore,
P2PInterface,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- wait_until,
)
VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5 # Account for the 5-byte length prefix
@@ -70,7 +69,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
before = int(self.nodes[0].getnettotals()['totalbytesrecv'])
conn.send_raw_message(msg[:cut_pos])
# Wait until node has processed the first half of the message
- wait_until(lambda: int(self.nodes[0].getnettotals()['totalbytesrecv']) != before)
+ self.wait_until(lambda: int(self.nodes[0].getnettotals()['totalbytesrecv']) != before)
middle = int(self.nodes[0].getnettotals()['totalbytesrecv'])
# If this assert fails, we've hit an unlikely race
# where the test framework sent a message in between the two halves
diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py
index c70a892463..a0ef6c9d6e 100755
--- a/test/functional/p2p_invalid_tx.py
+++ b/test/functional/p2p_invalid_tx.py
@@ -13,11 +13,10 @@ from test_framework.messages import (
CTxIn,
CTxOut,
)
-from test_framework.mininode import P2PDataStore
+from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- wait_until,
)
from data import invalid_txs
@@ -146,7 +145,7 @@ class InvalidTxRequestTest(BitcoinTestFramework):
# tx_orphan_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
# tx_orphan_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
- wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
+ self.wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))
self.log.info('Test orphan pool overflow')
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
index fe6e236fc4..4978aa3845 100755
--- a/test/functional/p2p_leak.py
+++ b/test/functional/p2p_leak.py
@@ -15,21 +15,19 @@ import time
from test_framework.messages import (
msg_getaddr,
msg_ping,
- msg_verack,
msg_version,
)
-from test_framework.mininode import mininode_lock, P2PInterface
+from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
- wait_until,
)
DISCOURAGEMENT_THRESHOLD = 100
-class CLazyNode(P2PInterface):
+class LazyPeer(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
@@ -42,6 +40,7 @@ class CLazyNode(P2PInterface):
def on_open(self):
self.ever_connected = True
+ # Does not respond to "version" with "verack"
def on_version(self, message): self.bad_message(message)
def on_verack(self, message): self.bad_message(message)
def on_inv(self, message): self.bad_message(message)
@@ -63,25 +62,9 @@ class CLazyNode(P2PInterface):
def on_getblocktxn(self, message): self.bad_message(message)
def on_blocktxn(self, message): self.bad_message(message)
-# Node that never sends a version. We'll use this to send a bunch of messages
-# anyway, and eventually get disconnected.
-class CNodeNoVersionMisbehavior(CLazyNode):
- # Send enough veracks without a message to reach the peer discouragement
- # threshold. This should get us disconnected. NOTE: implementation-specific
- # test; update if our discouragement policy for peer misbehavior changes.
- def on_open(self):
- super().on_open()
- for _ in range(DISCOURAGEMENT_THRESHOLD):
- self.send_message(msg_verack())
-
-# Node that never sends a version. This one just sits idle and hopes to receive
-# any message (it shouldn't!)
-class CNodeNoVersionIdle(CLazyNode):
- def __init__(self):
- super().__init__()
-# Node that sends a version but not a verack.
-class CNodeNoVerackIdle(CLazyNode):
+# Peer that sends a version but not a verack.
+class NoVerackIdlePeer(LazyPeer):
def __init__(self):
self.version_received = False
super().__init__()
@@ -100,6 +83,7 @@ class P2PVersionStore(P2PInterface):
version_received = None
def on_version(self, msg):
+ # Responds with an appropriate verack
super().on_version(msg)
self.version_received = msg
@@ -109,34 +93,45 @@ class P2PLeakTest(BitcoinTestFramework):
self.num_nodes = 1
def run_test(self):
- no_version_disconnect_node = self.nodes[0].add_p2p_connection(
- CNodeNoVersionMisbehavior(), send_version=False, wait_for_verack=False)
- no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False, wait_for_verack=False)
- no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle(), wait_for_verack=False)
+ # Peer that never sends a version. We will send a bunch of messages
+ # from this peer anyway and verify eventual disconnection.
+ no_version_disconnect_peer = self.nodes[0].add_p2p_connection(
+ LazyPeer(), send_version=False, wait_for_verack=False)
+
+ # Another peer that never sends a version, nor any other messages. It shouldn't receive anything from the node.
+ no_version_idle_peer = self.nodes[0].add_p2p_connection(LazyPeer(), send_version=False, wait_for_verack=False)
+
+ # Peer that sends a version but not a verack.
+ no_verack_idle_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), wait_for_verack=False)
+
+ # Send enough ping messages (any non-version message will do) prior to sending
+ # version to reach the peer discouragement threshold. This should get us disconnected.
+ for _ in range(DISCOURAGEMENT_THRESHOLD):
+ no_version_disconnect_peer.send_message(msg_ping())
- # Wait until we got the verack in response to the version. Though, don't wait for the other node to receive the
+ # Wait until we got the verack in response to the version. Though, don't wait for the node to receive the
# verack, since we never sent one
- no_verack_idlenode.wait_for_verack()
+ no_verack_idle_peer.wait_for_verack()
- wait_until(lambda: no_version_disconnect_node.ever_connected, timeout=10, lock=mininode_lock)
- wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock)
- wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock)
+ self.wait_until(lambda: no_version_disconnect_peer.ever_connected, timeout=10, lock=p2p_lock)
+ self.wait_until(lambda: no_version_idle_peer.ever_connected, timeout=10, lock=p2p_lock)
+ self.wait_until(lambda: no_verack_idle_peer.version_received, timeout=10, lock=p2p_lock)
- # Mine a block and make sure that it's not sent to the connected nodes
- self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
+ # Mine a block and make sure that it's not sent to the connected peers
+ self.nodes[0].generate(nblocks=1)
# Give the node enough time to possibly leak out a message
time.sleep(5)
- # Expect this node to be disconnected for misbehavior
- assert not no_version_disconnect_node.is_connected
+ # Expect this peer to be disconnected for misbehavior
+ assert not no_version_disconnect_peer.is_connected
self.nodes[0].disconnect_p2ps()
# Make sure no unexpected messages came in
- assert no_version_disconnect_node.unexpected_msg == False
- assert no_version_idlenode.unexpected_msg == False
- assert no_verack_idlenode.unexpected_msg == False
+ assert no_version_disconnect_peer.unexpected_msg == False
+ assert no_version_idle_peer.unexpected_msg == False
+ assert no_verack_idle_peer.unexpected_msg == False
self.log.info('Check that the version message does not leak the local address of the node')
p2p_version_store = self.nodes[0].add_p2p_connection(P2PVersionStore())
@@ -149,14 +144,13 @@ class P2PLeakTest(BitcoinTestFramework):
assert_equal(ver.nStartingHeight, 201)
assert_equal(ver.nRelay, 1)
- self.log.info('Check that old nodes are disconnected')
- p2p_old_node = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
+ self.log.info('Check that old peers are disconnected')
+ p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
old_version_msg = msg_version()
old_version_msg.nVersion = 31799
- wait_until(lambda: p2p_old_node.is_connected)
with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
- p2p_old_node.send_message(old_version_msg)
- p2p_old_node.wait_for_disconnect()
+ p2p_old_peer.send_message(old_version_msg)
+ p2p_old_peer.wait_for_disconnect()
if __name__ == '__main__':
diff --git a/test/functional/p2p_leak_tx.py b/test/functional/p2p_leak_tx.py
index da30ad5977..9e761db03f 100755
--- a/test/functional/p2p_leak_tx.py
+++ b/test/functional/p2p_leak_tx.py
@@ -5,7 +5,7 @@
"""Test that we don't leak txs to inbound peers that we haven't yet announced to"""
from test_framework.messages import msg_getdata, CInv, MSG_TX
-from test_framework.mininode import P2PDataStore
+from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
diff --git a/test/functional/p2p_nobloomfilter_messages.py b/test/functional/p2p_nobloomfilter_messages.py
index accc5dc23c..c2311cb197 100755
--- a/test/functional/p2p_nobloomfilter_messages.py
+++ b/test/functional/p2p_nobloomfilter_messages.py
@@ -12,7 +12,7 @@ Test that, when bloom filters are not enabled, peers are disconnected if:
"""
from test_framework.messages import msg_mempool, msg_filteradd, msg_filterload, msg_filterclear
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py
index a2f6ea538c..2c9cbea5e4 100755
--- a/test/functional/p2p_node_network_limited.py
+++ b/test/functional/p2p_node_network_limited.py
@@ -9,13 +9,12 @@ and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
-from test_framework.mininode import P2PInterface, mininode_lock
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
disconnect_nodes,
connect_nodes,
- wait_until,
)
@@ -28,7 +27,7 @@ class P2PIgnoreInv(P2PInterface):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
+ self.wait_until(test_function, timeout=timeout)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(MSG_BLOCK, int(blockhash, 16)))
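The wait_for_addr change above shows the new calling convention: helpers on a P2PInterface subclass use self.wait_until, which acquires p2p_lock internally, so tests no longer pass a lock= argument. Reads of recorded state from the test thread still need the lock. A short sketch of the calling side, assuming a connected P2PIgnoreInv instance named peer:

from test_framework.messages import NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.p2p import p2p_lock

peer.wait_for_addr()              # waits via self.wait_until(); no lock= argument
with p2p_lock:                    # guard reads of state set on the event loop
    services = peer.firstAddrnServices
assert services & NODE_NETWORK_LIMITED
assert services & NODE_WITNESS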
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
index 32a795e345..3ec36edb41 100755
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -13,7 +13,7 @@ from test_framework.messages import (
CTxInWitness,
FromHex,
)
-from test_framework.mininode import P2PDataStore
+from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
OP_TRUE,
@@ -24,7 +24,6 @@ from test_framework.util import (
assert_equal,
connect_nodes,
p2p_port,
- wait_until,
)
@@ -96,7 +95,7 @@ class P2PPermissionsTests(BitcoinTestFramework):
self.checkpermission(
# all permission added
["-whitelist=all@127.0.0.1"],
- ["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download"],
+ ["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download", "addr"],
False)
self.stop_node(1)
@@ -109,7 +108,7 @@ class P2PPermissionsTests(BitcoinTestFramework):
self.sync_all()
self.log.debug("Create a connection from a forcerelay peer that rebroadcasts raw txs")
- # A python mininode is needed to send the raw transaction directly. If a full node was used, it could only
+ # A test framework p2p connection is needed to send the raw transaction directly. If a full node was used, it could only
# rebroadcast via the inv-getdata mechanism. However, even for forcerelay connections, a full node would
# currently not request a txid that is already in the mempool.
self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
@@ -137,7 +136,7 @@ class P2PPermissionsTests(BitcoinTestFramework):
connect_nodes(self.nodes[1], 0)
with self.nodes[1].assert_debug_log(["Force relaying tx {} from peer=0".format(txid)]):
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
- wait_until(lambda: txid in self.nodes[0].getrawmempool())
+ self.wait_until(lambda: txid in self.nodes[0].getrawmempool())
self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
tx.vout[0].nValue += 1
diff --git a/test/functional/p2p_ping.py b/test/functional/p2p_ping.py
index e00af88cc4..888e986fba 100755
--- a/test/functional/p2p_ping.py
+++ b/test/functional/p2p_ping.py
@@ -7,13 +7,8 @@
import time
-from test_framework.messages import (
- msg_pong,
-)
-from test_framework.mininode import (
- P2PInterface,
- wait_until,
-)
+from test_framework.messages import msg_pong
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
@@ -78,7 +73,7 @@ class PingPongTest(BitcoinTestFramework):
with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']):
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
self.mock_forward(9)
# Send the wrong pong
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1))
@@ -93,27 +88,27 @@ class PingPongTest(BitcoinTestFramework):
assert 'ping' not in no_pong_node.last_message
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 29
self.mock_forward(ping_delay)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that minping is decreased after a fast roundtrip')
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 9
self.mock_forward(ping_delay)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that peer is disconnected after ping timeout')
assert 'ping' not in no_pong_node.last_message
self.nodes[0].ping()
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']):
self.mock_forward(20 * 60 + 1)
time.sleep(4) # peertimeout + 1
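The sequence exercised above is: advance mock time past PING_INTERVAL so the node sends a ping, then reply with a pong carrying the same nonce (or a wrong one to hit the 'Nonce mismatch' log). A hedged sketch of that exchange, assuming a connected P2PInterface named peer:

from test_framework.messages import msg_pong
from test_framework.p2p import p2p_lock

peer.wait_until(lambda: 'ping' in peer.last_message)
with p2p_lock:
    nonce = peer.last_message.pop('ping').nonce
peer.send_and_ping(msg_pong(nonce))        # matching nonce: node records pingtime
# msg_pong(nonce - 1) instead would trigger the "Nonce mismatch" debug log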
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 9915b844d1..2155c1d0e7 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -42,9 +42,9 @@ from test_framework.messages import (
uint256_from_str,
FromHex,
)
-from test_framework.mininode import (
+from test_framework.p2p import (
P2PInterface,
- mininode_lock,
+ p2p_lock,
)
from test_framework.script import (
CScript,
@@ -83,7 +83,6 @@ from test_framework.util import (
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
- wait_until,
)
# The versionbit bit used to signal activation of SegWit
@@ -153,8 +152,8 @@ class TestP2PConn(P2PInterface):
self.lastgetdata = []
self.wtxidrelay = wtxidrelay
- # Avoid sending out msg_getdata in the mininode thread as a reply to invs.
- # They are not needed and would only lead to races because we send msg_getdata out in the test thread
+ # Don't send getdata message replies to invs automatically.
+ # We'll send the getdata messages explicitly in the test logic.
def on_inv(self, message):
pass
@@ -174,7 +173,10 @@ class TestP2PConn(P2PInterface):
self.last_wtxidrelay.append(message)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True, use_wtxid=False):
- with mininode_lock:
+ if success:
+ # sanity check
+ assert (self.wtxidrelay and use_wtxid) or (not self.wtxidrelay and not use_wtxid)
+ with p2p_lock:
self.last_message.pop("getdata", None)
if use_wtxid:
wtxid = tx.calc_sha256(True)
@@ -192,7 +194,7 @@ class TestP2PConn(P2PInterface):
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
- with mininode_lock:
+ with p2p_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
@@ -206,7 +208,7 @@ class TestP2PConn(P2PInterface):
self.wait_for_getdata([block.sha256])
def request_block(self, blockhash, inv_type, timeout=60):
- with mininode_lock:
+ with p2p_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
@@ -259,6 +261,8 @@ class SegWitTest(BitcoinTestFramework):
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
+ # self.std_wtx_node is for testing node1 with wtxid relay
+ self.std_wtx_node = self.nodes[1].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
@@ -942,7 +946,7 @@ class SegWitTest(BitcoinTestFramework):
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
- for i in range(NUM_OUTPUTS):
+ for _ in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
@@ -952,7 +956,7 @@ class SegWitTest(BitcoinTestFramework):
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
- for i in range(NUM_OUTPUTS):
+ for _ in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
@@ -1199,7 +1203,7 @@ class SegWitTest(BitcoinTestFramework):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
- for i in range(10):
+ for _ in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
@@ -1319,9 +1323,14 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
- # Node will not be blinded to the transaction
+ # The node will not be blinded to the transaction: it keeps requesting it
+ # when it is announced via txid relay.
+ # The node will be blinded to the wtxid announcement, however.
self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True, success=False)
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
@@ -1372,7 +1381,7 @@ class SegWitTest(BitcoinTestFramework):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
- for i in range(NUM_SEGWIT_VERSIONS):
+ for _ in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
@@ -1418,7 +1427,7 @@ class SegWitTest(BitcoinTestFramework):
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
- # Spend everything in temp_utxo back to an OP_TRUE output.
+ # Spend everything in temp_utxo into a segwit v1 output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
@@ -1426,8 +1435,16 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
- tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
+ tx3.vout.append(CTxOut(total_value - 1000, script_pubkey))
tx3.rehash()
+
+ # First we test this transaction against the fRequireStandard=true node,
+ # making sure the txid is added to the reject filter
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs")
+ # Now the node will no longer ask for getdata of this transaction when advertised by same txid
+ self.std_node.announce_tx_and_wait_for_getdata(tx3, timeout=5, success=False)
+
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
@@ -1646,7 +1663,7 @@ class SegWitTest(BitcoinTestFramework):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
- for i in range(NUM_SIGHASH_TESTS):
+ for _ in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
@@ -1676,7 +1693,7 @@ class SegWitTest(BitcoinTestFramework):
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
- for i in range(num_outputs):
+ for _ in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
@@ -1974,7 +1991,7 @@ class SegWitTest(BitcoinTestFramework):
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
- for i in range(outputs):
+ for _ in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
@@ -2060,7 +2077,7 @@ class SegWitTest(BitcoinTestFramework):
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
- for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
+ for _ in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
@@ -2096,7 +2113,7 @@ class SegWitTest(BitcoinTestFramework):
# Check wtxidrelay feature negotiation message through connecting a new peer
def received_wtxidrelay():
return (len(self.wtx_node.last_wtxidrelay) > 0)
- wait_until(received_wtxidrelay, timeout=60, lock=mininode_lock)
+ self.wtx_node.wait_until(received_wtxidrelay)
# Create a Segwit output from the latest UTXO
# and announce it to the network
@@ -2120,30 +2137,30 @@ class SegWitTest(BitcoinTestFramework):
# Announce Segwit transaction with wtxid
# and wait for getdata
self.wtx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=True)
- with mininode_lock:
+ with p2p_lock:
lgd = self.wtx_node.lastgetdata[:]
assert_equal(lgd, [CInv(MSG_WTX, tx2.calc_sha256(True))])
# Announce Segwit transaction from non wtxidrelay peer
# and wait for getdata
self.tx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=False)
- with mininode_lock:
+ with p2p_lock:
lgd = self.tx_node.lastgetdata[:]
assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx2.sha256)])
# Send tx2 through; it's an orphan so won't be accepted
- with mininode_lock:
- self.tx_node.last_message.pop("getdata", None)
- test_transaction_acceptance(self.nodes[0], self.tx_node, tx2, with_witness=True, accepted=False)
+ with p2p_lock:
+ self.wtx_node.last_message.pop("getdata", None)
+ test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False)
- # Expect a request for parent (tx) due to use of non-WTX peer
- self.tx_node.wait_for_getdata([tx.sha256], 60)
- with mininode_lock:
- lgd = self.tx_node.lastgetdata[:]
+ # Expect a request for parent (tx) by txid despite use of WTX peer
+ self.wtx_node.wait_for_getdata([tx.sha256], 60)
+ with p2p_lock:
+ lgd = self.wtx_node.lastgetdata[:]
assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx.sha256)])
# Send tx through
- test_transaction_acceptance(self.nodes[0], self.tx_node, tx, with_witness=False, accepted=True)
+ test_transaction_acceptance(self.nodes[0], self.wtx_node, tx, with_witness=False, accepted=True)
# Check tx2 is there now
assert_equal(tx2.hash in self.nodes[0].getrawmempool(), True)
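The wtxid-relay logic above comes down to which inventory type the node requests back: a peer that negotiated wtxidrelay is asked for MSG_WTX (the wtxid), while a legacy peer is asked for MSG_TX|MSG_WITNESS_FLAG (the txid). A hedged sketch of announcing the same transaction both ways; tx, wtx_peer and tx_peer are assumed to exist and be connected:

from test_framework.messages import CInv, msg_inv, MSG_TX, MSG_WTX, MSG_WITNESS_FLAG
from test_framework.p2p import p2p_lock

tx.rehash()
wtx_peer.send_message(msg_inv([CInv(MSG_WTX, tx.calc_sha256(True))]))  # announce by wtxid
tx_peer.send_message(msg_inv([CInv(MSG_TX, tx.sha256)]))               # announce by txid

wtx_peer.wait_until(lambda: wtx_peer.last_message.get("getdata"))
tx_peer.wait_until(lambda: tx_peer.last_message.get("getdata"))
with p2p_lock:
    assert wtx_peer.last_message["getdata"].inv[-1].type == MSG_WTX
    assert tx_peer.last_message["getdata"].inv[-1].type == MSG_TX | MSG_WITNESS_FLAG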
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
index 481b1c1841..04e6ec4172 100755
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -87,11 +87,11 @@ e. Announce one more that doesn't connect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
-from test_framework.mininode import (
+from test_framework.p2p import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
- mininode_lock,
+ p2p_lock,
MSG_BLOCK,
msg_block,
msg_getblocks,
@@ -104,7 +104,6 @@ from test_framework.mininode import (
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- wait_until,
)
DIRECT_FETCH_RESPONSE_TIME = 0.05
@@ -147,7 +146,7 @@ class BaseNode(P2PInterface):
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
+ self.wait_until(test_function, timeout=timeout)
def on_inv(self, message):
self.block_announced = True
@@ -163,7 +162,7 @@ class BaseNode(P2PInterface):
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
- with mininode_lock:
+ with p2p_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
@@ -174,8 +173,8 @@ class BaseNode(P2PInterface):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
- wait_until(test_function, timeout=60, lock=mininode_lock)
- with mininode_lock:
+ self.wait_until(test_function)
+ with p2p_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
@@ -186,9 +185,9 @@ class BaseNode(P2PInterface):
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
- wait_until(test_function, timeout=60, lock=mininode_lock)
+ self.wait_until(test_function)
- with mininode_lock:
+ with p2p_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
@@ -298,7 +297,7 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_and_ping(msg_block(new_block)) # make sure this block is processed
- wait_until(lambda: inv_node.block_announced, timeout=60, lock=mininode_lock)
+ inv_node.wait_until(lambda: inv_node.block_announced)
inv_node.clear_block_announcements()
test_node.clear_block_announcements()
@@ -328,7 +327,7 @@ class SendHeadersTest(BitcoinTestFramework):
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
- for b in range(i + 1):
+ for _ in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -443,7 +442,7 @@ class SendHeadersTest(BitcoinTestFramework):
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
- for b in range(2):
+ for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -456,12 +455,12 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
- with mininode_lock:
+ with p2p_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
- for b in range(3):
+ for _ in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -482,7 +481,7 @@ class SendHeadersTest(BitcoinTestFramework):
blocks = []
# Create extra blocks for later
- for b in range(20):
+ for _ in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -494,7 +493,7 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
- with mininode_lock:
+ with p2p_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
@@ -513,7 +512,7 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
- with mininode_lock:
+ with p2p_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
@@ -529,14 +528,14 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
- for j in range(2):
+ for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
- with mininode_lock:
+ with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
@@ -550,7 +549,7 @@ class SendHeadersTest(BitcoinTestFramework):
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
- for j in range(MAX_UNCONNECTING_HEADERS + 1):
+ for _ in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -559,7 +558,7 @@ class SendHeadersTest(BitcoinTestFramework):
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
- with mininode_lock:
+ with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
@@ -574,7 +573,7 @@ class SendHeadersTest(BitcoinTestFramework):
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
- with mininode_lock:
+ with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py
index 5a4fa42988..ce12ce26ce 100755
--- a/test/functional/p2p_timeouts.py
+++ b/test/functional/p2p_timeouts.py
@@ -24,7 +24,7 @@
from time import sleep
from test_framework.messages import msg_ping
-from test_framework.mininode import P2PInterface
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 2527edc135..653c7ae43f 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -16,14 +16,13 @@ from test_framework.messages import (
msg_inv,
msg_notfound,
)
-from test_framework.mininode import (
+from test_framework.p2p import (
P2PInterface,
- mininode_lock,
+ p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
- wait_until,
)
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
@@ -73,14 +72,14 @@ class TxDownloadTest(BitcoinTestFramework):
def getdata_found(peer_index):
p = self.nodes[0].p2ps[peer_index]
- with mininode_lock:
+ with p2p_lock:
return p.last_message.get("getdata") and p.last_message["getdata"].inv[-1].hash == txid
node_0_mocktime = int(time.time())
while outstanding_peer_index:
node_0_mocktime += MAX_GETDATA_INBOUND_WAIT
self.nodes[0].setmocktime(node_0_mocktime)
- wait_until(lambda: any(getdata_found(i) for i in outstanding_peer_index))
+ self.wait_until(lambda: any(getdata_found(i) for i in outstanding_peer_index))
for i in outstanding_peer_index:
if getdata_found(i):
outstanding_peer_index.remove(i)
@@ -134,24 +133,24 @@ class TxDownloadTest(BitcoinTestFramework):
p = self.nodes[0].p2ps[0]
- with mininode_lock:
+ with p2p_lock:
p.tx_getdata_count = 0
p.send_message(msg_inv([CInv(t=MSG_WTX, h=i) for i in txids]))
- wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT, lock=mininode_lock)
- with mininode_lock:
+ p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT)
+ with p2p_lock:
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)
self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request")
p.send_message(msg_notfound(vec=[CInv(t=MSG_WTX, h=txids[0])]))
- wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10, lock=mininode_lock)
- with mininode_lock:
+ p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10)
+ with p2p_lock:
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1)
WAIT_TIME = TX_EXPIRY_INTERVAL // 2 + TX_EXPIRY_INTERVAL
self.log.info("if we wait about {} minutes, we should eventually get more requests".format(WAIT_TIME / 60))
self.nodes[0].setmocktime(int(time.time() + WAIT_TIME))
- wait_until(lambda: p.tx_getdata_count == MAX_GETDATA_IN_FLIGHT + 2)
+ p.wait_until(lambda: p.tx_getdata_count == MAX_GETDATA_IN_FLIGHT + 2)
self.nodes[0].setmocktime(0)
def test_spurious_notfound(self):
@@ -162,7 +161,7 @@ class TxDownloadTest(BitcoinTestFramework):
# Setup the p2p connections
self.peers = []
for node in self.nodes:
- for i in range(NUM_INBOUND):
+ for _ in range(NUM_INBOUND):
self.peers.append(node.add_p2p_connection(TestP2PConn()))
self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND))
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
index 71b0b0f63a..36b434bce3 100755
--- a/test/functional/p2p_unrequested_blocks.py
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -55,7 +55,7 @@ import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, MSG_BLOCK, msg_block, msg_headers, msg_inv
-from test_framework.mininode import mininode_lock, P2PInterface
+from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@@ -199,13 +199,13 @@ class AcceptBlockTest(BitcoinTestFramework):
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
- with mininode_lock:
+ with p2p_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))
test_node.sync_with_ping()
- with mininode_lock:
+ with p2p_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index 7c70f30ca3..c005584485 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -22,6 +22,17 @@ from decimal import Decimal
import http.client
import subprocess
+from test_framework.blocktools import (
+ create_block,
+ create_coinbase,
+ TIME_GENESIS_BLOCK,
+)
+from test_framework.messages import (
+ CBlockHeader,
+ FromHex,
+ msg_block,
+)
+from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@@ -32,17 +43,6 @@ from test_framework.util import (
assert_is_hex_string,
assert_is_hash_string,
)
-from test_framework.blocktools import (
- create_block,
- create_coinbase,
- TIME_GENESIS_BLOCK,
-)
-from test_framework.messages import (
- msg_block,
-)
-from test_framework.mininode import (
- P2PInterface,
-)
class BlockchainTest(BitcoinTestFramework):
@@ -280,6 +280,14 @@ class BlockchainTest(BitcoinTestFramework):
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
+ # Test with verbose=False, which should return the header as hex.
+ header_hex = node.getblockheader(blockhash=besthash, verbose=False)
+ assert_is_hex_string(header_hex)
+
+ header = FromHex(CBlockHeader(), header_hex)
+ header.calc_sha256()
+ assert_equal(header.hash, besthash)
+
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
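Background for the new verbose=False check above: a block hash is the double-SHA256 of the 80-byte serialized header, displayed byte-reversed. A standalone sketch using only hashlib, where header_hex stands for the string returned by getblockheader(..., verbose=False):

import hashlib

def block_hash_from_header_hex(header_hex):
    header = bytes.fromhex(header_hex)
    assert len(header) == 80                     # version, prev, merkle root, time, bits, nonce
    digest = hashlib.sha256(hashlib.sha256(header).digest()).digest()
    return digest[::-1].hex()                    # RPC/display order is byte-reversed

# block_hash_from_header_hex(header_hex) should equal besthash,
# which is what header.calc_sha256() verifies above.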
diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py
index 9a21998d11..b71854d234 100755
--- a/test/functional/rpc_deprecated.py
+++ b/test/functional/rpc_deprecated.py
@@ -4,13 +4,13 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import BitcoinTestFramework
-# from test_framework.util import assert_raises_rpc_error
+from test_framework.util import assert_raises_rpc_error, find_vout_for_address
class DeprecatedRpcTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
- self.extra_args = [[], []]
+ self.extra_args = [[], ['-deprecatedrpc=bumpfee']]
def run_test(self):
# This test should be used to verify correct behaviour of deprecated
@@ -23,7 +23,38 @@ class DeprecatedRpcTest(BitcoinTestFramework):
# self.log.info("Test generate RPC")
# assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1)
# self.nodes[1].generate(1)
- self.log.info("No tested deprecated RPC methods")
+
+ if self.is_wallet_compiled():
+ self.log.info("Test bumpfee RPC")
+ self.nodes[0].generate(101)
+ self.nodes[0].createwallet(wallet_name='nopriv', disable_private_keys=True)
+ noprivs0 = self.nodes[0].get_wallet_rpc('nopriv')
+ w0 = self.nodes[0].get_wallet_rpc('')
+ self.nodes[1].createwallet(wallet_name='nopriv', disable_private_keys=True)
+ noprivs1 = self.nodes[1].get_wallet_rpc('nopriv')
+
+ address = w0.getnewaddress()
+ desc = w0.getaddressinfo(address)['desc']
+ change_addr = w0.getrawchangeaddress()
+ change_desc = w0.getaddressinfo(change_addr)['desc']
+ txid = w0.sendtoaddress(address=address, amount=10)
+ vout = find_vout_for_address(w0, txid, address)
+ self.nodes[0].generate(1)
+ rawtx = w0.createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 5}, 0, True)
+ rawtx = w0.fundrawtransaction(rawtx, {'changeAddress': change_addr})
+ signed_tx = w0.signrawtransactionwithwallet(rawtx['hex'])['hex']
+
+ noprivs0.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
+ noprivs1.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
+
+ txid = w0.sendrawtransaction(signed_tx)
+ self.sync_all()
+
+ assert_raises_rpc_error(-32, 'Using bumpfee with wallets that have private keys disabled is deprecated. Use psbtbumpfee instead or restart bitcoind with -deprecatedrpc=bumpfee. This functionality will be removed in 0.22', noprivs0.bumpfee, txid)
+ bumped_psbt = noprivs1.bumpfee(txid)
+ assert 'psbt' in bumped_psbt
+ else:
+ self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 57c8f511ac..2a0971b808 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -554,7 +554,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[1].generate(1)
self.sync_all()
- for i in range(0,20):
+ for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
@@ -582,7 +582,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[1].generate(1)
self.sync_all()
- for i in range(0,20):
+ for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
diff --git a/test/functional/rpc_generate.py b/test/functional/rpc_generate.py
new file mode 100755
index 0000000000..9404f1e25e
--- /dev/null
+++ b/test/functional/rpc_generate.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test generate RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+)
+
+
+class RPCGenerateTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def run_test(self):
+ message = (
+ "generate ( nblocks maxtries ) has been replaced by the -generate "
+ "cli option. Refer to -help for more information."
+ )
+
+ self.log.info("Test rpc generate raises with message to use cli option")
+ assert_raises_rpc_error(-32601, message, self.nodes[0].rpc.generate)
+
+ self.log.info("Test rpc generate help prints message to use cli option")
+ assert_equal(message, self.nodes[0].help("generate"))
+
+ self.log.info("Test rpc generate is a hidden command not discoverable in general help")
+ assert message not in self.nodes[0].help()
+
+
+if __name__ == "__main__":
+ RPCGenerateTest().main()
diff --git a/test/functional/rpc_generateblock.py b/test/functional/rpc_generateblock.py
index aa58c0af9d..08ff0fba50 100755
--- a/test/functional/rpc_generateblock.py
+++ b/test/functional/rpc_generateblock.py
@@ -55,7 +55,7 @@ class GenerateBlockTest(BitcoinTestFramework):
node.generatetoaddress(110, address)
# Generate some extra mempool transactions to verify they don't get mined
- for i in range(10):
+ for _ in range(10):
node.sendtoaddress(address, 0.001)
self.log.info('Generate block with txid')
diff --git a/test/functional/rpc_invalidateblock.py b/test/functional/rpc_invalidateblock.py
index 1fdc134f97..e788e75557 100755
--- a/test/functional/rpc_invalidateblock.py
+++ b/test/functional/rpc_invalidateblock.py
@@ -9,7 +9,6 @@ from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR
from test_framework.util import (
assert_equal,
connect_nodes,
- wait_until,
)
@@ -57,9 +56,9 @@ class InvalidateTest(BitcoinTestFramework):
self.log.info("..and then mine a block")
self.nodes[2].generatetoaddress(1, self.nodes[2].get_deterministic_priv_key().address)
self.log.info("Verify all nodes are at the right height")
- wait_until(lambda: self.nodes[2].getblockcount() == 3, timeout=5)
- wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5)
- wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5)
+ self.wait_until(lambda: self.nodes[2].getblockcount() == 3, timeout=5)
+ self.wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5)
+ self.wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5)
self.log.info("Verify that we reconsider all ancestors as well")
blocks = self.nodes[1].generatetodescriptor(10, ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR)
diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py
index c8517d719e..0493ceeb64 100755
--- a/test/functional/rpc_misc.py
+++ b/test/functional/rpc_misc.py
@@ -27,8 +27,8 @@ class RpcMiscTest(BitcoinTestFramework):
self.log.info("test CHECK_NONFATAL")
assert_raises_rpc_error(
-1,
- "Internal bug detected: 'request.params.size() != 100'",
- lambda: node.echo(*[0] * 100),
+ 'Internal bug detected: \'request.params[9].get_str() != "trigger_internal_bug"\'',
+ lambda: node.echo(arg9='trigger_internal_bug'),
)
self.log.info("test getmemoryinfo")
@@ -61,6 +61,34 @@ class RpcMiscTest(BitcoinTestFramework):
node.logging(include=['qt'])
assert_equal(node.logging()['qt'], True)
+ self.log.info("test getindexinfo")
+ # Without any indices running the RPC returns an empty object
+ assert_equal(node.getindexinfo(), {})
+
+ # Restart the node with indices and wait for them to sync
+ self.restart_node(0, ["-txindex", "-blockfilterindex"])
+ self.wait_until(lambda: all(i["synced"] for i in node.getindexinfo().values()))
+
+ # Returns a list of all running indices by default
+ assert_equal(
+ node.getindexinfo(),
+ {
+ "txindex": {"synced": True, "best_block_height": 200},
+ "basic block filter index": {"synced": True, "best_block_height": 200}
+ }
+ )
+
+ # Specifying an index by name returns only the status of that index
+ assert_equal(
+ node.getindexinfo("txindex"),
+ {
+ "txindex": {"synced": True, "best_block_height": 200},
+ }
+ )
+
+ # Specifying an unknown index name returns an empty result
+ assert_equal(node.getindexinfo("foo"), {})
+
if __name__ == '__main__':
RpcMiscTest().main()
diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py
index 3336246c8b..506c77c567 100755
--- a/test/functional/rpc_net.py
+++ b/test/functional/rpc_net.py
@@ -8,24 +8,24 @@ Tests correspond to code in rpc/net.cpp.
"""
from decimal import Decimal
+from itertools import product
+import time
+from test_framework.p2p import P2PInterface
+import test_framework.messages
+from test_framework.messages import (
+ NODE_NETWORK,
+ NODE_WITNESS,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
+ assert_approx,
assert_equal,
assert_greater_than_or_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes,
p2p_port,
- wait_until,
-)
-from test_framework.mininode import P2PInterface
-import test_framework.messages
-from test_framework.messages import (
- CAddress,
- msg_addr,
- NODE_NETWORK,
- NODE_WITNESS,
)
@@ -50,25 +50,27 @@ class NetTest(BitcoinTestFramework):
self.supports_cli = False
def run_test(self):
- self.log.info('Get out of IBD for the minfeefilter test')
- self.nodes[0].generate(1)
- self.log.info('Connect nodes both way')
+ # Get out of IBD for the minfeefilter and getpeerinfo tests.
+ self.nodes[0].generate(101)
+ # Connect nodes both ways.
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
- self._test_connection_count()
- self._test_getnettotals()
- self._test_getnetworkinfo()
- self._test_getaddednodeinfo()
- self._test_getpeerinfo()
+ self.test_connection_count()
+ self.test_getpeerinfo()
+ self.test_getnettotals()
+ self.test_getnetworkinfo()
+ self.test_getaddednodeinfo()
self.test_service_flags()
- self._test_getnodeaddresses()
+ self.test_getnodeaddresses()
- def _test_connection_count(self):
- # connect_nodes connects each node to the other
+ def test_connection_count(self):
+ self.log.info("Test getconnectioncount")
+ # After using `connect_nodes` to connect nodes 0 and 1 to each other.
assert_equal(self.nodes[0].getconnectioncount(), 2)
- def _test_getnettotals(self):
+ def test_getnettotals(self):
+ self.log.info("Test getnettotals")
# getnettotals totalbytesrecv and totalbytessent should be
# consistent with getpeerinfo. Since the RPC calls are not atomic,
# and messages might have been recvd or sent between RPC calls, call
@@ -90,15 +92,16 @@ class NetTest(BitcoinTestFramework):
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
- wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
- wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
+ self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
+ self.wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
for before, after in zip(peer_info, peer_info_after_ping):
assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
- def _test_getnetworkinfo(self):
+ def test_getnetworkinfo(self):
+ self.log.info("Test getnetworkinfo")
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
@@ -106,11 +109,11 @@ class NetTest(BitcoinTestFramework):
self.nodes[0].setnetworkactive(state=False)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
# Wait a bit for all sockets to close
- wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
+ self.wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
self.nodes[0].setnetworkactive(state=True)
- self.log.info('Connect nodes both way')
+ # Connect nodes both ways.
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
@@ -122,7 +125,8 @@ class NetTest(BitcoinTestFramework):
for info in network_info:
assert_net_servicesnames(int(info["localservices"], 0x10), info["localservicesnames"])
- def _test_getaddednodeinfo(self):
+ def test_getaddednodeinfo(self):
+ self.log.info("Test getaddednodeinfo")
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
@@ -131,11 +135,30 @@ class NetTest(BitcoinTestFramework):
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
+ # check that node cannot be added again
+ assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port, command='add')
+ # check that node can be removed
+ self.nodes[0].addnode(node=ip_port, command='remove')
+ assert_equal(self.nodes[0].getaddednodeinfo(), [])
+ # check that trying to remove the node again returns an error
+ assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove')
# check that a non-existent node returns an error
assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
- def _test_getpeerinfo(self):
+ def test_getpeerinfo(self):
+ self.log.info("Test getpeerinfo")
+ # Create a few getpeerinfo last_block/last_transaction values.
+ if self.is_wallet_compiled():
+ self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
+ self.nodes[1].generate(1)
+ self.sync_all()
+ time_now = int(time.time())
peer_info = [x.getpeerinfo() for x in self.nodes]
+ # Verify last_block and last_transaction keys/values.
+ for node, peer, field in product(range(self.num_nodes), range(2), ['last_block', 'last_transaction']):
+ assert field in peer_info[node][peer].keys()
+ if peer_info[node][peer][field] != 0:
+ assert_approx(peer_info[node][peer][field], time_now, vspan=60)
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
@@ -147,37 +170,43 @@ class NetTest(BitcoinTestFramework):
assert_net_servicesnames(int(info[0]["services"], 0x10), info[0]["servicesnames"])
def test_service_flags(self):
+ self.log.info("Test service flags")
self.nodes[0].add_p2p_connection(P2PInterface(), services=(1 << 4) | (1 << 63))
assert_equal(['UNKNOWN[2^4]', 'UNKNOWN[2^63]'], self.nodes[0].getpeerinfo()[-1]['servicesnames'])
self.nodes[0].disconnect_p2ps()
- def _test_getnodeaddresses(self):
+ def test_getnodeaddresses(self):
+ self.log.info("Test getnodeaddresses")
self.nodes[0].add_p2p_connection(P2PInterface())
- # send some addresses to the node via the p2p message addr
- msg = msg_addr()
+ # Add some addresses to the Address Manager over RPC. Due to the way
+ # bucket and bucket position are calculated, some of these addresses
+ # will collide.
imported_addrs = []
- for i in range(256):
- a = "123.123.123.{}".format(i)
+ for i in range(10000):
+ first_octet = i >> 8
+ second_octet = i % 256
+ a = "{}.{}.1.1".format(first_octet, second_octet)
imported_addrs.append(a)
- addr = CAddress()
- addr.time = 100000000
- addr.nServices = NODE_NETWORK | NODE_WITNESS
- addr.ip = a
- addr.port = 8333
- msg.addrs.append(addr)
- self.nodes[0].p2p.send_and_ping(msg)
-
- # obtain addresses via rpc call and check they were ones sent in before
- REQUEST_COUNT = 10
- node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
- assert_equal(len(node_addresses), REQUEST_COUNT)
+ self.nodes[0].addpeeraddress(a, 8333)
+
+ # Obtain addresses via rpc call and check they were ones sent in before.
+ #
+ # Maximum possible addresses in addrman is 10000, although actual
+ # number will usually be less due to bucket and bucket position
+ # collisions.
+ node_addresses = self.nodes[0].getnodeaddresses(0)
+ assert_greater_than(len(node_addresses), 5000)
+ assert_greater_than(10000, len(node_addresses))
for a in node_addresses:
- assert_greater_than(a["time"], 1527811200) # 1st June 2018
+ assert_greater_than(a["time"], 1527811200) # 1st June 2018
assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS)
assert a["address"] in imported_addrs
assert_equal(a["port"], 8333)
+ node_addresses = self.nodes[0].getnodeaddresses(1)
+ assert_equal(len(node_addresses), 1)
+
assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)
# addrman's size cannot be known reliably after insertion, as hash collisions may occur
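A note on the 10000 generated addresses above: splitting the loop index into two octets (i >> 8 and i % 256) yields a distinct "a.b.1.1" string for every i below 65536, so all 10000 inputs are unique and any shortfall in getnodeaddresses comes from addrman bucket collisions, not duplicate inputs. A quick standalone check:

# i = 0    -> "0.0.1.1"
# i = 256  -> "1.0.1.1"
# i = 9999 -> "39.15.1.1"  (9999 >> 8 == 39, 9999 % 256 == 15)
addrs = {"{}.{}.1.1".format(i >> 8, i % 256) for i in range(10000)}
assert len(addrs) == 10000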
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index e5e62fd646..f7f23bc8f4 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -155,12 +155,14 @@ class PSBTTest(BitcoinTestFramework):
p2pkh_pos = out['n']
# spend single key from node 1
- rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
- walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
+ created_psbt = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})
+ walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
# Make sure it has both types of UTXOs
decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
assert 'non_witness_utxo' in decoded['inputs'][0]
assert 'witness_utxo' in decoded['inputs'][0]
+ # Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
+ assert_equal(decoded['fee'], created_psbt['fee'])
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
@@ -428,7 +430,7 @@ class PSBTTest(BitcoinTestFramework):
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
shuffled = False
- for i in range(0, 10):
+ for _ in range(10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py
index 3d08202724..704b65c060 100755
--- a/test/functional/rpc_signrawtransaction.py
+++ b/test/functional/rpc_signrawtransaction.py
@@ -198,10 +198,30 @@ class SignRawTransactionsTest(BitcoinTestFramework):
assert_equal(spending_tx_signed['complete'], True)
self.nodes[0].sendrawtransaction(spending_tx_signed['hex'])
+ def OP_1NEGATE_test(self):
+ self.log.info("Test OP_1NEGATE (0x4f) satisfies BIP62 minimal push standardness rule")
+ hex_str = (
+ "0200000001FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
+ "FFFFFFFF00000000044F024F9CFDFFFFFF01F0B9F5050000000023210277777777"
+ "77777777777777777777777777777777777777777777777777777777AC66030000"
+ )
+ prev_txs = [
+ {
+ "txid": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ "vout": 0,
+ "scriptPubKey": "A914AE44AB6E9AA0B71F1CD2B453B69340E9BFBAEF6087",
+ "redeemScript": "4F9C",
+ "amount": 1,
+ }
+ ]
+ txn = self.nodes[0].signrawtransactionwithwallet(hex_str, prev_txs)
+ assert txn["complete"]
+
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
self.witness_script_test()
+ self.OP_1NEGATE_test()
self.test_with_lock_outputs()
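For reference, the "4F9C" redeemScript in the new OP_1NEGATE test is OP_1NEGATE (0x4f) followed by OP_NUMEQUAL (0x9c); the test checks that signing a spend of it does not trip BIP62's minimal-push rule. A small sketch confirming the encoding with the framework's script helpers:

from test_framework.script import CScript, OP_1NEGATE, OP_NUMEQUAL

redeem_script = CScript([OP_1NEGATE, OP_NUMEQUAL])
assert redeem_script.hex() == "4f9c"   # matches the "redeemScript" field above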
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 2462a9a6db..bd4a53876e 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -32,7 +32,7 @@ from test_framework.util import hex_str_to_bytes, assert_equal
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70016 # past wtxid relay
-MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
+MY_SUBVERSION = b"/python-p2p-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_LOCATOR_SZ = 101
@@ -53,6 +53,7 @@ NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
+NODE_COMPACT_FILTERS = (1 << 6)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
@@ -111,7 +112,7 @@ def deser_uint256(f):
def ser_uint256(u):
rs = b""
- for i in range(8):
+ for _ in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
@@ -134,7 +135,7 @@ def uint256_from_compact(c):
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
- for i in range(nit):
+ for _ in range(nit):
t = c()
t.deserialize(f)
r.append(t)
@@ -157,7 +158,7 @@ def ser_vector(l, ser_function_name=None):
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
- for i in range(nit):
+ for _ in range(nit):
t = deser_uint256(f)
r.append(t)
return r
@@ -173,7 +174,7 @@ def ser_uint256_vector(l):
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
- for i in range(nit):
+ for _ in range(nit):
t = deser_string(f)
r.append(t)
return r
@@ -467,7 +468,7 @@ class CTransaction:
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
- self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
+ self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))]
self.wit.deserialize(f)
else:
self.wit = CTxWitness()
@@ -500,7 +501,7 @@ class CTransaction:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
- for i in range(len(self.wit.vtxinwit), len(self.vin)):
+ for _ in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
@@ -735,7 +736,7 @@ class P2PHeaderAndShortIDs:
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
- for i in range(self.shortids_length):
+ for _ in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
@@ -852,7 +853,7 @@ class BlockTransactionsRequest:
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
- for i in range(indexes_length):
+ for _ in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
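The deser_*/ser_* helpers touched above are plain little-endian codecs; ser_uint256, for example, emits eight 32-bit little-endian words, and deser_uint256 reads them back. A standalone round-trip sketch, also noting the new service bit:

from io import BytesIO
from test_framework.messages import ser_uint256, deser_uint256, NODE_COMPACT_FILTERS

value = 0xdeadbeef
blob = ser_uint256(value)
assert len(blob) == 32
assert deser_uint256(BytesIO(blob)) == value

assert NODE_COMPACT_FILTERS == 1 << 6   # the service bit added above (BIP 157/158)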
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/p2p.py
index f68c1a9ddd..57c77e60b5 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/p2p.py
@@ -4,10 +4,14 @@
# Copyright (c) 2010-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Bitcoin P2P network half-a-node.
+"""Test objects for interacting with a bitcoind node over the p2p protocol.
-This python code was modified from ArtForz' public domain half-a-node, as
-found in the mini-node branch of http://github.com/jgarzik/pynode.
+The P2PInterface objects interact with the bitcoind nodes under test using the
+node's p2p interface. They can be used to send messages to the node, and
+callbacks can be registered that execute when messages are received from the
+node. Messages are sent to/received from the node on an asyncio event loop.
+State held inside the objects must be guarded by the p2p_lock to avoid data
+races between the main testing thread and the event loop.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
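As a usage illustration of the rewritten docstring: a test attaches a P2PInterface to a node, exchanges messages on the event loop, and guards reads of the interface's state with p2p_lock. A minimal hedged sketch, assuming it runs inside a BitcoinTestFramework test with self.nodes[0] available:

from test_framework.messages import msg_ping
from test_framework.p2p import P2PInterface, p2p_lock

peer = self.nodes[0].add_p2p_connection(P2PInterface())  # completes version/verack
peer.send_and_ping(msg_ping(nonce=1))                    # round trip on the event loop
with p2p_lock:                                           # guard shared state
    assert "pong" in peer.last_message
self.nodes[0].disconnect_p2ps()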
@@ -67,7 +71,7 @@ from test_framework.messages import (
)
from test_framework.util import wait_until
-logger = logging.getLogger("TestFramework.mininode")
+logger = logging.getLogger("TestFramework.p2p")
MESSAGEMAP = {
b"addr": msg_addr,
@@ -283,9 +287,13 @@ class P2PInterface(P2PConnection):
def __init__(self):
super().__init__()
- # Track number of messages of each type received and the most recent
- # message of each type
+ # Track number of messages of each type received.
+ # Should be read-only in a test.
self.message_count = defaultdict(int)
+
+ # Track the most recent message of each type.
+ # To wait for a message to be received, pop that message from
+ # this and use wait_until.
self.last_message = {}
# A count of the number of ping messages we've sent to the node
@@ -316,7 +324,7 @@ class P2PInterface(P2PConnection):
We keep a count of how many of each message type has been received
and the most recent message of each type."""
- with mininode_lock:
+ with p2p_lock:
try:
msgtype = message.msgtype.decode('ascii')
self.message_count[msgtype] += 1
@@ -384,18 +392,22 @@ class P2PInterface(P2PConnection):
# Connection helper methods
- def wait_until(self, test_function, timeout=60):
- wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor)
+ def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
+ def test_function():
+ if check_connected:
+ assert self.is_connected
+ return test_function_in()
+
+ wait_until(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
- self.wait_until(test_function, timeout=timeout)
+ self.wait_until(test_function, timeout=timeout, check_connected=False)
# Message receiving helper methods
def wait_for_tx(self, txid, timeout=60):
def test_function():
- assert self.is_connected
if not self.last_message.get('tx'):
return False
return self.last_message['tx'].tx.rehash() == txid
@@ -404,14 +416,12 @@ class P2PInterface(P2PConnection):
def wait_for_block(self, blockhash, timeout=60):
def test_function():
- assert self.is_connected
return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
self.wait_until(test_function, timeout=timeout)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
- assert self.is_connected
last_headers = self.last_message.get('headers')
if not last_headers:
return False
@@ -421,7 +431,6 @@ class P2PInterface(P2PConnection):
def wait_for_merkleblock(self, blockhash, timeout=60):
def test_function():
- assert self.is_connected
last_filtered_block = self.last_message.get('merkleblock')
if not last_filtered_block:
return False
@@ -433,9 +442,7 @@ class P2PInterface(P2PConnection):
"""Waits for a getdata message.
The object hashes in the inventory vector must match the provided hash_list."""
-
def test_function():
- assert self.is_connected
last_data = self.last_message.get("getdata")
if not last_data:
return False
@@ -450,9 +457,7 @@ class P2PInterface(P2PConnection):
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
-
def test_function():
- assert self.is_connected
return self.last_message.get("getheaders")
self.wait_until(test_function, timeout=timeout)
@@ -463,7 +468,6 @@ class P2PInterface(P2PConnection):
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
def test_function():
- assert self.is_connected
return self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
@@ -472,7 +476,7 @@ class P2PInterface(P2PConnection):
def wait_for_verack(self, timeout=60):
def test_function():
- return self.message_count["verack"]
+ return "verack" in self.last_message
self.wait_until(test_function, timeout=timeout)
@@ -487,7 +491,6 @@ class P2PInterface(P2PConnection):
self.send_message(msg_ping(nonce=self.ping_counter))
def test_function():
- assert self.is_connected
return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
self.wait_until(test_function, timeout=timeout)
@@ -499,7 +502,7 @@ class P2PInterface(P2PConnection):
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
-mininode_lock = threading.Lock()
+p2p_lock = threading.Lock()
class NetworkThread(threading.Thread):
@@ -593,7 +596,7 @@ class P2PDataStore(P2PInterface):
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
- with mininode_lock:
+ with p2p_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
@@ -605,7 +608,11 @@ class P2PDataStore(P2PInterface):
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
- self.wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout)
+ self.wait_until(
+ lambda: blocks[-1].sha256 in self.getdata_requests,
+ timeout=timeout,
+ check_connected=success,
+ )
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
@@ -626,7 +633,7 @@ class P2PDataStore(P2PInterface):
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""
- with mininode_lock:
+ with p2p_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
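
The docstrings above belong to the P2PDataStore block/transaction helpers. As a hedged usage sketch: the helper name send_blocks_and_test and the blocktools calls are assumptions based on the wider framework and are not shown in these hunks.

```python
#!/usr/bin/env python3
# Hedged sketch (not part of this diff): exercising the P2PDataStore behaviour
# described in the docstrings above. send_blocks_and_test() and the
# blocktools helpers are assumed from the wider framework.
from test_framework.blocktools import create_block, create_coinbase
from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework


class ExampleDataStoreTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PDataStore())

        tip = int(node.getbestblockhash(), 16)
        height = node.getblockcount() + 1
        block = create_block(tip, create_coinbase(height))
        block.solve()

        # success defaults to True: assert that the node's tip advances to the
        # sent block. With success=False the helper instead asserts that the
        # tip does not move, and reject_reason checks the debug log.
        peer.send_blocks_and_test([block], node)


if __name__ == '__main__':
    ExampleDataStoreTest().main()
```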
@@ -665,7 +672,7 @@ class P2PTxInvStore(P2PInterface):
self.tx_invs_received[i.hash] += 1
def get_invs(self):
- with mininode_lock:
+ with p2p_lock:
return list(self.tx_invs_received.keys())
def wait_for_broadcast(self, txns, timeout=60):
@@ -673,6 +680,6 @@ class P2PTxInvStore(P2PInterface):
The mempool should mark unbroadcast=False for these transactions.
"""
# Wait until invs have been received (and getdatas sent) for each txid.
- self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout)
+ self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
# Flush messages and wait for the getdatas to be processed
self.sync_with_ping()
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
index cc5f8307d3..5e35ba0fce 100644
--- a/test/functional/test_framework/script.py
+++ b/test/functional/test_framework/script.py
@@ -646,7 +646,7 @@ def LegacySignatureHash(script, txTo, inIdx, hashtype):
tmp = txtmp.vout[outIdx]
txtmp.vout = []
- for i in range(outIdx):
+ for _ in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 8d402d4888..2a60f8e0c1 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -20,8 +20,8 @@ import time
from .authproxy import JSONRPCException
from . import coverage
+from .p2p import NetworkThread
from .test_node import TestNode
-from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 66bb2c89b5..5c7a883c43 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -524,6 +524,7 @@ class TestNode():
p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
self.p2ps.append(p2p_conn)
+ p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
if wait_for_verack:
# Wait for the node to send us the version and verack
p2p_conn.wait_for_verack()
@@ -550,7 +551,7 @@ class TestNode():
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
- def num_connected_mininodes(self):
+ def num_test_p2p_connections(self):
"""Return number of test framework p2p connections to the node."""
return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION])
@@ -559,7 +560,7 @@ class TestNode():
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
- wait_until(lambda: self.num_connected_mininodes() == 0)
+ wait_until(lambda: self.num_test_p2p_connections() == 0)
class TestNodeCLIAttr:
@@ -637,7 +638,7 @@ class TestNodeCLI():
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
- except json.JSONDecodeError:
+ except (json.JSONDecodeError, decimal.InvalidOperation):
return cli_stdout.rstrip("\n")
class RPCOverloadWrapper():
@@ -649,10 +650,10 @@ class RPCOverloadWrapper():
def __getattr__(self, name):
return getattr(self.rpc, name)
- def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None):
+ def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None):
if descriptors is None:
descriptors = self.descriptors
- return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors)
+ return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup)
def importprivkey(self, privkey, label=None, rescan=None):
wallet_info = self.getwalletinfo()
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 506057f1fa..cfc4ee65d4 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -226,6 +226,14 @@ def satoshi_round(amount):
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
+ """Sleep until the predicate resolves to be True.
+
+ Warning: Note that this method is not recommended to be used in tests as it is
+ not aware of the context of the test framework. Using `wait_until()` counterpart
+ from `BitcoinTestFramework` or `P2PInterface` class ensures an understandable
+ amount of timeout and a common shared timeout_factor. Furthermore, `wait_until()`
+ from `P2PInterface` class in `mininode.py` has a preset lock.
+ """
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
timeout = timeout * timeout_factor
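
The wallet_* diffs further down migrate callers from this bare helper to self.wait_until(). A hedged fragment (a hypothetical test method; `node` is a TestNode and `peer` a P2PInterface set up elsewhere) contrasting the call sites, as the docstring recommends:

```python
# Hedged fragment (not part of this diff): contrasting the call sites only.
from test_framework.util import wait_until


def example(self, node, peer):
    # Discouraged: fixed timeout, no shared timeout_factor, caller manages locks.
    wait_until(lambda: node.getblockcount() == 200, timeout=60)

    # Preferred: scaled by the framework's --timeout-factor.
    self.wait_until(lambda: node.getblockcount() == 200)

    # Preferred for p2p state: also acquires p2p_lock around the predicate.
    peer.wait_until(lambda: "inv" in peer.last_message)
```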
@@ -529,7 +537,7 @@ def create_confirmed_utxos(fee, node, count):
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
- for i in range(iterations):
+ for _ in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
@@ -556,7 +564,7 @@ def gen_return_txouts():
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
- for i in range(512):
+ for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
@@ -564,7 +572,7 @@ def gen_return_txouts():
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
- for k in range(128):
+ for _ in range(128):
txouts.append(txout)
return txouts
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 95c2b7c5ec..01232bda3c 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -159,6 +159,7 @@ BASE_SCRIPTS = [
'rpc_deprecated.py',
'wallet_disable.py',
'p2p_addr_relay.py',
+ 'p2p_getaddr_caching.py',
'p2p_getdata.py',
'rpc_net.py',
'wallet_keypool.py',
@@ -193,6 +194,7 @@ BASE_SCRIPTS = [
'p2p_eviction.py',
'rpc_signmessage.py',
'rpc_generateblock.py',
+ 'rpc_generate.py',
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
@@ -242,6 +244,7 @@ BASE_SCRIPTS = [
'p2p_node_network_limited.py',
'p2p_permissions.py',
'feature_blocksdir.py',
+ 'wallet_startup.py',
'feature_config_args.py',
'feature_settings.py',
'rpc_getdescriptorinfo.py',
@@ -712,14 +715,16 @@ class RPCCoverage():
Return a set of currently untested RPC commands.
"""
- # This is shared from `test/functional/test-framework/coverage.py`
+ # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
- covered_cmds = set()
+ # Consider RPC generate covered, because it is overloaded in
+ # test_framework/test_node.py and not seen by the coverage check.
+ covered_cmds = set({'generate'})
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py
index 9dd91b2495..4766355335 100755
--- a/test/functional/wallet_backup.py
+++ b/test/functional/wallet_backup.py
@@ -129,7 +129,7 @@ class WalletBackupTest(BitcoinTestFramework):
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
- for i in range(5):
+ for _ in range(5):
self.do_one_round()
self.log.info("Backing up")
@@ -142,7 +142,7 @@ class WalletBackupTest(BitcoinTestFramework):
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
- for i in range(5):
+ for _ in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index 81382d94ad..71a1a3f4f6 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -12,7 +12,6 @@ from test_framework.util import (
assert_fee_amount,
assert_raises_rpc_error,
connect_nodes,
- wait_until,
)
from test_framework.wallet_util import test_address
@@ -540,7 +539,7 @@ class WalletTest(BitcoinTestFramework):
self.start_node(2, [m, "-limitancestorcount=" + str(chainlimit)])
if m == '-reindex':
# reindex will leave rpc warm up "early"; Wait for it to finish
- wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
+ self.wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
@@ -569,7 +568,7 @@ class WalletTest(BitcoinTestFramework):
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
- for i in range(chainlimit * 2):
+ for _ in range(chainlimit * 2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2)
assert_equal(len(txid_list), chainlimit * 2)
@@ -589,7 +588,7 @@ class WalletTest(BitcoinTestFramework):
self.start_node(0, extra_args=extra_args)
# wait until the wallet has submitted all transactions to the mempool
- wait_until(lambda: len(self.nodes[0].getrawmempool()) == chainlimit * 2)
+ self.wait_until(lambda: len(self.nodes[0].getrawmempool()) == chainlimit * 2)
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index 72c85b8832..56d1da60b7 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -50,6 +50,11 @@ class BumpFeeTest(BitcoinTestFramework):
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
+ def clear_mempool(self):
+ # Clear the mempool between subtests. Subtests should only depend on chainstate (UTXOs), not on mempool contents.
+ self.nodes[1].generate(1)
+ self.sync_all()
+
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
@@ -62,7 +67,7 @@ class BumpFeeTest(BitcoinTestFramework):
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
- for i in range(25):
+ for _ in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
@@ -71,7 +76,7 @@ class BumpFeeTest(BitcoinTestFramework):
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
- test_invalid_parameters(rbf_node, dest_address)
+ self.test_invalid_parameters(rbf_node, dest_address)
test_simple_bumpfee_succeeds(self, "default", rbf_node, peer_node, dest_address)
test_simple_bumpfee_succeeds(self, "fee_rate", rbf_node, peer_node, dest_address)
test_feerate_args(self, rbf_node, peer_node, dest_address)
@@ -93,28 +98,30 @@ class BumpFeeTest(BitcoinTestFramework):
test_small_output_with_feerate_succeeds(self, rbf_node, dest_address)
test_no_more_inputs_fails(self, rbf_node, dest_address)
-def test_invalid_parameters(node, dest_address):
- txid = spend_one_input(node, dest_address)
- # invalid estimate mode
- assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", node.bumpfee, txid, {
- "estimate_mode": "moo",
- })
- assert_raises_rpc_error(-3, "Expected type string", node.bumpfee, txid, {
- "estimate_mode": 38,
- })
- assert_raises_rpc_error(-3, "Expected type string", node.bumpfee, txid, {
- "estimate_mode": {
- "foo": "bar",
- },
- })
- assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", node.bumpfee, txid, {
- "estimate_mode": Decimal("3.141592"),
- })
- # confTarget and conf_target
- assert_raises_rpc_error(-8, "confTarget and conf_target options should not both be set", node.bumpfee, txid, {
- "confTarget": 123,
- "conf_target": 456,
- })
+ def test_invalid_parameters(self, node, dest_address):
+ txid = spend_one_input(node, dest_address)
+ # invalid estimate mode
+ assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", node.bumpfee, txid, {
+ "estimate_mode": "moo",
+ })
+ assert_raises_rpc_error(-3, "Expected type string", node.bumpfee, txid, {
+ "estimate_mode": 38,
+ })
+ assert_raises_rpc_error(-3, "Expected type string", node.bumpfee, txid, {
+ "estimate_mode": {
+ "foo": "bar",
+ },
+ })
+ assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", node.bumpfee, txid, {
+ "estimate_mode": Decimal("3.141592"),
+ })
+ # confTarget and conf_target
+ assert_raises_rpc_error(-8, "confTarget and conf_target options should not both be set", node.bumpfee, txid, {
+ "confTarget": 123,
+ "conf_target": 456,
+ })
+ self.clear_mempool()
+
def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
self.log.info('Test simple bumpfee: {}'.format(mode))
@@ -123,13 +130,19 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
if mode == "fee_rate":
+ bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": NORMAL})
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
else:
+ bumped_psbt = rbf_node.psbtbumpfee(rbfid)
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] > -rbftx["fee"]
assert_equal(bumped_tx["origfee"], -rbftx["fee"])
assert "psbt" not in bumped_tx
+ assert_equal(bumped_psbt["errors"], [])
+ assert bumped_psbt["fee"] > -rbftx["fee"]
+ assert_equal(bumped_psbt["origfee"], -rbftx["fee"])
+ assert "psbt" in bumped_psbt
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
self.sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
@@ -142,6 +155,7 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
+ self.clear_mempool()
def test_feerate_args(self, rbf_node, peer_node, dest_address):
@@ -161,6 +175,7 @@ def test_feerate_args(self, rbf_node, peer_node, dest_address):
assert_raises_rpc_error(-3, "Amount out of range", rbf_node.bumpfee, rbfid, {"fee_rate": -1})
assert_raises_rpc_error(-4, "is too high (cannot be higher than", rbf_node.bumpfee, rbfid, {"fee_rate": TOO_HIGH})
+ self.clear_mempool()
def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
@@ -192,12 +207,14 @@ def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
+ self.clear_mempool()
def test_nonrbf_bumpfee_fails(self, peer_node, dest_address):
self.log.info('Test that we cannot replace a non RBF transaction')
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
+ self.clear_mempool()
def test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address):
@@ -205,20 +222,22 @@ def test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address):
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
- utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
+ fee = Decimal("0.001")
+ utxos = [node.listunspent(query_options={'minimumAmount': fee})[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
- output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
+ output_val = sum(utxo["amount"] for utxo in utxos) - fee
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
+ self.clear_mempool()
def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address):
@@ -229,6 +248,7 @@ def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_ad
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
+ self.clear_mempool()
def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
@@ -270,6 +290,7 @@ def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
rbf_node.generatetoaddress(1, rbf_node.getnewaddress())
assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
+ self.clear_mempool()
def test_dust_to_fee(self, rbf_node, dest_address):
@@ -292,6 +313,7 @@ def test_dust_to_fee(self, rbf_node, dest_address):
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
assert_equal(full_bumped_tx["vout"][0]['value'], Decimal("0.00050000"))
+ self.clear_mempool()
def test_settxfee(self, rbf_node, dest_address):
@@ -314,6 +336,8 @@ def test_settxfee(self, rbf_node, dest_address):
assert_raises_rpc_error(-8, "txfee cannot be more than wallet max tx fee", rbf_node.settxfee, Decimal('0.00003'))
self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
+ self.connect_nodes(1, 0)
+ self.clear_mempool()
def test_maxtxfee_fails(self, rbf_node, dest_address):
@@ -327,6 +351,8 @@ def test_maxtxfee_fails(self, rbf_node, dest_address):
assert_raises_rpc_error(-4, "Unable to create transaction. Fee exceeds maximum configured by -maxtxfee", rbf_node.bumpfee, rbfid)
self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
+ self.connect_nodes(1, 0)
+ self.clear_mempool()
def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
@@ -391,7 +417,7 @@ def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1)
# Bump fee, obnoxiously high to add additional watchonly input
- bumped_psbt = watcher.bumpfee(original_txid, {"fee_rate": HIGH})
+ bumped_psbt = watcher.psbtbumpfee(original_txid, {"fee_rate": HIGH})
assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1)
assert "txid" not in bumped_psbt
assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"])
@@ -409,6 +435,7 @@ def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
rbf_node.unloadwallet("watcher")
rbf_node.unloadwallet("signer")
+ self.clear_mempool()
def test_rebumping(self, rbf_node, dest_address):
@@ -417,6 +444,7 @@ def test_rebumping(self, rbf_node, dest_address):
bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"fee_rate": NORMAL})
rbf_node.bumpfee(bumped["txid"], {"fee_rate": NORMAL})
+ self.clear_mempool()
def test_rebumping_not_replaceable(self, rbf_node, dest_address):
@@ -425,6 +453,7 @@ def test_rebumping_not_replaceable(self, rbf_node, dest_address):
bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"fee_rate": NORMAL})
+ self.clear_mempool()
def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
@@ -464,6 +493,7 @@ def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
+ self.clear_mempool()
def test_bumpfee_metadata(self, rbf_node, dest_address):
@@ -475,6 +505,7 @@ def test_bumpfee_metadata(self, rbf_node, dest_address):
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
+ self.clear_mempool()
def test_locked_wallet_fails(self, rbf_node, dest_address):
@@ -484,6 +515,7 @@ def test_locked_wallet_fails(self, rbf_node, dest_address):
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
+ self.clear_mempool()
def test_change_script_match(self, rbf_node, dest_address):
@@ -504,6 +536,7 @@ def test_change_script_match(self, rbf_node, dest_address):
assert_equal(change_addresses, get_change_address(bumped_total_tx['txid']))
bumped_rate_tx = rbf_node.bumpfee(bumped_total_tx["txid"])
assert_equal(change_addresses, get_change_address(bumped_rate_tx['txid']))
+ self.clear_mempool()
def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
@@ -542,6 +575,7 @@ def test_no_more_inputs_fails(self, rbf_node, dest_address):
# spend all funds, no change output
rbfid = rbf_node.sendtoaddress(rbf_node.getnewaddress(), rbf_node.getbalance(), "", "", True)
assert_raises_rpc_error(-4, "Unable to create transaction. Insufficient funds", rbf_node.bumpfee, rbfid)
+ self.clear_mempool()
if __name__ == "__main__":
diff --git a/test/functional/wallet_create_tx.py b/test/functional/wallet_create_tx.py
index 330de8b0fc..ed9159726a 100755
--- a/test/functional/wallet_create_tx.py
+++ b/test/functional/wallet_create_tx.py
@@ -45,7 +45,7 @@ class CreateTxWalletTest(BitcoinTestFramework):
def test_tx_size_too_large(self):
# More than 10kB of outputs, so that we hit -maxtxfee with a high feerate
- outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for i in range(400)}
+ outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for _ in range(400)}
raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs)
for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']:
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index 289ccf43ec..9c63e8f7d3 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -107,7 +107,7 @@ class WalletDescriptorTest(BitcoinTestFramework):
assert_equal(info2['desc'], info3['desc'])
self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet")
- for i in range(0, 500):
+ for _ in range(500):
send_wrpc.getnewaddress()
self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet")
@@ -120,7 +120,7 @@ class WalletDescriptorTest(BitcoinTestFramework):
}])
send_wrpc.walletlock()
# Exhaust keypool of 100
- for i in range(0, 100):
+ for _ in range(100):
send_wrpc.getnewaddress(address_type='bech32')
# This should now error
assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32')
diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py
index 6bfb468823..06f01ef191 100755
--- a/test/functional/wallet_dump.py
+++ b/test/functional/wallet_dump.py
@@ -116,7 +116,7 @@ class WalletDumpTest(BitcoinTestFramework):
test_addr_count = 10
addrs = []
for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
- for i in range(0, test_addr_count):
+ for _ in range(test_addr_count):
addr = self.nodes[0].getnewaddress(address_type=address_type)
vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath
addrs.append(vaddr)
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index 9dd55b4ab1..e5c4f12f20 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -15,8 +15,14 @@ from test_framework.util import (
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 3
- self.extra_args = [[], [], ['-avoidpartialspends']]
+ self.num_nodes = 5
+ self.extra_args = [
+ [],
+ [],
+ ["-avoidpartialspends"],
+ ["-maxapsfee=0.00002719"],
+ ["-maxapsfee=0.00002720"],
+ ]
self.rpc_timeout = 480
def skip_test_if_missing_module(self):
@@ -27,8 +33,8 @@ class WalletGroupTest(BitcoinTestFramework):
self.nodes[0].generate(110)
# Get some addresses from the two nodes
- addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
- addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
+ addr1 = [self.nodes[1].getnewaddress() for _ in range(3)]
+ addr2 = [self.nodes[2].getnewaddress() for _ in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
@@ -50,8 +56,8 @@ class WalletGroupTest(BitcoinTestFramework):
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
- assert_approx(v[0], 0.2)
- assert_approx(v[1], 0.3, 0.0001)
+ assert_approx(v[0], vexp=0.2, vspan=0.0001)
+ assert_approx(v[1], vexp=0.3, vspan=0.0001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
@@ -61,8 +67,80 @@ class WalletGroupTest(BitcoinTestFramework):
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
- assert_approx(v[0], 0.2)
- assert_approx(v[1], 1.3, 0.0001)
+ assert_approx(v[0], vexp=0.2, vspan=0.0001)
+ assert_approx(v[1], vexp=1.3, vspan=0.0001)
+
+ # Test 'avoid partial if warranted, even if disabled'
+ self.sync_all()
+ self.nodes[0].generate(1)
+ # Nodes 1-2 now have confirmed UTXOs (letters denote destinations):
+ # Node #1: Node #2:
+ # - A 1.0 - D0 1.0
+ # - B0 1.0 - D1 0.5
+ # - B1 0.5 - E0 1.0
+ # - C0 1.0 - E1 0.5
+ # - C1 0.5 - F ~1.3
+ # - D ~0.3
+ assert_approx(self.nodes[1].getbalance(), vexp=4.3, vspan=0.0001)
+ assert_approx(self.nodes[2].getbalance(), vexp=4.3, vspan=0.0001)
+ # Sending 1.4 btc should pick one 1.0 + one more. For node #1,
+ # this could be (A / B0 / C0) + (B1 / C1 / D). We ensure that it is
+ # B0 + B1 or C0 + C1, because this avoids partial spends while not being
+ # detrimental to transaction cost
+ txid3 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.4)
+ tx3 = self.nodes[1].getrawtransaction(txid3, True)
+ # tx3 should have 2 inputs and 2 outputs
+ assert_equal(2, len(tx3["vin"]))
+ assert_equal(2, len(tx3["vout"]))
+ # the accumulated value should be 1.5, so the outputs should be
+ # ~0.1 and 1.4 and should come from the same destination
+ values = [vout["value"] for vout in tx3["vout"]]
+ values.sort()
+ assert_approx(values[0], vexp=0.1, vspan=0.0001)
+ assert_approx(values[1], vexp=1.4, vspan=0.0001)
+
+ input_txids = [vin["txid"] for vin in tx3["vin"]]
+ input_addrs = [self.nodes[1].gettransaction(txid)['details'][0]['address'] for txid in input_txids]
+ assert_equal(input_addrs[0], input_addrs[1])
+ # Node 2 enforces avoidpartialspends so needs no checking here
+
+ # Test wallet option maxapsfee with Node 3
+ addr_aps = self.nodes[3].getnewaddress()
+ self.nodes[0].sendtoaddress(addr_aps, 1.0)
+ self.nodes[0].sendtoaddress(addr_aps, 1.0)
+ self.nodes[0].generate(1)
+ self.sync_all()
+ with self.nodes[3].assert_debug_log(['Fee non-grouped = 2820, grouped = 4160, using grouped']):
+ txid4 = self.nodes[3].sendtoaddress(self.nodes[0].getnewaddress(), 0.1)
+ tx4 = self.nodes[3].getrawtransaction(txid4, True)
+ # tx4 should have 2 inputs and 2 outputs although one input would
+ # have been enough, and the transaction therefore pays a higher fee
+ assert_equal(2, len(tx4["vin"]))
+ assert_equal(2, len(tx4["vout"]))
+
+ addr_aps2 = self.nodes[3].getnewaddress()
+ [self.nodes[0].sendtoaddress(addr_aps2, 1.0) for _ in range(5)]
+ self.nodes[0].generate(1)
+ self.sync_all()
+ with self.nodes[3].assert_debug_log(['Fee non-grouped = 5520, grouped = 8240, using non-grouped']):
+ txid5 = self.nodes[3].sendtoaddress(self.nodes[0].getnewaddress(), 2.95)
+ tx5 = self.nodes[3].getrawtransaction(txid5, True)
+ # tx5 should have 3 inputs (1.0, 1.0, 1.0) and 2 outputs
+ assert_equal(3, len(tx5["vin"]))
+ assert_equal(2, len(tx5["vout"]))
+
+ # Test wallet option maxapsfee with node 4, which sets maxapsfee
+ # 1 sat higher, crossing the threshold from non-grouped to grouped.
+ addr_aps3 = self.nodes[4].getnewaddress()
+ [self.nodes[0].sendtoaddress(addr_aps3, 1.0) for _ in range(5)]
+ self.nodes[0].generate(1)
+ self.sync_all()
+ with self.nodes[4].assert_debug_log(['Fee non-grouped = 5520, grouped = 8240, using grouped']):
+ txid6 = self.nodes[4].sendtoaddress(self.nodes[0].getnewaddress(), 2.95)
+ tx6 = self.nodes[4].getrawtransaction(txid6, True)
+ # tx6 should have 5 inputs and 2 outputs
+ assert_equal(5, len(tx6["vin"]))
+ assert_equal(2, len(tx6["vout"]))
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
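
The debug-log assertions above encode the maxapsfee threshold arithmetic. A small sketch spells it out; the fee values are taken directly from the asserted log lines, and only the BTC-to-satoshi conversion is added.

```python
# Fee values come from the asserted debug-log lines above; only the
# BTC-to-satoshi conversion is added here.
COIN = 100_000_000

fee_non_grouped = 5520                       # "Fee non-grouped = 5520"
fee_grouped = 8240                           # "grouped = 8240"
extra_cost = fee_grouped - fee_non_grouped   # 2720 sat extra for grouping

# Node 3 (-maxapsfee=0.00002719) allows only 2719 sat extra -> non-grouped.
assert round(0.00002719 * COIN) < extra_cost
# Node 4 (-maxapsfee=0.00002720) allows 2720 sat extra -> grouped.
assert round(0.00002720 * COIN) >= extra_cost
```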
@@ -71,7 +149,7 @@ class WalletGroupTest(BitcoinTestFramework):
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
- for i in range(5):
+ for _ in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
index fb4a1f9792..cff59bd1c1 100755
--- a/test/functional/wallet_labels.py
+++ b/test/functional/wallet_labels.py
@@ -118,7 +118,7 @@ class WalletLabelsTest(BitcoinTestFramework):
if not self.options.descriptors:
for label in labels:
addresses = []
- for x in range(10):
+ for _ in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
label.add_address(multisig_address)
diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py
index 6d51ca6c93..d4131deabf 100755
--- a/test/functional/wallet_listsinceblock.py
+++ b/test/functional/wallet_listsinceblock.py
@@ -36,6 +36,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
self.test_double_spend()
self.test_double_send()
self.double_spends_filtered()
+ self.test_targetconfirmations()
def test_no_blockhash(self):
self.log.info("Test no blockhash")
@@ -74,6 +75,27 @@ class ListSinceBlockTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock,
"Z000000000000000000000000000000000000000000000000000000000000000")
+ def test_targetconfirmations(self):
+ '''
+ This tests when the value of target_confirmations exceeds the number of
+ blocks in the main chain. In this case, the genesis block hash should be
+ given for the `lastblock` property. If target_confirmations is < 1, then
+ a -8 invalid parameter error is thrown.
+ '''
+ self.log.info("Test target_confirmations")
+ blockhash, = self.nodes[2].generate(1)
+ blockheight = self.nodes[2].getblockheader(blockhash)['height']
+ self.sync_all()
+
+ assert_equal(
+ self.nodes[0].getblockhash(0),
+ self.nodes[0].listsinceblock(blockhash, blockheight + 1)['lastblock'])
+ assert_equal(
+ self.nodes[0].getblockhash(0),
+ self.nodes[0].listsinceblock(blockhash, blockheight + 1000)['lastblock'])
+ assert_raises_rpc_error(-8, "Invalid parameter",
+ self.nodes[0].listsinceblock, blockhash, 0)
+
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index 88beef1034..1872545cdb 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -26,7 +26,7 @@ FEATURE_LATEST = 169900
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
- for i in range(10):
+ for _ in range(10):
if got_loading_error:
return
try:
@@ -120,7 +120,7 @@ class MultiWalletTest(BitcoinTestFramework):
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
- exp_stderr = r"BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
+ exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
@@ -258,10 +258,10 @@ class MultiWalletTest(BitcoinTestFramework):
assert_raises_rpc_error(-4, "Wallet file verification failed. Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')
# Fail to load if one wallet is a copy of another
- assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
+ assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
- assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
+ assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
diff --git a/test/functional/wallet_resendwallettransactions.py b/test/functional/wallet_resendwallettransactions.py
index 3417616d77..0327c9e070 100755
--- a/test/functional/wallet_resendwallettransactions.py
+++ b/test/functional/wallet_resendwallettransactions.py
@@ -7,9 +7,9 @@ import time
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import ToHex
-from test_framework.mininode import P2PTxInvStore, mininode_lock
+from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until
+from test_framework.util import assert_equal
class ResendWalletTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
@@ -24,7 +24,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
node.add_p2p_connection(P2PTxInvStore())
self.log.info("Create a new transaction and wait until it's broadcast")
- txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)
+ txid = node.sendtoaddress(node.getnewaddress(), 1)
# Wallet rebroadcast is first scheduled 1 sec after startup (see
# nNextResend in ResendWalletTransactions()). Sleep for just over a
@@ -33,7 +33,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
time.sleep(1.1)
# Can take a few seconds due to transaction trickling
- wait_until(lambda: node.p2p.tx_invs_received[txid] >= 1, lock=mininode_lock)
+ node.p2p.wait_for_broadcast([txid])
# Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
node.add_p2p_connection(P2PTxInvStore())
@@ -58,13 +58,13 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
two_min = 2 * 60
node.setmocktime(now + twelve_hrs - two_min)
time.sleep(2) # ensure enough time has passed for rebroadcast attempt to occur
- assert_equal(txid in node.p2ps[1].get_invs(), False)
+ assert_equal(int(txid, 16) in node.p2ps[1].get_invs(), False)
self.log.info("Bump time & check that transaction is rebroadcast")
# Transaction should be rebroadcast approximately 24 hours in the future,
# but can range from 12-36. So bump 36 hours to be sure.
node.setmocktime(now + 36 * 60 * 60)
- wait_until(lambda: node.p2ps[1].tx_invs_received[txid] >= 1, lock=mininode_lock)
+ node.p2p.wait_for_broadcast([txid])
if __name__ == '__main__':
diff --git a/test/functional/wallet_startup.py b/test/functional/wallet_startup.py
new file mode 100755
index 0000000000..cfc4edb8ee
--- /dev/null
+++ b/test/functional/wallet_startup.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test wallet load on startup.
+
+Verify that a bitcoind node maintains the list of wallets to load on startup.
+"""
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+
+
+class WalletStartupTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.supports_cli = True
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def setup_nodes(self):
+ self.add_nodes(self.num_nodes)
+ self.start_nodes()
+
+ def run_test(self):
+ self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
+ self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
+ self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
+ self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
+ self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
+ self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
+ self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
+ self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
+ assert_equal(set(self.nodes[0].listwallets()), set(('', 'w1', 'w2', 'w3', 'w4')))
+ self.restart_node(0)
+ assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
+ self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
+ self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
+ self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
+ self.nodes[0].loadwallet(filename='')
+ self.restart_node(0)
+ assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
+
+if __name__ == '__main__':
+ WalletStartupTest().main()
diff --git a/test/functional/wallet_zapwallettxes.py b/test/functional/wallet_zapwallettxes.py
index 7f1cdbd20b..1287092cac 100755
--- a/test/functional/wallet_zapwallettxes.py
+++ b/test/functional/wallet_zapwallettxes.py
@@ -18,9 +18,9 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
- wait_until,
)
+
class ZapWalletTXesTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
@@ -59,7 +59,7 @@ class ZapWalletTXesTest (BitcoinTestFramework):
# transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
self.restart_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
- wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
+ self.wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
self.nodes[0].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py
index 56b18752ec..c7895edbcc 100755
--- a/test/fuzz/test_runner.py
+++ b/test/fuzz/test_runner.py
@@ -56,6 +56,14 @@ def main():
'--m_dir',
help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
)
+ parser.add_argument(
+ '-g',
+ '--generate',
+ action='store_true',
+ help='Create new corpus seeds (or extend the existing ones) by running'
+ ' the given targets a finite number of times. The results are written'
+ ' to the passed seed_dir.'
+ )
args = parser.parse_args()
@@ -100,19 +108,20 @@ def main():
logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
- test_list_seedless = []
- for t in test_list_selection:
- corpus_path = os.path.join(args.seed_dir, t)
- if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
- test_list_seedless.append(t)
- test_list_seedless.sort()
- if test_list_seedless:
- logging.info(
- "Fuzzing harnesses lacking a seed corpus: {}".format(
- " ".join(test_list_seedless)
+ if not args.generate:
+ test_list_seedless = []
+ for t in test_list_selection:
+ corpus_path = os.path.join(args.seed_dir, t)
+ if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
+ test_list_seedless.append(t)
+ test_list_seedless.sort()
+ if test_list_seedless:
+ logging.info(
+ "Fuzzing harnesses lacking a seed corpus: {}".format(
+ " ".join(test_list_seedless)
+ )
)
- )
- logging.info("Please consider adding a fuzz seed corpus at https://github.com/bitcoin-core/qa-assets")
+ logging.info("Please consider adding a fuzz seed corpus at https://github.com/bitcoin-core/qa-assets")
try:
help_output = subprocess.run(
@@ -133,6 +142,14 @@ def main():
sys.exit(1)
with ThreadPoolExecutor(max_workers=args.par) as fuzz_pool:
+ if args.generate:
+ return generate_corpus_seeds(
+ fuzz_pool=fuzz_pool,
+ build_dir=config["environment"]["BUILDDIR"],
+ seed_dir=args.seed_dir,
+ targets=test_list_selection,
+ )
+
if args.m_dir:
merge_inputs(
fuzz_pool=fuzz_pool,
@@ -152,6 +169,37 @@ def main():
)
+def generate_corpus_seeds(*, fuzz_pool, build_dir, seed_dir, targets):
+ """Generates new corpus seeds.
+
+ Runs {targets} without input and outputs the generated corpus seeds to
+ {seed_dir}.
+ """
+ logging.info("Generating corpus seeds to {}".format(seed_dir))
+
+ def job(command):
+ logging.debug("Running '{}'\n".format(" ".join(command)))
+ logging.debug("Command '{}' output:\n'{}'\n".format(
+ ' '.join(command),
+ subprocess.run(command, check=True, stderr=subprocess.PIPE,
+ universal_newlines=True).stderr
+ ))
+
+ futures = []
+ for target in targets:
+ target_seed_dir = os.path.join(seed_dir, target)
+ os.makedirs(target_seed_dir, exist_ok=True)
+ command = [
+ os.path.join(build_dir, "src", "test", "fuzz", target),
+ "-runs=100000",
+ target_seed_dir,
+ ]
+ futures.append(fuzz_pool.submit(job, command))
+
+ for future in as_completed(futures):
+ future.result()
+
+
def merge_inputs(*, fuzz_pool, corpus, test_list, build_dir, merge_dir):
logging.info("Merge the inputs in the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
jobs = []
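
For completeness, a hedged sketch of how the new --generate mode might be invoked. The seed_dir path and the 'bech32' target name are placeholders, and the positional-argument layout is an assumption about the runner's CLI rather than something shown in this hunk.

```python
# Hedged sketch (not part of this diff): driving the new --generate mode from
# the repository root; paths and target name are placeholders.
import subprocess

subprocess.run([
    "test/fuzz/test_runner.py",
    "--generate",
    "/path/to/qa-assets/fuzz_seed_corpus",  # placeholder seed_dir
    "bech32",                               # placeholder fuzz target
], check=True)
```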
diff --git a/test/lint/lint-git-commit-check.sh b/test/lint/lint-git-commit-check.sh
new file mode 100755
index 0000000000..8947f67bf6
--- /dev/null
+++ b/test/lint/lint-git-commit-check.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Linter to check that commit messages have a blank line between the subject
+# and the body, or no body at all
+
+export LC_ALL=C
+
+EXIT_CODE=0
+
+while getopts "?" opt; do
+ case $opt in
+ ?)
+ echo "Usage: $0 [N]"
+ echo " COMMIT_RANGE='<commit range>' $0"
+ echo " $0 -?"
+ echo "Checks unmerged commits, the previous N commits, or a commit range."
+ echo "COMMIT_RANGE='47ba2c3...ee50c9e' $0"
+ exit ${EXIT_CODE}
+ ;;
+ esac
+done
+
+if [ -z "${COMMIT_RANGE}" ]; then
+ if [ -n "$1" ]; then
+ COMMIT_RANGE="HEAD~$1...HEAD"
+ else
+ MERGE_BASE=$(git merge-base HEAD master)
+ COMMIT_RANGE="$MERGE_BASE..HEAD"
+ fi
+fi
+
+while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do
+ n_line=0
+ while IFS= read -r line || [[ -n "$line" ]]; do
+ n_line=$((n_line+1))
+ length=${#line}
+ if [ $n_line -eq 2 ] && [ $length -ne 0 ]; then
+ echo "The subject line of commit hash ${commit_hash} is followed by a non-empty line. Subject lines should always be followed by a blank line."
+ EXIT_CODE=1
+ fi
+ done < <(git log --format=%B -n 1 "$commit_hash")
+done < <(git log "${COMMIT_RANGE}" --format=%H)
+
+exit ${EXIT_CODE}
diff --git a/test/lint/lint-include-guards.sh b/test/lint/lint-include-guards.sh
index 3a0494c190..5d5a150db8 100755
--- a/test/lint/lint-include-guards.sh
+++ b/test/lint/lint-include-guards.sh
@@ -10,7 +10,7 @@ export LC_ALL=C
HEADER_ID_PREFIX="BITCOIN_"
HEADER_ID_SUFFIX="_H"
-REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|crc32c/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|univalue/)"
+REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|crc32c/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|bench/nanobench.h|univalue/)"
EXIT_CODE=0
for HEADER_FILE in $(git ls-files -- "*.h" | grep -vE "^${REGEXP_EXCLUDE_FILES_WITH_PREFIX}")
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index 611bd4a8c4..fde77aea2d 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -63,6 +63,7 @@ EXPECTED_BOOST_INCLUDES=(
boost/optional.hpp
boost/preprocessor/cat.hpp
boost/preprocessor/stringize.hpp
+ boost/process.hpp
boost/signals2/connection.hpp
boost/signals2/optional_last_value.hpp
boost/signals2/signal.hpp