-rw-r--r--  .gitignore | 2
-rw-r--r--  .travis.yml | 6
-rw-r--r--  Makefile.am | 2
-rwxr-xr-x  ci/test/00_setup_env.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_multiprocess.sh | 14
-rw-r--r--  ci/test/00_setup_env_native_qt5.sh | 3
-rwxr-xr-x  ci/test/04_install.sh | 2
-rwxr-xr-x  ci/test/06_script_b.sh | 2
-rw-r--r--  configure.ac | 63
-rw-r--r--  contrib/guix/README.md | 5
-rw-r--r--  contrib/guix/libexec/build.sh | 8
-rw-r--r--  depends/Makefile | 8
-rw-r--r--  depends/README.md | 1
-rw-r--r--  depends/config.site.in | 7
-rw-r--r--  depends/funcs.mk | 11
-rw-r--r--  depends/hosts/android.mk | 1
-rw-r--r--  depends/hosts/darwin.mk | 1
-rw-r--r--  depends/hosts/linux.mk | 1
-rw-r--r--  depends/hosts/mingw32.mk | 2
-rw-r--r--  depends/packages/boost.mk | 2
-rw-r--r--  depends/packages/capnp.mk | 18
-rw-r--r--  depends/packages/libmultiprocess.mk | 18
-rw-r--r--  depends/packages/native_capnp.mk | 18
-rw-r--r--  depends/packages/native_libmultiprocess.mk | 18
-rw-r--r--  depends/packages/packages.mk | 3
-rw-r--r--  depends/packages/qt.mk | 1
-rw-r--r--  doc/multiprocess.md | 35
-rw-r--r--  doc/productivity.md | 6
-rw-r--r--  doc/release-notes-16528.md | 120
-rw-r--r--  src/Makefile.am | 59
-rw-r--r--  src/Makefile.qt.include | 43
-rw-r--r--  src/Makefile.test.include | 8
-rw-r--r--  src/bitcoin-cli.cpp | 163
-rw-r--r--  src/bitcoind.cpp | 4
-rw-r--r--  src/compat/assumptions.h | 1
-rw-r--r--  src/httprpc.cpp | 11
-rw-r--r--  src/httprpc.h | 7
-rw-r--r--  src/index/blockfilterindex.cpp | 27
-rw-r--r--  src/index/blockfilterindex.h | 14
-rw-r--r--  src/init.cpp | 47
-rw-r--r--  src/init.h | 5
-rw-r--r--  src/interfaces/node.cpp | 6
-rw-r--r--  src/logging.cpp | 4
-rw-r--r--  src/net_processing.cpp | 128
-rw-r--r--  src/net_processing.h | 4
-rw-r--r--  src/node/coinstats.cpp | 3
-rw-r--r--  src/node/coinstats.h | 3
-rw-r--r--  src/node/context.h | 9
-rw-r--r--  src/protocol.cpp | 22
-rw-r--r--  src/protocol.h | 30
-rw-r--r--  src/qt/bitcoingui.cpp | 4
-rw-r--r--  src/qt/recentrequeststablemodel.h | 18
-rw-r--r--  src/qt/rpcconsole.cpp | 5
-rw-r--r--  src/qt/sendcoinsrecipient.h | 33
-rw-r--r--  src/qt/walletmodel.cpp | 10
-rw-r--r--  src/rest.cpp | 45
-rw-r--r--  src/rpc/blockchain.cpp | 53
-rw-r--r--  src/rpc/blockchain.h | 13
-rw-r--r--  src/rpc/mining.cpp | 38
-rw-r--r--  src/rpc/misc.cpp | 12
-rw-r--r--  src/rpc/net.cpp | 107
-rw-r--r--  src/rpc/rawtransaction.cpp | 12
-rw-r--r--  src/rpc/request.cpp | 10
-rw-r--r--  src/rpc/request.h | 9
-rw-r--r--  src/rpc/server.cpp | 7
-rw-r--r--  src/rpc/server.h | 5
-rw-r--r--  src/serialize.h | 60
-rw-r--r--  src/sync.cpp | 121
-rw-r--r--  src/test/blockfilter_index_tests.cpp | 8
-rw-r--r--  src/test/denialofservice_tests.cpp | 10
-rw-r--r--  src/test/fuzz/coins_view.cpp | 294
-rw-r--r--  src/test/fuzz/fuzz.cpp | 2
-rw-r--r--  src/test/fuzz/process_message.cpp | 4
-rw-r--r--  src/test/fuzz/string.cpp | 4
-rw-r--r--  src/test/fuzz/util.h | 12
-rw-r--r--  src/test/miner_tests.cpp | 2
-rw-r--r--  src/test/ref_tests.cpp | 33
-rw-r--r--  src/test/rpc_tests.cpp | 14
-rw-r--r--  src/test/util/mining.cpp | 2
-rw-r--r--  src/test/util/setup_common.cpp | 14
-rw-r--r--  src/test/validation_block_tests.cpp | 14
-rw-r--r--  src/txmempool.h | 11
-rw-r--r--  src/util/check.h | 4
-rw-r--r--  src/util/ref.h | 38
-rw-r--r--  src/validation.cpp | 47
-rw-r--r--  src/validation.h | 89
-rw-r--r--  src/wallet/crypter.h | 12
-rw-r--r--  src/wallet/rpcwallet.cpp | 5
-rw-r--r--  src/wallet/scriptpubkeyman.cpp | 143
-rw-r--r--  src/wallet/scriptpubkeyman.h | 109
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 16
-rw-r--r--  src/wallet/wallet.cpp | 10
-rw-r--r--  src/wallet/wallet.h | 1
-rw-r--r--  src/wallet/walletdb.cpp | 104
-rw-r--r--  src/wallet/walletdb.h | 40
-rw-r--r--  src/wallet/walletutil.h | 34
-rwxr-xr-x  test/functional/example_test.py | 4
-rwxr-xr-x  test/functional/feature_maxuploadtarget.py | 12
-rwxr-xr-x  test/functional/interface_bitcoin_cli.py | 51
-rwxr-xr-x  test/functional/mempool_packages.py | 10
-rwxr-xr-x  test/functional/mempool_unbroadcast.py | 12
-rwxr-xr-x  test/functional/p2p_blockfilters.py | 52
-rwxr-xr-x  test/functional/p2p_blocksonly.py | 23
-rwxr-xr-x  test/functional/p2p_compactblocks.py | 15
-rwxr-xr-x  test/functional/p2p_feefilter.py | 4
-rwxr-xr-x  test/functional/p2p_fingerprint.py | 4
-rwxr-xr-x  test/functional/p2p_getdata.py | 18
-rwxr-xr-x  test/functional/p2p_invalid_messages.py | 23
-rwxr-xr-x  test/functional/p2p_leak_tx.py | 4
-rwxr-xr-x  test/functional/p2p_node_network_limited.py | 4
-rwxr-xr-x  test/functional/p2p_segwit.py | 12
-rwxr-xr-x  test/functional/p2p_sendheaders.py | 5
-rwxr-xr-x  test/functional/p2p_tx_download.py | 10
-rwxr-xr-x  test/functional/p2p_unrequested_blocks.py | 4
-rwxr-xr-x  test/functional/test_framework/messages.py | 54
-rwxr-xr-x  test/functional/test_framework/mininode.py | 15
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 23
-rwxr-xr-x  test/functional/test_framework/test_node.py | 13
-rwxr-xr-x  test/functional/wallet_hd.py | 96
119 files changed, 2271 insertions(+), 813 deletions(-)
diff --git a/.gitignore b/.gitignore
index 1c487f43a7..5b9ec1f355 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,8 @@
src/bitcoin
src/bitcoind
src/bitcoin-cli
+src/bitcoin-gui
+src/bitcoin-node
src/bitcoin-tx
src/bitcoin-wallet
src/test/fuzz
diff --git a/.travis.yml b/.travis.yml
index 4acfe4db76..1598813ed2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -129,6 +129,12 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_native_fuzz.sh"
- stage: test
+ name: 'x86_64 Linux [GOAL: install] [bionic] [multiprocess]'
+ if: type != pull_request OR commit_message =~ /depends:|multiprocess:/ # Skip on non-depends, non-multiprocess PRs
+ env: >-
+ FILE_ENV="./ci/test/00_setup_env_native_multiprocess.sh"
+
+ - stage: test
name: 'x86_64 Linux [GOAL: install] [bionic] [no wallet]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh"
diff --git a/Makefile.am b/Makefile.am
index 43790f1c23..45dab3930d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -41,7 +41,7 @@ OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus
OSX_FANCY_PLIST=$(top_srcdir)/contrib/macdeploy/fancy.plist
OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/bitcoin.icns
OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed
-OSX_QT_TRANSLATIONS = da,de,es,hu,ru,uk,zh_CN,zh_TW
+OSX_QT_TRANSLATIONS = ar,bg,ca,cs,da,de,es,fa,fi,fr,gd,gl,he,hu,it,ja,ko,lt,lv,pl,pt,ru,sk,sl,sv,uk,zh_CN,zh_TW
DIST_CONTRIB = \
$(top_srcdir)/contrib/linearize/linearize-data.py \
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 2c08fdbcd8..56b8fc976e 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -33,7 +33,7 @@ export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")}
export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true}
export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true}
-export TEST_PREVIOUS_RELEASES=${TEST_PREVIOUS_RELEASES:-false}
+export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-}
export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed}
export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:18.04}
diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh
new file mode 100644
index 0000000000..0fc989085c
--- /dev/null
+++ b/ci/test/00_setup_env_native_multiprocess.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+export CONTAINER_NAME=ci_native_multiprocess
+export PACKAGES="cmake python3"
+export DEP_OPTS="MULTIPROCESS=1"
+export GOAL="install"
+export BITCOIN_CONFIG=""
+export TEST_RUNNER_ENV="BITCOIND=bitcoin-node"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 906687175d..fa07990756 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -9,10 +9,9 @@ export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_qt5
export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev"
export DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1"
-export TEST_RUNNER_EXTRA="--coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
+export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
-export TEST_PREVIOUS_RELEASES=true
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.1 v0.18.1 v0.19.1"
export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 8eb49e9ac1..14ab943d68 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -26,7 +26,7 @@ export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=
export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan"
export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan"
export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"
-env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|LC_ALL|BOOST_TEST_RANDOM|DEBIAN_FRONTEND|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|TEST_PREVIOUS_RELEASES|PREVIOUS_RELEASES_DIR)' | tee /tmp/env
+env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|LC_ALL|BOOST_TEST_RANDOM|DEBIAN_FRONTEND|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|PREVIOUS_RELEASES_DIR)' | tee /tmp/env
if [[ $HOST = *-mingw32 ]]; then
DOCKER_ADMIN="--cap-add SYS_ADMIN"
elif [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764)
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index ed720bcd00..51d84ee39d 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -37,7 +37,7 @@ fi
if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
BEGIN_FOLD functional-tests
- DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib test/functional/test_runner.py --ci $MAKEJOBS --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --quiet --failfast
+ DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib ${TEST_RUNNER_ENV} test/functional/test_runner.py --ci $MAKEJOBS --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --quiet --failfast
END_FOLD
fi
diff --git a/configure.ac b/configure.ac
index 58f464a73a..7f0c5dbd7a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -242,6 +242,24 @@ if test x$enable_bip70 != xno; then
AC_MSG_ERROR([BIP70 is no longer supported!])
fi
+AC_ARG_WITH([libmultiprocess],
+ [AS_HELP_STRING([--with-libmultiprocess=yes|no|auto],
+ [Build with libmultiprocess library. (default: auto, i.e. detect with pkg-config)])],
+ [with_libmultiprocess=$withval],
+ [with_libmultiprocess=auto])
+
+AC_ARG_WITH([mpgen],
+ [AS_HELP_STRING([--with-mpgen=yes|no|auto|PREFIX],
+ [Build with libmultiprocess codegen tool. Useful to specify different libmultiprocess host system library and build system codegen tool prefixes when cross-compiling (default is host system libmultiprocess prefix)])],
+ [with_mpgen=$withval],
+ [with_mpgen=auto])
+
+AC_ARG_ENABLE([multiprocess],
+ [AS_HELP_STRING([--enable-multiprocess],
+ [build multiprocess bitcoin-node, bitcoin-wallet, and bitcoin-gui executables in addition to monolithic bitcoind and bitcoin-qt executables. Requires libmultiprocess library. Experimental (default is no)])],
+ [enable_multiprocess=$enableval],
+ [enable_multiprocess=no])
+
AC_ARG_ENABLE(man,
[AS_HELP_STRING([--disable-man],
[do not install man pages (default is to install)])],,
@@ -1389,6 +1407,50 @@ AM_CONDITIONAL([EMBEDDED_UNIVALUE],[test x$need_bundled_univalue = xyes])
AC_SUBST(UNIVALUE_CFLAGS)
AC_SUBST(UNIVALUE_LIBS)
+dnl libmultiprocess library check
+
+libmultiprocess_found=no
+if test "x$with_libmultiprocess" = xyes || test "x$with_libmultiprocess" = xauto; then
+ if test "x$use_pkgconfig" = xyes; then
+ m4_ifdef([PKG_CHECK_MODULES], [PKG_CHECK_MODULES([LIBMULTIPROCESS], [libmultiprocess], [
+ libmultiprocess_found=yes;
+ libmultiprocess_prefix=`$PKG_CONFIG --variable=prefix libmultiprocess`;
+ ], [true])])
+ fi
+elif test "x$with_libmultiprocess" != xno; then
+ AC_MSG_ERROR([--with-libmultiprocess=$with_libmultiprocess value is not yes, auto, or no])
+fi
+AC_SUBST(LIBMULTIPROCESS_CFLAGS)
+AC_SUBST(LIBMULTIPROCESS_LIBS)
+
+dnl Enable multiprocess check
+
+if test "x$enable_multiprocess" = xyes; then
+ if test "x$libmultiprocess_found" != xyes; then
+ AC_MSG_ERROR([--enable-multiprocess=yes option specified but libmultiprocess library was not found. May need to install libmultiprocess library, or specify install path with PKG_CONFIG_PATH environment variable. Running 'pkg-config --debug libmultiprocess' may be helpful for debugging.])
+ fi
+ build_multiprocess=yes
+elif test "x$enable_multiprocess" = xauto; then
+ build_multiprocess=$libmultiprocess_found
+else
+ build_multiprocess=no
+fi
+
+AM_CONDITIONAL([BUILD_MULTIPROCESS],[test "x$build_multiprocess" = xyes])
+AM_CONDITIONAL([BUILD_BITCOIN_NODE], [test "x$build_multiprocess" = xyes])
+AM_CONDITIONAL([BUILD_BITCOIN_GUI], [test "x$build_multiprocess" = xyes])
+
+dnl codegen tools check
+
+if test x$build_multiprocess != xno; then
+ if test "x$with_mpgen" = xyes || test "x$with_mpgen" = xauto; then
+ MPGEN_PREFIX="$libmultiprocess_prefix"
+ elif test "x$with_mpgen" != xno; then
+ MPGEN_PREFIX="$with_mpgen";
+ fi
+ AC_SUBST(MPGEN_PREFIX)
+fi
+
AC_MSG_CHECKING([whether to build bitcoind])
AM_CONDITIONAL([BUILD_BITCOIND], [test x$build_bitcoind = xyes])
AC_MSG_RESULT($build_bitcoind)
@@ -1667,6 +1729,7 @@ esac
echo
echo "Options used to compile and link:"
+echo " multiprocess = $build_multiprocess"
echo " with wallet = $enable_wallet"
echo " with gui / qt = $bitcoin_enable_qt"
if test x$bitcoin_enable_qt != xno; then
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index 8ce8cb97a0..dffcf99607 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -142,6 +142,11 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
If non-empty, will pass `V=1` to all `make` invocations, making `make` output
verbose.
+ Note that any given value is ignored. The variable is only checked for
+ emptiness. More concretely, this means that `V=` (setting `V` to the empty
+ string) is interpreted the same way as not setting `V` at all, and that `V=0`
+ has the same effect as `V=1`.
+
* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
Additional flags to be passed to `guix environment`. For a fully-bootstrapped
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 01f4518c73..5be3baeefa 100644
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -3,6 +3,14 @@ export LC_ALL=C
set -e -o pipefail
export TZ=UTC
+if [ -n "$V" ]; then
+ # Print both unexpanded (-v) and expanded (-x) forms of commands as they are
+ # read from this file.
+ set -vx
+ # Set VERBOSE for CMake-based builds
+ export VERBOSE="$V"
+fi
+
# Check that environment variables assumed to be set by the environment are set
echo "Building for platform triple ${HOST:?not set} with reference timestamp ${SOURCE_DATE_EPOCH:?not set}..."
echo "At most ${MAX_JOBS:?not set} jobs will run at once..."
diff --git a/depends/Makefile b/depends/Makefile
index 5f5247f881..5ad82bb56a 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -13,6 +13,7 @@ NO_QR ?=
NO_WALLET ?=
NO_ZMQ ?=
NO_UPNP ?=
+MULTIPROCESS ?=
FALLBACK_DOWNLOAD_PATH ?= https://bitcoincore.org/depends-sources
BUILD = $(shell ./config.guess)
@@ -107,6 +108,7 @@ qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch
wallet_packages_$(NO_WALLET) = $(wallet_packages)
upnp_packages_$(NO_UPNP) = $(upnp_packages)
zmq_packages_$(NO_ZMQ) = $(zmq_packages)
+multiprocess_packages_$(MULTIPROCESS) = $(multiprocess_packages)
packages += $($(host_arch)_$(host_os)_packages) $($(host_os)_packages) $(qt_packages_) $(wallet_packages_) $(upnp_packages_)
native_packages += $($(host_arch)_$(host_os)_native_packages) $($(host_os)_native_packages)
@@ -115,6 +117,11 @@ ifneq ($(zmq_packages_),)
packages += $(zmq_packages)
endif
+ifeq ($(multiprocess_packages_),)
+packages += $(multiprocess_packages)
+native_packages += $(multiprocess_native_packages)
+endif
+
all_packages = $(packages) $(native_packages)
meta_depends = Makefile funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk
@@ -155,6 +162,7 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_
-e 's|@no_zmq@|$(NO_ZMQ)|' \
-e 's|@no_wallet@|$(NO_WALLET)|' \
-e 's|@no_upnp@|$(NO_UPNP)|' \
+ -e 's|@multiprocess@|$(MULTIPROCESS)|' \
-e 's|@debug@|$(DEBUG)|' \
$< > $@
$(AT)touch $@
diff --git a/depends/README.md b/depends/README.md
index 79865ff011..c12ea8bcb3 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -91,6 +91,7 @@ The following can be set when running make: make FOO=bar
NO_ZMQ: Don't download/build/cache packages needed for enabling zeromq
NO_WALLET: Don't download/build/cache libs needed to enable the wallet
NO_UPNP: Don't download/build/cache packages needed for enabling upnp
+ MULTIPROCESS: build libmultiprocess (experimental, requires cmake)
DEBUG: disable some optimizations and enable more runtime checking
HOST_ID_SALT: Optional salt to use when generating host package ids
BUILD_ID_SALT: Optional salt to use when generating build package ids
diff --git a/depends/config.site.in b/depends/config.site.in
index fb9bf713cc..103aa0d2ff 100644
--- a/depends/config.site.in
+++ b/depends/config.site.in
@@ -16,6 +16,9 @@ fi
if test -z $with_qt_bindir && test -z "@no_qt@"; then
with_qt_bindir=$depends_prefix/native/bin
fi
+if test -z $with_mpgen && test -n "@multiprocess@"; then
+ with_mpgen=$depends_prefix/native
+fi
if test -z $with_qrencode && test -n "@no_qr@"; then
with_qrencode=no
@@ -25,6 +28,10 @@ if test -z $enable_wallet && test -n "@no_wallet@"; then
enable_wallet=no
fi
+if test -z $enable_multiprocess && test -n "@multiprocess@"; then
+ enable_multiprocess=yes
+fi
+
if test -z $with_miniupnpc && test -n "@no_upnp@"; then
with_miniupnpc=no
fi
diff --git a/depends/funcs.mk b/depends/funcs.mk
index a4434b5167..135ebba9f8 100644
--- a/depends/funcs.mk
+++ b/depends/funcs.mk
@@ -130,11 +130,11 @@ $(1)_config_env+=$($(1)_config_env_$(host_arch)_$(host_os)) $($(1)_config_env_$(
$(1)_config_env+=PKG_CONFIG_LIBDIR=$($($(1)_type)_prefix)/lib/pkgconfig
$(1)_config_env+=PKG_CONFIG_PATH=$($($(1)_type)_prefix)/share/pkgconfig
+$(1)_config_env+=CMAKE_MODULE_PATH=$($($(1)_type)_prefix)/lib/cmake
$(1)_config_env+=PATH=$(build_prefix)/bin:$(PATH)
$(1)_build_env+=PATH=$(build_prefix)/bin:$(PATH)
$(1)_stage_env+=PATH=$(build_prefix)/bin:$(PATH)
$(1)_autoconf=./configure --host=$($($(1)_type)_host) --prefix=$($($(1)_type)_prefix) $$($(1)_config_opts) CC="$$($(1)_cc)" CXX="$$($(1)_cxx)"
-
ifneq ($($(1)_nm),)
$(1)_autoconf += NM="$$($(1)_nm)"
endif
@@ -156,6 +156,15 @@ endif
ifneq ($($(1)_ldflags),)
$(1)_autoconf += LDFLAGS="$$($(1)_ldflags)"
endif
+
+$(1)_cmake=cmake -DCMAKE_INSTALL_PREFIX=$($($(1)_type)_prefix)
+ifneq ($($(1)_type),build)
+ifneq ($(host),$(build))
+$(1)_cmake += -DCMAKE_SYSTEM_NAME=$($(host_os)_cmake_system) -DCMAKE_SYSROOT=$(host_prefix)
+$(1)_cmake += -DCMAKE_C_COMPILER_TARGET=$(host) -DCMAKE_C_COMPILER=$(firstword $($($(1)_type)_CC)) -DCMAKE_C_FLAGS="$(wordlist 2,1000,$($($(1)_type)_CC))"
+$(1)_cmake += -DCMAKE_CXX_COMPILER_TARGET=$(host) -DCMAKE_CXX_COMPILER=$(firstword $($($(1)_type)_CXX)) -DCMAKE_CXX_FLAGS="$(wordlist 2,1000,$($($(1)_type)_CXX))"
+endif
+endif
endef
define int_add_cmds
diff --git a/depends/hosts/android.mk b/depends/hosts/android.mk
index 969ec2a1cb..eabd84bbbe 100644
--- a/depends/hosts/android.mk
+++ b/depends/hosts/android.mk
@@ -9,3 +9,4 @@ android_CXX=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang++
android_CC=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)$(ANDROID_API_LEVEL)-clang
android_RANLIB=$(ANDROID_TOOLCHAIN_BIN)/$(HOST)-ranlib
endif
+android_cmake_system=Android
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index 1bc4fb8189..82e086a326 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -14,3 +14,4 @@ darwin_debug_CFLAGS=-O1
darwin_debug_CXXFLAGS=$(darwin_debug_CFLAGS)
darwin_native_toolchain=native_cctools
+darwin_cmake_system=Darwin
diff --git a/depends/hosts/linux.mk b/depends/hosts/linux.mk
index b13a0f1ad7..8ab448ce5f 100644
--- a/depends/hosts/linux.mk
+++ b/depends/hosts/linux.mk
@@ -29,3 +29,4 @@ i686_linux_CXX=$(default_host_CXX) -m32
x86_64_linux_CC=$(default_host_CC) -m64
x86_64_linux_CXX=$(default_host_CXX) -m64
endif
+linux_cmake_system=Linux
diff --git a/depends/hosts/mingw32.mk b/depends/hosts/mingw32.mk
index dbfb62fdcf..be5fec570c 100644
--- a/depends/hosts/mingw32.mk
+++ b/depends/hosts/mingw32.mk
@@ -8,3 +8,5 @@ mingw32_debug_CFLAGS=-O1
mingw32_debug_CXXFLAGS=$(mingw32_debug_CFLAGS)
mingw32_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC
+
+mingw_cmake_system=Windows
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index cbe4fe4d97..970c81041e 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -1,7 +1,7 @@
package=boost
$(package)_version=1_70_0
$(package)_download_path=https://dl.bintray.com/boostorg/release/1.70.0/source/
-$(package)_file_name=$(package)_$($(package)_version).tar.bz2
+$(package)_file_name=boost_$($(package)_version).tar.bz2
$(package)_sha256_hash=430ae8354789de4fd19ee52f3b1f739e1fba576f0aded0897c3c2bc00fb38778
define $(package)_set_vars
diff --git a/depends/packages/capnp.mk b/depends/packages/capnp.mk
new file mode 100644
index 0000000000..abeb26545f
--- /dev/null
+++ b/depends/packages/capnp.mk
@@ -0,0 +1,18 @@
+package=capnp
+$(package)_version=$(native_$(package)_version)
+$(package)_download_path=$(native_$(package)_download_path)
+$(package)_file_name=$(native_$(package)_file_name)
+$(package)_sha256_hash=$(native_$(package)_sha256_hash)
+$(package)_dependencies=native_$(package)
+
+define $(package)_config_cmds
+ $($(package)_autoconf) --with-external-capnp
+endef
+
+define $(package)_build_cmds
+ $(MAKE)
+endef
+
+define $(package)_stage_cmds
+ $(MAKE) DESTDIR=$($(package)_staging_dir) install
+endef
diff --git a/depends/packages/libmultiprocess.mk b/depends/packages/libmultiprocess.mk
new file mode 100644
index 0000000000..3e5cf5f160
--- /dev/null
+++ b/depends/packages/libmultiprocess.mk
@@ -0,0 +1,18 @@
+package=libmultiprocess
+$(package)_version=$(native_$(package)_version)
+$(package)_download_path=$(native_$(package)_download_path)
+$(package)_file_name=$(native_$(package)_file_name)
+$(package)_sha256_hash=$(native_$(package)_sha256_hash)
+$(package)_dependencies=native_$(package) boost capnp
+
+define $(package)_config_cmds
+ $($(package)_cmake)
+endef
+
+define $(package)_build_cmds
+ $(MAKE)
+endef
+
+define $(package)_stage_cmds
+ $(MAKE) DESTDIR=$($(package)_staging_dir) install
+endef
diff --git a/depends/packages/native_capnp.mk b/depends/packages/native_capnp.mk
new file mode 100644
index 0000000000..ed5a6deee2
--- /dev/null
+++ b/depends/packages/native_capnp.mk
@@ -0,0 +1,18 @@
+package=native_capnp
+$(package)_version=0.7.0
+$(package)_download_path=https://capnproto.org/
+$(package)_download_file=capnproto-c++-$($(package)_version).tar.gz
+$(package)_file_name=capnproto-cxx-$($(package)_version).tar.gz
+$(package)_sha256_hash=c9a4c0bd88123064d483ab46ecee777f14d933359e23bff6fb4f4dbd28b4cd41
+
+define $(package)_config_cmds
+ $($(package)_autoconf)
+endef
+
+define $(package)_build_cmds
+ $(MAKE)
+endef
+
+define $(package)_stage_cmds
+ $(MAKE) DESTDIR=$($(package)_staging_dir) install
+endef
diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk
new file mode 100644
index 0000000000..c50fdc3f6b
--- /dev/null
+++ b/depends/packages/native_libmultiprocess.mk
@@ -0,0 +1,18 @@
+package=native_libmultiprocess
+$(package)_version=5741d750a04e644a03336090d8979c6d033e32c0
+$(package)_download_path=https://github.com/chaincodelabs/libmultiprocess/archive
+$(package)_file_name=$($(package)_version).tar.gz
+$(package)_sha256_hash=ac848db49a6ed53e423c62d54bd87f1f08cbb0326254a8667e10bbfe5bf032a4
+$(package)_dependencies=native_capnp
+
+define $(package)_config_cmds
+ $($(package)_cmake)
+endef
+
+define $(package)_build_cmds
+ $(MAKE)
+endef
+
+define $(package)_stage_cmds
+ $(MAKE) DESTDIR=$($(package)_staging_dir) install
+endef
diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk
index 42dbaa77a6..8fe2c771c9 100644
--- a/depends/packages/packages.mk
+++ b/depends/packages/packages.mk
@@ -16,6 +16,9 @@ zmq_packages=zeromq
upnp_packages=miniupnpc
+multiprocess_packages = libmultiprocess capnp
+multiprocess_native_packages = native_libmultiprocess native_capnp
+
darwin_native_packages = native_biplist native_ds_store native_mac_alias
ifneq ($(build_os),darwin)
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 366b1d0c42..631851855a 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -10,6 +10,7 @@ $(package)_build_subdir=qtbase
$(package)_qt_libs=corelib network widgets gui plugins testlib
$(package)_patches=fix_qt_pkgconfig.patch mac-qmake.conf fix_configure_mac.patch fix_no_printer.patch fix_rcc_determinism.patch fix_riscv64_arch.patch xkb-default.patch no-xlib.patch fix_android_qmake_conf.patch fix_android_jni_static.patch
+# Update OSX_QT_TRANSLATIONS when this is updated
$(package)_qttranslations_file_name=qttranslations-$($(package)_suffix)
$(package)_qttranslations_sha256_hash=fb5a47799754af73d3bf501fe513342cfe2fc37f64e80df5533f6110e804220c
diff --git a/doc/multiprocess.md b/doc/multiprocess.md
new file mode 100644
index 0000000000..471d8561f7
--- /dev/null
+++ b/doc/multiprocess.md
@@ -0,0 +1,35 @@
+# Multiprocess Bitcoin
+
+On unix systems, the `--enable-multiprocess` build option can be passed to `./configure` to build new `bitcoin-node`, `bitcoin-wallet`, and `bitcoin-gui` executables alongside existing `bitcoind` and `bitcoin-qt` executables.
+
+`bitcoin-node` is a drop-in replacement for `bitcoind`, and `bitcoin-gui` is a drop-in replacement for `bitcoin-qt`, and there are no differences in use or external behavior between the new and old executables. But internally (after [#10102](https://github.com/bitcoin/bitcoin/pull/10102)), `bitcoin-gui` will spawn a `bitcoin-node` process to run P2P and RPC code, communicating with it across a socket pair, and `bitcoin-node` will spawn `bitcoin-wallet` to run wallet code, also communicating over a socket pair. This will let node, wallet, and GUI code run in separate address spaces for better isolation, and allow future improvements like being able to start and stop components independently on different machines and environments.
+
+## Next steps
+
+Specific next steps after [#10102](https://github.com/bitcoin/bitcoin/pull/10102) will be:
+
+- [ ] Adding `-ipcbind` and `-ipcconnect` options to `bitcoin-node`, `bitcoin-wallet`, and `bitcoin-gui` executables so they can listen and connect to TCP ports and unix socket paths. This will allow separate processes to be started and stopped any time and connect to each other.
+- [ ] Adding `-server` and `-rpcbind` options to the `bitcoin-wallet` executable so wallet processes can handle RPC requests directly without going through the node.
+- [ ] Supporting windows, not just unix systems. The existing socket code is already cross-platform, so the only windows-specific code that needs to be written is code spawning a process and passing a socket descriptor. This can be implemented with `CreateProcess` and `WSADuplicateSocket`. Example: https://memset.wordpress.com/2010/10/13/win32-api-passing-socket-with-ipc-method/.
+- [ ] Adding sandbox features, restricting subprocess access to resources and data. See [https://eklitzke.org/multiprocess-bitcoin](https://eklitzke.org/multiprocess-bitcoin).
+
+## Debugging
+
+After [#10102](https://github.com/bitcoin/bitcoin/pull/10102), the `-debug=ipc` command line option can be used to see requests and responses between processes.
+
+## Installation
+
+The multiprocess feature requires [Cap'n Proto](https://capnproto.org/) and [libmultiprocess](https://github.com/chaincodelabs/libmultiprocess) as dependencies. A simple way to get started using it without installing these dependencies manually is to use the [depends system](../depends) with the `MULTIPROCESS=1` [dependency option](../depends#dependency-options) passed to make:
+
+```
+cd <BITCOIN_SOURCE_DIRECTORY>
+make -C depends NO_QT=1 MULTIPROCESS=1
+./configure --prefix=$PWD/depends/x86_64-pc-linux-gnu
+make
+src/bitcoin-node -regtest -printtoconsole -debug=ipc
+BITCOIND=bitcoin-node test/functional/test_runner.py
+```
+
+The configure script will pick up settings and library locations from the depends directory, so there is no need to pass `--enable-multiprocess` as a separate flag when using the depends system (it's controlled by the `MULTIPROCESS=1` option).
+
+Alternately, you can install [Cap'n Proto](https://capnproto.org/) and [libmultiprocess](https://github.com/chaincodelabs/libmultiprocess) packages on your system, and just run `./configure --enable-multiprocess` without using the depends system. The configure script will be able to locate the installed packages via [pkg-config](https://www.freedesktop.org/wiki/Software/pkg-config/). See [Installation](https://github.com/chaincodelabs/libmultiprocess#installation) section of the libmultiprocess readme for install steps. See [build-unix.md](build-unix.md) and [build-osx.md](build-osx.md) for information about installing dependencies in general.
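[Editor's note] The process-spawning work described in the "Next steps" list of this document boils down to launching a child executable and handing it one end of a socket pair. The sketch below illustrates that mechanism on POSIX systems only; it is not the libmultiprocess implementation, and the `-ipcfd` flag and child path are hypothetical placeholders (on Windows the equivalent would use `CreateProcess` and `WSADuplicateSocket`, as the document notes).

```cpp
// Minimal sketch (assumption: not the actual libmultiprocess code) of spawning a
// child process and passing it one end of a unix socket pair, as described in the
// "Next steps" section of doc/multiprocess.md.
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <string>

// Returns the parent's end of the socket pair, or -1 on failure.
int SpawnWithSocket(const char* child_path)
{
    int fds[2];
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return -1;

    pid_t pid = fork();
    if (pid == 0) {
        // Child: keep fds[1], drop the parent's end, and tell the child which
        // descriptor to use by passing the fd number on the command line
        // (an illustrative convention, not a documented interface).
        close(fds[0]);
        std::string fd_arg = std::to_string(fds[1]);
        execlp(child_path, child_path, "-ipcfd", fd_arg.c_str(), (char*)nullptr);
        _exit(127); // exec failed
    }
    // Parent: keep fds[0] for IPC with the child, close the child's end.
    close(fds[1]);
    return pid > 0 ? fds[0] : -1;
}
```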
diff --git a/doc/productivity.md b/doc/productivity.md
index 1bf3d9afb5..555f0afe3c 100644
--- a/doc/productivity.md
+++ b/doc/productivity.md
@@ -12,7 +12,7 @@ Table of Contents
* [Multiple working directories with `git worktrees`](#multiple-working-directories-with-git-worktrees)
* [Interactive "dummy rebases" for fixups and execs with `git merge-base`](#interactive-dummy-rebases-for-fixups-and-execs-with-git-merge-base)
* [Writing code](#writing-code)
- * [Format C/C++/Protobuf diffs with `clang-format-diff.py`](#format-ccprotobuf-diffs-with-clang-format-diffpy)
+ * [Format C/C++ diffs with `clang-format-diff.py`](#format-cc-diffs-with-clang-format-diffpy)
* [Format Python diffs with `yapf-diff.py`](#format-python-diffs-with-yapf-diffpy)
* [Rebasing/Merging code](#rebasingmerging-code)
* [More conflict context with `merge.conflictstyle diff3`](#more-conflict-context-with-mergeconflictstyle-diff3)
@@ -118,13 +118,13 @@ You can also set up [upstream refspecs](#reference-prs-easily-with-refspecs) to
Writing code
------------
-### Format C/C++/Protobuf diffs with `clang-format-diff.py`
+### Format C/C++ diffs with `clang-format-diff.py`
See [contrib/devtools/README.md](/contrib/devtools/README.md#clang-format-diff.py).
### Format Python diffs with `yapf-diff.py`
-Usage is exactly the same as [`clang-format-diff.py`](#format-ccprotobuf-diffs-with-clang-format-diffpy). You can get it [here](https://github.com/MarcoFalke/yapf-diff).
+Usage is exactly the same as [`clang-format-diff.py`](#format-cc-diffs-with-clang-format-diffpy). You can get it [here](https://github.com/MarcoFalke/yapf-diff).
Rebasing/Merging code
-------------
diff --git a/doc/release-notes-16528.md b/doc/release-notes-16528.md
new file mode 100644
index 0000000000..0a7a31cc9d
--- /dev/null
+++ b/doc/release-notes-16528.md
@@ -0,0 +1,120 @@
+Wallet
+------
+
+### Experimental Descriptor Wallets
+
+Please note that Descriptor Wallets are still experimental and not all expected functionality
+is available. Additionally there may be some bugs and current functions may change in the future.
+Bugs and missing functionality can be reported to the [issue tracker](https://github.com/bitcoin/bitcoin/issues).
+
+0.21 introduces a new type of wallet - Descriptor Wallets. Descriptor Wallets store
+scriptPubKey information using descriptors. This is in contrast to the Legacy Wallet
+structure where keys are used to generate scriptPubKeys and addresses. Because of this
+shift to being script based instead of key based, many of the confusing things that Legacy
+Wallets do are not possible with Descriptor Wallets. Descriptor Wallets use a definition
+of "mine" for scripts which is simpler and more intuitive than that used by Legacy Wallets.
+Descriptor Wallets also use different semantics for watch-only things and imports.
+
+As Descriptor Wallets are a new type of wallet, their introduction does not affect existing wallets.
+Users who already have a Bitcoin Core wallet can continue to use it as they did before without
+any change in behavior. Newly created Legacy Wallets (which is the default type of wallet) will
+behave as they did in previous versions of Bitcoin Core.
+
+The differences between Descriptor Wallets and Legacy Wallets are largely limited to non-user-facing
+things. They are intended to behave similarly except for the import/export and watchonly functionality
+as described below.
+
+#### Creating Descriptor Wallets
+
+Descriptor Wallets are not created by default. They must be explicitly created using the
+`createwallet` RPC or via the GUI. A `descriptors` option has been added to `createwallet`.
+Setting `descriptors` to `true` will create a Descriptor Wallet instead of a Legacy Wallet.
+
+In the GUI, a checkbox has been added to the Create Wallet Dialog to indicate that a
+Descriptor Wallet should be created.
+
+Without those options being set, a Legacy Wallet will be created instead. Additionally the
+Default Wallet created upon first startup of Bitcoin Core will be a Legacy Wallet.
+
+#### `IsMine` Semantics
+
+`IsMine` refers to the function used to determine whether a script belongs to the wallet.
+This is used to determine whether an output belongs to the wallet. `IsMine` in Legacy Wallets
+returns true if the wallet would be able to sign an input that spends an output with that script.
+Since keys can be involved in a variety of different scripts, this definition for `IsMine` can
+lead to many unexpected scripts being considered part of the wallet.
+
+With Descriptor Wallets, descriptors explicitly specify the set of scripts that are owned by
+the wallet. Since descriptors are deterministic and easily enumerable, users will know exactly
+what scripts the wallet will consider to belong to it. Additionally the implementation of `IsMine`
+in Descriptor Wallets is far simpler than for Legacy Wallets. Notably, in Legacy Wallets, `IsMine`
+allowed for users to take one type of address (e.g. P2PKH), mutate it into another address type
+(e.g. P2WPKH), and the wallet would still detect outputs sending to the new address type
+even without that address being requested from the wallet. Descriptor Wallets do not
+allow for this and will only watch for the addresses that were explicitly requested from the wallet.
+
+These changes to `IsMine` will make it easier to reason about what scripts the wallet will
+actually be watching for in outputs. However for the vast majority of users, this change is
+largely transparent and will not have noticeable effect.
+
+#### Imports and Exports
+
+In Legacy Wallets, raw scripts and keys could be imported to the wallet. Those imported scripts
+and keys are treated separately from the keys generated by the wallet. This complicates the `IsMine`
+logic as it has to distinguish between spendable and watchonly.
+
+Descriptor Wallets handle importing scripts and keys differently. Only complete descriptors can be
+imported. These descriptors are then added to the wallet as if they were descriptors generated by
+the wallet itself. This simplifies the `IsMine` logic so that it no longer has to distinguish
+between spendable and watchonly. As such, the watchonly model for Descriptor Wallets is also
+different and described in more detail in the next section.
+
+To import into a Descriptor Wallet, a new `importdescriptors` RPC has been added that uses a syntax
+similar to that of `importmulti`.
+
+As Legacy Wallets and Descriptor Wallets use different mechanisms for storing and importing scripts and keys,
+the existing import RPCs have been disabled for Descriptor Wallets.
+New export RPCs for Descriptor Wallets have not yet been added.
+
+The following RPCs are disabled for Descriptor Wallets:
+
+* importprivkey
+* importpubkey
+* importaddress
+* importwallet
+* dumpprivkey
+* dumpwallet
+* importmulti
+* addmultisigaddress
+* sethdseed
+
+#### Watchonly Wallets
+
+A Legacy Wallet contains both private keys and scripts that were being watched.
+Those watched scripts would not contribute to your normal balance. In order to see the watchonly
+balance and to use watchonly things in transactions, an `include_watchonly` option was added
+to many RPCs that would allow users to do that. However it is easy to forget to include this option.
+
+Descriptor Wallets move to a per-wallet watchonly model. Instead an entire wallet is considered to be
+watchonly depending on whether it was created with private keys disabled. This eliminates the need
+to distinguish between things that are watchonly and things that are not within a wallet itself.
+
+This change does have a caveat. If a Descriptor Wallet with private keys *enabled* has
+a multiple key descriptor without all of the private keys (e.g. `multi(...)` with only one private key),
+then the wallet will fail to sign and broadcast transactions. Such wallets would need to use the PSBT
+workflow but the typical GUI Send, `sendtoaddress`, etc. workflows would still be available, just
+non-functional.
+
+This issue is worsened if the wallet contains both single key (e.g. `wpkh(...)`) descriptors and such
+multiple key descriptors, as some transactions could be signed and broadcast and others not. This is
+due to some transactions containing only single key inputs, while others would contain both single
+key and multiple key inputs, depending on which are available and how the coin selection algorithm
+selects inputs. However this is not considered to be a supported use case; multisigs
+should be in their own wallets which do not already have descriptors. Note that users cannot export
+descriptors with private keys for now, as explained earlier.
+
+#### BIP 44/49/84 Support
+
+The change to using descriptors changes the default derivation paths used by Bitcoin Core
+to adhere to BIP 44/49/84. Descriptors with different derivation paths can be imported without
+issue.
diff --git a/src/Makefile.am b/src/Makefile.am
index 0f562433de..2b004691fd 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -85,6 +85,10 @@ if BUILD_BITCOIND
bin_PROGRAMS += bitcoind
endif
+if BUILD_BITCOIN_NODE
+ bin_PROGRAMS += bitcoin-node
+endif
+
if BUILD_BITCOIN_CLI
bin_PROGRAMS += bitcoin-cli
endif
@@ -223,6 +227,7 @@ BITCOIN_CORE_H = \
util/message.h \
util/moneystr.h \
util/rbf.h \
+ util/ref.h \
util/settings.h \
util/string.h \
util/threadnames.h \
@@ -548,22 +553,21 @@ libbitcoin_cli_a_SOURCES = \
nodist_libbitcoin_util_a_SOURCES = $(srcdir)/obj/build.h
#
-# bitcoind binary #
-bitcoind_SOURCES = bitcoind.cpp
-bitcoind_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-bitcoind_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-bitcoind_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+# bitcoind & bitcoin-node binaries #
+bitcoin_daemon_sources = bitcoind.cpp
+bitcoin_bin_cppflags = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+bitcoin_bin_cxxflags = $(AM_CXXFLAGS) $(PIE_FLAGS)
+bitcoin_bin_ldflags = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
if TARGET_WINDOWS
-bitcoind_SOURCES += bitcoind-res.rc
+bitcoin_daemon_sources += bitcoind-res.rc
endif
-bitcoind_LDADD = \
- $(LIBBITCOIN_SERVER) \
+bitcoin_bin_ldadd = \
$(LIBBITCOIN_WALLET) \
$(LIBBITCOIN_COMMON) \
- $(LIBUNIVALUE) \
$(LIBBITCOIN_UTIL) \
+ $(LIBUNIVALUE) \
$(LIBBITCOIN_ZMQ) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
@@ -572,7 +576,19 @@ bitcoind_LDADD = \
$(LIBMEMENV) \
$(LIBSECP256K1)
-bitcoind_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(ZMQ_LIBS)
+bitcoin_bin_ldadd += $(BOOST_LIBS) $(BDB_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(ZMQ_LIBS)
+
+bitcoind_SOURCES = $(bitcoin_daemon_sources)
+bitcoind_CPPFLAGS = $(bitcoin_bin_cppflags)
+bitcoind_CXXFLAGS = $(bitcoin_bin_cxxflags)
+bitcoind_LDFLAGS = $(bitcoin_bin_ldflags)
+bitcoind_LDADD = $(LIBBITCOIN_SERVER) $(bitcoin_bin_ldadd)
+
+bitcoin_node_SOURCES = $(bitcoin_daemon_sources)
+bitcoin_node_CPPFLAGS = $(bitcoin_bin_cppflags)
+bitcoin_node_CXXFLAGS = $(bitcoin_bin_cxxflags)
+bitcoin_node_LDFLAGS = $(bitcoin_bin_ldflags)
+bitcoin_node_LDADD = $(LIBBITCOIN_SERVER) $(bitcoin_bin_ldadd)
# bitcoin-cli binary #
bitcoin_cli_SOURCES = bitcoin-cli.cpp
@@ -616,29 +632,14 @@ bitcoin_tx_LDADD += $(BOOST_LIBS)
# bitcoin-wallet binary #
bitcoin_wallet_SOURCES = bitcoin-wallet.cpp
-bitcoin_wallet_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-bitcoin_wallet_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-bitcoin_wallet_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+bitcoin_wallet_CPPFLAGS = $(bitcoin_bin_cppflags)
+bitcoin_wallet_CXXFLAGS = $(bitcoin_bin_cxxflags)
+bitcoin_wallet_LDFLAGS = $(bitcoin_bin_ldflags)
+bitcoin_wallet_LDADD = $(LIBBITCOIN_WALLET_TOOL) $(bitcoin_bin_ldadd)
if TARGET_WINDOWS
bitcoin_wallet_SOURCES += bitcoin-wallet-res.rc
endif
-
-bitcoin_wallet_LDADD = \
- $(LIBBITCOIN_WALLET_TOOL) \
- $(LIBBITCOIN_WALLET) \
- $(LIBBITCOIN_COMMON) \
- $(LIBBITCOIN_CONSENSUS) \
- $(LIBBITCOIN_UTIL) \
- $(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_ZMQ) \
- $(LIBLEVELDB) \
- $(LIBLEVELDB_SSE42) \
- $(LIBMEMENV) \
- $(LIBSECP256K1) \
- $(LIBUNIVALUE)
-
-bitcoin_wallet_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(MINIUPNPC_LIBS) $(ZMQ_LIBS)
#
# bitcoinconsensus library #
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index cf09eee2cb..13bfea7646 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -3,6 +3,11 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
bin_PROGRAMS += qt/bitcoin-qt
+
+if BUILD_BITCOIN_GUI
+ bin_PROGRAMS += bitcoin-gui
+endif
+
EXTRA_LIBRARIES += qt/libbitcoinqt.a
# bitcoin qt core #
@@ -294,29 +299,43 @@ QT_FORMS_H=$(join $(dir $(QT_FORMS_UI)),$(addprefix ui_, $(notdir $(QT_FORMS_UI:
# Most files will depend on the forms and moc files as includes. Generate them
# before anything else.
$(QT_MOC): $(QT_FORMS_H)
-$(qt_libbitcoinqt_a_OBJECTS) $(qt_bitcoin_qt_OBJECTS) : | $(QT_MOC)
+$(qt_libbitcoinqt_a_OBJECTS) $(qt_bitcoin_qt_OBJECTS) $(bitcoin_gui_OBJECTS) : | $(QT_MOC)
-# bitcoin-qt binary #
-qt_bitcoin_qt_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(BITCOIN_QT_INCLUDES) \
+# bitcoin-qt and bitcoin-gui binaries #
+bitcoin_qt_cppflags = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(BITCOIN_QT_INCLUDES) \
$(QT_INCLUDES) $(QR_CFLAGS)
-qt_bitcoin_qt_CXXFLAGS = $(AM_CXXFLAGS) $(QT_PIE_FLAGS)
+bitcoin_qt_cxxflags = $(AM_CXXFLAGS) $(QT_PIE_FLAGS)
-qt_bitcoin_qt_SOURCES = qt/main.cpp
+bitcoin_qt_sources = qt/main.cpp
if TARGET_WINDOWS
- qt_bitcoin_qt_SOURCES += $(BITCOIN_RC)
+ bitcoin_qt_sources += $(BITCOIN_RC)
endif
-qt_bitcoin_qt_LDADD = qt/libbitcoinqt.a $(LIBBITCOIN_SERVER)
+bitcoin_qt_ldadd = qt/libbitcoinqt.a $(LIBBITCOIN_SERVER)
if ENABLE_WALLET
-qt_bitcoin_qt_LDADD += $(LIBBITCOIN_UTIL) $(LIBBITCOIN_WALLET)
+bitcoin_qt_ldadd += $(LIBBITCOIN_UTIL) $(LIBBITCOIN_WALLET)
endif
if ENABLE_ZMQ
-qt_bitcoin_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
+bitcoin_qt_ldadd += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
-qt_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
+bitcoin_qt_ldadd += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
$(BOOST_LIBS) $(QT_LIBS) $(QT_DBUS_LIBS) $(QR_LIBS) $(BDB_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
$(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
-qt_bitcoin_qt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-qt_bitcoin_qt_LIBTOOLFLAGS = $(AM_LIBTOOLFLAGS) --tag CXX
+bitcoin_qt_ldflags = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+bitcoin_qt_libtoolflags = $(AM_LIBTOOLFLAGS) --tag CXX
+
+qt_bitcoin_qt_CPPFLAGS = $(bitcoin_qt_cppflags)
+qt_bitcoin_qt_CXXFLAGS = $(bitcoin_qt_cxxflags)
+qt_bitcoin_qt_SOURCES = $(bitcoin_qt_sources)
+qt_bitcoin_qt_LDADD = $(bitcoin_qt_ldadd)
+qt_bitcoin_qt_LDFLAGS = $(bitcoin_qt_ldflags)
+qt_bitcoin_qt_LIBTOOLFLAGS = $(bitcoin_qt_libtoolflags)
+
+bitcoin_gui_CPPFLAGS = $(bitcoin_qt_cppflags)
+bitcoin_gui_CXXFLAGS = $(bitcoin_qt_cxxflags)
+bitcoin_gui_SOURCES = $(bitcoin_qt_sources)
+bitcoin_gui_LDADD = $(bitcoin_qt_ldadd)
+bitcoin_gui_LDFLAGS = $(bitcoin_qt_ldflags)
+bitcoin_gui_LIBTOOLFLAGS = $(bitcoin_qt_libtoolflags)
#locale/foo.ts -> locale/foo.qm
QT_QM=$(QT_TS:.ts=.qm)
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 3a0d4fdc15..2480cdadbb 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -31,6 +31,7 @@ FUZZ_TARGETS = \
test/fuzz/chain \
test/fuzz/checkqueue \
test/fuzz/coins_deserialize \
+ test/fuzz/coins_view \
test/fuzz/cuckoocache \
test/fuzz/decode_tx \
test/fuzz/descriptor_parse \
@@ -229,6 +230,7 @@ BITCOIN_TESTS =\
test/prevector_tests.cpp \
test/raii_event_tests.cpp \
test/random_tests.cpp \
+ test/ref_tests.cpp \
test/reverselock_tests.cpp \
test/rpc_tests.cpp \
test/sanity_tests.cpp \
@@ -465,6 +467,12 @@ test_fuzz_coins_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_coins_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_coins_deserialize_SOURCES = test/fuzz/deserialize.cpp
+test_fuzz_coins_view_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_coins_view_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_coins_view_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_coins_view_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_coins_view_SOURCES = test/fuzz/coins_view.cpp
+
test_fuzz_cuckoocache_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_cuckoocache_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_cuckoocache_LDADD = $(FUZZ_SUITE_LD_COMMON)
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index cdaabd6fab..45a586cd12 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -9,6 +9,7 @@
#include <chainparamsbase.h>
#include <clientversion.h>
+#include <optional.h>
#include <rpc/client.h>
#include <rpc/protocol.h>
#include <rpc/request.h>
@@ -250,7 +251,7 @@ public:
UniValue ProcessReply(const UniValue &batch_in) override
{
UniValue result(UniValue::VOBJ);
- std::vector<UniValue> batch = JSONRPCProcessBatchReply(batch_in, batch_in.size());
+ const std::vector<UniValue> batch = JSONRPCProcessBatchReply(batch_in);
// Errors in getnetworkinfo() and getblockchaininfo() are fatal, pass them on;
// getwalletinfo() and getbalances() are allowed to fail if there is no wallet.
if (!batch[ID_NETWORKINFO]["error"].isNull()) {
@@ -304,7 +305,7 @@ public:
}
};
-static UniValue CallRPC(BaseRequestHandler *rh, const std::string& strMethod, const std::vector<std::string>& args)
+static UniValue CallRPC(BaseRequestHandler* rh, const std::string& strMethod, const std::vector<std::string>& args, const Optional<std::string>& rpcwallet = {})
{
std::string host;
// In preference order, we choose the following for the port:
@@ -369,14 +370,12 @@ static UniValue CallRPC(BaseRequestHandler *rh, const std::string& strMethod, co
// check if we should use a special wallet endpoint
std::string endpoint = "/";
- if (!gArgs.GetArgs("-rpcwallet").empty()) {
- std::string walletName = gArgs.GetArg("-rpcwallet", "");
- char *encodedURI = evhttp_uriencode(walletName.data(), walletName.size(), false);
+ if (rpcwallet) {
+ char* encodedURI = evhttp_uriencode(rpcwallet->data(), rpcwallet->size(), false);
if (encodedURI) {
- endpoint = "/wallet/"+ std::string(encodedURI);
+ endpoint = "/wallet/" + std::string(encodedURI);
free(encodedURI);
- }
- else {
+ } else {
throw CConnectionFailed("uri-encode failed");
}
}
@@ -418,6 +417,65 @@ static UniValue CallRPC(BaseRequestHandler *rh, const std::string& strMethod, co
return reply;
}
+/**
+ * ConnectAndCallRPC wraps CallRPC with -rpcwait and an exception handler.
+ *
+ * @param[in] rh Pointer to RequestHandler.
+ * @param[in] strMethod Reference to const string method to forward to CallRPC.
+ * @param[in] rpcwallet Reference to const optional string wallet name to forward to CallRPC.
+ * @returns the RPC response as a UniValue object.
+ * @throws a CConnectionFailed std::runtime_error if connection failed or RPC server still in warmup.
+ */
+static UniValue ConnectAndCallRPC(BaseRequestHandler* rh, const std::string& strMethod, const std::vector<std::string>& args, const Optional<std::string>& rpcwallet = {})
+{
+ UniValue response(UniValue::VOBJ);
+ // Execute and handle connection failures with -rpcwait.
+ const bool fWait = gArgs.GetBoolArg("-rpcwait", false);
+ do {
+ try {
+ response = CallRPC(rh, strMethod, args, rpcwallet);
+ if (fWait) {
+ const UniValue& error = find_value(response, "error");
+ if (!error.isNull() && error["code"].get_int() == RPC_IN_WARMUP) {
+ throw CConnectionFailed("server in warmup");
+ }
+ }
+ break; // Connection succeeded, no need to retry.
+ } catch (const CConnectionFailed&) {
+ if (fWait) {
+ UninterruptibleSleep(std::chrono::milliseconds{1000});
+ } else {
+ throw;
+ }
+ }
+ } while (fWait);
+ return response;
+}
+
+/**
+ * GetWalletBalances calls listwallets; if more than one wallet is loaded, it then
+ * fetches mine.trusted balances for each loaded wallet and pushes them to `result`.
+ *
+ * @param result Reference to UniValue object the wallet names and balances are pushed to.
+ */
+static void GetWalletBalances(UniValue& result)
+{
+ std::unique_ptr<BaseRequestHandler> rh{MakeUnique<DefaultRequestHandler>()};
+ const UniValue listwallets = ConnectAndCallRPC(rh.get(), "listwallets", /* args=*/{});
+ if (!find_value(listwallets, "error").isNull()) return;
+ const UniValue& wallets = find_value(listwallets, "result");
+ if (wallets.size() <= 1) return;
+
+ UniValue balances(UniValue::VOBJ);
+ for (const UniValue& wallet : wallets.getValues()) {
+ const std::string wallet_name = wallet.get_str();
+ const UniValue getbalances = ConnectAndCallRPC(rh.get(), "getbalances", /* args=*/{}, wallet_name);
+ const UniValue& balance = find_value(getbalances, "result")["mine"]["trusted"];
+ balances.pushKV(wallet_name, balance);
+ }
+ result.pushKV("balances", balances);
+}
+
static int CommandLineRPC(int argc, char *argv[])
{
std::string strPrint;
@@ -474,9 +532,8 @@ static int CommandLineRPC(int argc, char *argv[])
}
std::unique_ptr<BaseRequestHandler> rh;
std::string method;
- if (gArgs.GetBoolArg("-getinfo", false)) {
+ if (gArgs.IsArgSet("-getinfo")) {
rh.reset(new GetinfoRequestHandler());
- method = "";
} else {
rh.reset(new DefaultRequestHandler());
if (args.size() < 1) {
@@ -485,62 +542,46 @@ static int CommandLineRPC(int argc, char *argv[])
method = args[0];
args.erase(args.begin()); // Remove trailing method name from arguments vector
}
-
- // Execute and handle connection failures with -rpcwait
- const bool fWait = gArgs.GetBoolArg("-rpcwait", false);
- do {
- try {
- const UniValue reply = CallRPC(rh.get(), method, args);
-
- // Parse reply
- const UniValue& result = find_value(reply, "result");
- const UniValue& error = find_value(reply, "error");
-
- if (!error.isNull()) {
- // Error
- int code = error["code"].get_int();
- if (fWait && code == RPC_IN_WARMUP)
- throw CConnectionFailed("server in warmup");
- strPrint = "error: " + error.write();
- nRet = abs(code);
- if (error.isObject())
- {
- UniValue errCode = find_value(error, "code");
- UniValue errMsg = find_value(error, "message");
- strPrint = errCode.isNull() ? "" : "error code: "+errCode.getValStr()+"\n";
-
- if (errMsg.isStr())
- strPrint += "error message:\n"+errMsg.get_str();
-
- if (errCode.isNum() && errCode.get_int() == RPC_WALLET_NOT_SPECIFIED) {
- strPrint += "\nTry adding \"-rpcwallet=<filename>\" option to bitcoin-cli command line.";
- }
- }
- } else {
- // Result
- if (result.isNull())
- strPrint = "";
- else if (result.isStr())
- strPrint = result.get_str();
- else
- strPrint = result.write(2);
+ Optional<std::string> wallet_name{};
+ if (gArgs.IsArgSet("-rpcwallet")) wallet_name = gArgs.GetArg("-rpcwallet", "");
+ const UniValue reply = ConnectAndCallRPC(rh.get(), method, args, wallet_name);
+
+ // Parse reply
+ UniValue result = find_value(reply, "result");
+ const UniValue& error = find_value(reply, "error");
+ if (!error.isNull()) {
+ // Error
+ strPrint = "error: " + error.write();
+ nRet = abs(error["code"].get_int());
+ if (error.isObject()) {
+ const UniValue& errCode = find_value(error, "code");
+ const UniValue& errMsg = find_value(error, "message");
+ strPrint = errCode.isNull() ? "" : ("error code: " + errCode.getValStr() + "\n");
+
+ if (errMsg.isStr()) {
+ strPrint += ("error message:\n" + errMsg.get_str());
+ }
+ if (errCode.isNum() && errCode.get_int() == RPC_WALLET_NOT_SPECIFIED) {
+ strPrint += "\nTry adding \"-rpcwallet=<filename>\" option to bitcoin-cli command line.";
}
- // Connection succeeded, no need to retry.
- break;
}
- catch (const CConnectionFailed&) {
- if (fWait)
- UninterruptibleSleep(std::chrono::milliseconds{1000});
- else
- throw;
+ } else {
+ if (gArgs.IsArgSet("-getinfo") && !gArgs.IsArgSet("-rpcwallet")) {
+ GetWalletBalances(result); // fetch multiwallet balances and append to result
}
- } while (fWait);
- }
- catch (const std::exception& e) {
+ // Result
+ if (result.isNull()) {
+ strPrint = "";
+ } else if (result.isStr()) {
+ strPrint = result.get_str();
+ } else {
+ strPrint = result.write(2);
+ }
+ }
+ } catch (const std::exception& e) {
strPrint = std::string("error: ") + e.what();
nRet = EXIT_FAILURE;
- }
- catch (...) {
+ } catch (...) {
PrintExceptionContinue(nullptr, "CommandLineRPC()");
throw;
}
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 43d3f3c5ac..b8e8717896 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -16,6 +16,7 @@
#include <noui.h>
#include <shutdown.h>
#include <ui_interface.h>
+#include <util/ref.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <util/threadnames.h>
@@ -77,6 +78,7 @@ static bool AppInit(int argc, char* argv[])
return true;
}
+ util::Ref context{node};
try
{
if (!CheckDataDirOption()) {
@@ -145,7 +147,7 @@ static bool AppInit(int argc, char* argv[])
// If locking the data directory failed, exit immediately
return false;
}
- fRet = AppInitMain(node);
+ fRet = AppInitMain(context, node);
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "AppInit()");
diff --git a/src/compat/assumptions.h b/src/compat/assumptions.h
index 6e7b4d3ded..4b0b224c69 100644
--- a/src/compat/assumptions.h
+++ b/src/compat/assumptions.h
@@ -50,6 +50,7 @@ static_assert(sizeof(double) == 8, "64-bit double assumed");
// code.
static_assert(sizeof(short) == 2, "16-bit short assumed");
static_assert(sizeof(int) == 4, "32-bit int assumed");
+static_assert(sizeof(unsigned) == 4, "32-bit unsigned assumed");
// Assumption: We assume size_t to be 32-bit or 64-bit.
// Example(s): size_t assumed to be at least 32-bit in ecdsa_signature_parse_der_lax(...).
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 95e13998aa..f1b9997371 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -150,7 +150,7 @@ static bool RPCAuthorized(const std::string& strAuth, std::string& strAuthUserna
return multiUserAuthorized(strUserPass);
}
-static bool HTTPReq_JSONRPC(HTTPRequest* req, const std::string &)
+static bool HTTPReq_JSONRPC(const util::Ref& context, HTTPRequest* req)
{
// JSONRPC handles only POST
if (req->GetRequestMethod() != HTTPRequest::POST) {
@@ -165,7 +165,7 @@ static bool HTTPReq_JSONRPC(HTTPRequest* req, const std::string &)
return false;
}
- JSONRPCRequest jreq;
+ JSONRPCRequest jreq(context);
jreq.peerAddr = req->GetPeer().ToString();
if (!RPCAuthorized(authHeader.second, jreq.authUser)) {
LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr);
@@ -284,15 +284,16 @@ static bool InitRPCAuthentication()
return true;
}
-bool StartHTTPRPC()
+bool StartHTTPRPC(const util::Ref& context)
{
LogPrint(BCLog::RPC, "Starting HTTP RPC server\n");
if (!InitRPCAuthentication())
return false;
- RegisterHTTPHandler("/", true, HTTPReq_JSONRPC);
+ auto handle_rpc = [&context](HTTPRequest* req, const std::string&) { return HTTPReq_JSONRPC(context, req); };
+ RegisterHTTPHandler("/", true, handle_rpc);
if (g_wallet_init_interface.HasWalletSupport()) {
- RegisterHTTPHandler("/wallet/", false, HTTPReq_JSONRPC);
+ RegisterHTTPHandler("/wallet/", false, handle_rpc);
}
struct event_base* eventBase = EventBase();
assert(eventBase);
diff --git a/src/httprpc.h b/src/httprpc.h
index 99e4d59b8a..a6a38fc95a 100644
--- a/src/httprpc.h
+++ b/src/httprpc.h
@@ -5,11 +5,14 @@
#ifndef BITCOIN_HTTPRPC_H
#define BITCOIN_HTTPRPC_H
+namespace util {
+class Ref;
+} // namespace util
/** Start HTTP RPC subsystem.
 * Precondition: HTTP and RPC have been started.
*/
-bool StartHTTPRPC();
+bool StartHTTPRPC(const util::Ref& context);
/** Interrupt HTTP RPC subsystem.
*/
void InterruptHTTPRPC();
@@ -21,7 +24,7 @@ void StopHTTPRPC();
/** Start HTTP REST subsystem.
 * Precondition: HTTP and RPC have been started.
*/
-void StartREST();
+void StartREST(const util::Ref& context);
/** Interrupt RPC REST subsystem.
*/
void InterruptREST();
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index f2c3d66ebd..65a5f03a8e 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -31,6 +31,12 @@ constexpr char DB_FILTER_POS = 'P';
constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for fltr?????.dat files */
constexpr unsigned int FLTR_FILE_CHUNK_SIZE = 0x100000; // 1 MiB
+/** Maximum size of the cfheaders cache
+ * We have a limit to prevent a bug in filling this cache
+ * potentially turning into an OOM. At 2000 entries, this cache
+ * is big enough for a 2,000,000-block-long chain, which
+ * should be enough until ~2047. */
+constexpr size_t CF_HEADERS_CACHE_MAX_SZ{2000};
namespace {
@@ -377,13 +383,32 @@ bool BlockFilterIndex::LookupFilter(const CBlockIndex* block_index, BlockFilter&
return ReadFilterFromDisk(entry.pos, filter_out);
}
-bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out) const
+bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out)
{
+ LOCK(m_cs_headers_cache);
+
+ bool is_checkpoint{block_index->nHeight % CFCHECKPT_INTERVAL == 0};
+
+ if (is_checkpoint) {
+ // Try to find the block in the headers cache if this is a checkpoint height.
+ auto header = m_headers_cache.find(block_index->GetBlockHash());
+ if (header != m_headers_cache.end()) {
+ header_out = header->second;
+ return true;
+ }
+ }
+
DBVal entry;
if (!LookupOne(*m_db, block_index, entry)) {
return false;
}
+ if (is_checkpoint &&
+ m_headers_cache.size() < CF_HEADERS_CACHE_MAX_SZ) {
+ // Add to the headers cache if this is a checkpoint height.
+ m_headers_cache.emplace(block_index->GetBlockHash(), entry.header);
+ }
+
header_out = entry.header;
return true;
}
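The lookup above only consults and populates the cache at CFCHECKPT_INTERVAL heights and stops inserting once CF_HEADERS_CACHE_MAX_SZ entries exist, so repeated getcfcheckpt requests are served from memory while the cache stays bounded. A simplified standalone sketch of the same check-cache-then-fall-back-to-disk pattern (toy types and names, not the index's real interface):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    constexpr int CHECKPOINT_INTERVAL = 1000;   // mirrors CFCHECKPT_INTERVAL
    constexpr size_t CACHE_MAX_SIZE = 2000;     // mirrors CF_HEADERS_CACHE_MAX_SZ

    // Pretend "disk" lookup; in the real index this is a database read.
    static std::string LoadHeaderFromDisk(int height) { return "header-" + std::to_string(height); }

    static std::unordered_map<int, std::string> g_cache;

    std::string LookupFilterHeader(int height)
    {
        const bool is_checkpoint = height % CHECKPOINT_INTERVAL == 0;
        if (is_checkpoint) {
            auto it = g_cache.find(height);
            if (it != g_cache.end()) return it->second; // served from memory
        }
        std::string header = LoadHeaderFromDisk(height);
        if (is_checkpoint && g_cache.size() < CACHE_MAX_SIZE) {
            g_cache.emplace(height, header); // remember checkpoint headers only, up to the cap
        }
        return header;
    }

    int main()
    {
        LookupFilterHeader(2000);                      // checkpoint height: disk, then cached
        std::cout << LookupFilterHeader(2000) << '\n'; // cache hit
        std::cout << LookupFilterHeader(2001) << '\n'; // non-checkpoint height: never cached
    }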
diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h
index 436d52515f..317f8c0e40 100644
--- a/src/index/blockfilterindex.h
+++ b/src/index/blockfilterindex.h
@@ -10,6 +10,14 @@
#include <flatfile.h>
#include <index/base.h>
+/** Interval between compact filter checkpoints. See BIP 157. */
+static constexpr int CFCHECKPT_INTERVAL = 1000;
+
+struct FilterHeaderHasher
+{
+ size_t operator()(const uint256& hash) const { return ReadLE64(hash.begin()); }
+};
+
/**
* BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of
* blocks by height. An index is constructed for each supported filter type with its own database
@@ -30,6 +38,10 @@ private:
bool ReadFilterFromDisk(const FlatFilePos& pos, BlockFilter& filter) const;
size_t WriteFilterToDisk(FlatFilePos& pos, const BlockFilter& filter);
+ Mutex m_cs_headers_cache;
+ /** cache of block hash to filter header, to avoid disk access when responding to getcfcheckpt. */
+ std::unordered_map<uint256, uint256, FilterHeaderHasher> m_headers_cache GUARDED_BY(m_cs_headers_cache);
+
protected:
bool Init() override;
@@ -54,7 +66,7 @@ public:
bool LookupFilter(const CBlockIndex* block_index, BlockFilter& filter_out) const;
/** Get a single filter header by block. */
- bool LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out) const;
+ bool LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out);
/** Get a range of filters between two heights on a chain. */
bool LookupFilterRange(int start_height, const CBlockIndex* stop_index,
diff --git a/src/init.cpp b/src/init.cpp
index 341c37cc5a..37e6251295 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -244,9 +244,9 @@ void Shutdown(NodeContext& node)
}
// FlushStateToDisk generates a ChainStateFlushed callback, which we should avoid missing
- {
+ if (node.chainman) {
LOCK(cs_main);
- for (CChainState* chainstate : g_chainman.GetAll()) {
+ for (CChainState* chainstate : node.chainman->GetAll()) {
if (chainstate->CanFlushToDisk()) {
chainstate->ForceFlushStateToDisk();
}
@@ -271,9 +271,9 @@ void Shutdown(NodeContext& node)
// up with our current chain to avoid any strange pruning edge cases and make
// next startup faster by avoiding rescan.
- {
+ if (node.chainman) {
LOCK(cs_main);
- for (CChainState* chainstate : g_chainman.GetAll()) {
+ for (CChainState* chainstate : node.chainman->GetAll()) {
if (chainstate->CanFlushToDisk()) {
chainstate->ForceFlushStateToDisk();
chainstate->ResetCoinsViews();
@@ -299,7 +299,8 @@ void Shutdown(NodeContext& node)
globalVerifyHandle.reset();
ECC_Stop();
node.args = nullptr;
- if (node.mempool) node.mempool = nullptr;
+ node.mempool = nullptr;
+ node.chainman = nullptr;
node.scheduler.reset();
try {
@@ -689,7 +690,7 @@ static void CleanupBlockRevFiles()
}
}
-static void ThreadImport(std::vector<fs::path> vImportFiles)
+static void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles)
{
const CChainParams& chainparams = Params();
util::ThreadRename("loadblk");
@@ -741,9 +742,9 @@ static void ThreadImport(std::vector<fs::path> vImportFiles)
// scan for better chains in the block chain database, that are not yet connected in the active best chain
// We can't hold cs_main during ActivateBestChain even though we're accessing
- // the g_chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
+ // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
// the relevant pointers before the ABC call.
- for (CChainState* chainstate : WITH_LOCK(::cs_main, return g_chainman.GetAll())) {
+ for (CChainState* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
BlockValidationState state;
if (!chainstate->ActivateBestChain(state, chainparams, nullptr)) {
LogPrintf("Failed to connect best block (%s)\n", state.ToString());
@@ -784,16 +785,16 @@ static bool InitSanityCheck()
return true;
}
-static bool AppInitServers()
+static bool AppInitServers(const util::Ref& context)
{
RPCServer::OnStarted(&OnRPCStarted);
RPCServer::OnStopped(&OnRPCStopped);
if (!InitHTTPServer())
return false;
StartRPC();
- if (!StartHTTPRPC())
+ if (!StartHTTPRPC(context))
return false;
- if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) StartREST();
+ if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) StartREST(context);
StartHTTPServer();
return true;
}
@@ -1238,7 +1239,7 @@ bool AppInitLockDataDirectory()
return true;
}
-bool AppInitMain(NodeContext& node)
+bool AppInitMain(const util::Ref& context, NodeContext& node)
{
const CChainParams& chainparams = Params();
// ********************************************************* Step 4a: application initialization
@@ -1340,7 +1341,6 @@ bool AppInitMain(NodeContext& node)
for (const auto& client : node.chain_clients) {
client->registerRpcs();
}
- g_rpc_node = &node;
#if ENABLE_ZMQ
RegisterZMQRPCCommands(tableRPC);
#endif
@@ -1353,7 +1353,7 @@ bool AppInitMain(NodeContext& node)
if (gArgs.GetBoolArg("-server", false))
{
uiInterface.InitMessage_connect(SetRPCWarmupStatus);
- if (!AppInitServers())
+ if (!AppInitServers(context))
return InitError(_("Unable to start HTTP server. See debug log for details."));
}
@@ -1378,8 +1378,11 @@ bool AppInitMain(NodeContext& node)
// which are all started after this, may use it from the node context.
assert(!node.mempool);
node.mempool = &::mempool;
+ assert(!node.chainman);
+ node.chainman = &g_chainman;
+ ChainstateManager& chainman = EnsureChainman(node);
- node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), *node.scheduler, *node.mempool));
+ node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), *node.scheduler, *node.chainman, *node.mempool));
RegisterValidationInterface(node.peer_logic.get());
// sanitize comments per BIP-0014, format user agent and check total size
@@ -1558,7 +1561,7 @@ bool AppInitMain(NodeContext& node)
const int64_t load_block_index_start_time = GetTimeMillis();
try {
LOCK(cs_main);
- g_chainman.InitializeChainstate();
+ chainman.InitializeChainstate();
UnloadBlockIndex();
// new CBlockTreeDB tries to delete the existing file, which
@@ -1579,7 +1582,7 @@ bool AppInitMain(NodeContext& node)
// block file from disk.
// Note that it also sets fReindex based on the disk flag!
// From here on out fReindex and fReset mean something different!
- if (!LoadBlockIndex(chainparams)) {
+ if (!chainman.LoadBlockIndex(chainparams)) {
if (ShutdownRequested()) break;
strLoadError = _("Error loading block database");
break;
@@ -1613,7 +1616,7 @@ bool AppInitMain(NodeContext& node)
bool failed_chainstate_init = false;
- for (CChainState* chainstate : g_chainman.GetAll()) {
+ for (CChainState* chainstate : chainman.GetAll()) {
LogPrintf("Initializing chainstate %s\n", chainstate->ToString());
chainstate->InitCoinsDB(
/* cache_size_bytes */ nCoinDBCache,
@@ -1668,7 +1671,7 @@ bool AppInitMain(NodeContext& node)
bool failed_rewind{false};
// Can't hold cs_main while calling RewindBlockIndex, so retrieve the relevant
// chainstates beforehand.
- for (CChainState* chainstate : WITH_LOCK(::cs_main, return g_chainman.GetAll())) {
+ for (CChainState* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
if (!fReset) {
// Note that RewindBlockIndex MUST run even if we're about to -reindex-chainstate.
// It both disconnects blocks based on the chainstate, and drops block data in
@@ -1693,7 +1696,7 @@ bool AppInitMain(NodeContext& node)
try {
LOCK(cs_main);
- for (CChainState* chainstate : g_chainman.GetAll()) {
+ for (CChainState* chainstate : chainman.GetAll()) {
if (!is_coinsview_empty(chainstate)) {
uiInterface.InitMessage(_("Verifying blocks...").translated);
if (fHavePruned && gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) {
@@ -1799,7 +1802,7 @@ bool AppInitMain(NodeContext& node)
nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
if (!fReindex) {
LOCK(cs_main);
- for (CChainState* chainstate : g_chainman.GetAll()) {
+ for (CChainState* chainstate : chainman.GetAll()) {
uiInterface.InitMessage(_("Pruning blockstore...").translated);
chainstate->PruneAndFlush();
}
@@ -1842,7 +1845,7 @@ bool AppInitMain(NodeContext& node)
vImportFiles.push_back(strFile);
}
- threadGroup.create_thread(std::bind(&ThreadImport, vImportFiles));
+ threadGroup.create_thread([=, &chainman] { ThreadImport(chainman, vImportFiles); });
// Wait for genesis block to be processed
{
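The ThreadImport launch above also switches from std::bind to a lambda: the import file list is captured by value so it survives the end of the enclosing scope, while the ChainstateManager is captured by reference because it outlives the thread and must not be copied. A small generic illustration of that capture split, using std::thread and made-up types rather than the boost::thread_group call in the diff:

    #include <iostream>
    #include <string>
    #include <thread>
    #include <vector>

    struct Manager { int imported{0}; };

    static void ImportFiles(Manager& mgr, std::vector<std::string> files)
    {
        for (const auto& f : files) { ++mgr.imported; std::cout << "importing " << f << '\n'; }
    }

    int main()
    {
        Manager mgr;
        std::vector<std::string> files{"blk00000.dat", "blk00001.dat"};
        // files is copied into the closure; mgr is captured by reference.
        std::thread t{[=, &mgr] { ImportFiles(mgr, files); }};
        t.join();
        std::cout << mgr.imported << " files imported\n";
    }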
diff --git a/src/init.h b/src/init.h
index ef568b6f38..33fe96e8ea 100644
--- a/src/init.h
+++ b/src/init.h
@@ -14,6 +14,9 @@ struct NodeContext;
namespace boost {
class thread_group;
} // namespace boost
+namespace util {
+class Ref;
+} // namespace util
/** Interrupt threads */
void Interrupt(NodeContext& node);
@@ -51,7 +54,7 @@ bool AppInitLockDataDirectory();
* @note This should only be done after daemonization. Call Shutdown() if this function fails.
* @pre Parameters should be parsed and config file should be read, AppInitLockDataDirectory should have been called.
*/
-bool AppInitMain(NodeContext& node);
+bool AppInitMain(const util::Ref& context, NodeContext& node);
/**
* Register all arguments with the ArgsManager
diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp
index d619d2d67d..3c94e44b53 100644
--- a/src/interfaces/node.cpp
+++ b/src/interfaces/node.cpp
@@ -27,6 +27,7 @@
#include <sync.h>
#include <txmempool.h>
#include <ui_interface.h>
+#include <util/ref.h>
#include <util/system.h>
#include <util/translation.h>
#include <validation.h>
@@ -80,7 +81,7 @@ public:
bool appInitMain() override
{
m_context.chain = MakeChain(m_context);
- return AppInitMain(m_context);
+ return AppInitMain(m_context_ref, m_context);
}
void appShutdown() override
{
@@ -225,7 +226,7 @@ public:
CFeeRate getDustRelayFee() override { return ::dustRelayFee; }
UniValue executeRpc(const std::string& command, const UniValue& params, const std::string& uri) override
{
- JSONRPCRequest req;
+ JSONRPCRequest req(m_context_ref);
req.params = params;
req.strMethod = command;
req.URI = uri;
@@ -323,6 +324,7 @@ public:
}
NodeContext* context() override { return &m_context; }
NodeContext m_context;
+ util::Ref m_context_ref{m_context};
};
} // namespace
diff --git a/src/logging.cpp b/src/logging.cpp
index eb9da06d9b..56c44ae1ea 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -22,8 +22,8 @@ BCLog::Logger& LogInstance()
* access the logger. When the shutdown sequence is fully audited and tested,
* explicit destruction of these objects can be implemented by changing this
* from a raw pointer to a std::unique_ptr.
- * Since the destructor is never called, the logger and all its members must
- * have a trivial destructor.
+ * Since the ~Logger() destructor is never called, the Logger class and all
+ * its subclasses must have implicitly-defined destructors.
*
* This method of initialization was originally introduced in
* ee3374234c60aba2cc4c5cd5cac1c0aefc2d817c.
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 6d85b46831..159036a237 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -129,8 +129,8 @@ static constexpr unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
/** Maximum feefilter broadcast delay after significant change. */
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
-/** Interval between compact filter checkpoints. See BIP 157. */
-static constexpr int CFCHECKPT_INTERVAL = 1000;
+/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
+static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
struct COrphanTx {
// When modifying, adapt the copy of this definition in tests/DoS_tests.
@@ -819,7 +819,12 @@ void PeerLogicValidation::ReattemptInitialBroadcast(CScheduler& scheduler) const
std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
for (const uint256& txid : unbroadcast_txids) {
- RelayTransaction(txid, *connman);
+ // Sanity check: all unbroadcast txns should exist in the mempool
+ if (m_mempool.exists(txid)) {
+ RelayTransaction(txid, *connman);
+ } else {
+ m_mempool.RemoveUnbroadcastTx(txid, true);
+ }
}
// schedule next run for 10-15 minutes in the future
@@ -1150,9 +1155,10 @@ static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Para
(GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
}
-PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler& scheduler, CTxMemPool& pool)
+PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
: connman(connmanIn),
m_banman(banman),
+ m_chainman(chainman),
m_mempool(pool),
m_stale_tip_check_time(0)
{
@@ -1735,7 +1741,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}
-bool static ProcessHeadersMessage(CNode* pfrom, CConnman* connman, CTxMemPool& mempool, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block)
+bool static ProcessHeadersMessage(CNode* pfrom, CConnman* connman, ChainstateManager& chainman, CTxMemPool& mempool, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block)
{
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
size_t nCount = headers.size();
@@ -1795,7 +1801,7 @@ bool static ProcessHeadersMessage(CNode* pfrom, CConnman* connman, CTxMemPool& m
}
BlockValidationState state;
- if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
+ if (!chainman.ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
if (state.IsInvalid()) {
MaybePunishNodeForBlock(pfrom->GetId(), state, via_compact_block, "invalid header received");
return false;
@@ -1985,16 +1991,18 @@ void static ProcessOrphanTx(CConnman* connman, CTxMemPool& mempool, std::set<uin
* @param[in] pfrom The peer that we received the request from
* @param[in] chain_params Chain parameters
* @param[in] filter_type The filter type the request is for. Must be basic filters.
+ * @param[in] start_height The start height for the request
* @param[in] stop_hash The stop_hash for the request
+ * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157
* @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced.
* @param[out] filter_index The filter index, if the request can be serviced.
* @return True if the request can be serviced.
*/
static bool PrepareBlockFilterRequest(CNode* pfrom, const CChainParams& chain_params,
- BlockFilterType filter_type,
- const uint256& stop_hash,
+ BlockFilterType filter_type, uint32_t start_height,
+ const uint256& stop_hash, uint32_t max_height_diff,
const CBlockIndex*& stop_index,
- const BlockFilterIndex*& filter_index)
+ BlockFilterIndex*& filter_index)
{
const bool supported_filter_type =
(filter_type == BlockFilterType::BASIC &&
@@ -2019,6 +2027,21 @@ static bool PrepareBlockFilterRequest(CNode* pfrom, const CChainParams& chain_pa
}
}
+ uint32_t stop_height = stop_index->nHeight;
+ if (start_height > stop_height) {
+ LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
+ "start height %d and stop height %d\n",
+ pfrom->GetId(), start_height, stop_height);
+ pfrom->fDisconnect = true;
+ return false;
+ }
+ if (stop_height - start_height >= max_height_diff) {
+ LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
+ pfrom->GetId(), stop_height - start_height + 1, max_height_diff);
+ pfrom->fDisconnect = true;
+ return false;
+ }
+
filter_index = GetBlockFilterIndex(filter_type);
if (!filter_index) {
LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
@@ -2029,6 +2052,61 @@ static bool PrepareBlockFilterRequest(CNode* pfrom, const CChainParams& chain_pa
}
/**
+ * Handle a getcfheaders request.
+ *
+ * May disconnect from the peer in the case of a bad request.
+ *
+ * @param[in] pfrom The peer that we received the request from
+ * @param[in] vRecv The raw message received
+ * @param[in] chain_params Chain parameters
+ * @param[in] connman Pointer to the connection manager
+ */
+static void ProcessGetCFHeaders(CNode* pfrom, CDataStream& vRecv, const CChainParams& chain_params,
+ CConnman* connman)
+{
+ uint8_t filter_type_ser;
+ uint32_t start_height;
+ uint256 stop_hash;
+
+ vRecv >> filter_type_ser >> start_height >> stop_hash;
+
+ const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
+
+ const CBlockIndex* stop_index;
+ BlockFilterIndex* filter_index;
+ if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, start_height, stop_hash,
+ MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
+ return;
+ }
+
+ uint256 prev_header;
+ if (start_height > 0) {
+ const CBlockIndex* const prev_block =
+ stop_index->GetAncestor(static_cast<int>(start_height - 1));
+ if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
+ LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
+ BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
+ return;
+ }
+ }
+
+ std::vector<uint256> filter_hashes;
+ if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
+ LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
+ BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
+ return;
+ }
+
+ CSerializedNetMsg msg = CNetMsgMaker(pfrom->GetSendVersion())
+ .Make(NetMsgType::CFHEADERS,
+ filter_type_ser,
+ stop_index->GetBlockHash(),
+ prev_header,
+ filter_hashes);
+ connman->PushMessage(pfrom, std::move(msg));
+}
+
+/**
* Handle a getcfcheckpt request.
*
* May disconnect from the peer in the case of a bad request.
@@ -2049,8 +2127,9 @@ static void ProcessGetCFCheckPt(CNode* pfrom, CDataStream& vRecv, const CChainPa
const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
const CBlockIndex* stop_index;
- const BlockFilterIndex* filter_index;
- if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, stop_hash,
+ BlockFilterIndex* filter_index;
+ if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, /*start_height=*/0, stop_hash,
+ /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
stop_index, filter_index)) {
return;
}
@@ -2078,7 +2157,7 @@ static void ProcessGetCFCheckPt(CNode* pfrom, CDataStream& vRecv, const CChainPa
connman->PushMessage(pfrom, std::move(msg));
}
-bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CTxMemPool& mempool, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc)
+bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, ChainstateManager& chainman, CTxMemPool& mempool, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc)
{
LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom->GetId());
if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
@@ -2675,8 +2754,8 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
if (msg_type == NetMsgType::TX) {
// Stop processing the transaction early if
- // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
- // or if this peer is supposed to be a block-relay-only peer
+ // 1) We are in blocks only mode and peer has no relay permission
+ // 2) This peer is a block-relay-only peer
if ((!g_relay_txes && !pfrom->HasPermission(PF_RELAY)) || (pfrom->m_tx_relay == nullptr))
{
LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->GetId());
@@ -2845,7 +2924,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
const CBlockIndex *pindex = nullptr;
BlockValidationState state;
- if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
+ if (!chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
if (state.IsInvalid()) {
MaybePunishNodeForBlock(pfrom->GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
return true;
@@ -2989,7 +3068,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
} // cs_main
if (fProcessBLOCKTXN)
- return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, mempool, connman, banman, interruptMsgProc);
+ return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, chainman, mempool, connman, banman, interruptMsgProc);
if (fRevertToHeaderProcessing) {
// Headers received from HB compact block peers are permitted to be
@@ -2997,7 +3076,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
// the peer if the header turns out to be for an invalid block.
// Note that if a peer tries to build on an invalid chain, that
// will be detected and the peer will be banned.
- return ProcessHeadersMessage(pfrom, connman, mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
+ return ProcessHeadersMessage(pfrom, connman, chainman, mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
}
if (fBlockReconstructed) {
@@ -3017,7 +3096,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
// we have a chain with at least nMinimumChainWork), and we ignore
// compact blocks with less work than our tip, it is safe to treat
// reconstructed compact blocks as having been requested.
- ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
+ chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
@@ -3107,7 +3186,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
// disk-space attacks), but this should be safe due to the
// protections in the compact block handler -- see related comment
// in compact block optimistic reconstruction handling.
- ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
+ chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
@@ -3141,7 +3220,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
- return ProcessHeadersMessage(pfrom, connman, mempool, headers, chainparams, /*via_compact_block=*/false);
+ return ProcessHeadersMessage(pfrom, connman, chainman, mempool, headers, chainparams, /*via_compact_block=*/false);
}
if (msg_type == NetMsgType::BLOCK)
@@ -3170,7 +3249,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
}
bool fNewBlock = false;
- ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
+ chainman.ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
@@ -3387,6 +3466,11 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
return true;
}
+ if (msg_type == NetMsgType::GETCFHEADERS) {
+ ProcessGetCFHeaders(pfrom, vRecv, chainparams, connman);
+ return true;
+ }
+
if (msg_type == NetMsgType::GETCFCHECKPT) {
ProcessGetCFCheckPt(pfrom, vRecv, chainparams, connman);
return true;
@@ -3531,7 +3615,7 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter
bool fRet = false;
try
{
- fRet = ProcessMessage(pfrom, msg_type, vRecv, msg.m_time, chainparams, m_mempool, connman, m_banman, interruptMsgProc);
+ fRet = ProcessMessage(pfrom, msg_type, vRecv, msg.m_time, chainparams, m_chainman, m_mempool, connman, m_banman, interruptMsgProc);
if (interruptMsgProc)
return false;
if (!pfrom->vRecvGetData.empty())
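The start_height/max_height_diff checks added to PrepareBlockFilterRequest are what cap a getcfheaders request at MAX_GETCFHEADERS_SIZE (2000) headers and reject inverted ranges, disconnecting peers that violate either rule. The arithmetic is simple enough to show in isolation; the sketch below keeps only the range check, with a plain bool result in place of the peer/disconnect handling:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;

    // Returns true if a request for heights [start_height, stop_height] is acceptable.
    bool CheckFilterRequestRange(uint32_t start_height, uint32_t stop_height, uint32_t max_height_diff)
    {
        if (start_height > stop_height) return false;                    // malformed range
        if (stop_height - start_height >= max_height_diff) return false; // too many entries requested
        return true;
    }

    int main()
    {
        assert(CheckFilterRequestRange(0, 1999, MAX_GETCFHEADERS_SIZE));  // exactly 2000 headers: allowed
        assert(!CheckFilterRequestRange(0, 2000, MAX_GETCFHEADERS_SIZE)); // 2001 headers: rejected
        assert(!CheckFilterRequestRange(10, 5, MAX_GETCFHEADERS_SIZE));   // start above stop: rejected
    }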
diff --git a/src/net_processing.h b/src/net_processing.h
index 4033c85d07..ec758c7537 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -12,6 +12,7 @@
#include <validationinterface.h>
class CTxMemPool;
+class ChainstateManager;
extern RecursiveMutex cs_main;
extern RecursiveMutex g_cs_orphans;
@@ -27,12 +28,13 @@ class PeerLogicValidation final : public CValidationInterface, public NetEventsI
private:
CConnman* const connman;
BanMan* const m_banman;
+ ChainstateManager& m_chainman;
CTxMemPool& m_mempool;
bool CheckIfBanned(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
public:
- PeerLogicValidation(CConnman* connman, BanMan* banman, CScheduler& scheduler, CTxMemPool& pool);
+ PeerLogicValidation(CConnman* connman, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool);
/**
* Overridden from CValidationInterface.
diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp
index ec52a08ace..e3c4c828b6 100644
--- a/src/node/coinstats.cpp
+++ b/src/node/coinstats.cpp
@@ -33,7 +33,7 @@ static void ApplyStats(CCoinsStats &stats, CHashWriter& ss, const uint256& hash,
}
//! Calculate statistics about the unspent transaction output set
-bool GetUTXOStats(CCoinsView *view, CCoinsStats &stats)
+bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, const std::function<void()>& interruption_point)
{
stats = CCoinsStats();
std::unique_ptr<CCoinsViewCursor> pcursor(view->Cursor());
@@ -49,6 +49,7 @@ bool GetUTXOStats(CCoinsView *view, CCoinsStats &stats)
uint256 prevkey;
std::map<uint32_t, Coin> outputs;
while (pcursor->Valid()) {
+ interruption_point();
COutPoint key;
Coin coin;
if (pcursor->GetKey(key) && pcursor->GetValue(coin)) {
diff --git a/src/node/coinstats.h b/src/node/coinstats.h
index a19af0fd1b..d9cdaa3036 100644
--- a/src/node/coinstats.h
+++ b/src/node/coinstats.h
@@ -10,6 +10,7 @@
#include <uint256.h>
#include <cstdint>
+#include <functional>
class CCoinsView;
@@ -29,6 +30,6 @@ struct CCoinsStats
};
//! Calculate statistics about the unspent transaction output set
-bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats);
+bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, const std::function<void()>& interruption_point = {});
#endif // BITCOIN_NODE_COINSTATS_H
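The new std::function parameter lets callers inject an interruption check into the UTXO scan; the RPC code later in this diff passes RpcInterruptionPoint, which, judging by the inline shutdown check it replaces in dumptxoutset, throws once the RPC server is no longer running. A standalone sketch of the optional-callback pattern with made-up names (SumRange, the budget lambda); the sketch guards the call, which a caller relying on the {} default would need:

    #include <functional>
    #include <iostream>
    #include <stdexcept>

    // Walks a (pretend) data set, giving the caller a chance to abort each step.
    long SumRange(long n, const std::function<void()>& interruption_point = {})
    {
        long total = 0;
        for (long i = 0; i < n; ++i) {
            if (interruption_point) interruption_point(); // no-op when the caller passed nothing
            total += i;
        }
        return total;
    }

    int main()
    {
        std::cout << SumRange(1000) << '\n'; // default: never interrupted

        int budget = 100;
        try {
            SumRange(1000, [&budget] {
                if (--budget < 0) throw std::runtime_error("interrupted"); // mimics an RPC shutdown throw
            });
        } catch (const std::runtime_error& e) {
            std::cout << "scan aborted: " << e.what() << '\n';
        }
    }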
diff --git a/src/node/context.h b/src/node/context.h
index 566ff170be..c45d9e6689 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -5,6 +5,7 @@
#ifndef BITCOIN_NODE_CONTEXT_H
#define BITCOIN_NODE_CONTEXT_H
+#include <cassert>
#include <memory>
#include <vector>
@@ -13,6 +14,7 @@ class BanMan;
class CConnman;
class CScheduler;
class CTxMemPool;
+class ChainstateManager;
class PeerLogicValidation;
namespace interfaces {
class Chain;
@@ -33,6 +35,7 @@ struct NodeContext {
std::unique_ptr<CConnman> connman;
CTxMemPool* mempool{nullptr}; // Currently a raw pointer because the memory is not managed by this struct
std::unique_ptr<PeerLogicValidation> peer_logic;
+ ChainstateManager* chainman{nullptr}; // Currently a raw pointer because the memory is not managed by this struct
std::unique_ptr<BanMan> banman;
ArgsManager* args{nullptr}; // Currently a raw pointer because the memory is not managed by this struct
std::unique_ptr<interfaces::Chain> chain;
@@ -46,4 +49,10 @@ struct NodeContext {
~NodeContext();
};
+inline ChainstateManager& EnsureChainman(const NodeContext& node)
+{
+ assert(node.chainman);
+ return *node.chainman;
+}
+
#endif // BITCOIN_NODE_CONTEXT_H
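EnsureChainman turns the nullable raw pointer in NodeContext into a reference, asserting instead of returning null so call sites don't each repeat the check. The same accessor shape, reduced to a standalone toy with a hypothetical Foo component:

    #include <cassert>
    #include <iostream>

    struct Foo { int value{42}; };

    struct Context {
        Foo* foo{nullptr}; // raw pointer: lifetime managed elsewhere
    };

    inline Foo& EnsureFoo(const Context& ctx)
    {
        assert(ctx.foo); // programming error if the component was never wired up
        return *ctx.foo;
    }

    int main()
    {
        Foo foo;
        Context ctx;
        ctx.foo = &foo;
        std::cout << EnsureFoo(ctx).value << '\n'; // callers get a reference, no null checks
    }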
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 25851e786c..243111c449 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -40,6 +40,8 @@ const char *SENDCMPCT="sendcmpct";
const char *CMPCTBLOCK="cmpctblock";
const char *GETBLOCKTXN="getblocktxn";
const char *BLOCKTXN="blocktxn";
+const char *GETCFHEADERS="getcfheaders";
+const char *CFHEADERS="cfheaders";
const char *GETCFCHECKPT="getcfcheckpt";
const char *CFCHECKPT="cfcheckpt";
} // namespace NetMsgType
@@ -73,6 +75,8 @@ const static std::string allNetMessageTypes[] = {
NetMsgType::CMPCTBLOCK,
NetMsgType::GETBLOCKTXN,
NetMsgType::BLOCKTXN,
+ NetMsgType::GETCFHEADERS,
+ NetMsgType::CFHEADERS,
NetMsgType::GETCFCHECKPT,
NetMsgType::CFCHECKPT,
};
@@ -147,24 +151,6 @@ void SetServiceFlagsIBDCache(bool state) {
g_initial_block_download_completed = state;
}
-
-CAddress::CAddress() : CService()
-{
- Init();
-}
-
-CAddress::CAddress(CService ipIn, ServiceFlags nServicesIn) : CService(ipIn)
-{
- Init();
- nServices = nServicesIn;
-}
-
-void CAddress::Init()
-{
- nServices = NODE_NONE;
- nTime = 100000000;
-}
-
CInv::CInv()
{
type = 0;
diff --git a/src/protocol.h b/src/protocol.h
index 0bf9f1d7b5..9527dce960 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -226,6 +226,19 @@ extern const char* GETBLOCKTXN;
*/
extern const char* BLOCKTXN;
/**
+ * getcfheaders requests a compact filter header and the filter hashes for a
+ * range of blocks, which can then be used to reconstruct the filter headers
+ * for those blocks.
+ * Only available with service bit NODE_COMPACT_FILTERS as described by
+ * BIP 157 & 158.
+ */
+extern const char* GETCFHEADERS;
+/**
+ * cfheaders is a response to a getcfheaders request containing a filter header
+ * and a vector of filter hashes for each subsequent block in the requested range.
+ */
+extern const char* CFHEADERS;
+/**
* getcfcheckpt requests evenly spaced compact filter headers, enabling
* parallelized download and validation of the headers between them.
* Only available with service bit NODE_COMPACT_FILTERS as described by
@@ -235,8 +248,6 @@ extern const char* GETCFCHECKPT;
/**
* cfcheckpt is a response to a getcfcheckpt request containing a vector of
* evenly spaced filter headers for blocks on the requested chain.
- * Only available with service bit NODE_COMPACT_FILTERS as described by
- * BIP 157 & 158.
*/
extern const char* CFCHECKPT;
}; // namespace NetMsgType
@@ -328,15 +339,15 @@ static inline bool MayHaveUsefulAddressDB(ServiceFlags services)
/** A CService with information about it as peer */
class CAddress : public CService
{
-public:
- CAddress();
- explicit CAddress(CService ipIn, ServiceFlags nServicesIn);
+ static constexpr uint32_t TIME_INIT{100000000};
- void Init();
+public:
+ CAddress() : CService{} {};
+ explicit CAddress(CService ipIn, ServiceFlags nServicesIn) : CService{ipIn}, nServices{nServicesIn} {};
SERIALIZE_METHODS(CAddress, obj)
{
- SER_READ(obj, obj.Init());
+ SER_READ(obj, obj.nTime = TIME_INIT);
int nVersion = s.GetVersion();
if (s.GetType() & SER_DISK) {
READWRITE(nVersion);
@@ -349,10 +360,9 @@ public:
READWRITEAS(CService, obj);
}
- ServiceFlags nServices;
-
+ ServiceFlags nServices{NODE_NONE};
// disk and network only
- unsigned int nTime;
+ uint32_t nTime{TIME_INIT};
};
/** getdata message type flags */
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 4de4850903..2090c233ac 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -1251,7 +1251,7 @@ void BitcoinGUI::setEncryptionStatus(int status)
labelWalletEncryptionIcon->setToolTip(tr("Wallet is <b>encrypted</b> and currently <b>unlocked</b>"));
encryptWalletAction->setChecked(true);
changePassphraseAction->setEnabled(true);
- encryptWalletAction->setEnabled(false); // TODO: decrypt currently not supported
+ encryptWalletAction->setEnabled(false);
break;
case WalletModel::Locked:
labelWalletEncryptionIcon->show();
@@ -1259,7 +1259,7 @@ void BitcoinGUI::setEncryptionStatus(int status)
labelWalletEncryptionIcon->setToolTip(tr("Wallet is <b>encrypted</b> and currently <b>locked</b>"));
encryptWalletAction->setChecked(true);
changePassphraseAction->setEnabled(true);
- encryptWalletAction->setEnabled(false); // TODO: decrypt currently not supported
+ encryptWalletAction->setEnabled(false);
break;
}
}
diff --git a/src/qt/recentrequeststablemodel.h b/src/qt/recentrequeststablemodel.h
index addf5ad0ae..c0bd3461bb 100644
--- a/src/qt/recentrequeststablemodel.h
+++ b/src/qt/recentrequeststablemodel.h
@@ -24,19 +24,11 @@ public:
QDateTime date;
SendCoinsRecipient recipient;
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- unsigned int nDate = date.toTime_t();
-
- READWRITE(this->nVersion);
- READWRITE(id);
- READWRITE(nDate);
- READWRITE(recipient);
-
- if (ser_action.ForRead())
- date = QDateTime::fromTime_t(nDate);
+ SERIALIZE_METHODS(RecentRequestEntry, obj) {
+ unsigned int date_timet;
+ SER_WRITE(obj, date_timet = obj.date.toTime_t());
+ READWRITE(obj.nVersion, obj.id, date_timet, obj.recipient);
+ SER_READ(obj, obj.date = QDateTime::fromTime_t(date_timet));
}
};
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 2d4af3f9e6..66f1c8fd9c 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -31,17 +31,14 @@
#include <QKeyEvent>
#include <QMenu>
#include <QMessageBox>
-#include <QScrollBar>
#include <QScreen>
+#include <QScrollBar>
#include <QSettings>
#include <QString>
#include <QStringList>
#include <QTime>
#include <QTimer>
-// TODO: add a scrollback limit, as there is currently none
-// TODO: make it possible to filter out categories (esp debug messages when implemented)
-// TODO: receive errors and debug messages through ClientModel
const int CONSOLE_HISTORY = 50;
const int INITIAL_TRAFFIC_GRAPH_MINS = 30;
diff --git a/src/qt/sendcoinsrecipient.h b/src/qt/sendcoinsrecipient.h
index 12279fab64..6619faf417 100644
--- a/src/qt/sendcoinsrecipient.h
+++ b/src/qt/sendcoinsrecipient.h
@@ -44,30 +44,21 @@ public:
static const int CURRENT_VERSION = 1;
int nVersion;
- ADD_SERIALIZE_METHODS;
+ SERIALIZE_METHODS(SendCoinsRecipient, obj)
+ {
+ std::string address_str, label_str, message_str, auth_merchant_str;
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- std::string sAddress = address.toStdString();
- std::string sLabel = label.toStdString();
- std::string sMessage = message.toStdString();
- std::string sAuthenticatedMerchant = authenticatedMerchant.toStdString();
+ SER_WRITE(obj, address_str = obj.address.toStdString());
+ SER_WRITE(obj, label_str = obj.label.toStdString());
+ SER_WRITE(obj, message_str = obj.message.toStdString());
+ SER_WRITE(obj, auth_merchant_str = obj.authenticatedMerchant.toStdString());
- READWRITE(this->nVersion);
- READWRITE(sAddress);
- READWRITE(sLabel);
- READWRITE(amount);
- READWRITE(sMessage);
- READWRITE(sPaymentRequest);
- READWRITE(sAuthenticatedMerchant);
+ READWRITE(obj.nVersion, address_str, label_str, obj.amount, message_str, obj.sPaymentRequest, auth_merchant_str);
- if (ser_action.ForRead())
- {
- address = QString::fromStdString(sAddress);
- label = QString::fromStdString(sLabel);
- message = QString::fromStdString(sMessage);
- authenticatedMerchant = QString::fromStdString(sAuthenticatedMerchant);
- }
+ SER_READ(obj, obj.address = QString::fromStdString(address_str));
+ SER_READ(obj, obj.label = QString::fromStdString(label_str));
+ SER_READ(obj, obj.message = QString::fromStdString(message_str));
+ SER_READ(obj, obj.authenticatedMerchant = QString::fromStdString(auth_merchant_str));
}
};
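Both Qt types converted above drop the old ADD_SERIALIZE_METHODS/SerializationOp pair in favour of SERIALIZE_METHODS, where a single body serves both directions and SER_WRITE/SER_READ guard the statements that only apply when writing or reading (here, the QString conversions). The toy program below sketches how such a single-body scheme can work; the TOY_* macros, Action flag, and Entry type are invented for illustration and are far simpler than the real machinery in serialize.h:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Run a statement only when serializing (writing) or only when deserializing (reading).
    #define TOY_SER_WRITE(stmt) do { if (ser_action.write) { stmt; } } while (0)
    #define TOY_SER_READ(stmt)  do { if (!ser_action.write) { stmt; } } while (0)

    struct Action { bool write; };

    struct Entry {
        int id{0};
        std::string label;

        template <typename Stream>
        void SerializationOps(Stream& s, Action ser_action)
        {
            std::string label_str;
            TOY_SER_WRITE(label_str = label);                   // convert before writing
            if (ser_action.write) { s << id << ' ' << label_str << '\n'; }
            else                  { s >> id >> label_str; }
            TOY_SER_READ(label = label_str);                    // convert back after reading
        }
    };

    int main()
    {
        std::stringstream ss;
        Entry a{7, "coffee"};
        a.SerializationOps(ss, Action{true});   // write
        Entry b;
        b.SerializationOps(ss, Action{false});  // read the same body back
        std::cout << b.id << ' ' << b.label << '\n';
    }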
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index 1084ec9725..b1e61e03b3 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -317,16 +317,10 @@ WalletModel::EncryptionStatus WalletModel::getEncryptionStatus() const
bool WalletModel::setWalletEncrypted(bool encrypted, const SecureString &passphrase)
{
- if(encrypted)
- {
- // Encrypt
+ if (encrypted) {
return m_wallet->encryptWallet(passphrase);
}
- else
- {
- // Decrypt -- TODO; not supported yet
- return false;
- }
+ return false;
}
bool WalletModel::setWalletLocked(bool locked, const SecureString &passPhrase)
diff --git a/src/rest.cpp b/src/rest.cpp
index 1d381696d1..cde8b472d3 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -18,6 +18,7 @@
#include <sync.h>
#include <txmempool.h>
#include <util/check.h>
+#include <util/ref.h>
#include <util/strencodings.h>
#include <validation.h>
#include <version.h>
@@ -75,13 +76,14 @@ static bool RESTERR(HTTPRequest* req, enum HTTPStatusCode status, std::string me
* @param[in] req the HTTP request
* return pointer to the mempool or nullptr if no mempool found
*/
-static CTxMemPool* GetMemPool(HTTPRequest* req)
+static CTxMemPool* GetMemPool(const util::Ref& context, HTTPRequest* req)
{
- if (!g_rpc_node || !g_rpc_node->mempool) {
+ NodeContext* node = context.Has<NodeContext>() ? &context.Get<NodeContext>() : nullptr;
+ if (!node || !node->mempool) {
RESTERR(req, HTTP_NOT_FOUND, "Mempool disabled or instance not found");
return nullptr;
}
- return g_rpc_node->mempool;
+ return node->mempool;
}
static RetFormat ParseDataFormat(std::string& param, const std::string& strReq)
@@ -129,7 +131,8 @@ static bool CheckWarmup(HTTPRequest* req)
return true;
}
-static bool rest_headers(HTTPRequest* req,
+static bool rest_headers(const util::Ref& context,
+ HTTPRequest* req,
const std::string& strURIPart)
{
if (!CheckWarmup(req))
@@ -270,12 +273,12 @@ static bool rest_block(HTTPRequest* req,
}
}
-static bool rest_block_extended(HTTPRequest* req, const std::string& strURIPart)
+static bool rest_block_extended(const util::Ref& context, HTTPRequest* req, const std::string& strURIPart)
{
return rest_block(req, strURIPart, true);
}
-static bool rest_block_notxdetails(HTTPRequest* req, const std::string& strURIPart)
+static bool rest_block_notxdetails(const util::Ref& context, HTTPRequest* req, const std::string& strURIPart)
{
return rest_block(req, strURIPart, false);
}
@@ -283,7 +286,7 @@ static bool rest_block_notxdetails(HTTPRequest* req, const std::string& strURIPa
// A bit of a hack - dependency on a function defined in rpc/blockchain.cpp
UniValue getblockchaininfo(const JSONRPCRequest& request);
-static bool rest_chaininfo(HTTPRequest* req, const std::string& strURIPart)
+static bool rest_chaininfo(const util::Ref& context, HTTPRequest* req, const std::string& strURIPart)
{
if (!CheckWarmup(req))
return false;
@@ -292,7 +295,7 @@ static bool rest_chaininfo(HTTPRequest* req, const std::string& strURIPart)
switch (rf) {
case RetFormat::JSON: {
- JSONRPCRequest jsonRequest;
+ JSONRPCRequest jsonRequest(context);
jsonRequest.params = UniValue(UniValue::VARR);
UniValue chainInfoObject = getblockchaininfo(jsonRequest);
std::string strJSON = chainInfoObject.write() + "\n";
@@ -306,11 +309,11 @@ static bool rest_chaininfo(HTTPRequest* req, const std::string& strURIPart)
}
}
-static bool rest_mempool_info(HTTPRequest* req, const std::string& strURIPart)
+static bool rest_mempool_info(const util::Ref& context, HTTPRequest* req, const std::string& strURIPart)
{
if (!CheckWarmup(req))
return false;
- const CTxMemPool* mempool = GetMemPool(req);
+ const CTxMemPool* mempool = GetMemPool(context, req);
if (!mempool) return false;
std::string param;
const RetFormat rf = ParseDataFormat(param, strURIPart);
@@ -330,10 +333,10 @@ static bool rest_mempool_info(HTTPRequest* req, const std::string& strURIPart)
}
}
-static bool rest_mempool_contents(HTTPRequest* req, const std::string& strURIPart)
+static bool rest_mempool_contents(const util::Ref& context, HTTPRequest* req, const std::string& strURIPart)
{
if (!CheckWarmup(req)) return false;
- const CTxMemPool* mempool = GetMemPool(req);
+ const CTxMemPool* mempool = GetMemPool(context, req);
if (!mempool) return false;
std::string param;
const RetFormat rf = ParseDataFormat(param, strURIPart);
@@ -353,7 +356,7 @@ static bool rest_mempool_contents(HTTPRequest* req, const std::string& strURIPar
}
}
-static bool rest_tx(HTTPRequest* req, const std::string& strURIPart)
+static bool rest_tx(const util::Ref& context, HTTPRequest* req, const std::string& strURIPart)
{
if (!CheckWarmup(req))
return false;
@@ -409,7 +412,7 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart)
}
}
-static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart)
+static bool rest_getutxos(const util::Ref& context, HTTPRequest* req, const std::string& strURIPart)
{
if (!CheckWarmup(req))
return false;
@@ -518,7 +521,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart)
};
if (fCheckMemPool) {
- const CTxMemPool* mempool = GetMemPool(req);
+ const CTxMemPool* mempool = GetMemPool(context, req);
if (!mempool) return false;
// use db+mempool as cache backend in case user likes to query mempool
LOCK2(cs_main, mempool->cs);
@@ -595,7 +598,7 @@ static bool rest_getutxos(HTTPRequest* req, const std::string& strURIPart)
}
}
-static bool rest_blockhash_by_height(HTTPRequest* req,
+static bool rest_blockhash_by_height(const util::Ref& context, HTTPRequest* req,
const std::string& str_uri_part)
{
if (!CheckWarmup(req)) return false;
@@ -643,7 +646,7 @@ static bool rest_blockhash_by_height(HTTPRequest* req,
static const struct {
const char* prefix;
- bool (*handler)(HTTPRequest* req, const std::string& strReq);
+ bool (*handler)(const util::Ref& context, HTTPRequest* req, const std::string& strReq);
} uri_prefixes[] = {
{"/rest/tx/", rest_tx},
{"/rest/block/notxdetails/", rest_block_notxdetails},
@@ -656,10 +659,12 @@ static const struct {
{"/rest/blockhashbyheight/", rest_blockhash_by_height},
};
-void StartREST()
+void StartREST(const util::Ref& context)
{
- for (unsigned int i = 0; i < ARRAYLEN(uri_prefixes); i++)
- RegisterHTTPHandler(uri_prefixes[i].prefix, false, uri_prefixes[i].handler);
+ for (const auto& up : uri_prefixes) {
+ auto handler = [&context, up](HTTPRequest* req, const std::string& prefix) { return up.handler(context, req, prefix); };
+ RegisterHTTPHandler(up.prefix, false, handler);
+ }
}
void InterruptREST()
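With StartREST taking the context, every handler in uri_prefixes now receives it explicitly: registration wraps each table entry in a lambda that closes over the util::Ref and forwards it as the first argument, instead of letting handlers reach for a global. A small standalone sketch of binding shared state into registered handlers this way (toy registry, hypothetical HandlePing/Register names):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    struct Context { int request_count{0}; };

    using BoundHandler = std::function<bool(const std::string& path)>;
    static std::map<std::string, BoundHandler> g_handlers; // toy handler registry

    // Handlers take the shared context explicitly instead of using a global.
    static bool HandlePing(Context& ctx, const std::string& path)
    {
        ++ctx.request_count;
        std::cout << "ping via " << path << '\n';
        return true;
    }

    void Register(Context& ctx)
    {
        // The lambda closes over ctx; the registration code stays generic.
        g_handlers["/ping/"] = [&ctx](const std::string& path) { return HandlePing(ctx, path); };
    }

    int main()
    {
        Context ctx;
        Register(ctx);
        g_handlers["/ping/"]("/ping/now");
        std::cout << ctx.request_count << " request(s) handled\n";
    }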
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 2c984603ff..7d43de6646 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -29,6 +29,7 @@
#include <txdb.h>
#include <txmempool.h>
#include <undo.h>
+#include <util/ref.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <validation.h>
@@ -53,13 +54,27 @@ static Mutex cs_blockchange;
static std::condition_variable cond_blockchange;
static CUpdatedBlock latestblock;
-CTxMemPool& EnsureMemPool()
+NodeContext& EnsureNodeContext(const util::Ref& context)
{
- CHECK_NONFATAL(g_rpc_node);
- if (!g_rpc_node->mempool) {
+ if (!context.Has<NodeContext>()) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Node context not found");
+ }
+ return context.Get<NodeContext>();
+}
+
+CTxMemPool& EnsureMemPool(const util::Ref& context)
+{
+ NodeContext& node = EnsureNodeContext(context);
+ if (!node.mempool) {
throw JSONRPCError(RPC_CLIENT_MEMPOOL_DISABLED, "Mempool disabled or instance not found");
}
- return *g_rpc_node->mempool;
+ return *node.mempool;
+}
+
+ChainstateManager& EnsureChainman(const util::Ref& context)
+{
+ NodeContext& node = EnsureNodeContext(context);
+ return EnsureChainman(node);
}
/* Calculate the difficulty for a given block index.
@@ -399,6 +414,7 @@ static std::vector<RPCResult> MempoolEntryDescription() { return {
RPCResult{RPCResult::Type::ARR, "spentby", "unconfirmed transactions spending outputs from this transaction",
{RPCResult{RPCResult::Type::STR_HEX, "transactionid", "child transaction id"}}},
RPCResult{RPCResult::Type::BOOL, "bip125-replaceable", "Whether this transaction could be replaced due to BIP125 (replace-by-fee)"},
+ RPCResult{RPCResult::Type::BOOL, "unbroadcast", "Whether this transaction is currently unbroadcast (initial broadcast not yet confirmed)"},
};}
static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPoolEntry& e) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
@@ -460,6 +476,7 @@ static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPool
}
info.pushKV("bip125-replaceable", rbfStatus);
+ info.pushKV("unbroadcast", pool.IsUnbroadcastTx(tx.GetHash()));
}
UniValue MempoolToJSON(const CTxMemPool& pool, bool verbose)
@@ -519,7 +536,7 @@ static UniValue getrawmempool(const JSONRPCRequest& request)
if (!request.params[0].isNull())
fVerbose = request.params[0].get_bool();
- return MempoolToJSON(EnsureMemPool(), fVerbose);
+ return MempoolToJSON(EnsureMemPool(request.context), fVerbose);
}
static UniValue getmempoolancestors(const JSONRPCRequest& request)
@@ -549,7 +566,7 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request)
uint256 hash = ParseHashV(request.params[0], "parameter 1");
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
LOCK(mempool.cs);
CTxMemPool::txiter it = mempool.mapTx.find(hash);
@@ -612,7 +629,7 @@ static UniValue getmempooldescendants(const JSONRPCRequest& request)
uint256 hash = ParseHashV(request.params[0], "parameter 1");
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
LOCK(mempool.cs);
CTxMemPool::txiter it = mempool.mapTx.find(hash);
@@ -662,7 +679,7 @@ static UniValue getmempoolentry(const JSONRPCRequest& request)
uint256 hash = ParseHashV(request.params[0], "parameter 1");
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
LOCK(mempool.cs);
CTxMemPool::txiter it = mempool.mapTx.find(hash);
@@ -979,7 +996,7 @@ static UniValue gettxoutsetinfo(const JSONRPCRequest& request)
::ChainstateActive().ForceFlushStateToDisk();
CCoinsView* coins_view = WITH_LOCK(cs_main, return &ChainstateActive().CoinsDB());
- if (GetUTXOStats(coins_view, stats)) {
+ if (GetUTXOStats(coins_view, stats, RpcInterruptionPoint)) {
ret.pushKV("height", (int64_t)stats.nHeight);
ret.pushKV("bestblock", stats.hashBlock.GetHex());
ret.pushKV("transactions", (int64_t)stats.nTransactions);
@@ -1045,7 +1062,7 @@ UniValue gettxout(const JSONRPCRequest& request)
CCoinsViewCache* coins_view = &::ChainstateActive().CoinsTip();
if (fMempool) {
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
LOCK(mempool.cs);
CCoinsViewMemPool view(coins_view, mempool);
if (!view.GetCoin(out, coin) || mempool.isSpent(out)) {
@@ -1389,7 +1406,7 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool)
ret.pushKV("maxmempool", (int64_t) maxmempool);
ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(maxmempool), ::minRelayTxFee).GetFeePerK()));
ret.pushKV("minrelaytxfee", ValueFromAmount(::minRelayTxFee.GetFeePerK()));
-
+ ret.pushKV("unbroadcastcount", uint64_t{pool.GetUnbroadcastTxs().size()});
return ret;
}
@@ -1408,6 +1425,7 @@ static UniValue getmempoolinfo(const JSONRPCRequest& request)
{RPCResult::Type::NUM, "maxmempool", "Maximum memory usage for the mempool"},
{RPCResult::Type::STR_AMOUNT, "mempoolminfee", "Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee"},
{RPCResult::Type::STR_AMOUNT, "minrelaytxfee", "Current minimum relay fee for transactions"},
+ {RPCResult::Type::NUM, "unbroadcastcount", "Current number of transactions that haven't passed initial broadcast yet"}
}},
RPCExamples{
HelpExampleCli("getmempoolinfo", "")
@@ -1415,7 +1433,7 @@ static UniValue getmempoolinfo(const JSONRPCRequest& request)
},
}.Check(request);
- return MempoolInfoToJSON(EnsureMemPool());
+ return MempoolInfoToJSON(EnsureMemPool(request.context));
}
static UniValue preciousblock(const JSONRPCRequest& request)
@@ -1934,7 +1952,7 @@ static UniValue savemempool(const JSONRPCRequest& request)
},
}.Check(request);
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
if (!mempool.IsLoaded()) {
throw JSONRPCError(RPC_MISC_ERROR, "The mempool was not loaded yet");
@@ -1956,6 +1974,7 @@ bool FindScriptPubKey(std::atomic<int>& scan_progress, const std::atomic<bool>&
Coin coin;
if (!cursor->GetKey(key) || !cursor->GetValue(coin)) return false;
if (++count % 8192 == 0) {
+ RpcInterruptionPoint();
if (should_abort) {
// allow to abort the scan via the abort reference
return false;
@@ -2299,7 +2318,7 @@ UniValue dumptxoutset(const JSONRPCRequest& request)
::ChainstateActive().ForceFlushStateToDisk();
- if (!GetUTXOStats(&::ChainstateActive().CoinsDB(), stats)) {
+ if (!GetUTXOStats(&::ChainstateActive().CoinsDB(), stats, RpcInterruptionPoint)) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set");
}
@@ -2317,9 +2336,7 @@ UniValue dumptxoutset(const JSONRPCRequest& request)
unsigned int iter{0};
while (pcursor->Valid()) {
- if (iter % 5000 == 0 && !IsRPCRunning()) {
- throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down");
- }
+ if (iter % 5000 == 0) RpcInterruptionPoint();
++iter;
if (pcursor->GetKey(key) && pcursor->GetValue(coin)) {
afile << key;
@@ -2385,5 +2402,3 @@ static const CRPCCommand commands[] =
for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++)
t.appendCommand(commands[vcidx].name, &commands[vcidx]);
}
-
-NodeContext* g_rpc_node = nullptr;
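With g_rpc_node gone, RPC handlers obtain node state through request.context and the Ensure* helpers defined above. The following is a hypothetical handler sketch, not code from the diff; it assumes the declarations added to rpc/blockchain.h plus the usual node/validation headers, and only illustrates how the accessors are meant to be called:

    // Hypothetical RPC handler (illustration only).
    static UniValue getexamplestats(const JSONRPCRequest& request)
    {
        NodeContext& node = EnsureNodeContext(request.context);     // throws if no NodeContext was attached
        const CTxMemPool& mempool = EnsureMemPool(request.context); // throws RPC_CLIENT_MEMPOOL_DISABLED if unset
        ChainstateManager& chainman = EnsureChainman(request.context);

        UniValue ret(UniValue::VOBJ);
        ret.pushKV("mempool_txs", (int64_t)mempool.size());
        ret.pushKV("have_connman", node.connman != nullptr);
        LOCK(cs_main);
        ret.pushKV("chainstates", (int64_t)chainman.GetAll().size());
        return ret;
    }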
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index 54165af707..5c9a43b13e 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -16,8 +16,12 @@ extern RecursiveMutex cs_main;
class CBlock;
class CBlockIndex;
class CTxMemPool;
+class ChainstateManager;
class UniValue;
struct NodeContext;
+namespace util {
+class Ref;
+} // namespace util
static constexpr int NUM_GETBLOCKSTATS_PERCENTILES = 5;
@@ -47,11 +51,8 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex
/** Used by getblockstats to get feerates at different percentiles by weight */
void CalculatePercentilesByWeight(CAmount result[NUM_GETBLOCKSTATS_PERCENTILES], std::vector<std::pair<CAmount, int64_t>>& scores, int64_t total_weight);
-//! Pointer to node state that needs to be declared as a global to be accessible
-//! RPC methods. Due to limitations of the RPC framework, there's currently no
-//! direct way to pass in state to RPC methods without globals.
-extern NodeContext* g_rpc_node;
-
-CTxMemPool& EnsureMemPool();
+NodeContext& EnsureNodeContext(const util::Ref& context);
+CTxMemPool& EnsureMemPool(const util::Ref& context);
+ChainstateManager& EnsureChainman(const util::Ref& context);
#endif
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 59ab80bcd5..3612f14bbf 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -101,7 +101,7 @@ static UniValue getnetworkhashps(const JSONRPCRequest& request)
return GetNetworkHashPS(!request.params[0].isNull() ? request.params[0].get_int() : 120, !request.params[1].isNull() ? request.params[1].get_int() : -1);
}
-static bool GenerateBlock(CBlock& block, uint64_t& max_tries, unsigned int& extra_nonce, uint256& block_hash)
+static bool GenerateBlock(ChainstateManager& chainman, CBlock& block, uint64_t& max_tries, unsigned int& extra_nonce, uint256& block_hash)
{
block_hash.SetNull();
@@ -124,14 +124,15 @@ static bool GenerateBlock(CBlock& block, uint64_t& max_tries, unsigned int& extr
}
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block);
- if (!ProcessNewBlock(chainparams, shared_pblock, true, nullptr))
+ if (!chainman.ProcessNewBlock(chainparams, shared_pblock, true, nullptr)) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted");
+ }
block_hash = block.GetHash();
return true;
}
-static UniValue generateBlocks(const CTxMemPool& mempool, const CScript& coinbase_script, int nGenerate, uint64_t nMaxTries)
+static UniValue generateBlocks(ChainstateManager& chainman, const CTxMemPool& mempool, const CScript& coinbase_script, int nGenerate, uint64_t nMaxTries)
{
int nHeightEnd = 0;
int nHeight = 0;
@@ -151,7 +152,7 @@ static UniValue generateBlocks(const CTxMemPool& mempool, const CScript& coinbas
CBlock *pblock = &pblocktemplate->block;
uint256 block_hash;
- if (!GenerateBlock(*pblock, nMaxTries, nExtraNonce, block_hash)) {
+ if (!GenerateBlock(chainman, *pblock, nMaxTries, nExtraNonce, block_hash)) {
break;
}
@@ -227,9 +228,10 @@ static UniValue generatetodescriptor(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, error);
}
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
+ ChainstateManager& chainman = EnsureChainman(request.context);
- return generateBlocks(mempool, coinbase_script, num_blocks, max_tries);
+ return generateBlocks(chainman, mempool, coinbase_script, num_blocks, max_tries);
}
static UniValue generatetoaddress(const JSONRPCRequest& request)
@@ -265,11 +267,12 @@ static UniValue generatetoaddress(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Error: Invalid address");
}
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
+ ChainstateManager& chainman = EnsureChainman(request.context);
CScript coinbase_script = GetScriptForDestination(destination);
- return generateBlocks(mempool, coinbase_script, nGenerate, nMaxTries);
+ return generateBlocks(chainman, mempool, coinbase_script, nGenerate, nMaxTries);
}
static UniValue generateblock(const JSONRPCRequest& request)
@@ -311,7 +314,7 @@ static UniValue generateblock(const JSONRPCRequest& request)
coinbase_script = GetScriptForDestination(destination);
}
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
std::vector<CTransactionRef> txs;
const auto raw_txs_or_txids = request.params[1].get_array();
@@ -370,7 +373,7 @@ static UniValue generateblock(const JSONRPCRequest& request)
uint64_t max_tries{1000000};
unsigned int extra_nonce{0};
- if (!GenerateBlock(block, max_tries, extra_nonce, block_hash) || block_hash.IsNull()) {
+ if (!GenerateBlock(EnsureChainman(request.context), block, max_tries, extra_nonce, block_hash) || block_hash.IsNull()) {
throw JSONRPCError(RPC_MISC_ERROR, "Failed to make block.");
}
@@ -403,7 +406,7 @@ static UniValue getmininginfo(const JSONRPCRequest& request)
}.Check(request);
LOCK(cs_main);
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
UniValue obj(UniValue::VOBJ);
obj.pushKV("blocks", (int)::ChainActive().Height());
@@ -449,7 +452,7 @@ static UniValue prioritisetransaction(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_PARAMETER, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.");
}
- EnsureMemPool().PrioritiseTransaction(hash, nAmount);
+ EnsureMemPool(request.context).PrioritiseTransaction(hash, nAmount);
return true;
}
@@ -635,17 +638,18 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
if (strMode != "template")
throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode");
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
- if (g_rpc_node->connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0)
+ if (node.connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0)
throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, PACKAGE_NAME " is not connected!");
if (::ChainstateActive().IsInitialBlockDownload())
throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, PACKAGE_NAME " is in initial sync and waiting for blocks...");
static unsigned int nTransactionsUpdatedLast;
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
if (!lpval.isNull())
{
@@ -946,7 +950,7 @@ static UniValue submitblock(const JSONRPCRequest& request)
bool new_block;
auto sc = std::make_shared<submitblock_StateCatcher>(block.GetHash());
RegisterSharedValidationInterface(sc);
- bool accepted = ProcessNewBlock(Params(), blockptr, /* fForceProcessing */ true, /* fNewBlock */ &new_block);
+ bool accepted = EnsureChainman(request.context).ProcessNewBlock(Params(), blockptr, /* fForceProcessing */ true, /* fNewBlock */ &new_block);
UnregisterSharedValidationInterface(sc);
if (!new_block && accepted) {
return "duplicate";
@@ -985,7 +989,7 @@ static UniValue submitheader(const JSONRPCRequest& request)
}
BlockValidationState state;
- ProcessNewBlockHeaders({h}, state, Params());
+ EnsureChainman(request.context).ProcessNewBlockHeaders({h}, state, Params());
if (state.IsValid()) return NullUniValue;
if (state.IsError()) {
throw JSONRPCError(RPC_VERIFY_ERROR, state.ToString());
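The mining RPCs above now obtain node state from request.context rather than the g_rpc_node global. A minimal sketch of that lookup pattern in a hypothetical handler (the handler name and result fields are illustrative; EnsureMemPool/EnsureChainman are the helpers this change routes everything through):

    static UniValue examplestatus(const JSONRPCRequest& request)
    {
        // Both helpers throw a JSONRPCError if the component is missing from the context.
        const CTxMemPool& mempool = EnsureMemPool(request.context);
        ChainstateManager& chainman = EnsureChainman(request.context);

        LOCK(cs_main);
        UniValue obj(UniValue::VOBJ);
        obj.pushKV("blocks", chainman.ActiveChain().Height());
        obj.pushKV("mempooltxs", (int64_t)mempool.size());
        return obj;
    }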
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index f3c5fed858..ce98a7c937 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -15,6 +15,7 @@
#include <script/descriptor.h>
#include <util/check.h>
#include <util/message.h> // For MessageSign(), MessageVerify()
+#include <util/ref.h>
#include <util/strencodings.h>
#include <util/system.h>
@@ -366,8 +367,8 @@ static UniValue setmocktime(const JSONRPCRequest& request)
RPCTypeCheck(request.params, {UniValue::VNUM});
int64_t time = request.params[0].get_int64();
SetMockTime(time);
- if (g_rpc_node) {
- for (const auto& chain_client : g_rpc_node->chain_clients) {
+ if (request.context.Has<NodeContext>()) {
+ for (const auto& chain_client : request.context.Get<NodeContext>().chain_clients) {
chain_client->setMockTime(time);
}
}
@@ -398,9 +399,10 @@ static UniValue mockscheduler(const JSONRPCRequest& request)
}
// protect against null pointer dereference
- CHECK_NONFATAL(g_rpc_node);
- CHECK_NONFATAL(g_rpc_node->scheduler);
- g_rpc_node->scheduler->MockForward(std::chrono::seconds(delta_seconds));
+ CHECK_NONFATAL(request.context.Has<NodeContext>());
+ NodeContext& node = request.context.Get<NodeContext>();
+ CHECK_NONFATAL(node.scheduler);
+ node.scheduler->MockForward(std::chrono::seconds(delta_seconds));
return NullUniValue;
}
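The misc.cpp hunks show the two access styles for the new context: setmocktime treats NodeContext as optional, while mockscheduler requires it. Both styles condensed into one sketch, using only the util::Ref and CHECK_NONFATAL calls that appear above:

    void ExampleContextUse(const util::Ref& context)
    {
        // Optional: skip quietly when no NodeContext was attached (setmocktime style).
        if (context.Has<NodeContext>()) {
            NodeContext& node = context.Get<NodeContext>();
            (void)node.chain_clients; // e.g. forward a mock time to each chain client
        }
        // Required: a missing NodeContext is an internal error (mockscheduler style).
        CHECK_NONFATAL(context.Has<NodeContext>());
        NodeContext& node_required = context.Get<NodeContext>();
        CHECK_NONFATAL(node_required.scheduler);
    }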
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index d6d15f8b56..e29aa03695 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -42,10 +42,11 @@ static UniValue getconnectioncount(const JSONRPCRequest& request)
},
}.Check(request);
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
- return (int)g_rpc_node->connman->GetNodeCount(CConnman::CONNECTIONS_ALL);
+ return (int)node.connman->GetNodeCount(CConnman::CONNECTIONS_ALL);
}
static UniValue ping(const JSONRPCRequest& request)
@@ -62,11 +63,12 @@ static UniValue ping(const JSONRPCRequest& request)
},
}.Check(request);
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
// Request that each node send a ping during next message processing pass
- g_rpc_node->connman->ForEachNode([](CNode* pnode) {
+ node.connman->ForEachNode([](CNode* pnode) {
pnode->fPingQueued = true;
});
return NullUniValue;
@@ -139,11 +141,12 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
},
}.Check(request);
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
std::vector<CNodeStats> vstats;
- g_rpc_node->connman->GetNodeStats(vstats);
+ node.connman->GetNodeStats(vstats);
UniValue ret(UniValue::VARR);
@@ -248,7 +251,8 @@ static UniValue addnode(const JSONRPCRequest& request)
},
}.ToString());
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
std::string strNode = request.params[0].get_str();
@@ -256,18 +260,18 @@ static UniValue addnode(const JSONRPCRequest& request)
if (strCommand == "onetry")
{
CAddress addr;
- g_rpc_node->connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true);
+ node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true);
return NullUniValue;
}
if (strCommand == "add")
{
- if(!g_rpc_node->connman->AddNode(strNode))
+ if(!node.connman->AddNode(strNode))
throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: Node already added");
}
else if(strCommand == "remove")
{
- if(!g_rpc_node->connman->RemoveAddedNode(strNode))
+ if(!node.connman->RemoveAddedNode(strNode))
throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added.");
}
@@ -293,7 +297,8 @@ static UniValue disconnectnode(const JSONRPCRequest& request)
},
}.Check(request);
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
bool success;
@@ -302,11 +307,11 @@ static UniValue disconnectnode(const JSONRPCRequest& request)
if (!address_arg.isNull() && id_arg.isNull()) {
/* handle disconnect-by-address */
- success = g_rpc_node->connman->DisconnectNode(address_arg.get_str());
+ success = node.connman->DisconnectNode(address_arg.get_str());
} else if (!id_arg.isNull() && (address_arg.isNull() || (address_arg.isStr() && address_arg.get_str().empty()))) {
/* handle disconnect-by-id */
NodeId nodeid = (NodeId) id_arg.get_int64();
- success = g_rpc_node->connman->DisconnectNode(nodeid);
+ success = node.connman->DisconnectNode(nodeid);
} else {
throw JSONRPCError(RPC_INVALID_PARAMS, "Only one of address and nodeid should be provided.");
}
@@ -350,10 +355,11 @@ static UniValue getaddednodeinfo(const JSONRPCRequest& request)
},
}.Check(request);
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
- std::vector<AddedNodeInfo> vInfo = g_rpc_node->connman->GetAddedNodeInfo();
+ std::vector<AddedNodeInfo> vInfo = node.connman->GetAddedNodeInfo();
if (!request.params[0].isNull()) {
bool found = false;
@@ -417,21 +423,22 @@ static UniValue getnettotals(const JSONRPCRequest& request)
+ HelpExampleRpc("getnettotals", "")
},
}.Check(request);
- if(!g_rpc_node->connman)
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.connman)
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
UniValue obj(UniValue::VOBJ);
- obj.pushKV("totalbytesrecv", g_rpc_node->connman->GetTotalBytesRecv());
- obj.pushKV("totalbytessent", g_rpc_node->connman->GetTotalBytesSent());
+ obj.pushKV("totalbytesrecv", node.connman->GetTotalBytesRecv());
+ obj.pushKV("totalbytessent", node.connman->GetTotalBytesSent());
obj.pushKV("timemillis", GetTimeMillis());
UniValue outboundLimit(UniValue::VOBJ);
- outboundLimit.pushKV("timeframe", g_rpc_node->connman->GetMaxOutboundTimeframe());
- outboundLimit.pushKV("target", g_rpc_node->connman->GetMaxOutboundTarget());
- outboundLimit.pushKV("target_reached", g_rpc_node->connman->OutboundTargetReached(false));
- outboundLimit.pushKV("serve_historical_blocks", !g_rpc_node->connman->OutboundTargetReached(true));
- outboundLimit.pushKV("bytes_left_in_cycle", g_rpc_node->connman->GetOutboundTargetBytesLeft());
- outboundLimit.pushKV("time_left_in_cycle", g_rpc_node->connman->GetMaxOutboundTimeLeftInCycle());
+ outboundLimit.pushKV("timeframe", node.connman->GetMaxOutboundTimeframe());
+ outboundLimit.pushKV("target", node.connman->GetMaxOutboundTarget());
+ outboundLimit.pushKV("target_reached", node.connman->OutboundTargetReached(false));
+ outboundLimit.pushKV("serve_historical_blocks", !node.connman->OutboundTargetReached(true));
+ outboundLimit.pushKV("bytes_left_in_cycle", node.connman->GetOutboundTargetBytesLeft());
+ outboundLimit.pushKV("time_left_in_cycle", node.connman->GetMaxOutboundTimeLeftInCycle());
obj.pushKV("uploadtarget", outboundLimit);
return obj;
}
@@ -513,16 +520,17 @@ static UniValue getnetworkinfo(const JSONRPCRequest& request)
obj.pushKV("version", CLIENT_VERSION);
obj.pushKV("subversion", strSubVersion);
obj.pushKV("protocolversion",PROTOCOL_VERSION);
- if (g_rpc_node->connman) {
- ServiceFlags services = g_rpc_node->connman->GetLocalServices();
+ NodeContext& node = EnsureNodeContext(request.context);
+ if (node.connman) {
+ ServiceFlags services = node.connman->GetLocalServices();
obj.pushKV("localservices", strprintf("%016x", services));
obj.pushKV("localservicesnames", GetServicesNames(services));
}
obj.pushKV("localrelay", g_relay_txes);
obj.pushKV("timeoffset", GetTimeOffset());
- if (g_rpc_node->connman) {
- obj.pushKV("networkactive", g_rpc_node->connman->GetNetworkActive());
- obj.pushKV("connections", (int)g_rpc_node->connman->GetNodeCount(CConnman::CONNECTIONS_ALL));
+ if (node.connman) {
+ obj.pushKV("networkactive", node.connman->GetNetworkActive());
+ obj.pushKV("connections", (int)node.connman->GetNodeCount(CConnman::CONNECTIONS_ALL));
}
obj.pushKV("networks", GetNetworksInfo());
obj.pushKV("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK()));
@@ -567,7 +575,8 @@ static UniValue setban(const JSONRPCRequest& request)
if (request.fHelp || !help.IsValidNumArgs(request.params.size()) || (strCommand != "add" && strCommand != "remove")) {
throw std::runtime_error(help.ToString());
}
- if (!g_rpc_node->banman) {
+ NodeContext& node = EnsureNodeContext(request.context);
+ if (!node.banman) {
throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
}
@@ -591,7 +600,7 @@ static UniValue setban(const JSONRPCRequest& request)
if (strCommand == "add")
{
- if (isSubnet ? g_rpc_node->banman->IsBanned(subNet) : g_rpc_node->banman->IsBanned(netAddr)) {
+ if (isSubnet ? node.banman->IsBanned(subNet) : node.banman->IsBanned(netAddr)) {
throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: IP/Subnet already banned");
}
@@ -604,20 +613,20 @@ static UniValue setban(const JSONRPCRequest& request)
absolute = true;
if (isSubnet) {
- g_rpc_node->banman->Ban(subNet, BanReasonManuallyAdded, banTime, absolute);
- if (g_rpc_node->connman) {
- g_rpc_node->connman->DisconnectNode(subNet);
+ node.banman->Ban(subNet, BanReasonManuallyAdded, banTime, absolute);
+ if (node.connman) {
+ node.connman->DisconnectNode(subNet);
}
} else {
- g_rpc_node->banman->Ban(netAddr, BanReasonManuallyAdded, banTime, absolute);
- if (g_rpc_node->connman) {
- g_rpc_node->connman->DisconnectNode(netAddr);
+ node.banman->Ban(netAddr, BanReasonManuallyAdded, banTime, absolute);
+ if (node.connman) {
+ node.connman->DisconnectNode(netAddr);
}
}
}
else if(strCommand == "remove")
{
- if (!( isSubnet ? g_rpc_node->banman->Unban(subNet) : g_rpc_node->banman->Unban(netAddr) )) {
+ if (!( isSubnet ? node.banman->Unban(subNet) : node.banman->Unban(netAddr) )) {
throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. Requested address/subnet was not previously banned.");
}
}
@@ -645,12 +654,13 @@ static UniValue listbanned(const JSONRPCRequest& request)
},
}.Check(request);
- if(!g_rpc_node->banman) {
+ NodeContext& node = EnsureNodeContext(request.context);
+ if(!node.banman) {
throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
}
banmap_t banMap;
- g_rpc_node->banman->GetBanned(banMap);
+ node.banman->GetBanned(banMap);
UniValue bannedAddresses(UniValue::VARR);
for (const auto& entry : banMap)
@@ -679,11 +689,12 @@ static UniValue clearbanned(const JSONRPCRequest& request)
+ HelpExampleRpc("clearbanned", "")
},
}.Check(request);
- if (!g_rpc_node->banman) {
+ NodeContext& node = EnsureNodeContext(request.context);
+ if (!node.banman) {
throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
}
- g_rpc_node->banman->ClearBanned();
+ node.banman->ClearBanned();
return NullUniValue;
}
@@ -699,13 +710,14 @@ static UniValue setnetworkactive(const JSONRPCRequest& request)
RPCExamples{""},
}.Check(request);
- if (!g_rpc_node->connman) {
+ NodeContext& node = EnsureNodeContext(request.context);
+ if (!node.connman) {
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
}
- g_rpc_node->connman->SetNetworkActive(request.params[0].get_bool());
+ node.connman->SetNetworkActive(request.params[0].get_bool());
- return g_rpc_node->connman->GetNetworkActive();
+ return node.connman->GetNetworkActive();
}
static UniValue getnodeaddresses(const JSONRPCRequest& request)
@@ -732,7 +744,8 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request)
+ HelpExampleRpc("getnodeaddresses", "8")
},
}.Check(request);
- if (!g_rpc_node->connman) {
+ NodeContext& node = EnsureNodeContext(request.context);
+ if (!node.connman) {
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
}
@@ -744,7 +757,7 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request)
}
}
// returns a shuffled list of CAddress
- std::vector<CAddress> vAddr = g_rpc_node->connman->GetAddresses();
+ std::vector<CAddress> vAddr = node.connman->GetAddresses();
UniValue ret(UniValue::VARR);
int address_return_count = std::min<int>(count, vAddr.size());
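Every handler in net.cpp now resolves the NodeContext once via EnsureNodeContext and then guards on the specific component it needs, replacing the repeated g_rpc_node dereferences. The recurring shape, condensed (surrounding handler boilerplate elided):

    NodeContext& node = EnsureNodeContext(request.context);
    if (!node.connman) {
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
    }
    node.connman->ForEachNode([](CNode* pnode) { pnode->fPingQueued = true; }); // e.g. the ping handler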
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 063ee1697c..e14217c307 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -653,7 +653,7 @@ static UniValue combinerawtransaction(const JSONRPCRequest& request)
CCoinsView viewDummy;
CCoinsViewCache view(&viewDummy);
{
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
LOCK(cs_main);
LOCK(mempool.cs);
CCoinsViewCache &viewChain = ::ChainstateActive().CoinsTip();
@@ -778,7 +778,8 @@ static UniValue signrawtransactionwithkey(const JSONRPCRequest& request)
for (const CTxIn& txin : mtx.vin) {
coins[txin.prevout]; // Create empty map entry keyed by prevout.
}
- FindCoins(*g_rpc_node, coins);
+ NodeContext& node = EnsureNodeContext(request.context);
+ FindCoins(node, coins);
// Parse the prevtxs array
ParsePrevouts(request.params[2], &keystore, coins);
@@ -837,7 +838,8 @@ static UniValue sendrawtransaction(const JSONRPCRequest& request)
std::string err_string;
AssertLockNotHeld(cs_main);
- const TransactionError err = BroadcastTransaction(*g_rpc_node, tx, err_string, max_raw_tx_fee, /*relay*/ true, /*wait_callback*/ true);
+ NodeContext& node = EnsureNodeContext(request.context);
+ const TransactionError err = BroadcastTransaction(node, tx, err_string, max_raw_tx_fee, /*relay*/ true, /*wait_callback*/ true);
if (TransactionError::OK != err) {
throw JSONRPCTransactionError(err, err_string);
}
@@ -904,7 +906,7 @@ static UniValue testmempoolaccept(const JSONRPCRequest& request)
DEFAULT_MAX_RAW_TX_FEE_RATE :
CFeeRate(AmountFromValue(request.params[1]));
- CTxMemPool& mempool = EnsureMemPool();
+ CTxMemPool& mempool = EnsureMemPool(request.context);
int64_t virtual_size = GetVirtualTransactionSize(*tx);
CAmount max_raw_tx_fee = max_raw_tx_fee_rate.GetFee(virtual_size);
@@ -1555,7 +1557,7 @@ UniValue utxoupdatepsbt(const JSONRPCRequest& request)
CCoinsView viewDummy;
CCoinsViewCache view(&viewDummy);
{
- const CTxMemPool& mempool = EnsureMemPool();
+ const CTxMemPool& mempool = EnsureMemPool(request.context);
LOCK2(cs_main, mempool.cs);
CCoinsViewCache &viewChain = ::ChainstateActive().CoinsTip();
CCoinsViewMemPool viewMempool(&viewChain, mempool);
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index 56cac6661e..7fef45f50e 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -130,20 +130,20 @@ void DeleteAuthCookie()
}
}
-std::vector<UniValue> JSONRPCProcessBatchReply(const UniValue &in, size_t num)
+std::vector<UniValue> JSONRPCProcessBatchReply(const UniValue& in)
{
if (!in.isArray()) {
throw std::runtime_error("Batch must be an array");
}
+ const size_t num {in.size()};
std::vector<UniValue> batch(num);
- for (size_t i=0; i<in.size(); ++i) {
- const UniValue &rec = in[i];
+ for (const UniValue& rec : in.getValues()) {
if (!rec.isObject()) {
- throw std::runtime_error("Batch member must be object");
+ throw std::runtime_error("Batch member must be an object");
}
size_t id = rec["id"].get_int();
if (id >= num) {
- throw std::runtime_error("Batch member id larger than size");
+ throw std::runtime_error("Batch member id is larger than batch size");
}
batch[id] = rec;
}
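JSONRPCProcessBatchReply now sizes its result from the reply array itself, so callers drop the count argument. A sketch of the caller side under that assumption; the request container and the SendBatch transport helper are placeholders, not part of this change:

    std::vector<std::pair<std::string, UniValue>> reqs; // hypothetical {method, params} pairs
    UniValue batch(UniValue::VARR);
    for (size_t i = 0; i < reqs.size(); ++i) {
        batch.push_back(JSONRPCRequestObj(reqs[i].first, reqs[i].second, static_cast<uint64_t>(i)));
    }
    const UniValue batch_reply = SendBatch(batch); // hypothetical transport call
    const std::vector<UniValue> replies = JSONRPCProcessBatchReply(batch_reply);
    // replies[i] holds the reply whose "id" was i; a non-object member or an id outside
    // the batch size fails with the clearer error messages added above.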
diff --git a/src/rpc/request.h b/src/rpc/request.h
index 99eb4f9354..02ec5393a7 100644
--- a/src/rpc/request.h
+++ b/src/rpc/request.h
@@ -10,6 +10,10 @@
#include <univalue.h>
+namespace util {
+class Ref;
+} // namespace util
+
UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id);
UniValue JSONRPCReplyObj(const UniValue& result, const UniValue& error, const UniValue& id);
std::string JSONRPCReply(const UniValue& result, const UniValue& error, const UniValue& id);
@@ -22,7 +26,7 @@ bool GetAuthCookie(std::string *cookie_out);
/** Delete RPC authentication cookie from disk */
void DeleteAuthCookie();
/** Parse JSON-RPC batch reply into a vector */
-std::vector<UniValue> JSONRPCProcessBatchReply(const UniValue &in, size_t num);
+std::vector<UniValue> JSONRPCProcessBatchReply(const UniValue& in);
class JSONRPCRequest
{
@@ -34,8 +38,9 @@ public:
std::string URI;
std::string authUser;
std::string peerAddr;
+ const util::Ref& context;
- JSONRPCRequest() : id(NullUniValue), params(NullUniValue), fHelp(false) {}
+ JSONRPCRequest(const util::Ref& context) : id(NullUniValue), params(NullUniValue), fHelp(false), context(context) {}
void parse(const UniValue& valRequest);
};
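JSONRPCRequest now carries a util::Ref context and has no default constructor, so any caller outside the HTTP server wraps its NodeContext first. A sketch, assuming a NodeContext named node owned by the caller (the RPC test fixture later in this diff does exactly this with m_node):

    util::Ref context{node};          // non-owning; `node` must outlive the request
    JSONRPCRequest request(context);
    request.strMethod = "getblockcount";
    request.params = UniValue(UniValue::VARR);
    request.fHelp = false;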
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 219979f095..99c649d15a 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -11,9 +11,9 @@
#include <util/strencodings.h>
#include <util/system.h>
-#include <boost/signals2/signal.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
+#include <boost/signals2/signal.hpp>
#include <memory> // for unique_ptr
#include <unordered_map>
@@ -309,6 +309,11 @@ bool IsRPCRunning()
return g_rpc_running;
}
+void RpcInterruptionPoint()
+{
+ if (!IsRPCRunning()) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down");
+}
+
void SetRPCWarmupStatus(const std::string& newStatus)
{
LOCK(cs_rpcWarmup);
diff --git a/src/rpc/server.h b/src/rpc/server.h
index c91bf1f613..d7a04ff6e8 100644
--- a/src/rpc/server.h
+++ b/src/rpc/server.h
@@ -9,10 +9,10 @@
#include <amount.h>
#include <rpc/request.h>
+#include <functional>
#include <map>
#include <stdint.h>
#include <string>
-#include <functional>
#include <univalue.h>
@@ -29,6 +29,9 @@ namespace RPCServer
/** Query whether RPC is running */
bool IsRPCRunning();
+/** Throw JSONRPCError if RPC is not running */
+void RpcInterruptionPoint();
+
/**
* Set the RPC warmup status. When this is done, all RPC calls will error out
* immediately with RPC_IN_WARMUP.
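RpcInterruptionPoint gives long-running handlers a cheap way to bail out during shutdown instead of holding up the node. A sketch of the intended call pattern; the loop and its body are illustrative:

    for (const auto& entry : large_work_list) { // hypothetical long-running scan
        RpcInterruptionPoint();                 // throws RPC_CLIENT_NOT_CONNECTED once RPC stops
        ProcessEntry(entry);                    // hypothetical per-entry work
    }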
diff --git a/src/serialize.h b/src/serialize.h
index af75c50ff9..71c2cfa164 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -43,26 +43,6 @@ static const unsigned int MAX_VECTOR_ALLOCATE = 5000000;
struct deserialize_type {};
constexpr deserialize_type deserialize {};
-/**
- * Used to bypass the rule against non-const reference to temporary
- * where it makes sense with wrappers.
- */
-template<typename T>
-inline T& REF(const T& val)
-{
- return const_cast<T&>(val);
-}
-
-/**
- * Used to acquire a non-const pointer "this" to generate bodies
- * of const serialization operations from a template
- */
-template<typename T>
-inline T* NCONST_PTR(const T* val)
-{
- return const_cast<T*>(val);
-}
-
//! Safely convert odd char pointer types to standard ones.
inline char* CharCast(char* c) { return c; }
inline char* CharCast(unsigned char* c) { return (char*)c; }
@@ -194,22 +174,6 @@ template<typename X> const X& ReadWriteAsHelper(const X& x) { return x; }
#define SER_WRITE(obj, code) ::SerWrite(s, ser_action, obj, [&](Stream& s, const Type& obj) { code; })
/**
- * Implement three methods for serializable objects. These are actually wrappers over
- * "SerializationOp" template, which implements the body of each class' serialization
- * code. Adding "ADD_SERIALIZE_METHODS" in the body of the class causes these wrappers to be
- * added as members.
- */
-#define ADD_SERIALIZE_METHODS \
- template<typename Stream> \
- void Serialize(Stream& s) const { \
- NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \
- } \
- template<typename Stream> \
- void Unserialize(Stream& s) { \
- SerializationOp(s, CSerActionUnserialize()); \
- }
-
-/**
* Implement the Ser and Unser methods needed for implementing a formatter (see Using below).
*
* Both Ser and Unser are delegated to a single static method SerializationOps, which is polymorphic
@@ -503,7 +467,7 @@ static inline Wrapper<Formatter, T&> Using(T&& t) { return Wrapper<Formatter, T&
#define VARINT_MODE(obj, mode) Using<VarIntFormatter<mode>>(obj)
#define VARINT(obj) Using<VarIntFormatter<VarIntMode::DEFAULT>>(obj)
#define COMPACTSIZE(obj) Using<CompactSizeFormatter>(obj)
-#define LIMITED_STRING(obj,n) LimitedString< n >(REF(obj))
+#define LIMITED_STRING(obj,n) Using<LimitedStringFormatter<n>>(obj)
/** Serialization wrapper class for integers in VarInt format. */
template<VarIntMode Mode>
@@ -588,31 +552,23 @@ struct CompactSizeFormatter
};
template<size_t Limit>
-class LimitedString
+struct LimitedStringFormatter
{
-protected:
- std::string& string;
-public:
- explicit LimitedString(std::string& _string) : string(_string) {}
-
template<typename Stream>
- void Unserialize(Stream& s)
+ void Unser(Stream& s, std::string& v)
{
size_t size = ReadCompactSize(s);
if (size > Limit) {
throw std::ios_base::failure("String length limit exceeded");
}
- string.resize(size);
- if (size != 0)
- s.read((char*)string.data(), size);
+ v.resize(size);
+ if (size != 0) s.read((char*)v.data(), size);
}
template<typename Stream>
- void Serialize(Stream& s) const
+ void Ser(Stream& s, const std::string& v)
{
- WriteCompactSize(s, string.size());
- if (!string.empty())
- s.write((char*)string.data(), string.size());
+ s << v;
}
};
@@ -1012,7 +968,7 @@ void Unserialize(Stream& is, std::shared_ptr<const T>& p)
/**
- * Support for ADD_SERIALIZE_METHODS and READWRITE macro
+ * Support for SERIALIZE_METHODS and READWRITE macro.
*/
struct CSerActionSerialize
{
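With LimitedString rewritten as a stateless formatter, LIMITED_STRING composes with Using/READWRITE like the other formatters and no longer needs the removed REF() helper to bind a temporary. A sketch with a hypothetical struct:

    struct ExamplePayload {
        std::string m_message;
        SERIALIZE_METHODS(ExamplePayload, obj) { READWRITE(LIMITED_STRING(obj.m_message, 256)); }
    };

The wire format is unchanged: Ser streams the string as before (compact size plus data), while Unser still rejects anything longer than the template limit on read.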
diff --git a/src/sync.cpp b/src/sync.cpp
index b86c57e498..c3312b5a00 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -7,15 +7,19 @@
#endif
#include <sync.h>
-#include <tinyformat.h>
#include <logging.h>
+#include <tinyformat.h>
#include <util/strencodings.h>
#include <util/threadnames.h>
#include <map>
#include <set>
#include <system_error>
+#include <thread>
+#include <unordered_map>
+#include <utility>
+#include <vector>
#ifdef DEBUG_LOCKCONTENTION
#if !defined(HAVE_THREAD_LOCAL)
@@ -73,35 +77,35 @@ private:
int sourceLine;
};
-typedef std::vector<std::pair<void*, CLockLocation> > LockStack;
-typedef std::map<std::pair<void*, void*>, LockStack> LockOrders;
-typedef std::set<std::pair<void*, void*> > InvLockOrders;
+using LockStackItem = std::pair<void*, CLockLocation>;
+using LockStack = std::vector<LockStackItem>;
+using LockStacks = std::unordered_map<std::thread::id, LockStack>;
-struct LockData {
- // Very ugly hack: as the global constructs and destructors run single
- // threaded, we use this boolean to know whether LockData still exists,
- // as DeleteLock can get called by global RecursiveMutex destructors
- // after LockData disappears.
- bool available;
- LockData() : available(true) {}
- ~LockData() { available = false; }
+using LockPair = std::pair<void*, void*>;
+using LockOrders = std::map<LockPair, LockStack>;
+using InvLockOrders = std::set<LockPair>;
+struct LockData {
+ LockStacks m_lock_stacks;
LockOrders lockorders;
InvLockOrders invlockorders;
std::mutex dd_mutex;
};
+
LockData& GetLockData() {
- static LockData lockdata;
- return lockdata;
+ // This approach guarantees that the object is not destroyed until after its last use.
+ // The operating system automatically reclaims all the memory in a program's heap when that program exits.
+ // Since the ~LockData() destructor is never called, the LockData class and all
+ // its subclasses must have implicitly-defined destructors.
+ static LockData& lock_data = *new LockData();
+ return lock_data;
}
-static thread_local LockStack g_lockstack;
-
-static void potential_deadlock_detected(const std::pair<void*, void*>& mismatch, const LockStack& s1, const LockStack& s2)
+static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& s2)
{
LogPrintf("POTENTIAL DEADLOCK DETECTED\n");
LogPrintf("Previous lock order was:\n");
- for (const std::pair<void*, CLockLocation> & i : s2) {
+ for (const LockStackItem& i : s2) {
if (i.first == mismatch.first) {
LogPrintf(" (1)"); /* Continued */
}
@@ -111,7 +115,7 @@ static void potential_deadlock_detected(const std::pair<void*, void*>& mismatch,
LogPrintf(" %s\n", i.second.ToString());
}
LogPrintf("Current lock order is:\n");
- for (const std::pair<void*, CLockLocation> & i : s1) {
+ for (const LockStackItem& i : s1) {
if (i.first == mismatch.first) {
LogPrintf(" (1)"); /* Continued */
}
@@ -132,18 +136,18 @@ static void push_lock(void* c, const CLockLocation& locklocation)
LockData& lockdata = GetLockData();
std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
- g_lockstack.push_back(std::make_pair(c, locklocation));
-
- for (const std::pair<void*, CLockLocation>& i : g_lockstack) {
+ LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+ lock_stack.emplace_back(c, locklocation);
+ for (const LockStackItem& i : lock_stack) {
if (i.first == c)
break;
- std::pair<void*, void*> p1 = std::make_pair(i.first, c);
+ const LockPair p1 = std::make_pair(i.first, c);
if (lockdata.lockorders.count(p1))
continue;
- lockdata.lockorders.emplace(p1, g_lockstack);
+ lockdata.lockorders.emplace(p1, lock_stack);
- std::pair<void*, void*> p2 = std::make_pair(c, i.first);
+ const LockPair p2 = std::make_pair(c, i.first);
lockdata.invlockorders.insert(p2);
if (lockdata.lockorders.count(p2))
potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
@@ -152,7 +156,14 @@ static void push_lock(void* c, const CLockLocation& locklocation)
static void pop_lock()
{
- g_lockstack.pop_back();
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+ LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+ lock_stack.pop_back();
+ if (lock_stack.empty()) {
+ lockdata.m_lock_stacks.erase(std::this_thread::get_id());
+ }
}
void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry)
@@ -162,11 +173,17 @@ void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs
void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
{
- if (!g_lockstack.empty()) {
- const auto& lastlock = g_lockstack.back();
- if (lastlock.first == cs) {
- lockname = lastlock.second.Name();
- return;
+ {
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+ const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+ if (!lock_stack.empty()) {
+ const auto& lastlock = lock_stack.back();
+ if (lastlock.first == cs) {
+ lockname = lastlock.second.Name();
+ return;
+ }
}
}
throw std::system_error(EPERM, std::generic_category(), strprintf("%s:%s %s was not most recent critical section locked", file, line, guardname));
@@ -179,49 +196,57 @@ void LeaveCritical()
std::string LocksHeld()
{
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+ const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
std::string result;
- for (const std::pair<void*, CLockLocation>& i : g_lockstack)
+ for (const LockStackItem& i : lock_stack)
result += i.second.ToString() + std::string("\n");
return result;
}
+static bool LockHeld(void* mutex)
+{
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+ const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+ for (const LockStackItem& i : lock_stack) {
+ if (i.first == mutex) return true;
+ }
+
+ return false;
+}
+
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs)
{
- for (const std::pair<void*, CLockLocation>& i : g_lockstack)
- if (i.first == cs)
- return;
+ if (LockHeld(cs)) return;
tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
abort();
}
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs)
{
- for (const std::pair<void*, CLockLocation>& i : g_lockstack) {
- if (i.first == cs) {
- tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
- abort();
- }
- }
+ if (!LockHeld(cs)) return;
+ tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
+ abort();
}
void DeleteLock(void* cs)
{
LockData& lockdata = GetLockData();
- if (!lockdata.available) {
- // We're already shutting down.
- return;
- }
std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
- std::pair<void*, void*> item = std::make_pair(cs, nullptr);
+ const LockPair item = std::make_pair(cs, nullptr);
LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
while (it != lockdata.lockorders.end() && it->first.first == cs) {
- std::pair<void*, void*> invitem = std::make_pair(it->first.second, it->first.first);
+ const LockPair invitem = std::make_pair(it->first.second, it->first.first);
lockdata.invlockorders.erase(invitem);
lockdata.lockorders.erase(it++);
}
InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
while (invit != lockdata.invlockorders.end() && invit->first == cs) {
- std::pair<void*, void*> invinvitem = std::make_pair(invit->second, invit->first);
+ const LockPair invinvitem = std::make_pair(invit->second, invit->first);
lockdata.lockorders.erase(invinvitem);
lockdata.invlockorders.erase(invit++);
}
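Lock stacks are now keyed by std::thread::id inside LockData, so the assertion helpers inspect the calling thread's stack under dd_mutex rather than a thread_local vector. A sketch of the assertions this bookkeeping backs; the mutex and functions are illustrative:

    RecursiveMutex cs_example;

    void MustHoldLock() EXCLUSIVE_LOCKS_REQUIRED(cs_example)
    {
        AssertLockHeld(cs_example);    // looks up this thread's entry in m_lock_stacks
    }

    void Caller()
    {
        AssertLockNotHeld(cs_example);
        LOCK(cs_example);
        MustHoldLock();
    }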
diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp
index e5043f6816..7dff2e6e86 100644
--- a/src/test/blockfilter_index_tests.cpp
+++ b/src/test/blockfilter_index_tests.cpp
@@ -94,7 +94,7 @@ bool BuildChainTestingSetup::BuildChain(const CBlockIndex* pindex,
CBlockHeader header = block->GetBlockHeader();
BlockValidationState state;
- if (!ProcessNewBlockHeaders({header}, state, Params(), &pindex)) {
+ if (!EnsureChainman(m_node).ProcessNewBlockHeaders({header}, state, Params(), &pindex)) {
return false;
}
}
@@ -171,7 +171,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup)
uint256 chainA_last_header = last_header;
for (size_t i = 0; i < 2; i++) {
const auto& block = chainA[i];
- BOOST_REQUIRE(ProcessNewBlock(Params(), block, true, nullptr));
+ BOOST_REQUIRE(EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, nullptr));
}
for (size_t i = 0; i < 2; i++) {
const auto& block = chainA[i];
@@ -189,7 +189,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup)
uint256 chainB_last_header = last_header;
for (size_t i = 0; i < 3; i++) {
const auto& block = chainB[i];
- BOOST_REQUIRE(ProcessNewBlock(Params(), block, true, nullptr));
+ BOOST_REQUIRE(EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, nullptr));
}
for (size_t i = 0; i < 3; i++) {
const auto& block = chainB[i];
@@ -220,7 +220,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup)
// Reorg back to chain A.
for (size_t i = 2; i < 4; i++) {
const auto& block = chainA[i];
- BOOST_REQUIRE(ProcessNewBlock(Params(), block, true, nullptr));
+ BOOST_REQUIRE(EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, nullptr));
}
// Check that chain A and B blocks can be retrieved.
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 75b38670c9..348b170536 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -78,7 +78,7 @@ BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
{
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -148,7 +148,7 @@ static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidat
BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
{
auto connman = MakeUnique<CConnmanTest>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
const Consensus::Params& consensusParams = Params().GetConsensus();
constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS;
@@ -221,7 +221,7 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
banman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -276,7 +276,7 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
banman->ClearBanned();
gArgs.ForceSetArg("-banscore", "111"); // because 11 is my favorite number
@@ -323,7 +323,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.mempool);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
banman->ClearBanned();
int64_t nStartTime = GetTime();
diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp
new file mode 100644
index 0000000000..52dd62a145
--- /dev/null
+++ b/src/test/fuzz/coins_view.cpp
@@ -0,0 +1,294 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <amount.h>
+#include <chainparams.h>
+#include <chainparamsbase.h>
+#include <coins.h>
+#include <consensus/tx_verify.h>
+#include <consensus/validation.h>
+#include <key.h>
+#include <node/coinstats.h>
+#include <policy/policy.h>
+#include <primitives/transaction.h>
+#include <pubkey.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <validation.h>
+
+#include <cstdint>
+#include <limits>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace {
+const Coin EMPTY_COIN{};
+
+bool operator==(const Coin& a, const Coin& b)
+{
+ if (a.IsSpent() && b.IsSpent()) return true;
+ return a.fCoinBase == b.fCoinBase && a.nHeight == b.nHeight && a.out == b.out;
+}
+} // namespace
+
+void initialize()
+{
+ static const ECCVerifyHandle ecc_verify_handle;
+ ECC_Start();
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
+ CCoinsView backend_coins_view;
+ CCoinsViewCache coins_view_cache{&backend_coins_view};
+ COutPoint random_out_point;
+ Coin random_coin;
+ CMutableTransaction random_mutable_transaction;
+ while (fuzzed_data_provider.ConsumeBool()) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 9)) {
+ case 0: {
+ if (random_coin.IsSpent()) {
+ break;
+ }
+ Coin coin = random_coin;
+ bool expected_code_path = false;
+ const bool possible_overwrite = fuzzed_data_provider.ConsumeBool();
+ try {
+ coins_view_cache.AddCoin(random_out_point, std::move(coin), possible_overwrite);
+ expected_code_path = true;
+ } catch (const std::logic_error& e) {
+ if (e.what() == std::string{"Attempted to overwrite an unspent coin (when possible_overwrite is false)"}) {
+ assert(!possible_overwrite);
+ expected_code_path = true;
+ }
+ }
+ assert(expected_code_path);
+ break;
+ }
+ case 1: {
+ (void)coins_view_cache.Flush();
+ break;
+ }
+ case 2: {
+ coins_view_cache.SetBestBlock(ConsumeUInt256(fuzzed_data_provider));
+ break;
+ }
+ case 3: {
+ Coin move_to;
+ (void)coins_view_cache.SpendCoin(random_out_point, fuzzed_data_provider.ConsumeBool() ? &move_to : nullptr);
+ break;
+ }
+ case 4: {
+ coins_view_cache.Uncache(random_out_point);
+ break;
+ }
+ case 5: {
+ if (fuzzed_data_provider.ConsumeBool()) {
+ backend_coins_view = CCoinsView{};
+ }
+ coins_view_cache.SetBackend(backend_coins_view);
+ break;
+ }
+ case 6: {
+ const std::optional<COutPoint> opt_out_point = ConsumeDeserializable<COutPoint>(fuzzed_data_provider);
+ if (!opt_out_point) {
+ break;
+ }
+ random_out_point = *opt_out_point;
+ break;
+ }
+ case 7: {
+ const std::optional<Coin> opt_coin = ConsumeDeserializable<Coin>(fuzzed_data_provider);
+ if (!opt_coin) {
+ break;
+ }
+ random_coin = *opt_coin;
+ break;
+ }
+ case 8: {
+ const std::optional<CMutableTransaction> opt_mutable_transaction = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider);
+ if (!opt_mutable_transaction) {
+ break;
+ }
+ random_mutable_transaction = *opt_mutable_transaction;
+ break;
+ }
+ case 9: {
+ CCoinsMap coins_map;
+ while (fuzzed_data_provider.ConsumeBool()) {
+ CCoinsCacheEntry coins_cache_entry;
+ coins_cache_entry.flags = fuzzed_data_provider.ConsumeIntegral<unsigned char>();
+ if (fuzzed_data_provider.ConsumeBool()) {
+ coins_cache_entry.coin = random_coin;
+ } else {
+ const std::optional<Coin> opt_coin = ConsumeDeserializable<Coin>(fuzzed_data_provider);
+ if (!opt_coin) {
+ break;
+ }
+ coins_cache_entry.coin = *opt_coin;
+ }
+ coins_map.emplace(random_out_point, std::move(coins_cache_entry));
+ }
+ bool expected_code_path = false;
+ try {
+ coins_view_cache.BatchWrite(coins_map, fuzzed_data_provider.ConsumeBool() ? ConsumeUInt256(fuzzed_data_provider) : coins_view_cache.GetBestBlock());
+ expected_code_path = true;
+ } catch (const std::logic_error& e) {
+ if (e.what() == std::string{"FRESH flag misapplied to coin that exists in parent cache"}) {
+ expected_code_path = true;
+ }
+ }
+ assert(expected_code_path);
+ break;
+ }
+ }
+ }
+
+ {
+ const Coin& coin_using_access_coin = coins_view_cache.AccessCoin(random_out_point);
+ const bool exists_using_access_coin = !(coin_using_access_coin == EMPTY_COIN);
+ const bool exists_using_have_coin = coins_view_cache.HaveCoin(random_out_point);
+ const bool exists_using_have_coin_in_cache = coins_view_cache.HaveCoinInCache(random_out_point);
+ Coin coin_using_get_coin;
+ const bool exists_using_get_coin = coins_view_cache.GetCoin(random_out_point, coin_using_get_coin);
+ if (exists_using_get_coin) {
+ assert(coin_using_get_coin == coin_using_access_coin);
+ }
+ assert((exists_using_access_coin && exists_using_have_coin_in_cache && exists_using_have_coin && exists_using_get_coin) ||
+ (!exists_using_access_coin && !exists_using_have_coin_in_cache && !exists_using_have_coin && !exists_using_get_coin));
+ const bool exists_using_have_coin_in_backend = backend_coins_view.HaveCoin(random_out_point);
+ if (exists_using_have_coin_in_backend) {
+ assert(exists_using_have_coin);
+ }
+ Coin coin_using_backend_get_coin;
+ if (backend_coins_view.GetCoin(random_out_point, coin_using_backend_get_coin)) {
+ assert(exists_using_have_coin_in_backend);
+ assert(coin_using_get_coin == coin_using_backend_get_coin);
+ } else {
+ assert(!exists_using_have_coin_in_backend);
+ }
+ }
+
+ {
+ bool expected_code_path = false;
+ try {
+ (void)coins_view_cache.Cursor();
+ } catch (const std::logic_error&) {
+ expected_code_path = true;
+ }
+ assert(expected_code_path);
+ (void)coins_view_cache.DynamicMemoryUsage();
+ (void)coins_view_cache.EstimateSize();
+ (void)coins_view_cache.GetBestBlock();
+ (void)coins_view_cache.GetCacheSize();
+ (void)coins_view_cache.GetHeadBlocks();
+ (void)coins_view_cache.HaveInputs(CTransaction{random_mutable_transaction});
+ }
+
+ {
+ const CCoinsViewCursor* coins_view_cursor = backend_coins_view.Cursor();
+ assert(coins_view_cursor == nullptr);
+ (void)backend_coins_view.EstimateSize();
+ (void)backend_coins_view.GetBestBlock();
+ (void)backend_coins_view.GetHeadBlocks();
+ }
+
+ if (fuzzed_data_provider.ConsumeBool()) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 6)) {
+ case 0: {
+ const CTransaction transaction{random_mutable_transaction};
+ bool is_spent = false;
+ for (const CTxOut& tx_out : transaction.vout) {
+ if (Coin{tx_out, 0, transaction.IsCoinBase()}.IsSpent()) {
+ is_spent = true;
+ }
+ }
+ if (is_spent) {
+ // Avoid:
+ // coins.cpp:69: void CCoinsViewCache::AddCoin(const COutPoint &, Coin &&, bool): Assertion `!coin.IsSpent()' failed.
+ break;
+ }
+ bool expected_code_path = false;
+ const int height = fuzzed_data_provider.ConsumeIntegral<int>();
+ const bool possible_overwrite = fuzzed_data_provider.ConsumeBool();
+ try {
+ AddCoins(coins_view_cache, transaction, height, possible_overwrite);
+ expected_code_path = true;
+ } catch (const std::logic_error& e) {
+ if (e.what() == std::string{"Attempted to overwrite an unspent coin (when possible_overwrite is false)"}) {
+ assert(!possible_overwrite);
+ expected_code_path = true;
+ }
+ }
+ assert(expected_code_path);
+ break;
+ }
+ case 1: {
+ (void)AreInputsStandard(CTransaction{random_mutable_transaction}, coins_view_cache);
+ break;
+ }
+ case 2: {
+ TxValidationState state;
+ CAmount tx_fee_out;
+ const CTransaction transaction{random_mutable_transaction};
+ if (ContainsSpentInput(transaction, coins_view_cache)) {
+ // Avoid:
+ // consensus/tx_verify.cpp:171: bool Consensus::CheckTxInputs(const CTransaction &, TxValidationState &, const CCoinsViewCache &, int, CAmount &): Assertion `!coin.IsSpent()' failed.
+ break;
+ }
+ try {
+ (void)Consensus::CheckTxInputs(transaction, state, coins_view_cache, fuzzed_data_provider.ConsumeIntegralInRange<int>(0, std::numeric_limits<int>::max()), tx_fee_out);
+ assert(MoneyRange(tx_fee_out));
+ } catch (const std::runtime_error&) {
+ }
+ break;
+ }
+ case 3: {
+ const CTransaction transaction{random_mutable_transaction};
+ if (ContainsSpentInput(transaction, coins_view_cache)) {
+ // Avoid:
+ // consensus/tx_verify.cpp:130: unsigned int GetP2SHSigOpCount(const CTransaction &, const CCoinsViewCache &): Assertion `!coin.IsSpent()' failed.
+ break;
+ }
+ (void)GetP2SHSigOpCount(transaction, coins_view_cache);
+ break;
+ }
+ case 4: {
+ const CTransaction transaction{random_mutable_transaction};
+ if (ContainsSpentInput(transaction, coins_view_cache)) {
+ // Avoid:
+ // consensus/tx_verify.cpp:130: unsigned int GetP2SHSigOpCount(const CTransaction &, const CCoinsViewCache &): Assertion `!coin.IsSpent()' failed.
+ break;
+ }
+ const int flags = fuzzed_data_provider.ConsumeIntegral<int>();
+ if (!transaction.vin.empty() && (flags & SCRIPT_VERIFY_WITNESS) != 0 && (flags & SCRIPT_VERIFY_P2SH) == 0) {
+ // Avoid:
+ // script/interpreter.cpp:1705: size_t CountWitnessSigOps(const CScript &, const CScript &, const CScriptWitness *, unsigned int): Assertion `(flags & SCRIPT_VERIFY_P2SH) != 0' failed.
+ break;
+ }
+ (void)GetTransactionSigOpCost(transaction, coins_view_cache, flags);
+ break;
+ }
+ case 5: {
+ CCoinsStats stats;
+ bool expected_code_path = false;
+ try {
+ (void)GetUTXOStats(&coins_view_cache, stats);
+ } catch (const std::logic_error&) {
+ expected_code_path = true;
+ }
+ assert(expected_code_path);
+ break;
+ }
+ case 6: {
+ (void)IsWitnessStandard(CTransaction{random_mutable_transaction}, coins_view_cache);
+ break;
+ }
+ }
+ }
+}
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index 6e2188fe86..82e1d55c0b 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -19,8 +19,6 @@ static bool read_stdin(std::vector<uint8_t>& data)
ssize_t length = 0;
while ((length = read(STDIN_FILENO, buffer, 1024)) > 0) {
data.insert(data.end(), buffer, buffer + length);
-
- if (data.size() > (1 << 20)) return false;
}
return length == 0;
}
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
index c03365199a..665a6224b4 100644
--- a/src/test/fuzz/process_message.cpp
+++ b/src/test/fuzz/process_message.cpp
@@ -29,7 +29,7 @@
#include <string>
#include <vector>
-bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CTxMemPool& mempool, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc);
+bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, ChainstateManager& chainman, CTxMemPool& mempool, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc);
namespace {
@@ -74,7 +74,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
p2p_node.SetSendVersion(PROTOCOL_VERSION);
g_setup->m_node.peer_logic->InitializeNode(&p2p_node);
try {
- (void)ProcessMessage(&p2p_node, random_message_type, random_bytes_data_stream, GetTimeMillis(), Params(), *g_setup->m_node.mempool, g_setup->m_node.connman.get(), g_setup->m_node.banman.get(), std::atomic<bool>{false});
+ (void)ProcessMessage(&p2p_node, random_message_type, random_bytes_data_stream, GetTimeMillis(), Params(), *g_setup->m_node.chainman, *g_setup->m_node.mempool, g_setup->m_node.connman.get(), g_setup->m_node.banman.get(), std::atomic<bool>{false});
} catch (const std::ios_base::failure&) {
}
SyncWithValidationInterfaceQueue();
diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp
index 3c1f911f7e..50984b1aef 100644
--- a/src/test/fuzz/string.cpp
+++ b/src/test/fuzz/string.cpp
@@ -93,7 +93,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
{
CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
std::string s;
- LimitedString<10> limited_string = LIMITED_STRING(s, 10);
+ auto limited_string = LIMITED_STRING(s, 10);
data_stream << random_string_1;
try {
data_stream >> limited_string;
@@ -108,7 +108,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
{
CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
- const LimitedString<10> limited_string = LIMITED_STRING(random_string_1, 10);
+ const auto limited_string = LIMITED_STRING(random_string_1, 10);
data_stream << limited_string;
std::string deserialized_string;
data_stream >> deserialized_string;
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index f72d9380eb..9d0fb02128 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -8,6 +8,7 @@
#include <amount.h>
#include <arith_uint256.h>
#include <attributes.h>
+#include <coins.h>
#include <consensus/consensus.h>
#include <primitives/transaction.h>
#include <script/script.h>
@@ -149,4 +150,15 @@ NODISCARD bool AdditionOverflow(const T i, const T j) noexcept
return std::numeric_limits<T>::max() - i < j;
}
+NODISCARD inline bool ContainsSpentInput(const CTransaction& tx, const CCoinsViewCache& inputs) noexcept
+{
+ for (const CTxIn& tx_in : tx.vin) {
+ const Coin& coin = inputs.AccessCoin(tx_in.prevout);
+ if (coin.IsSpent()) {
+ return true;
+ }
+ }
+ return false;
+}
+
#endif // BITCOIN_TEST_FUZZ_UTIL_H
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 9f3ca87206..57eee94330 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -253,7 +253,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
pblock->nNonce = blockinfo[i].nonce;
}
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
- BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, nullptr));
+ BOOST_CHECK(EnsureChainman(m_node).ProcessNewBlock(chainparams, shared_pblock, true, nullptr));
pblock->hashPrevBlock = pblock->GetHash();
}
diff --git a/src/test/ref_tests.cpp b/src/test/ref_tests.cpp
new file mode 100644
index 0000000000..0ec0799fbc
--- /dev/null
+++ b/src/test/ref_tests.cpp
@@ -0,0 +1,33 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <util/ref.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(ref_tests)
+
+BOOST_AUTO_TEST_CASE(ref_test)
+{
+ util::Ref ref;
+ BOOST_CHECK(!ref.Has<int>());
+ BOOST_CHECK_THROW(ref.Get<int>(), NonFatalCheckError);
+ int value = 5;
+ ref.Set(value);
+ BOOST_CHECK(ref.Has<int>());
+ BOOST_CHECK_EQUAL(ref.Get<int>(), 5);
+ ++ref.Get<int>();
+ BOOST_CHECK_EQUAL(ref.Get<int>(), 6);
+ BOOST_CHECK_EQUAL(value, 6);
+ ++value;
+ BOOST_CHECK_EQUAL(value, 7);
+ BOOST_CHECK_EQUAL(ref.Get<int>(), 7);
+ BOOST_CHECK(!ref.Has<bool>());
+ BOOST_CHECK_THROW(ref.Get<bool>(), NonFatalCheckError);
+ ref.Clear();
+ BOOST_CHECK(!ref.Has<int>());
+ BOOST_CHECK_THROW(ref.Get<int>(), NonFatalCheckError);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/rpc_tests.cpp b/src/test/rpc_tests.cpp
index d9c66f1c19..b54cbb3f00 100644
--- a/src/test/rpc_tests.cpp
+++ b/src/test/rpc_tests.cpp
@@ -10,6 +10,7 @@
#include <interfaces/chain.h>
#include <node/context.h>
#include <test/util/setup_common.h>
+#include <util/ref.h>
#include <util/time.h>
#include <boost/algorithm/string.hpp>
@@ -19,13 +20,20 @@
#include <rpc/blockchain.h>
-UniValue CallRPC(std::string args)
+class RPCTestingSetup : public TestingSetup
+{
+public:
+ UniValue CallRPC(std::string args);
+};
+
+UniValue RPCTestingSetup::CallRPC(std::string args)
{
std::vector<std::string> vArgs;
boost::split(vArgs, args, boost::is_any_of(" \t"));
std::string strMethod = vArgs[0];
vArgs.erase(vArgs.begin());
- JSONRPCRequest request;
+ util::Ref context{m_node};
+ JSONRPCRequest request(context);
request.strMethod = strMethod;
request.params = RPCConvertValues(strMethod, vArgs);
request.fHelp = false;
@@ -40,7 +48,7 @@ UniValue CallRPC(std::string args)
}
-BOOST_FIXTURE_TEST_SUITE(rpc_tests, TestingSetup)
+BOOST_FIXTURE_TEST_SUITE(rpc_tests, RPCTestingSetup)
BOOST_AUTO_TEST_CASE(rpc_rawparams)
{
diff --git a/src/test/util/mining.cpp b/src/test/util/mining.cpp
index 1df6844062..dac7f1a07b 100644
--- a/src/test/util/mining.cpp
+++ b/src/test/util/mining.cpp
@@ -31,7 +31,7 @@ CTxIn MineBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey)
assert(block->nNonce);
}
- bool processed{ProcessNewBlock(Params(), block, true, nullptr)};
+ bool processed{EnsureChainman(node).ProcessNewBlock(Params(), block, true, nullptr)};
assert(processed);
return CTxIn{block->vtx[0]->GetHash(), 0};
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index bf0afc4171..3b7a7c8d12 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -123,7 +123,6 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
const CChainParams& chainparams = Params();
// Ideally we'd move all the RPC tests to the functional testing framework
// instead of unit tests, but for now we need these here.
- g_rpc_node = &m_node;
RegisterAllCoreRPCCommands(tableRPC);
m_node.scheduler = MakeUnique<CScheduler>();
@@ -131,11 +130,12 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
// We have to run a scheduler thread to prevent ActivateBestChain
// from blocking due to queue overrun.
threadGroup.create_thread([&]{ m_node.scheduler->serviceQueue(); });
- GetMainSignals().RegisterBackgroundSignalScheduler(*g_rpc_node->scheduler);
+ GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler);
pblocktree.reset(new CBlockTreeDB(1 << 20, true));
- g_chainman.InitializeChainstate();
+ m_node.chainman = &::g_chainman;
+ m_node.chainman->InitializeChainstate();
::ChainstateActive().InitCoinsDB(
/* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
assert(!::ChainstateActive().CanFlushToDisk());
@@ -161,7 +161,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
m_node.mempool->setSanityCheck(1.0);
m_node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = MakeUnique<CConnman>(0x1337, 0x1337); // Deterministic randomness for tests.
- m_node.peer_logic = MakeUnique<PeerLogicValidation>(m_node.connman.get(), m_node.banman.get(), *m_node.scheduler, *m_node.mempool);
+ m_node.peer_logic = MakeUnique<PeerLogicValidation>(m_node.connman.get(), m_node.banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
{
CConnman::Options options;
options.m_msgproc = m_node.peer_logic.get();
@@ -176,14 +176,14 @@ TestingSetup::~TestingSetup()
threadGroup.join_all();
GetMainSignals().FlushBackgroundCallbacks();
GetMainSignals().UnregisterBackgroundSignalScheduler();
- g_rpc_node = nullptr;
m_node.connman.reset();
m_node.banman.reset();
m_node.args = nullptr;
m_node.mempool = nullptr;
m_node.scheduler.reset();
UnloadBlockIndex();
- g_chainman.Reset();
+ m_node.chainman->Reset();
+ m_node.chainman = nullptr;
pblocktree.reset();
}
@@ -228,7 +228,7 @@ CBlock TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransa
while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce;
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block);
- ProcessNewBlock(chainparams, shared_pblock, true, nullptr);
+ EnsureChainman(m_node).ProcessNewBlock(chainparams, shared_pblock, true, nullptr);
CBlock result = block;
return result;
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index 899f054b83..45e0c5484e 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -163,10 +163,10 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
std::transform(blocks.begin(), blocks.end(), std::back_inserter(headers), [](std::shared_ptr<const CBlock> b) { return b->GetBlockHeader(); });
// Process all the headers so we understand the topology of the chain
- BOOST_CHECK(ProcessNewBlockHeaders(headers, state, Params()));
+ BOOST_CHECK(EnsureChainman(m_node).ProcessNewBlockHeaders(headers, state, Params()));
// Connect the genesis block and drain any outstanding events
- BOOST_CHECK(ProcessNewBlock(Params(), std::make_shared<CBlock>(Params().GenesisBlock()), true, &ignored));
+ BOOST_CHECK(EnsureChainman(m_node).ProcessNewBlock(Params(), std::make_shared<CBlock>(Params().GenesisBlock()), true, &ignored));
SyncWithValidationInterfaceQueue();
// subscribe to events (this subscriber will validate event ordering)
@@ -183,18 +183,18 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
// will subscribe to events generated during block validation and assert on ordering invariance
std::vector<std::thread> threads;
for (int i = 0; i < 10; i++) {
- threads.emplace_back([&blocks]() {
+ threads.emplace_back([&]() {
bool ignored;
FastRandomContext insecure;
for (int i = 0; i < 1000; i++) {
auto block = blocks[insecure.randrange(blocks.size() - 1)];
- ProcessNewBlock(Params(), block, true, &ignored);
+ EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, &ignored);
}
// to make sure that eventually we process the full chain - do it here
for (auto block : blocks) {
if (block->vtx.size() == 1) {
- bool processed = ProcessNewBlock(Params(), block, true, &ignored);
+ bool processed = EnsureChainman(m_node).ProcessNewBlock(Params(), block, true, &ignored);
assert(processed);
}
}
@@ -232,8 +232,8 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
BOOST_AUTO_TEST_CASE(mempool_locks_reorg)
{
bool ignored;
- auto ProcessBlock = [&ignored](std::shared_ptr<const CBlock> block) -> bool {
- return ProcessNewBlock(Params(), block, /* fForceProcessing */ true, /* fNewBlock */ &ignored);
+ auto ProcessBlock = [&](std::shared_ptr<const CBlock> block) -> bool {
+ return EnsureChainman(m_node).ProcessNewBlock(Params(), block, /* fForceProcessing */ true, /* fNewBlock */ &ignored);
};
// Process all mined blocks
diff --git a/src/txmempool.h b/src/txmempool.h
index 4bee78b8d6..4568eb928d 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -704,7 +704,10 @@ public:
/** Adds a transaction to the unbroadcast set */
void AddUnbroadcastTx(const uint256& txid) {
LOCK(cs);
- m_unbroadcast_txids.insert(txid);
+ /** Sanity Check: the transaction should also be in the mempool */
+ if (exists(txid)) {
+ m_unbroadcast_txids.insert(txid);
+ }
}
/** Removes a transaction from the unbroadcast set */
@@ -716,6 +719,12 @@ public:
return m_unbroadcast_txids;
}
+ // Returns whether a txid is in the unbroadcast set
+ bool IsUnbroadcastTx(const uint256& txid) const {
+ LOCK(cs);
+ return (m_unbroadcast_txids.count(txid) != 0);
+ }
+
private:
/** UpdateForDescendants is used by UpdateTransactionsFromBlock to update
* the descendants for a single transaction that has been added to the
diff --git a/src/util/check.h b/src/util/check.h
index d18887ae95..5c0f32cf51 100644
--- a/src/util/check.h
+++ b/src/util/check.h
@@ -5,6 +5,10 @@
#ifndef BITCOIN_UTIL_CHECK_H
#define BITCOIN_UTIL_CHECK_H
+#if defined(HAVE_CONFIG_H)
+#include <config/bitcoin-config.h>
+#endif
+
#include <tinyformat.h>
#include <stdexcept>
diff --git a/src/util/ref.h b/src/util/ref.h
new file mode 100644
index 0000000000..9685ea9fec
--- /dev/null
+++ b/src/util/ref.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_REF_H
+#define BITCOIN_UTIL_REF_H
+
+#include <util/check.h>
+
+#include <typeindex>
+
+namespace util {
+
+/**
+ * Type-safe dynamic reference.
+ *
+ * This implements a small subset of the functionality in C++17's std::any
+ * class, and can be dropped when the project updates to C++17
+ * (https://github.com/bitcoin/bitcoin/issues/16684)
+ */
+class Ref
+{
+public:
+ Ref() = default;
+ template<typename T> Ref(T& value) { Set(value); }
+ template<typename T> T& Get() const { CHECK_NONFATAL(Has<T>()); return *static_cast<T*>(m_value); }
+ template<typename T> void Set(T& value) { m_value = &value; m_type = std::type_index(typeid(T)); }
+ template<typename T> bool Has() const { return m_value && m_type == std::type_index(typeid(T)); }
+ void Clear() { m_value = nullptr; m_type = std::type_index(typeid(void)); }
+
+private:
+ void* m_value = nullptr;
+ std::type_index m_type = std::type_index(typeid(void));
+};
+
+} // namespace util
+
+#endif // BITCOIN_UTIL_REF_H
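As an aside for reviewers, a minimal usage sketch of the util::Ref class introduced above (illustrative only; the Context struct is a hypothetical stand-in for something like the node context, not project code):
#include <util/ref.h>
#include <cassert>

struct Context { int value = 0; };      // hypothetical stand-in struct

int main()
{
    Context ctx;
    util::Ref ref{ctx};                 // stores &ctx together with typeid(Context)
    assert(ref.Has<Context>());         // true only for the exact stored type
    ref.Get<Context>().value = 7;       // returns Context& backed by ctx
    assert(ctx.value == 7);
    ref.Clear();                        // back to the empty state
    assert(!ref.Has<Context>());
    return 0;
}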
diff --git a/src/validation.cpp b/src/validation.cpp
index a9dfa5c171..dbdf5028fd 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -196,8 +196,8 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc
std::unique_ptr<CBlockTreeDB> pblocktree;
// See definition for documentation
-static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
-static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
+static void FindFilesToPruneManual(ChainstateManager& chainman, std::set<int>& setFilesToPrune, int nManualPruneHeight);
+static void FindFilesToPrune(ChainstateManager& chainman, std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
@@ -2282,11 +2282,11 @@ bool CChainState::FlushStateToDisk(
if (nManualPruneHeight > 0) {
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
- FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
+ FindFilesToPruneManual(g_chainman, setFilesToPrune, nManualPruneHeight);
} else {
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
- FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
+ FindFilesToPrune(g_chainman, setFilesToPrune, chainparams.PruneAfterHeight());
fCheckForPruning = false;
}
if (!setFilesToPrune.empty()) {
@@ -3691,13 +3691,14 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
}
// Exposed wrapper for AcceptBlockHeader
-bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
+bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
{
+ AssertLockNotHeld(cs_main);
{
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
- bool accepted = g_chainman.m_blockman.AcceptBlockHeader(
+ bool accepted = m_blockman.AcceptBlockHeader(
header, state, chainparams, &pindex);
::ChainstateActive().CheckBlockIndex(chainparams.GetConsensus());
@@ -3819,7 +3820,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block
return true;
}
-bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
+bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock)
{
AssertLockNotHeld(cs_main);
@@ -3895,12 +3896,12 @@ uint64_t CalculateCurrentUsage()
return retval;
}
-/* Prune a block file (modify associated database entries)*/
-void PruneOneBlockFile(const int fileNumber)
+void ChainstateManager::PruneOneBlockFile(const int fileNumber)
{
+ AssertLockHeld(cs_main);
LOCK(cs_LastBlockFile);
- for (const auto& entry : g_chainman.BlockIndex()) {
+ for (const auto& entry : m_blockman.m_block_index) {
CBlockIndex* pindex = entry.second;
if (pindex->nFile == fileNumber) {
pindex->nStatus &= ~BLOCK_HAVE_DATA;
@@ -3914,12 +3915,12 @@ void PruneOneBlockFile(const int fileNumber)
// to be downloaded again in order to consider its chain, at which
// point it would be considered as a candidate for
// m_blocks_unlinked or setBlockIndexCandidates.
- auto range = g_chainman.m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
+ auto range = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
while (range.first != range.second) {
std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
range.first++;
if (_it->second == pindex) {
- g_chainman.m_blockman.m_blocks_unlinked.erase(_it);
+ m_blockman.m_blocks_unlinked.erase(_it);
}
}
}
@@ -3941,7 +3942,7 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
}
/* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
-static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight)
+static void FindFilesToPruneManual(ChainstateManager& chainman, std::set<int>& setFilesToPrune, int nManualPruneHeight)
{
assert(fPruneMode && nManualPruneHeight > 0);
@@ -3955,7 +3956,7 @@ static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPr
for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
continue;
- PruneOneBlockFile(fileNumber);
+ chainman.PruneOneBlockFile(fileNumber);
setFilesToPrune.insert(fileNumber);
count++;
}
@@ -3988,7 +3989,7 @@ void PruneBlockFilesManual(int nManualPruneHeight)
*
* @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned
*/
-static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
+static void FindFilesToPrune(ChainstateManager& chainman, std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
LOCK2(cs_main, cs_LastBlockFile);
if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
@@ -4030,7 +4031,7 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte
if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
continue;
- PruneOneBlockFile(fileNumber);
+ chainman.PruneOneBlockFile(fileNumber);
// Queue up the files for removal
setFilesToPrune.insert(fileNumber);
nCurrentUsage -= nBytesToPrune;
@@ -4154,9 +4155,9 @@ void BlockManager::Unload() {
m_block_index.clear();
}
-bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool static LoadBlockIndexDB(ChainstateManager& chainman, const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- if (!g_chainman.m_blockman.LoadBlockIndex(
+ if (!chainman.m_blockman.LoadBlockIndex(
chainparams.GetConsensus(), *pblocktree,
::ChainstateActive().setBlockIndexCandidates)) {
return false;
@@ -4182,8 +4183,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
std::set<int> setBlkDataFiles;
- for (const std::pair<const uint256, CBlockIndex*>& item : g_chainman.BlockIndex())
- {
+ for (const std::pair<const uint256, CBlockIndex*>& item : chainman.BlockIndex()) {
CBlockIndex* pindex = item.second;
if (pindex->nStatus & BLOCK_HAVE_DATA) {
setBlkDataFiles.insert(pindex->nFile);
@@ -4600,14 +4600,15 @@ void UnloadBlockIndex()
fHavePruned = false;
}
-bool LoadBlockIndex(const CChainParams& chainparams)
+bool ChainstateManager::LoadBlockIndex(const CChainParams& chainparams)
{
+ AssertLockHeld(cs_main);
// Load block index from databases
bool needs_init = fReindex;
if (!fReindex) {
- bool ret = LoadBlockIndexDB(chainparams);
+ bool ret = LoadBlockIndexDB(*this, chainparams);
if (!ret) return false;
- needs_init = g_chainman.m_blockman.m_block_index.empty();
+ needs_init = m_blockman.m_block_index.empty();
}
if (needs_init) {
diff --git a/src/validation.h b/src/validation.h
index cbab65e79e..8112e38704 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -43,6 +43,7 @@ class CConnman;
class CScriptCheck;
class CBlockPolicyEstimator;
class CTxMemPool;
+class ChainstateManager;
class TxValidationState;
struct ChainTxData;
@@ -149,41 +150,6 @@ extern bool fPruneMode;
/** Number of MiB of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
-/**
- * Process an incoming block. This only returns after the best known valid
- * block is made active. Note that it does not, however, guarantee that the
- * specific block passed to it has been checked for validity!
- *
- * If you want to *possibly* get feedback on whether pblock is valid, you must
- * install a CValidationInterface (see validationinterface.h) - this will have
- * its BlockChecked method called whenever *any* block completes validation.
- *
- * Note that we guarantee that either the proof-of-work is valid on pblock, or
- * (and possibly also) BlockChecked will have been called.
- *
- * May not be called in a
- * validationinterface callback.
- *
- * @param[in] pblock The block we want to process.
- * @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
- * @param[out] fNewBlock A boolean which is set to indicate if the block was first received via this call
- * @returns If the block was processed, independently of block validity
- */
-bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock) LOCKS_EXCLUDED(cs_main);
-
-/**
- * Process incoming block headers.
- *
- * May not be called in a
- * validationinterface callback.
- *
- * @param[in] block The block headers themselves
- * @param[out] state This may be set to an Error state if any error occurred processing them
- * @param[in] chainparams The params for the chain we want to connect to
- * @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers
- */
-bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex = nullptr) LOCKS_EXCLUDED(cs_main);
-
/** Open a block file (blk?????.dat) */
FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false);
/** Translation to a filesystem path */
@@ -192,9 +158,6 @@ fs::path GetBlockPosFilename(const FlatFilePos &pos);
void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp = nullptr);
/** Ensures we have a genesis block in the block tree, possibly writing one to disk. */
bool LoadGenesisBlock(const CChainParams& chainparams);
-/** Load the block tree and coins database from disk,
- * initializing state if we're running with -reindex. */
-bool LoadBlockIndex(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Unload database information */
void UnloadBlockIndex();
/** Run an instance of the script checking thread */
@@ -217,11 +180,6 @@ double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex* pin
uint64_t CalculateCurrentUsage();
/**
- * Mark one block file as pruned.
- */
-void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
-/**
* Actually unlink the specified files
*/
void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
@@ -493,9 +451,6 @@ enum class CoinsCacheSizeState
OK = 0
};
-// Defined below, but needed for `friend` usage in CChainState.
-class ChainstateManager;
-
/**
* CChainState stores and provides an API to update our local knowledge of the
* current best chain.
@@ -870,6 +825,47 @@ public:
CChain& ValidatedChain() const { return ValidatedChainstate().m_chain; }
CBlockIndex* ValidatedTip() const { return ValidatedChain().Tip(); }
+ /**
+ * Process an incoming block. This only returns after the best known valid
+ * block is made active. Note that it does not, however, guarantee that the
+ * specific block passed to it has been checked for validity!
+ *
+ * If you want to *possibly* get feedback on whether pblock is valid, you must
+ * install a CValidationInterface (see validationinterface.h) - this will have
+ * its BlockChecked method called whenever *any* block completes validation.
+ *
+ * Note that we guarantee that either the proof-of-work is valid on pblock, or
+ * (and possibly also) BlockChecked will have been called.
+ *
+ * May not be called in a
+ * validationinterface callback.
+ *
+ * @param[in] pblock The block we want to process.
+ * @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
+ * @param[out] fNewBlock A boolean which is set to indicate if the block was first received via this call
+ * @returns If the block was processed, independently of block validity
+ */
+ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock) LOCKS_EXCLUDED(cs_main);
+
+ /**
+ * Process incoming block headers.
+ *
+ * May not be called in a
+ * validationinterface callback.
+ *
+ * @param[in] block The block headers themselves
+ * @param[out] state This may be set to an Error state if any error occurred processing them
+ * @param[in] chainparams The params for the chain we want to connect to
+ * @param[out] ppindex If set, the pointer will be set to point to the last new block index object for the given headers
+ */
+ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& block, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex = nullptr) LOCKS_EXCLUDED(cs_main);
+
+ //! Mark one block file as pruned (modify associated database entries)
+ void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ //! Load the block tree and coins database from disk, initializing state if we're running with -reindex
+ bool LoadBlockIndex(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
//! Unload block index and chain data before shutdown.
void Unload() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
@@ -877,6 +873,7 @@ public:
void Reset();
};
+/** DEPRECATED! Please use node.chainman instead. May only be used in validation.cpp internally */
extern ChainstateManager g_chainman GUARDED_BY(::cs_main);
/** @returns the most-work valid chainstate. */
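To make the call-site impact of moving these declarations concrete, a hedged before/after sketch (EnsureChainman is the accessor already used by the test changes earlier in this diff):
// Before this change: free functions operating on the global g_chainman.
//   bool ok = ProcessNewBlock(Params(), pblock, /*fForceProcessing=*/true, &new_block);
// After this change: the same operations are members of an explicit ChainstateManager,
// typically reached through the node context.
//   bool ok = EnsureChainman(node).ProcessNewBlock(Params(), pblock, /*fForceProcessing=*/true, &new_block);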
diff --git a/src/wallet/crypter.h b/src/wallet/crypter.h
index f59c63260e..f2df786e2e 100644
--- a/src/wallet/crypter.h
+++ b/src/wallet/crypter.h
@@ -43,15 +43,9 @@ public:
//! such as the various parameters to scrypt
std::vector<unsigned char> vchOtherDerivationParameters;
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(vchCryptedKey);
- READWRITE(vchSalt);
- READWRITE(nDerivationMethod);
- READWRITE(nDeriveIterations);
- READWRITE(vchOtherDerivationParameters);
+ SERIALIZE_METHODS(CMasterKey, obj)
+ {
+ READWRITE(obj.vchCryptedKey, obj.vchSalt, obj.nDerivationMethod, obj.nDeriveIterations, obj.vchOtherDerivationParameters);
}
CMasterKey()
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 2a57248705..2a9ac189ea 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2726,6 +2726,7 @@ static UniValue createwallet(const JSONRPCRequest& request)
}
if (!request.params[5].isNull() && request.params[5].get_bool()) {
flags |= WALLET_FLAG_DESCRIPTORS;
+ warnings.emplace_back(Untranslated("Wallet is an experimental descriptor wallet"));
}
bilingual_str error;
@@ -3979,10 +3980,6 @@ UniValue sethdseed(const JSONRPCRequest& request)
LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet, true);
- if (pwallet->chain().isInitialBlockDownload()) {
- throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Cannot set a new HD seed while still in Initial Block Download");
- }
-
if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
throw JSONRPCError(RPC_WALLET_ERROR, "Cannot set a HD seed to a wallet with private keys disabled");
}
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index e4be5045e1..8a2a798644 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -12,6 +12,9 @@
#include <util/translation.h>
#include <wallet/scriptpubkeyman.h>
+//! Value for the first BIP 32 hardened derivation. Can be used as a bit mask and as a value. See BIP 32 for more details.
+const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;
+
bool LegacyScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error)
{
LOCK(cs_KeyStore);
@@ -220,6 +223,7 @@ bool LegacyScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master_key
bool keyPass = mapCryptedKeys.empty(); // Always pass when there are no encrypted keys
bool keyFail = false;
CryptedKeyMap::const_iterator mi = mapCryptedKeys.begin();
+ WalletBatch batch(m_storage.GetDatabase());
for (; mi != mapCryptedKeys.end(); ++mi)
{
const CPubKey &vchPubKey = (*mi).second.first;
@@ -233,6 +237,10 @@ bool LegacyScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master_key
keyPass = true;
if (fDecryptionThoroughlyChecked)
break;
+ else {
+ // Rewrite these encrypted keys with checksums
+ batch.WriteCryptedKey(vchPubKey, vchCryptedSecret, mapKeyMetadata[vchPubKey.GetID()]);
+ }
}
if (keyPass && keyFail)
{
@@ -290,6 +298,43 @@ bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool i
return true;
}
+bool LegacyScriptPubKeyMan::TopUpInactiveHDChain(const CKeyID seed_id, int64_t index, bool internal)
+{
+ LOCK(cs_KeyStore);
+
+ if (m_storage.IsLocked()) return false;
+
+ auto it = m_inactive_hd_chains.find(seed_id);
+ if (it == m_inactive_hd_chains.end()) {
+ return false;
+ }
+
+ CHDChain& chain = it->second;
+
+ // Top up key pool
+ int64_t target_size = std::max(gArgs.GetArg("-keypool", DEFAULT_KEYPOOL_SIZE), (int64_t) 1);
+
+ // "size" of the keypools. Not really the size, actually the difference between index and the chain counter
+ // Since chain counter is 1 based and index is 0 based, one of them needs to be offset by 1.
+ int64_t kp_size = (internal ? chain.nInternalChainCounter : chain.nExternalChainCounter) - (index + 1);
+
+ // make sure the keypool fits the user-selected target (-keypool)
+ int64_t missing = std::max(target_size - kp_size, (int64_t) 0);
+
+ if (missing > 0) {
+ WalletBatch batch(m_storage.GetDatabase());
+ for (int64_t i = missing; i > 0; --i) {
+ GenerateNewKey(batch, chain, internal);
+ }
+ if (internal) {
+ WalletLogPrintf("inactive seed with id %s added %d internal keys\n", HexStr(seed_id), missing);
+ } else {
+ WalletLogPrintf("inactive seed with id %s added %d keys\n", HexStr(seed_id), missing);
+ }
+ }
+ return true;
+}
+
void LegacyScriptPubKeyMan::MarkUnusedAddresses(const CScript& script)
{
LOCK(cs_KeyStore);
@@ -297,13 +342,28 @@ void LegacyScriptPubKeyMan::MarkUnusedAddresses(const CScript& script)
for (const auto& keyid : GetAffectedKeys(script, *this)) {
std::map<CKeyID, int64_t>::const_iterator mi = m_pool_key_to_index.find(keyid);
if (mi != m_pool_key_to_index.end()) {
- WalletLogPrintf("%s: Detected a used keypool key, mark all keypool key up to this key as used\n", __func__);
+ WalletLogPrintf("%s: Detected a used keypool key, mark all keypool keys up to this key as used\n", __func__);
MarkReserveKeysAsUsed(mi->second);
if (!TopUp()) {
WalletLogPrintf("%s: Topping up keypool failed (locked wallet)\n", __func__);
}
}
+
+ // Find the key's metadata and check whether its seed id (if it has one) is inactive, i.e. it is not the current m_hd_chain seed id.
+ // If so, TopUp the inactive hd chain
+ auto it = mapKeyMetadata.find(keyid);
+ if (it != mapKeyMetadata.end()){
+ CKeyMetadata meta = it->second;
+ if (!meta.hd_seed_id.IsNull() && meta.hd_seed_id != m_hd_chain.seed_id) {
+ bool internal = (meta.key_origin.path[1] & ~BIP32_HARDENED_KEY_LIMIT) != 0;
+ int64_t index = meta.key_origin.path[2] & ~BIP32_HARDENED_KEY_LIMIT;
+
+ if (!TopUpInactiveHDChain(meta.hd_seed_id, index, internal)) {
+ WalletLogPrintf("%s: Adding inactive seed keys failed\n", __func__);
+ }
+ }
+ }
}
}
@@ -357,7 +417,7 @@ bool LegacyScriptPubKeyMan::SetupGeneration(bool force)
bool LegacyScriptPubKeyMan::IsHDEnabled() const
{
- return !hdChain.seed_id.IsNull();
+ return !m_hd_chain.seed_id.IsNull();
}
bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal) const
@@ -713,8 +773,13 @@ bool LegacyScriptPubKeyMan::AddKeyPubKeyInner(const CKey& key, const CPubKey &pu
return true;
}
-bool LegacyScriptPubKeyMan::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret)
+bool LegacyScriptPubKeyMan::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid)
{
+ // Set fDecryptionThoroughlyChecked to false when the checksum is invalid
+ if (!checksum_valid) {
+ fDecryptionThoroughlyChecked = false;
+ }
+
return AddCryptedKeyInner(vchPubKey, vchCryptedSecret);
}
@@ -838,10 +903,27 @@ bool LegacyScriptPubKeyMan::AddWatchOnly(const CScript& dest, int64_t nCreateTim
void LegacyScriptPubKeyMan::SetHDChain(const CHDChain& chain, bool memonly)
{
LOCK(cs_KeyStore);
- if (!memonly && !WalletBatch(m_storage.GetDatabase()).WriteHDChain(chain))
- throw std::runtime_error(std::string(__func__) + ": writing chain failed");
+ // memonly == true means we are loading the wallet file
+ // memonly == false means that the chain is actually being changed
+ if (!memonly) {
+ // Store the new chain
+ if (!WalletBatch(m_storage.GetDatabase()).WriteHDChain(chain)) {
+ throw std::runtime_error(std::string(__func__) + ": writing chain failed");
+ }
+ // When there's an old chain, add it as an inactive chain as we are now rotating hd chains
+ if (!m_hd_chain.seed_id.IsNull()) {
+ AddInactiveHDChain(m_hd_chain);
+ }
+ }
+
+ m_hd_chain = chain;
+}
- hdChain = chain;
+void LegacyScriptPubKeyMan::AddInactiveHDChain(const CHDChain& chain)
+{
+ LOCK(cs_KeyStore);
+ assert(!chain.seed_id.IsNull());
+ m_inactive_hd_chains[chain.seed_id] = chain;
}
bool LegacyScriptPubKeyMan::HaveKey(const CKeyID &address) const
@@ -920,7 +1002,7 @@ bool LegacyScriptPubKeyMan::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyO
return GetWatchPubKey(address, vchPubKeyOut);
}
-CPubKey LegacyScriptPubKeyMan::GenerateNewKey(WalletBatch &batch, bool internal)
+CPubKey LegacyScriptPubKeyMan::GenerateNewKey(WalletBatch &batch, CHDChain& hd_chain, bool internal)
{
assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS));
assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET));
@@ -935,7 +1017,7 @@ CPubKey LegacyScriptPubKeyMan::GenerateNewKey(WalletBatch &batch, bool internal)
// use HD key derivation if HD was enabled during wallet creation and a seed is present
if (IsHDEnabled()) {
- DeriveNewChildKey(batch, metadata, secret, (m_storage.CanSupportFeature(FEATURE_HD_SPLIT) ? internal : false));
+ DeriveNewChildKey(batch, metadata, secret, hd_chain, (m_storage.CanSupportFeature(FEATURE_HD_SPLIT) ? internal : false));
} else {
secret.MakeNewKey(fCompressed);
}
@@ -957,9 +1039,7 @@ CPubKey LegacyScriptPubKeyMan::GenerateNewKey(WalletBatch &batch, bool internal)
return pubkey;
}
-const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;
-
-void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata& metadata, CKey& secret, bool internal)
+void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata& metadata, CKey& secret, CHDChain& hd_chain, bool internal)
{
// for now we use a fixed keypath scheme of m/0'/0'/k
CKey seed; //seed (256bit)
@@ -969,7 +1049,7 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata&
CExtKey childKey; //key at m/0'/0'/<n>'
// try to get the seed
- if (!GetKey(hdChain.seed_id, seed))
+ if (!GetKey(hd_chain.seed_id, seed))
throw std::runtime_error(std::string(__func__) + ": seed not found");
masterKey.SetSeed(seed.begin(), seed.size());
@@ -988,30 +1068,30 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata&
// childIndex | BIP32_HARDENED_KEY_LIMIT = derive childIndex in hardened child-index-range
// example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649
if (internal) {
- chainChildKey.Derive(childKey, hdChain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
- metadata.hdKeypath = "m/0'/1'/" + ToString(hdChain.nInternalChainCounter) + "'";
+ chainChildKey.Derive(childKey, hd_chain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ metadata.hdKeypath = "m/0'/1'/" + ToString(hd_chain.nInternalChainCounter) + "'";
metadata.key_origin.path.push_back(0 | BIP32_HARDENED_KEY_LIMIT);
metadata.key_origin.path.push_back(1 | BIP32_HARDENED_KEY_LIMIT);
- metadata.key_origin.path.push_back(hdChain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
- hdChain.nInternalChainCounter++;
+ metadata.key_origin.path.push_back(hd_chain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ hd_chain.nInternalChainCounter++;
}
else {
- chainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
- metadata.hdKeypath = "m/0'/0'/" + ToString(hdChain.nExternalChainCounter) + "'";
+ chainChildKey.Derive(childKey, hd_chain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ metadata.hdKeypath = "m/0'/0'/" + ToString(hd_chain.nExternalChainCounter) + "'";
metadata.key_origin.path.push_back(0 | BIP32_HARDENED_KEY_LIMIT);
metadata.key_origin.path.push_back(0 | BIP32_HARDENED_KEY_LIMIT);
- metadata.key_origin.path.push_back(hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
- hdChain.nExternalChainCounter++;
+ metadata.key_origin.path.push_back(hd_chain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ hd_chain.nExternalChainCounter++;
}
} while (HaveKey(childKey.key.GetPubKey().GetID()));
secret = childKey.key;
- metadata.hd_seed_id = hdChain.seed_id;
+ metadata.hd_seed_id = hd_chain.seed_id;
CKeyID master_id = masterKey.key.GetPubKey().GetID();
std::copy(master_id.begin(), master_id.begin() + 4, metadata.key_origin.fingerprint);
metadata.has_key_origin = true;
// update the chain model in the database
- if (!batch.WriteHDChain(hdChain))
- throw std::runtime_error(std::string(__func__) + ": Writing HD chain model failed");
+ if (hd_chain.seed_id == m_hd_chain.seed_id && !batch.WriteHDChain(hd_chain))
+ throw std::runtime_error(std::string(__func__) + ": writing HD chain model failed");
}
void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
@@ -1166,7 +1246,7 @@ bool LegacyScriptPubKeyMan::TopUp(unsigned int kpSize)
internal = true;
}
- CPubKey pubkey(GenerateNewKey(batch, internal));
+ CPubKey pubkey(GenerateNewKey(batch, m_hd_chain, internal));
AddKeypoolPubkeyWithDB(pubkey, internal, batch);
}
if (missingInternal + missingExternal > 0) {
@@ -1239,7 +1319,7 @@ bool LegacyScriptPubKeyMan::GetKeyFromPool(CPubKey& result, const OutputType typ
if (!ReserveKeyFromKeyPool(nIndex, keypool, internal) && !m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
if (m_storage.IsLocked()) return false;
WalletBatch batch(m_storage.GetDatabase());
- result = GenerateNewKey(batch, internal);
+ result = GenerateNewKey(batch, m_hd_chain, internal);
return true;
}
KeepDestination(nIndex, type);
@@ -1497,7 +1577,7 @@ std::set<CKeyID> LegacyScriptPubKeyMan::GetKeys() const
return set_address;
}
-void LegacyScriptPubKeyMan::SetType(OutputType type, bool internal) {}
+void LegacyScriptPubKeyMan::SetInternal(bool internal) {}
bool DescriptorScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error)
{
@@ -1509,7 +1589,9 @@ bool DescriptorScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDest
{
LOCK(cs_desc_man);
assert(m_wallet_descriptor.descriptor->IsSingleType()); // This is a combo descriptor which should not be an active descriptor
- if (type != m_address_type) {
+ Optional<OutputType> desc_addr_type = m_wallet_descriptor.descriptor->GetOutputType();
+ assert(desc_addr_type);
+ if (type != *desc_addr_type) {
throw std::runtime_error(std::string(__func__) + ": Types are inconsistent");
}
@@ -1777,7 +1859,7 @@ bool DescriptorScriptPubKeyMan::AddDescriptorKeyWithDB(WalletBatch& batch, const
}
}
-bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_key)
+bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_key, OutputType addr_type)
{
LOCK(cs_desc_man);
assert(m_storage.IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS));
@@ -1794,7 +1876,7 @@ bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_
// Build descriptor string
std::string desc_prefix;
std::string desc_suffix = "/*)";
- switch (m_address_type) {
+ switch (addr_type) {
case OutputType::LEGACY: {
desc_prefix = "pkh(" + xpub + "/44'";
break;
@@ -2076,9 +2158,8 @@ uint256 DescriptorScriptPubKeyMan::GetID() const
return id;
}
-void DescriptorScriptPubKeyMan::SetType(OutputType type, bool internal)
+void DescriptorScriptPubKeyMan::SetInternal(bool internal)
{
- this->m_address_type = type;
this->m_internal = internal;
}
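For intuition on the keypool arithmetic in TopUpInactiveHDChain above, a small self-contained sketch of the same computation (hypothetical helper, not part of the patch):
#include <algorithm>
#include <cassert>
#include <cstdint>

// Mirrors TopUpInactiveHDChain: the chain counter is 1-based, the derivation
// index is 0-based, so the remaining keypool past the used key is
// counter - (index + 1), and we top up to the -keypool target.
int64_t MissingKeys(int64_t chain_counter, int64_t used_index, int64_t target_size)
{
    const int64_t kp_size = chain_counter - (used_index + 1);
    return std::max<int64_t>(target_size - kp_size, 0);
}

int main()
{
    // Counter 5, used key at index 7, -keypool=3: derive 6 more keys.
    assert(MissingKeys(5, 7, 3) == 6);
    // Counter 20, used key at index 7, -keypool=3: nothing to do.
    assert(MissingKeys(20, 7, 3) == 0);
    return 0;
}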
diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h
index 4c002edf2d..d62d30f339 100644
--- a/src/wallet/scriptpubkeyman.h
+++ b/src/wallet/scriptpubkeyman.h
@@ -18,6 +18,8 @@
#include <boost/signals2/signal.hpp>
+#include <unordered_map>
+
enum class OutputType;
struct bilingual_str;
@@ -110,40 +112,52 @@ public:
CKeyPool();
CKeyPool(const CPubKey& vchPubKeyIn, bool internalIn);
- ADD_SERIALIZE_METHODS;
+ template<typename Stream>
+ void Serialize(Stream& s) const
+ {
+ int nVersion = s.GetVersion();
+ if (!(s.GetType() & SER_GETHASH)) {
+ s << nVersion;
+ }
+ s << nTime << vchPubKey << fInternal << m_pre_split;
+ }
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
+ template<typename Stream>
+ void Unserialize(Stream& s)
+ {
int nVersion = s.GetVersion();
- if (!(s.GetType() & SER_GETHASH))
- READWRITE(nVersion);
- READWRITE(nTime);
- READWRITE(vchPubKey);
- if (ser_action.ForRead()) {
- try {
- READWRITE(fInternal);
- }
- catch (std::ios_base::failure&) {
- /* flag as external address if we can't read the internal boolean
- (this will be the case for any wallet before the HD chain split version) */
- fInternal = false;
- }
- try {
- READWRITE(m_pre_split);
- }
- catch (std::ios_base::failure&) {
- /* flag as postsplit address if we can't read the m_pre_split boolean
- (this will be the case for any wallet that upgrades to HD chain split)*/
- m_pre_split = false;
- }
+ if (!(s.GetType() & SER_GETHASH)) {
+ s >> nVersion;
+ }
+ s >> nTime >> vchPubKey;
+ try {
+ s >> fInternal;
+ } catch (std::ios_base::failure&) {
+ /* flag as external address if we can't read the internal boolean
+ (this will be the case for any wallet before the HD chain split version) */
+ fInternal = false;
}
- else {
- READWRITE(fInternal);
- READWRITE(m_pre_split);
+ try {
+ s >> m_pre_split;
+ } catch (std::ios_base::failure&) {
+ /* flag as postsplit address if we can't read the m_pre_split boolean
+ (this will be the case for any wallet that upgrades to HD chain split) */
+ m_pre_split = false;
}
}
};
+class KeyIDHasher
+{
+public:
+ KeyIDHasher() {}
+
+ size_t operator()(const CKeyID& id) const
+ {
+ return id.GetUint64(0);
+ }
+};
+
/*
* A class implementing ScriptPubKeyMan manages some (or all) scriptPubKeys used in a wallet.
* It contains the scripts and keys related to the scriptPubKeys it manages.
@@ -224,7 +238,7 @@ public:
virtual uint256 GetID() const { return uint256(); }
- virtual void SetType(OutputType type, bool internal) {}
+ virtual void SetInternal(bool internal) {}
/** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */
template<typename... Params>
@@ -243,7 +257,7 @@ class LegacyScriptPubKeyMan : public ScriptPubKeyMan, public FillableSigningProv
{
private:
//! keeps track of whether Unlock has run a thorough check before
- bool fDecryptionThoroughlyChecked = false;
+ bool fDecryptionThoroughlyChecked = true;
using WatchOnlySet = std::set<CScript>;
using WatchKeyMap = std::map<CKeyID, CPubKey>;
@@ -288,10 +302,11 @@ private:
bool AddKeyOriginWithDB(WalletBatch& batch, const CPubKey& pubkey, const KeyOriginInfo& info);
/* the HD chain data model (external chain counters) */
- CHDChain hdChain;
+ CHDChain m_hd_chain;
+ std::unordered_map<CKeyID, CHDChain, KeyIDHasher> m_inactive_hd_chains;
/* HD derive new child key (on internal or external chain) */
- void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
+ void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, CHDChain& hd_chain, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_KeyStore);
std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_KeyStore);
@@ -320,6 +335,18 @@ private:
*/
bool ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool fRequestedInternal);
+ /**
+ * Like TopUp() but adds keys for inactive HD chains.
+ * Ensures that there are at least -keypool number of keys derived after the given index.
+ *
+ * @param seed_id the CKeyID for the HD seed.
+ * @param index the index to start generating keys from
+ * @param internal whether the internal chain should be used. true for internal chain, false for external chain.
+ *
+ * @return true if the seed was found and keys were derived; false otherwise
+ */
+ bool TopUpInactiveHDChain(const CKeyID seed_id, int64_t index, bool internal);
+
public:
using ScriptPubKeyMan::ScriptPubKeyMan;
@@ -370,7 +397,7 @@ public:
uint256 GetID() const override;
- void SetType(OutputType type, bool internal) override;
+ void SetInternal(bool internal) override;
// Map from Key ID to key metadata.
std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_KeyStore);
@@ -385,7 +412,7 @@ public:
//! Adds an encrypted key to the store, and saves it to disk.
bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
//! Adds an encrypted key to the store, without saving it to disk (used by LoadWallet)
- bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
+ bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid);
void UpdateTimeFirstKey(int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
//! Adds a CScript to the store
bool LoadCScript(const CScript& redeemScript);
@@ -393,11 +420,12 @@ public:
void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata);
void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata);
//! Generate a new key
- CPubKey GenerateNewKey(WalletBatch& batch, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
+ CPubKey GenerateNewKey(WalletBatch& batch, CHDChain& hd_chain, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
/* Set the HD chain model (chain child index counters) */
void SetHDChain(const CHDChain& chain, bool memonly);
- const CHDChain& GetHDChain() const { return hdChain; }
+ const CHDChain& GetHDChain() const { return m_hd_chain; }
+ void AddInactiveHDChain(const CHDChain& chain);
//! Adds a watch-only address to the store, without saving it to disk (used by LoadWallet)
bool LoadWatchOnly(const CScript &dest);
@@ -497,14 +525,11 @@ private:
PubKeyMap m_map_pubkeys GUARDED_BY(cs_desc_man);
int32_t m_max_cached_index = -1;
- OutputType m_address_type;
bool m_internal = false;
KeyMap m_map_keys GUARDED_BY(cs_desc_man);
CryptedKeyMap m_map_crypted_keys GUARDED_BY(cs_desc_man);
- bool SetCrypted();
-
//! keeps track of whether Unlock has run a thorough check before
bool m_decryption_thoroughly_checked = false;
@@ -524,9 +549,9 @@ public:
: ScriptPubKeyMan(storage),
m_wallet_descriptor(descriptor)
{}
- DescriptorScriptPubKeyMan(WalletStorage& storage, OutputType address_type, bool internal)
+ DescriptorScriptPubKeyMan(WalletStorage& storage, bool internal)
: ScriptPubKeyMan(storage),
- m_address_type(address_type), m_internal(internal)
+ m_internal(internal)
{}
mutable RecursiveMutex cs_desc_man;
@@ -551,7 +576,7 @@ public:
bool IsHDEnabled() const override;
//! Setup descriptors based on the given CExtkey
- bool SetupDescriptorGeneration(const CExtKey& master_key);
+ bool SetupDescriptorGeneration(const CExtKey& master_key, OutputType addr_type);
bool HavePrivateKeys() const override;
@@ -575,7 +600,7 @@ public:
uint256 GetID() const override;
- void SetType(OutputType type, bool internal) override;
+ void SetInternal(bool internal) override;
void SetCache(const DescriptorCache& cache);
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index d888b8f842..3654420eb2 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -15,6 +15,7 @@
#include <rpc/server.h>
#include <test/util/logging.h>
#include <test/util/setup_common.h>
+#include <util/ref.h>
#include <util/translation.h>
#include <validation.h>
#include <wallet/coincontrol.h>
@@ -117,7 +118,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Prune the older block file.
{
LOCK(cs_main);
- PruneOneBlockFile(oldTip->GetBlockPos().nFile);
+ EnsureChainman(m_node).PruneOneBlockFile(oldTip->GetBlockPos().nFile);
}
UnlinkPrunedFiles({oldTip->GetBlockPos().nFile});
@@ -143,7 +144,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Prune the remaining block file.
{
LOCK(cs_main);
- PruneOneBlockFile(newTip->GetBlockPos().nFile);
+ EnsureChainman(m_node).PruneOneBlockFile(newTip->GetBlockPos().nFile);
}
UnlinkPrunedFiles({newTip->GetBlockPos().nFile});
@@ -180,7 +181,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
// Prune the older block file.
{
LOCK(cs_main);
- PruneOneBlockFile(oldTip->GetBlockPos().nFile);
+ EnsureChainman(m_node).PruneOneBlockFile(oldTip->GetBlockPos().nFile);
}
UnlinkPrunedFiles({oldTip->GetBlockPos().nFile});
@@ -208,7 +209,8 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
key.pushKV("timestamp", newTip->GetBlockTimeMax() + TIMESTAMP_WINDOW + 1);
key.pushKV("internal", UniValue(true));
keys.push_back(key);
- JSONRPCRequest request;
+ util::Ref context;
+ JSONRPCRequest request(context);
request.params.setArray();
request.params.push_back(keys);
@@ -262,7 +264,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
AddWallet(wallet);
wallet->SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
}
- JSONRPCRequest request;
+ util::Ref context;
+ JSONRPCRequest request(context);
request.params.setArray();
request.params.push_back(backup_file);
@@ -277,7 +280,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
LOCK(wallet->cs_wallet);
wallet->SetupLegacyScriptPubKeyMan();
- JSONRPCRequest request;
+ util::Ref context;
+ JSONRPCRequest request(context);
request.params.setArray();
request.params.push_back(backup_file);
AddWallet(wallet);
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 6826782073..7824563254 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1982,10 +1982,6 @@ void CWallet::ResendWalletTransactions()
nNextResend = GetTime() + (12 * 60 * 60) + GetRand(24 * 60 * 60);
if (fFirst) return;
- // Only do it if there's been a new block since last time
- if (m_best_block_time < nLastResend) return;
- nLastResend = GetTime();
-
int submitted_tx_count = 0;
{ // cs_wallet scope
@@ -4357,7 +4353,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans()
for (bool internal : {false, true}) {
for (OutputType t : OUTPUT_TYPES) {
- auto spk_manager = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, t, internal));
+ auto spk_manager = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, internal));
if (IsCrypted()) {
if (IsLocked()) {
throw std::runtime_error(std::string(__func__) + ": Wallet is locked, cannot setup new descriptors");
@@ -4366,7 +4362,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans()
throw std::runtime_error(std::string(__func__) + ": Could not encrypt new descriptors");
}
}
- spk_manager->SetupDescriptorGeneration(master_key);
+ spk_manager->SetupDescriptorGeneration(master_key, t);
uint256 id = spk_manager->GetID();
m_spk_managers[id] = std::move(spk_manager);
SetActiveScriptPubKeyMan(id, t, internal);
@@ -4379,7 +4375,7 @@ void CWallet::SetActiveScriptPubKeyMan(uint256 id, OutputType type, bool interna
WalletLogPrintf("Setting spkMan to active: id = %s, type = %d, internal = %d\n", id.ToString(), static_cast<int>(type), static_cast<int>(internal));
auto& spk_mans = internal ? m_internal_spk_managers : m_external_spk_managers;
auto spk_man = m_spk_managers.at(id).get();
- spk_man->SetType(type, internal);
+ spk_man->SetInternal(internal);
spk_mans[type] = spk_man;
if (!memonly) {
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index fc4cc9495c..29d04a0cba 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -641,7 +641,6 @@ private:
int nWalletMaxVersion GUARDED_BY(cs_wallet) = FEATURE_BASE;
int64_t nNextResend = 0;
- int64_t nLastResend = 0;
bool fBroadcastTransactions = false;
// Local time that the tip block was received. Used to schedule wallet rebroadcasts.
std::atomic<int64_t> m_best_block_time {0};
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 32fc002660..e7adbfea77 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -10,6 +10,7 @@
#include <protocol.h>
#include <serialize.h>
#include <sync.h>
+#include <util/bip32.h>
#include <util/system.h>
#include <util/time.h>
#include <wallet/wallet.h>
@@ -115,8 +116,19 @@ bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey,
return false;
}
- if (!WriteIC(std::make_pair(DBKeys::CRYPTED_KEY, vchPubKey), vchCryptedSecret, false)) {
- return false;
+ // Compute a checksum of the encrypted key
+ uint256 checksum = Hash(vchCryptedSecret.begin(), vchCryptedSecret.end());
+
+ const auto key = std::make_pair(DBKeys::CRYPTED_KEY, vchPubKey);
+ if (!WriteIC(key, std::make_pair(vchCryptedSecret, checksum), false)) {
+ // It may already exist, so try writing just the checksum
+ std::vector<unsigned char> val;
+ if (!m_batch.Read(key, val)) {
+ return false;
+ }
+ if (!WriteIC(key, std::make_pair(val, checksum), true)) {
+ return false;
+ }
}
EraseIC(std::make_pair(DBKeys::KEY, vchPubKey));
return true;
@@ -245,6 +257,7 @@ public:
std::map<uint256, DescriptorCache> m_descriptor_caches;
std::map<std::pair<uint256, CKeyID>, CKey> m_descriptor_keys;
std::map<std::pair<uint256, CKeyID>, std::pair<CPubKey, std::vector<unsigned char>>> m_descriptor_crypt_keys;
+ std::map<uint160, CHDChain> m_hd_chains;
CWalletScanState() {
}
@@ -397,9 +410,21 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
}
std::vector<unsigned char> vchPrivKey;
ssValue >> vchPrivKey;
+
+ // Get the checksum and check it
+ bool checksum_valid = false;
+ if (!ssValue.eof()) {
+ uint256 checksum;
+ ssValue >> checksum;
+ if (!(checksum_valid = Hash(vchPrivKey.begin(), vchPrivKey.end()) == checksum)) {
+ strErr = "Error reading wallet database: Crypted key corrupt";
+ return false;
+ }
+ }
+
wss.nCKeys++;
- if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey))
+ if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey, checksum_valid))
{
strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCryptedKey failed";
return false;
@@ -412,6 +437,65 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
ssValue >> keyMeta;
wss.nKeyMeta++;
pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta);
+
+ // Extract some CHDChain info from this metadata if it has any
+ if (keyMeta.nVersion >= CKeyMetadata::VERSION_WITH_HDDATA && !keyMeta.hd_seed_id.IsNull() && keyMeta.hdKeypath.size() > 0) {
+ // Get the path from the key origin or from the path string
+ // Not applicable when path is "s" as that indicates a seed
+ bool internal = false;
+ uint32_t index = 0;
+ if (keyMeta.hdKeypath != "s") {
+ std::vector<uint32_t> path;
+ if (keyMeta.has_key_origin) {
+ // We have a key origin, so pull it from its path vector
+ path = keyMeta.key_origin.path;
+ } else {
+ // No key origin, have to parse the string
+ if (!ParseHDKeypath(keyMeta.hdKeypath, path)) {
+ strErr = "Error reading wallet database: keymeta with invalid HD keypath";
+ return false;
+ }
+ }
+
+ // Extract the index and internal from the path
+ // Path string is m/0'/k'/i'
+ // Path vector is [0', k', i'] (but as ints OR'd with the hardened bit)
+ // k == 0 for external, 1 for internal. i is the index
+ if (path.size() != 3) {
+ strErr = "Error reading wallet database: keymeta found with unexpected path";
+ return false;
+ }
+ if (path[0] != 0x80000000) {
+ strErr = strprintf("Unexpected path index of 0x%08x (expected 0x80000000) for the element at index 0", path[0]);
+ return false;
+ }
+ if (path[1] != 0x80000000 && path[1] != (1 | 0x80000000)) {
+ strErr = strprintf("Unexpected path index of 0x%08x (expected 0x80000000 or 0x80000001) for the element at index 1", path[1]);
+ return false;
+ }
+ if ((path[2] & 0x80000000) == 0) {
+ strErr = strprintf("Unexpected path index of 0x%08x (expected to be greater than or equal to 0x80000000)", path[2]);
+ return false;
+ }
+ internal = path[1] == (1 | 0x80000000);
+ index = path[2] & ~0x80000000;
+ }
+
+ // Insert a new CHDChain, or get the one that already exists
+ auto ins = wss.m_hd_chains.emplace(keyMeta.hd_seed_id, CHDChain());
+ CHDChain& chain = ins.first->second;
+ if (ins.second) {
+ // For new chains, we want to default to VERSION_HD_BASE until we see an internal
+ chain.nVersion = CHDChain::VERSION_HD_BASE;
+ chain.seed_id = keyMeta.hd_seed_id;
+ }
+ if (internal) {
+ chain.nVersion = CHDChain::VERSION_HD_CHAIN_SPLIT;
+ chain.nInternalChainCounter = std::max(chain.nInternalChainCounter, index);
+ } else {
+ chain.nExternalChainCounter = std::max(chain.nExternalChainCounter, index);
+ }
+ }
} else if (strType == DBKeys::WATCHMETA) {
CScript script;
ssKey >> script;
@@ -742,6 +826,20 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
result = DBErrors::CORRUPT;
}
+ // Set the inactive chain
+ if (wss.m_hd_chains.size() > 0) {
+ LegacyScriptPubKeyMan* legacy_spkm = pwallet->GetLegacyScriptPubKeyMan();
+ if (!legacy_spkm) {
+ pwallet->WalletLogPrintf("Inactive HD Chains found but no Legacy ScriptPubKeyMan\n");
+ return DBErrors::CORRUPT;
+ }
+ for (const auto& chain_pair : wss.m_hd_chains) {
+ if (chain_pair.first != pwallet->GetLegacyScriptPubKeyMan()->GetHDChain().seed_id) {
+ pwallet->GetLegacyScriptPubKeyMan()->AddInactiveHDChain(chain_pair.second);
+ }
+ }
+ }
+
return result;
}
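A minimal sketch of the checksum scheme introduced above for crypted key records (the hash callback is a stand-in for the project's double-SHA256 Hash()): the writer stores (ciphertext, hash(ciphertext)) and the reader recomputes the hash to detect corruption.
#include <functional>
#include <utility>
#include <vector>

using Bytes = std::vector<unsigned char>;
using HashFn = std::function<Bytes(const Bytes&)>;  // stand-in for Hash()

// What WriteCryptedKey stores as the record value.
std::pair<Bytes, Bytes> MakeCryptedKeyRecord(const Bytes& crypted_secret, const HashFn& hash)
{
    return {crypted_secret, hash(crypted_secret)};
}

// What ReadKeyValue checks before handing the key to LoadCryptedKey.
bool ChecksumValid(const std::pair<Bytes, Bytes>& record, const HashFn& hash)
{
    return hash(record.first) == record.second;
}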
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index bcd1f9303d..b95ed24d12 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -98,15 +98,13 @@ public:
int nVersion;
CHDChain() { SetNull(); }
- ADD_SERIALIZE_METHODS;
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action)
+
+ SERIALIZE_METHODS(CHDChain, obj)
{
- READWRITE(this->nVersion);
- READWRITE(nExternalChainCounter);
- READWRITE(seed_id);
- if (this->nVersion >= VERSION_HD_CHAIN_SPLIT)
- READWRITE(nInternalChainCounter);
+ READWRITE(obj.nVersion, obj.nExternalChainCounter, obj.seed_id);
+ if (obj.nVersion >= VERSION_HD_CHAIN_SPLIT) {
+ READWRITE(obj.nInternalChainCounter);
+ }
}
void SetNull()
@@ -116,6 +114,11 @@ public:
nInternalChainCounter = 0;
seed_id.SetNull();
}
+
+ bool operator==(const CHDChain& chain) const
+ {
+ return seed_id == chain.seed_id;
+ }
};
class CKeyMetadata
@@ -142,21 +145,16 @@ public:
nCreateTime = nCreateTime_;
}
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(this->nVersion);
- READWRITE(nCreateTime);
- if (this->nVersion >= VERSION_WITH_HDDATA)
- {
- READWRITE(hdKeypath);
- READWRITE(hd_seed_id);
+ SERIALIZE_METHODS(CKeyMetadata, obj)
+ {
+ READWRITE(obj.nVersion, obj.nCreateTime);
+ if (obj.nVersion >= VERSION_WITH_HDDATA) {
+ READWRITE(obj.hdKeypath, obj.hd_seed_id);
}
- if (this->nVersion >= VERSION_WITH_KEY_ORIGIN)
+ if (obj.nVersion >= VERSION_WITH_KEY_ORIGIN)
{
- READWRITE(key_origin);
- READWRITE(has_key_origin);
+ READWRITE(obj.key_origin);
+ READWRITE(obj.has_key_origin);
}
}
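A worked example of the keypath extraction logic added to ReadKeyValue earlier in this diff (a sketch; the hardened bit 0x80000000 and the fixed m/0'/k'/i' layout are taken from the patch itself):
#include <cassert>
#include <cstdint>
#include <vector>

// Recovers (internal, index) from a path vector of the fixed form m/0'/k'/i'.
bool ExtractChainInfo(const std::vector<uint32_t>& path, bool& internal, uint32_t& index)
{
    const uint32_t HARDENED = 0x80000000;
    if (path.size() != 3) return false;
    if (path[0] != (0 | HARDENED)) return false;
    if (path[1] != (0 | HARDENED) && path[1] != (1 | HARDENED)) return false;
    if ((path[2] & HARDENED) == 0) return false;
    internal = (path[1] == (1 | HARDENED));
    index = path[2] & ~HARDENED;
    return true;
}

int main()
{
    bool internal = false;
    uint32_t index = 0;
    // m/0'/1'/5' -> internal chain, index 5
    assert(ExtractChainInfo({0x80000000, 0x80000001, 0x80000005}, internal, index));
    assert(internal && index == 5);
    return 0;
}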
diff --git a/src/wallet/walletutil.h b/src/wallet/walletutil.h
index 599b1a9f5a..a4e4fda8a1 100644
--- a/src/wallet/walletutil.h
+++ b/src/wallet/walletutil.h
@@ -98,26 +98,22 @@ public:
int32_t next_index = 0; // Position of the next item to generate
DescriptorCache cache;
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- if (ser_action.ForRead()) {
- std::string desc;
- std::string error;
- READWRITE(desc);
- FlatSigningProvider keys;
- descriptor = Parse(desc, keys, error, true);
- if (!descriptor) {
- throw std::ios_base::failure("Invalid descriptor: " + error);
- }
- } else {
- READWRITE(descriptor->ToString());
+ void DeserializeDescriptor(const std::string& str)
+ {
+ std::string error;
+ FlatSigningProvider keys;
+ descriptor = Parse(str, keys, error, true);
+ if (!descriptor) {
+ throw std::ios_base::failure("Invalid descriptor: " + error);
}
- READWRITE(creation_time);
- READWRITE(next_index);
- READWRITE(range_start);
- READWRITE(range_end);
+ }
+
+ SERIALIZE_METHODS(WalletDescriptor, obj)
+ {
+ std::string descriptor_str;
+ SER_WRITE(obj, descriptor_str = obj.descriptor->ToString());
+ READWRITE(descriptor_str, obj.creation_time, obj.next_index, obj.range_start, obj.range_end);
+ SER_READ(obj, obj.DeserializeDescriptor(descriptor_str));
}
WalletDescriptor() {}
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
index 70dfe81d4e..5d782026dc 100755
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -15,7 +15,7 @@ from collections import defaultdict
# Avoid wildcard * imports
from test_framework.blocktools import (create_block, create_coinbase)
-from test_framework.messages import CInv
+from test_framework.messages import CInv, MSG_BLOCK
from test_framework.mininode import (
P2PInterface,
mininode_lock,
@@ -198,7 +198,7 @@ class ExampleTest(BitcoinTestFramework):
getdata_request = msg_getdata()
for block in blocks:
- getdata_request.inv.append(CInv(2, block))
+ getdata_request.inv.append(CInv(MSG_BLOCK, block))
self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index d4a8f8a715..9579a1715d 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -13,7 +13,7 @@ if uploadtarget has been reached.
from collections import defaultdict
import time
-from test_framework.messages import CInv, msg_getdata
+from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, mine_large_block
@@ -84,7 +84,7 @@ class MaxUploadTest(BitcoinTestFramework):
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
- getdata_request.inv.append(CInv(2, big_old_block))
+ getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
@@ -109,7 +109,7 @@ class MaxUploadTest(BitcoinTestFramework):
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
- getdata_request.inv = [CInv(2, big_new_block)]
+ getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(800):
p2p_conns[1].send_and_ping(getdata_request)
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
@@ -117,7 +117,7 @@ class MaxUploadTest(BitcoinTestFramework):
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
- getdata_request.inv = [CInv(2, big_old_block)]
+ getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
@@ -145,12 +145,12 @@ class MaxUploadTest(BitcoinTestFramework):
self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
- getdata_request.inv = [CInv(2, big_new_block)]
+ getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(20):
self.nodes[0].p2p.send_and_ping(getdata_request)
assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
- getdata_request.inv = [CInv(2, big_old_block)]
+ getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
self.nodes[0].p2p.send_and_ping(getdata_request)
assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist
diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py
index 1c94305220..7530e7daf6 100755
--- a/test/functional/interface_bitcoin_cli.py
+++ b/test/functional/interface_bitcoin_cli.py
@@ -67,6 +67,7 @@ class TestBitcoinCli(BitcoinTestFramework):
if self.is_wallet_compiled():
self.log.info("Test -getinfo and bitcoin-cli getwalletinfo return expected wallet info")
assert_equal(cli_get_info['balance'], BALANCE)
+ assert 'balances' not in cli_get_info.keys()
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['unlocked_until'], wallet_info['unlocked_until'])
@@ -76,42 +77,60 @@ class TestBitcoinCli(BitcoinTestFramework):
# Setup to test -getinfo and -rpcwallet= with multiple wallets.
wallets = ['', 'Encrypted', 'secret']
- amounts = [Decimal('59.999928'), Decimal(9), Decimal(31)]
+ amounts = [BALANCE + Decimal('9.999928'), Decimal(9), Decimal(31)]
self.nodes[0].createwallet(wallet_name=wallets[1])
self.nodes[0].createwallet(wallet_name=wallets[2])
w1 = self.nodes[0].get_wallet_rpc(wallets[0])
w2 = self.nodes[0].get_wallet_rpc(wallets[1])
w3 = self.nodes[0].get_wallet_rpc(wallets[2])
w1.walletpassphrase(password, self.rpc_timeout)
+ w2.encryptwallet(password)
w1.sendtoaddress(w2.getnewaddress(), amounts[1])
w1.sendtoaddress(w3.getnewaddress(), amounts[2])
# Mine a block to confirm; adds a block reward (50 BTC) to the default wallet.
self.nodes[0].generate(1)
- self.log.info("Test -getinfo with multiple wallets loaded returns no balance")
- assert_equal(set(self.nodes[0].listwallets()), set(wallets))
- assert 'balance' not in self.nodes[0].cli('-getinfo').send_cli().keys()
-
self.log.info("Test -getinfo with multiple wallets and -rpcwallet returns specified wallet balance")
for i in range(len(wallets)):
- cli_get_info = self.nodes[0].cli('-getinfo').send_cli('-rpcwallet={}'.format(wallets[i]))
+ cli_get_info = self.nodes[0].cli('-getinfo', '-rpcwallet={}'.format(wallets[i])).send_cli()
+ assert 'balances' not in cli_get_info.keys()
assert_equal(cli_get_info['balance'], amounts[i])
- self.log.info("Test -getinfo with multiple wallets and -rpcwallet=non-existing-wallet returns no balance")
- assert 'balance' not in self.nodes[0].cli('-getinfo').send_cli('-rpcwallet=does-not-exist').keys()
+ self.log.info("Test -getinfo with multiple wallets and -rpcwallet=non-existing-wallet returns no balances")
+ cli_get_info_keys = self.nodes[0].cli('-getinfo', '-rpcwallet=does-not-exist').send_cli().keys()
+ assert 'balance' not in cli_get_info_keys
+ assert 'balances' not in cli_get_info_keys
- self.log.info("Test -getinfo after unloading all wallets except a non-default one returns its balance")
+ self.log.info("Test -getinfo with multiple wallets returns all loaded wallet names and balances")
+ assert_equal(set(self.nodes[0].listwallets()), set(wallets))
+ cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
+ assert 'balance' not in cli_get_info.keys()
+ assert_equal(cli_get_info['balances'], {k: v for k, v in zip(wallets, amounts)})
+
+ # Unload the default wallet and re-verify.
self.nodes[0].unloadwallet(wallets[0])
+ assert wallets[0] not in self.nodes[0].listwallets()
+ cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
+ assert 'balance' not in cli_get_info.keys()
+ assert_equal(cli_get_info['balances'], {k: v for k, v in zip(wallets[1:], amounts[1:])})
+
+ self.log.info("Test -getinfo after unloading all wallets except a non-default one returns its balance")
self.nodes[0].unloadwallet(wallets[2])
assert_equal(self.nodes[0].listwallets(), [wallets[1]])
- assert_equal(self.nodes[0].cli('-getinfo').send_cli()['balance'], amounts[1])
-
- self.log.info("Test -getinfo -rpcwallet=remaining-non-default-wallet returns its balance")
- assert_equal(self.nodes[0].cli('-getinfo').send_cli('-rpcwallet={}'.format(wallets[1]))['balance'], amounts[1])
-
- self.log.info("Test -getinfo with -rpcwallet=unloaded wallet returns no balance")
- assert 'balance' not in self.nodes[0].cli('-getinfo').send_cli('-rpcwallet={}'.format(wallets[2])).keys()
+ cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
+ assert 'balances' not in cli_get_info.keys()
+ assert_equal(cli_get_info['balance'], amounts[1])
+
+ self.log.info("Test -getinfo with -rpcwallet=remaining-non-default-wallet returns only its balance")
+ cli_get_info = self.nodes[0].cli('-getinfo', '-rpcwallet={}'.format(wallets[1])).send_cli()
+ assert 'balances' not in cli_get_info.keys()
+ assert_equal(cli_get_info['balance'], amounts[1])
+
+ self.log.info("Test -getinfo with -rpcwallet=unloaded wallet returns no balances")
+ cli_get_info_keys = self.nodes[0].cli('-getinfo', '-rpcwallet={}'.format(wallets[2])).send_cli().keys()
+ assert 'balance' not in cli_get_info_keys
+ assert 'balances' not in cli_get_info_keys
else:
self.log.info("*** Wallet not compiled; cli getwalletinfo and -getinfo wallet tests skipped")
self.nodes[0].generate(1) # maintain block parity with the wallet_compiled conditional branch
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index a07dad18d6..5b7216b253 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -7,6 +7,7 @@
from decimal import Decimal
from test_framework.messages import COIN
+from test_framework.mininode import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
@@ -58,6 +59,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
def run_test(self):
# Mine some blocks and have them mature.
+ self.nodes[0].add_p2p_connection(P2PTxInvStore()) # keep track of invs
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
@@ -72,6 +74,10 @@ class MempoolPackagesTest(BitcoinTestFramework):
value = sent_value
chain.append(txid)
+ # Wait until mempool transactions have passed initial broadcast (sent inv and received getdata)
+ # Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between
+ self.nodes[0].p2p.wait_for_broadcast(chain)
+
# Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
@@ -212,6 +218,10 @@ class MempoolPackagesTest(BitcoinTestFramework):
for tx in chain[:MAX_ANCESTORS_CUSTOM]:
assert tx in mempool1
# TODO: more detailed check of node1's mempool (fees etc.)
+ # check transaction unbroadcast info (should be false if in both mempools)
+ mempool = self.nodes[0].getrawmempool(True)
+ for tx in mempool:
+ assert_equal(mempool[tx]['unbroadcast'], False)
# TODO: test ancestor size limits
diff --git a/test/functional/mempool_unbroadcast.py b/test/functional/mempool_unbroadcast.py
index a561f28b91..dedf5b8a47 100755
--- a/test/functional/mempool_unbroadcast.py
+++ b/test/functional/mempool_unbroadcast.py
@@ -53,6 +53,13 @@ class MempoolUnbroadcastTest(BitcoinTestFramework):
txFS = node.signrawtransactionwithwallet(txF["hex"])
rpc_tx_hsh = node.sendrawtransaction(txFS["hex"])
+ # check transactions are in unbroadcast using rpc
+ mempoolinfo = self.nodes[0].getmempoolinfo()
+ assert_equal(mempoolinfo['unbroadcastcount'], 2)
+ mempool = self.nodes[0].getrawmempool(True)
+ for tx in mempool:
+ assert_equal(mempool[tx]['unbroadcast'], True)
+
# check that second node doesn't have these two txns
mempool = self.nodes[1].getrawmempool()
assert rpc_tx_hsh not in mempool
@@ -71,6 +78,11 @@ class MempoolUnbroadcastTest(BitcoinTestFramework):
assert rpc_tx_hsh in mempool
assert wallet_tx_hsh in mempool
+ # check that transactions are no longer in first node's unbroadcast set
+ mempool = self.nodes[0].getrawmempool(True)
+ for tx in mempool:
+ assert_equal(mempool[tx]['unbroadcast'], False)
+
self.log.info("Add another connection & ensure transactions aren't broadcast again")
conn = node.add_p2p_connection(P2PTxInvStore())
diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py
index 4d00a6dc07..9ff76b4b3d 100755
--- a/test/functional/p2p_blockfilters.py
+++ b/test/functional/p2p_blockfilters.py
@@ -5,12 +5,16 @@
"""Tests NODE_COMPACT_FILTERS (BIP 157/158).
Tests that a node configured with -blockfilterindex and -peerblockfilters can serve
-cfcheckpts.
+cfheaders and cfcheckpts.
"""
from test_framework.messages import (
FILTER_TYPE_BASIC,
+ hash256,
msg_getcfcheckpt,
+ msg_getcfheaders,
+ ser_uint256,
+ uint256_from_str,
)
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
@@ -100,12 +104,45 @@ class CompactFiltersTest(BitcoinTestFramework):
[int(header, 16) for header in (stale_cfcheckpt,)]
)
+ self.log.info("Check that peers can fetch cfheaders on active chain.")
+ request = msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1,
+ stop_hash=int(main_block_hash, 16)
+ )
+ node0.send_and_ping(request)
+ response = node0.last_message['cfheaders']
+ assert_equal(len(response.hashes), 1000)
+ assert_equal(
+ compute_last_header(response.prev_header, response.hashes),
+ int(main_cfcheckpt, 16)
+ )
+
+ self.log.info("Check that peers can fetch cfheaders on stale chain.")
+ request = msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1,
+ stop_hash=int(stale_block_hash, 16)
+ )
+ node0.send_and_ping(request)
+ response = node0.last_message['cfheaders']
+ assert_equal(len(response.hashes), 1000)
+ assert_equal(
+ compute_last_header(response.prev_header, response.hashes),
+ int(stale_cfcheckpt, 16)
+ )
+
self.log.info("Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.")
requests = [
msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(main_block_hash, 16)
),
+ msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1000,
+ stop_hash=int(main_block_hash, 16)
+ ),
]
for request in requests:
node1 = self.nodes[1].add_p2p_connection(P2PInterface())
@@ -114,6 +151,12 @@ class CompactFiltersTest(BitcoinTestFramework):
self.log.info("Check that invalid requests result in disconnection.")
requests = [
+ # Requesting too many filter headers results in disconnection.
+ msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=0,
+ stop_hash=int(tip_hash, 16)
+ ),
# Requesting unknown filter type results in disconnection.
msg_getcfcheckpt(
filter_type=255,
@@ -130,5 +173,12 @@ class CompactFiltersTest(BitcoinTestFramework):
node0.send_message(request)
node0.wait_for_disconnect()
+def compute_last_header(prev_header, hashes):
+ """Compute the last filter header from a starting header and a sequence of filter hashes."""
+ header = ser_uint256(prev_header)
+ for filter_hash in hashes:
+ header = hash256(ser_uint256(filter_hash) + header)
+ return uint256_from_str(header)
+
if __name__ == '__main__':
CompactFiltersTest().main()
diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py
index 3258a38e3c..c155dda664 100755
--- a/test/functional/p2p_blocksonly.py
+++ b/test/functional/p2p_blocksonly.py
@@ -57,6 +57,29 @@ class P2PBlocksOnly(BitcoinTestFramework):
self.nodes[0].p2p.wait_for_tx(txid)
assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
+ self.log.info('Check that txs from whitelisted peers are not rejected and are relayed to others')
+ self.log.info("Restarting node 0 with whitelist permission and blocksonly")
+ self.restart_node(0, ["-persistmempool=0", "-whitelist=127.0.0.1", "-whitelistforcerelay", "-blocksonly"])
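+ # -whitelist combined with -whitelistforcerelay grants the peer the 'forcerelay' permission,
+ # so its transactions are accepted and relayed even though the node runs with -blocksonly.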
+ assert_equal(self.nodes[0].getrawmempool(), [])
+ first_peer = self.nodes[0].add_p2p_connection(P2PInterface())
+ second_peer = self.nodes[0].add_p2p_connection(P2PInterface())
+ peer_1_info = self.nodes[0].getpeerinfo()[0]
+ assert_equal(peer_1_info['whitelisted'], True)
+ assert_equal(peer_1_info['permissions'], ['noban', 'forcerelay', 'relay', 'mempool'])
+ peer_2_info = self.nodes[0].getpeerinfo()[1]
+ assert_equal(peer_2_info['whitelisted'], True)
+ assert_equal(peer_2_info['permissions'], ['noban', 'forcerelay', 'relay', 'mempool'])
+ assert_equal(self.nodes[0].testmempoolaccept([sigtx])[0]['allowed'], True)
+ txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
+
+ self.log.info('Check that the tx from whitelisted first_peer is relayed to others (i.e. second_peer)')
+ with self.nodes[0].assert_debug_log(["received getdata"]):
+ first_peer.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
+ self.log.info('Check that the whitelisted peer is still connected after sending the transaction')
+ assert_equal(first_peer.is_connected, True)
+ second_peer.wait_for_tx(txid)
+ assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
+ self.log.info("Whitelisted peer's transaction is accepted and relayed")
if __name__ == '__main__':
P2PBlocksOnly().main()
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index 66e6f8c424..d77a744758 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -10,7 +10,7 @@ Version 2 compact blocks are post-segwit (wtxids)
import random
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
-from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
+from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_BLOCK, MSG_CMPCT_BLOCK, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
@@ -44,7 +44,7 @@ class TestP2PConn(P2PInterface):
def on_inv(self, message):
for x in self.last_message["inv"].inv:
- if x.type == 2:
+ if x.type == MSG_BLOCK:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
@@ -307,7 +307,7 @@ class CompactBlocksTest(BitcoinTestFramework):
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
- inv = CInv(4, block_hash) # 4 == "CompactBlock"
+ inv = CInv(MSG_CMPCT_BLOCK, block_hash)
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
@@ -380,7 +380,7 @@ class CompactBlocksTest(BitcoinTestFramework):
block = self.build_block_on_tip(node, segwit=segwit)
if announce == "inv":
- test_node.send_message(msg_inv([CInv(2, block.sha256)]))
+ test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
@@ -564,7 +564,8 @@ class CompactBlocksTest(BitcoinTestFramework):
# We should receive a getdata request
test_node.wait_for_getdata([block.sha256], timeout=10)
- assert test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG
+ assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK or \
+ test_node.last_message["getdata"].inv[0].type == MSG_BLOCK | MSG_WITNESS_FLAG
# Deliver the block
if version == 2:
@@ -633,7 +634,7 @@ class CompactBlocksTest(BitcoinTestFramework):
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
- test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
@@ -642,7 +643,7 @@ class CompactBlocksTest(BitcoinTestFramework):
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
- test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py
index 4f242bd94a..805cb1e84f 100755
--- a/test/functional/p2p_feefilter.py
+++ b/test/functional/p2p_feefilter.py
@@ -7,7 +7,7 @@
from decimal import Decimal
import time
-from test_framework.messages import msg_feefilter
+from test_framework.messages import MSG_TX, msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
@@ -31,7 +31,7 @@ class TestP2PConn(P2PInterface):
def on_inv(self, message):
for i in message.inv:
- if (i.type == 1):
+ if (i.type == MSG_TX):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
diff --git a/test/functional/p2p_fingerprint.py b/test/functional/p2p_fingerprint.py
index c9fbb830c8..d743abe681 100755
--- a/test/functional/p2p_fingerprint.py
+++ b/test/functional/p2p_fingerprint.py
@@ -11,7 +11,7 @@ the node should pretend that it does not have it to avoid fingerprinting.
import time
from test_framework.blocktools import (create_block, create_coinbase)
-from test_framework.messages import CInv
+from test_framework.messages import CInv, MSG_BLOCK
from test_framework.mininode import (
P2PInterface,
msg_headers,
@@ -48,7 +48,7 @@ class P2PFingerprintTest(BitcoinTestFramework):
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
msg = msg_getdata()
- msg.inv.append(CInv(2, block_hash)) # 2 == "Block"
+ msg.inv.append(CInv(MSG_BLOCK, block_hash))
node.send_message(msg)
# Send a getheaders request for a given single block hash
diff --git a/test/functional/p2p_getdata.py b/test/functional/p2p_getdata.py
index fd94a09d80..d1b11c2c61 100755
--- a/test/functional/p2p_getdata.py
+++ b/test/functional/p2p_getdata.py
@@ -9,15 +9,11 @@ from test_framework.messages import (
CInv,
msg_getdata,
)
-from test_framework.mininode import (
- mininode_lock,
- P2PInterface,
-)
+from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import wait_until
-class P2PStoreBlock(P2PInterface):
+class P2PStoreBlock(P2PInterface):
def __init__(self):
super().__init__()
self.blocks = defaultdict(int)
@@ -26,26 +22,28 @@ class P2PStoreBlock(P2PInterface):
message.block.calc_sha256()
self.blocks[message.block.sha256] += 1
+
class GetdataTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
- self.nodes[0].add_p2p_connection(P2PStoreBlock())
+ p2p_block_store = self.nodes[0].add_p2p_connection(P2PStoreBlock())
self.log.info("test that an invalid GETDATA doesn't prevent processing of future messages")
# Send invalid message and verify that node responds to later ping
invalid_getdata = msg_getdata()
invalid_getdata.inv.append(CInv(t=0, h=0)) # INV type 0 is invalid.
- self.nodes[0].p2ps[0].send_and_ping(invalid_getdata)
+ p2p_block_store.send_and_ping(invalid_getdata)
# Check getdata still works by fetching tip block
best_block = int(self.nodes[0].getbestblockhash(), 16)
good_getdata = msg_getdata()
good_getdata.inv.append(CInv(t=2, h=best_block))
- self.nodes[0].p2ps[0].send_and_ping(good_getdata)
- wait_until(lambda: self.nodes[0].p2ps[0].blocks[best_block] == 1, timeout=30, lock=mininode_lock)
+ p2p_block_store.send_and_ping(good_getdata)
+ p2p_block_store.wait_until(lambda: self.nodes[0].p2ps[0].blocks[best_block] == 1)
+
if __name__ == '__main__':
GetdataTest().main()
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 4bd832e8f7..81302374c9 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -7,7 +7,16 @@ import asyncio
import struct
import sys
-from test_framework import messages
+from test_framework.messages import (
+ CBlockHeader,
+ CInv,
+ msg_getdata,
+ msg_headers,
+ msg_inv,
+ msg_ping,
+ MSG_TX,
+ ser_string,
+)
from test_framework.mininode import (
NetworkThread,
P2PDataStore,
@@ -25,7 +34,7 @@ class msg_unrecognized:
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
def serialize(self):
- return messages.ser_string(self.str_data)
+ return ser_string(self.str_data)
def __repr__(self):
return "{}(data={})".format(self.msgtype, self.str_data)
@@ -135,7 +144,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
# For some reason unknown to me, we sometimes have to push additional data to the
# peer in order for it to realize a disconnect.
try:
- node.p2p.send_message(messages.msg_ping(nonce=123123))
+ node.p2p.send_message(msg_ping(nonce=123123))
except IOError:
pass
@@ -158,7 +167,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result()
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']):
- conn.send_message(messages.msg_ping(nonce=0xff))
+ conn.send_message(msg_ping(nonce=0xff))
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
@@ -206,13 +215,13 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_large_inv(self):
conn = self.nodes[0].add_p2p_connection(P2PInterface())
with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (0 -> 20): message inv size() = 50001']):
- msg = messages.msg_inv([messages.CInv(1, 1)] * 50001)
+ msg = msg_inv([CInv(MSG_TX, 1)] * 50001)
conn.send_and_ping(msg)
with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (20 -> 40): message getdata size() = 50001']):
- msg = messages.msg_getdata([messages.CInv(1, 1)] * 50001)
+ msg = msg_getdata([CInv(MSG_TX, 1)] * 50001)
conn.send_and_ping(msg)
with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (40 -> 60): headers message size = 2001']):
- msg = messages.msg_headers([messages.CBlockHeader()] * 2001)
+ msg = msg_headers([CBlockHeader()] * 2001)
conn.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
diff --git a/test/functional/p2p_leak_tx.py b/test/functional/p2p_leak_tx.py
index 6b3436fa5f..da30ad5977 100755
--- a/test/functional/p2p_leak_tx.py
+++ b/test/functional/p2p_leak_tx.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that we don't leak txs to inbound peers that we haven't yet announced to"""
-from test_framework.messages import msg_getdata, CInv
+from test_framework.messages import msg_getdata, CInv, MSG_TX
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -37,7 +37,7 @@ class P2PLeakTxTest(BitcoinTestFramework):
txid = gen_node.sendtoaddress(gen_node.getnewaddress(), 0.01)
want_tx = msg_getdata()
- want_tx.inv.append(CInv(t=1, h=int(txid, 16)))
+ want_tx.inv.append(CInv(t=MSG_TX, h=int(txid, 16)))
inbound_peer.last_message.pop('notfound', None)
inbound_peer.send_and_ping(want_tx)
diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py
index e6451d9f18..9c8c36c89e 100755
--- a/test/functional/p2p_node_network_limited.py
+++ b/test/functional/p2p_node_network_limited.py
@@ -8,7 +8,7 @@ Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correc
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
-from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
+from test_framework.messages import CInv, MSG_BLOCK, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -31,7 +31,7 @@ class P2PIgnoreInv(P2PInterface):
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
- getdata_request.inv.append(CInv(2, int(blockhash, 16)))
+ getdata_request.inv.append(CInv(MSG_BLOCK, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 6fb0fec32b..8a989097b4 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -22,6 +22,8 @@ from test_framework.messages import (
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
+ MSG_BLOCK,
+ MSG_TX,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
@@ -157,7 +159,7 @@ class TestP2PConn(P2PInterface):
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
- self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
+ self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)]))
if success:
self.wait_for_getdata([tx.sha256], timeout)
else:
@@ -173,7 +175,7 @@ class TestP2PConn(P2PInterface):
if use_header:
self.send_message(msg)
else:
- self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
+ self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata([block.sha256])
@@ -576,7 +578,7 @@ class SegWitTest(BitcoinTestFramework):
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
- assert self.old_node.last_message["getdata"].inv[0].type == 1
+ assert self.old_node.last_message["getdata"].inv[0].type == MSG_TX
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
@@ -1310,9 +1312,9 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
- self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
+ self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
- self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
+ self.old_node.wait_for_inv([CInv(MSG_TX, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
index a8fba306a7..481b1c1841 100755
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -92,6 +92,7 @@ from test_framework.mininode import (
NODE_WITNESS,
P2PInterface,
mininode_lock,
+ MSG_BLOCK,
msg_block,
msg_getblocks,
msg_getdata,
@@ -120,7 +121,7 @@ class BaseNode(P2PInterface):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
- msg.inv.append(CInv(2, x))
+ msg.inv.append(CInv(MSG_BLOCK, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
@@ -131,7 +132,7 @@ class BaseNode(P2PInterface):
def send_block_inv(self, blockhash):
msg = msg_inv()
- msg.inv = [CInv(2, blockhash)]
+ msg.inv = [CInv(MSG_BLOCK, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index a999fba818..10f5eea0e5 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -63,7 +63,7 @@ class TxDownloadTest(BitcoinTestFramework):
txid = 0xdeadbeef
self.log.info("Announce the txid from each incoming peer to node 0")
- msg = msg_inv([CInv(t=1, h=txid)])
+ msg = msg_inv([CInv(t=MSG_TX, h=txid)])
for p in self.nodes[0].p2ps:
p.send_and_ping(msg)
@@ -104,7 +104,7 @@ class TxDownloadTest(BitcoinTestFramework):
self.log.info(
"Announce the transaction to all nodes from all {} incoming peers, but never send it".format(NUM_INBOUND))
- msg = msg_inv([CInv(t=1, h=txid)])
+ msg = msg_inv([CInv(t=MSG_TX, h=txid)])
for p in self.peers:
p.send_and_ping(msg)
@@ -135,13 +135,13 @@ class TxDownloadTest(BitcoinTestFramework):
with mininode_lock:
p.tx_getdata_count = 0
- p.send_message(msg_inv([CInv(t=1, h=i) for i in txids]))
+ p.send_message(msg_inv([CInv(t=MSG_TX, h=i) for i in txids]))
wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT, lock=mininode_lock)
with mininode_lock:
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)
self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request")
- p.send_message(msg_notfound(vec=[CInv(t=1, h=txids[0])]))
+ p.send_message(msg_notfound(vec=[CInv(t=MSG_TX, h=txids[0])]))
wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10, lock=mininode_lock)
with mininode_lock:
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1)
@@ -154,7 +154,7 @@ class TxDownloadTest(BitcoinTestFramework):
def test_spurious_notfound(self):
self.log.info('Check that spurious notfound is ignored')
- self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(1, 1)]))
+ self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
def run_test(self):
# Setup the p2p connections
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
index 3aaf4b9977..c323168848 100755
--- a/test/functional/p2p_unrequested_blocks.py
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -54,7 +54,7 @@ Node1 is unused in tests 3-7:
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
-from test_framework.messages import CBlockHeader, CInv, msg_block, msg_headers, msg_inv
+from test_framework.messages import CBlockHeader, CInv, MSG_BLOCK, msg_block, msg_headers, msg_inv
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -210,7 +210,7 @@ class AcceptBlockTest(BitcoinTestFramework):
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
- test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
+ test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index ef5ef49eaf..d178e79541 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -54,6 +54,7 @@ NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_FILTERED_BLOCK = 3
+MSG_CMPCT_BLOCK = 4
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
@@ -1515,6 +1516,59 @@ class msg_no_witness_blocktxn(msg_blocktxn):
def serialize(self):
return self.block_transactions.serialize(with_witness=False)
+class msg_getcfheaders:
+ __slots__ = ("filter_type", "start_height", "stop_hash")
+ msgtype = b"getcfheaders"
+
+ def __init__(self, filter_type, start_height, stop_hash):
+ self.filter_type = filter_type
+ self.start_height = start_height
+ self.stop_hash = stop_hash
+
+ def deserialize(self, f):
+ self.filter_type = struct.unpack("<B", f.read(1))[0]
+ self.start_height = struct.unpack("<I", f.read(4))[0]
+ self.stop_hash = deser_uint256(f)
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<B", self.filter_type)
+ r += struct.pack("<I", self.start_height)
+ r += ser_uint256(self.stop_hash)
+ return r
+
+ def __repr__(self):
+ return "msg_getcfheaders(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
+ self.filter_type, self.start_height, self.stop_hash)
+
+class msg_cfheaders:
+ __slots__ = ("filter_type", "stop_hash", "prev_header", "hashes")
+ msgtype = b"cfheaders"
+
+ def __init__(self, filter_type=None, stop_hash=None, prev_header=None, hashes=None):
+ self.filter_type = filter_type
+ self.stop_hash = stop_hash
+ self.prev_header = prev_header
+ self.hashes = hashes
+
+ def deserialize(self, f):
+ self.filter_type = struct.unpack("<B", f.read(1))[0]
+ self.stop_hash = deser_uint256(f)
+ self.prev_header = deser_uint256(f)
+ self.hashes = deser_uint256_vector(f)
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<B", self.filter_type)
+ r += ser_uint256(self.stop_hash)
+ r += ser_uint256(self.prev_header)
+ r += ser_uint256_vector(self.hashes)
+ return r
+
+ def __repr__(self):
+ return "msg_cfheaders(filter_type={:#x}, stop_hash={:x})".format(
+ self.filter_type, self.stop_hash)
+
class msg_getcfcheckpt:
__slots__ = ("filter_type", "stop_hash")
msgtype = b"getcfcheckpt"
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index bbd7350bf1..d1e982ac3e 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -31,6 +31,7 @@ from test_framework.messages import (
msg_block,
MSG_BLOCK,
msg_blocktxn,
+ msg_cfheaders,
msg_cfcheckpt,
msg_cmpctblock,
msg_feefilter,
@@ -68,6 +69,7 @@ MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
+ b"cfheaders": msg_cfheaders,
b"cfcheckpt": msg_cfcheckpt,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
@@ -330,6 +332,7 @@ class P2PInterface(P2PConnection):
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
+ def on_cfheaders(self, message): pass
def on_cfcheckpt(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
@@ -371,7 +374,7 @@ class P2PInterface(P2PConnection):
# Connection helper methods
- def wait_until(self, test_function, timeout):
+ def wait_until(self, test_function, timeout=60):
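+ # Default to a 60s timeout; the shared wait_until helper scales it by self.timeout_factor (--timeout-factor).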
wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor)
def wait_for_disconnect(self, timeout=60):
@@ -645,6 +648,7 @@ class P2PTxInvStore(P2PInterface):
self.tx_invs_received = defaultdict(int)
def on_inv(self, message):
+ super().on_inv(message) # Send getdata in response.
# Store how many times invs have been received for each tx.
for i in message.inv:
if i.type == MSG_TX:
@@ -654,3 +658,12 @@ class P2PTxInvStore(P2PInterface):
def get_invs(self):
with mininode_lock:
return list(self.tx_invs_received.keys())
+
+ def wait_for_broadcast(self, txns, timeout=60):
+ """Waits for the txns (list of txids) to complete initial broadcast.
+ The mempool should mark unbroadcast=False for these transactions.
+ """
+ # Wait until invs have been received (and getdatas sent) for each txid.
+ self.wait_until(lambda: set(self.get_invs()) == set([int(tx, 16) for tx in txns]), timeout)
+ # Flush messages and wait for the getdatas to be processed
+ self.sync_with_ping()
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 6126efd842..716fa1d845 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -140,6 +140,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
sys.exit(exit_code)
def parse_args(self):
+ previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
@@ -154,6 +155,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
+ parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
+ default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
+ help="Force test of previous releases (default: %(default)s)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
@@ -174,6 +178,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
self.add_options(parser)
self.options = parser.parse_args()
+ self.options.previous_releases_path = previous_releases_path
def setup(self):
"""Call this method to start up the test framework object with options set."""
@@ -190,18 +195,16 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
fname_bitcoind = os.path.join(
config["environment"]["BUILDDIR"],
"src",
- "bitcoind" + config["environment"]["EXEEXT"]
+ "bitcoind" + config["environment"]["EXEEXT"],
)
fname_bitcoincli = os.path.join(
config["environment"]["BUILDDIR"],
"src",
- "bitcoin-cli" + config["environment"]["EXEEXT"]
+ "bitcoin-cli" + config["environment"]["EXEEXT"],
)
self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind)
self.options.bitcoincli = os.getenv("BITCOINCLI", default=fname_bitcoincli)
- self.options.previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
-
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH']
@@ -684,17 +687,11 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
def has_previous_releases(self):
"""Checks whether previous releases are present and enabled."""
- if os.getenv("TEST_PREVIOUS_RELEASES") == "false":
- # disabled
- return False
-
if not os.path.isdir(self.options.previous_releases_path):
- if os.getenv("TEST_PREVIOUS_RELEASES") == "true":
- raise AssertionError("TEST_PREVIOUS_RELEASES=true but releases missing: {}".format(
+ if self.options.prev_releases:
+ raise AssertionError("Force test of previous releases but releases missing: {}".format(
self.options.previous_releases_path))
- # missing
- return False
- return True
+ return self.options.prev_releases
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index d52aff6f7e..ebc0501e11 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -219,7 +219,12 @@ class TestNode():
raise FailedToStartError(self._node_msg(
'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
- rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
+ rpc = get_rpc_proxy(
+ rpc_url(self.datadir, self.index, self.chain, self.rpchost),
+ self.index,
+ timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT
+ coveragedir=self.coverage_dir,
+ )
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
if self.version_is_at_least(190000):
@@ -260,7 +265,11 @@ class TestNode():
# succeeds. Try again to properly raise the FailedToStartError
pass
except OSError as e:
- if e.errno != errno.ECONNREFUSED: # Port not yet open?
+ if e.errno == errno.ETIMEDOUT:
+ pass # Treat identical to ConnectionResetError
+ elif e.errno == errno.ECONNREFUSED:
+ pass # Port not yet open?
+ else:
raise # unknown OS error
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
if "No RPC credentials" not in str(e):
diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py
index 09f89eb59d..5b083a5398 100755
--- a/test/functional/wallet_hd.py
+++ b/test/functional/wallet_hd.py
@@ -170,5 +170,101 @@ class WalletHDTest(BitcoinTestFramework):
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed)
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress()))
+ self.log.info('Test sethdseed restoring with keys outside of the initial keypool')
+ self.nodes[0].generate(10)
+ # Restart node 1 with keypool of 3 and a different wallet
+ self.nodes[1].createwallet(wallet_name='origin', blank=True)
+ self.stop_node(1)
+ self.start_node(1, extra_args=['-keypool=3', '-wallet=origin'])
+ connect_nodes(self.nodes[0], 1)
+
+ # sethdseed restoring and seeing txs to addresses out of the keypool
+ origin_rpc = self.nodes[1].get_wallet_rpc('origin')
+ seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())
+ origin_rpc.sethdseed(True, seed)
+
+ self.nodes[1].createwallet(wallet_name='restore', blank=True)
+ restore_rpc = self.nodes[1].get_wallet_rpc('restore')
+ restore_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc
+ restore_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive
+
+ self.nodes[1].createwallet(wallet_name='restore2', blank=True)
+ restore2_rpc = self.nodes[1].get_wallet_rpc('restore2')
+ restore2_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc
+ restore2_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive
+
+ # Check persistence of inactive seed by reloading restore. restore2 is still loaded to test the case where the wallet is not reloaded
+ restore_rpc.unloadwallet()
+ self.nodes[1].loadwallet('restore')
+ restore_rpc = self.nodes[1].get_wallet_rpc('restore')
+
+ # Empty origin keypool and get an address that is beyond the initial keypool
+ origin_rpc.getnewaddress()
+ origin_rpc.getnewaddress()
+ last_addr = origin_rpc.getnewaddress() # Last address of initial keypool
+ addr = origin_rpc.getnewaddress() # First address beyond initial keypool
+
+ # Check that the restored seed has last_addr but does not have addr
+ info = restore_rpc.getaddressinfo(last_addr)
+ assert_equal(info['ismine'], True)
+ info = restore_rpc.getaddressinfo(addr)
+ assert_equal(info['ismine'], False)
+ info = restore2_rpc.getaddressinfo(last_addr)
+ assert_equal(info['ismine'], True)
+ info = restore2_rpc.getaddressinfo(addr)
+ assert_equal(info['ismine'], False)
+ # Check that the origin seed has addr
+ info = origin_rpc.getaddressinfo(addr)
+ assert_equal(info['ismine'], True)
+
+ # Send a transaction to addr, which is out of the initial keypool.
+ # The wallet that has set a new seed (restore_rpc) should not detect this transaction.
+ txid = self.nodes[0].sendtoaddress(addr, 1)
+ origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex'])
+ self.nodes[0].generate(1)
+ origin_rpc.gettransaction(txid)
+ assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, txid)
+ out_of_kp_txid = txid
+
+ # Send a transaction to last_addr, which is in the initial keypool.
+ # The wallet that has set a new seed (restore_rpc) should detect this transaction and generate 3 new keys from the initial seed.
+ # The previous transaction (out_of_kp_txid) should still not be detected as a rescan is required.
+ txid = self.nodes[0].sendtoaddress(last_addr, 1)
+ origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex'])
+ self.nodes[0].generate(1)
+ origin_rpc.gettransaction(txid)
+ restore_rpc.gettransaction(txid)
+ assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, out_of_kp_txid)
+ restore2_rpc.gettransaction(txid)
+ assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore2_rpc.gettransaction, out_of_kp_txid)
+
+ # After rescanning, restore_rpc should now see out_of_kp_txid and generate an additional key.
+ # addr should now be part of restore_rpc and be ismine
+ restore_rpc.rescanblockchain()
+ restore_rpc.gettransaction(out_of_kp_txid)
+ info = restore_rpc.getaddressinfo(addr)
+ assert_equal(info['ismine'], True)
+ restore2_rpc.rescanblockchain()
+ restore2_rpc.gettransaction(out_of_kp_txid)
+ info = restore2_rpc.getaddressinfo(addr)
+ assert_equal(info['ismine'], True)
+
+ # Check again that 3 keys were derived.
+ # Empty keypool and get an address that is beyond the initial keypool
+ origin_rpc.getnewaddress()
+ origin_rpc.getnewaddress()
+ last_addr = origin_rpc.getnewaddress()
+ addr = origin_rpc.getnewaddress()
+
+ # Check that the restored seed has last_addr but does not have addr
+ info = restore_rpc.getaddressinfo(last_addr)
+ assert_equal(info['ismine'], True)
+ info = restore_rpc.getaddressinfo(addr)
+ assert_equal(info['ismine'], False)
+ info = restore2_rpc.getaddressinfo(last_addr)
+ assert_equal(info['ismine'], True)
+ info = restore2_rpc.getaddressinfo(addr)
+ assert_equal(info['ismine'], False)
+
if __name__ == '__main__':
WalletHDTest().main()